repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
kenshay/ImageScripter | Script_Runner/PYTHON/Lib/asyncio/format_helpers.py | 35 | 2404 | import functools
import inspect
import reprlib
import sys
import traceback
from . import constants
def _get_function_source(func):
func = inspect.unwrap(func)
if inspect.isfunction(func):
code = func.__code__
return (code.co_filename, code.co_firstlineno)
if isinstance(func, functools.partial):
return _get_function_source(func.func)
if isinstance(func, functools.partialmethod):
return _get_function_source(func.func)
return None
def _format_callback_source(func, args):
    # Render "callback(args)" plus, when the source location is known,
    # the " at <file>:<line>" suffix used in asyncio debug output.
    func_repr = _format_callback(func, args, None)
    source = _get_function_source(func)
    if source:
        func_repr += f' at {source[0]}:{source[1]}'
    return func_repr
def _format_args_and_kwargs(args, kwargs):
"""Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
# use reprlib to limit the length of the output
items = []
if args:
items.extend(reprlib.repr(arg) for arg in args)
if kwargs:
items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
return '({})'.format(', '.join(items))
def _format_callback(func, args, kwargs, suffix=''):
    # functools.partial objects are unwrapped recursively: the partial's
    # own (args, kwargs) are rendered and accumulated into `suffix`,
    # which is appended after the innermost callable's representation.
    if isinstance(func, functools.partial):
        suffix = _format_args_and_kwargs(args, kwargs) + suffix
        return _format_callback(func.func, func.args, func.keywords, suffix)

    # Prefer the qualified name, fall back to __name__, then to repr().
    if hasattr(func, '__qualname__') and func.__qualname__:
        func_repr = func.__qualname__
    elif hasattr(func, '__name__') and func.__name__:
        func_repr = func.__name__
    else:
        func_repr = repr(func)

    func_repr += _format_args_and_kwargs(args, kwargs)
    if suffix:
        func_repr += suffix
    return func_repr
def extract_stack(f=None, limit=None):
    """Replacement for traceback.extract_stack() that only does the
    necessary work for asyncio debug mode.
    """
    frame = sys._getframe().f_back if f is None else f
    if limit is None:
        # Limit the amount of work to a reasonable amount, as extract_stack()
        # can be called for each coroutine and future in debug mode.
        limit = constants.DEBUG_STACK_DEPTH
    summary = traceback.StackSummary.extract(
        traceback.walk_stack(frame),
        limit=limit,
        lookup_lines=False,
    )
    # walk_stack() yields innermost-first; present oldest frame first.
    summary.reverse()
    return summary
| gpl-3.0 |
evlyapin/ansible-modules-core | files/stat.py | 94 | 13076 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: stat
version_added: "1.3"
short_description: retrieve file or file system status
description:
- Retrieves facts for a file similar to the linux/unix 'stat' command.
options:
path:
description:
- The full path of the file/object to get the facts of
required: true
default: null
aliases: []
follow:
description:
- Whether to follow symlinks
required: false
default: no
aliases: []
get_md5:
description:
- Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems)
required: false
default: yes
aliases: []
get_checksum:
description:
- Whether to return a checksum of the file (currently sha1)
required: false
default: yes
aliases: []
version_added: "1.8"
author: "Bruce Pennypacker (@bpennypacker)"
'''
EXAMPLES = '''
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
- stat: path=/etc/foo.conf
register: st
- fail: msg="Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
# not exist, and we test sym.stat.islnk, it will fail with an error. So
# therefore, we must test whether it is defined.
# Run this to understand the structure, the skipped ones do not pass the
# check performed by 'when'
- stat: path=/path/to/something
register: sym
- debug: msg="islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
- debug: msg="islnk is defined (path must exist)"
when: sym.stat.islnk is defined
- debug: msg="Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
- debug: msg="Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat: path=/path/to/something
register: p
- debug: msg="Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no
'''
RETURN = '''
stat:
description: dictionary containing all the stat data
returned: success
type: dictionary
contains:
exists:
description: if the destination path actually exists or not
returned: success
type: boolean
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: boolean
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal
returned: success, path exists and user can read stats
type: octal
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: boolean
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: boolean
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: boolean
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: boolean
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: boolean
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: boolean
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: boolean
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: boolean
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
isuid:
description: Tells you if the invoking user's id matches the owner's id
returned: success, path exists and user can read stats
type: boolean
sample: False
isgid:
description: Tells you if the invoking user's group id matches the owner's group id
returned: success, path exists and user can read stats
type: boolean
sample: False
lnk_source:
description: Original path
returned: success, path exists and user can read stats and the path is a symbolic link
type: boolean
sample: True
md5:
description: md5 hash of the path
returned: success, path exists and user can read stats and path supports hashing and md5 is supported
type: string
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum:
description: hash of the path
returned: success, path exists and user can read stats and path supports hashing
type: string
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
pw_name:
description: User name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: www-data
'''
import errno
import grp
import os
import pwd
import sys
from stat import *
def main():
    """Gather stat()/lstat() facts for a path and exit with a JSON result.

    Reports exists=False (not a failure) for a missing path; otherwise
    returns a dictionary of file-type flags, permission bits, timestamps,
    and optional md5/sha1 checksums for regular readable files.
    """
    module = AnsibleModule(
        argument_spec = dict(
            path = dict(required=True),
            follow = dict(default='no', type='bool'),
            get_md5 = dict(default='yes', type='bool'),
            get_checksum = dict(default='yes', type='bool')
        ),
        supports_check_mode = True
    )

    path = module.params.get('path')
    path = os.path.expanduser(path)
    follow = module.params.get('follow')
    get_md5 = module.params.get('get_md5')
    get_checksum = module.params.get('get_checksum')

    try:
        # follow=True dereferences symlinks; otherwise stat the link itself.
        if follow:
            st = os.stat(path)
        else:
            st = os.lstat(path)
    except OSError as e:  # 'as' form works on Python 2.6+ and 3.x
        if e.errno == errno.ENOENT:
            # Missing path is an expected outcome, not a module failure.
            d = { 'exists' : False }
            module.exit_json(changed=False, stat=d)

        module.fail_json(msg = e.strerror)

    mode = st.st_mode

    # back to ansible
    # NOTE: 'from stat import *' binds S_IWUSR etc. directly in this
    # namespace; the previous 'stat.S_IWUSR' spelling raised NameError
    # because the stat module itself was never imported by name.
    d = {
        'exists' : True,
        'path' : path,
        'mode' : "%04o" % S_IMODE(mode),
        'isdir' : S_ISDIR(mode),
        'ischr' : S_ISCHR(mode),
        'isblk' : S_ISBLK(mode),
        'isreg' : S_ISREG(mode),
        'isfifo' : S_ISFIFO(mode),
        'islnk' : S_ISLNK(mode),
        'issock' : S_ISSOCK(mode),
        'uid' : st.st_uid,
        'gid' : st.st_gid,
        'size' : st.st_size,
        'inode' : st.st_ino,
        'dev' : st.st_dev,
        'nlink' : st.st_nlink,
        'atime' : st.st_atime,
        'mtime' : st.st_mtime,
        'ctime' : st.st_ctime,
        'wusr' : bool(mode & S_IWUSR),
        'rusr' : bool(mode & S_IRUSR),
        'xusr' : bool(mode & S_IXUSR),
        'wgrp' : bool(mode & S_IWGRP),
        'rgrp' : bool(mode & S_IRGRP),
        'xgrp' : bool(mode & S_IXGRP),
        'woth' : bool(mode & S_IWOTH),
        'roth' : bool(mode & S_IROTH),
        'xoth' : bool(mode & S_IXOTH),
        'isuid' : bool(mode & S_ISUID),
        'isgid' : bool(mode & S_ISGID),
        }

    if S_ISLNK(mode):
        d['lnk_source'] = os.path.realpath(path)

    if S_ISREG(mode) and get_md5 and os.access(path, os.R_OK):
        # Will fail on FIPS-140 compliant systems
        try:
            d['md5'] = module.md5(path)
        except ValueError:
            d['md5'] = None

    if S_ISREG(mode) and get_checksum and os.access(path, os.R_OK):
        d['checksum'] = module.sha1(path)

    try:
        pw = pwd.getpwuid(st.st_uid)
        d['pw_name'] = pw.pw_name

        grp_info = grp.getgrgid(st.st_gid)
        d['gr_name'] = grp_info.gr_name
    except KeyError:
        # No passwd/group entry for this uid/gid: omit the name fields.
        pass

    module.exit_json(changed=False, stat=d)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
funtoo/portage-funtoo | pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py | 17 | 2349 | # Copyright 2010-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import re
import portage
from portage import os
from portage.const import PORTAGE_PYM_PATH
from portage.tests import TestCase
from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.PipeReader import PipeReader
from _emerge.SpawnProcess import SpawnProcess
class LazyImportPortageBaselineTestCase(TestCase):
    # Matches module names belonging to the portage codebase itself.
    _module_re = re.compile(r'^(portage|repoman|_emerge)\.')

    # Modules that a bare "import portage" is allowed to pull in; anything
    # else under the portage namespace indicates a broken lazy import.
    _baseline_imports = frozenset([
        'portage.const', 'portage.localization',
        'portage.proxy', 'portage.proxy.lazyimport',
        'portage.proxy.objectproxy',
        'portage._selinux',
    ])

    # Child-process command: import portage in a fresh interpreter and
    # print the names of all modules that ended up loaded.
    _baseline_import_cmd = [portage._python_interpreter, '-c', '''
import os
import sys
sys.path.insert(0, os.environ["PORTAGE_PYM_PATH"])
import portage
sys.stdout.write(" ".join(k for k in sys.modules
	if sys.modules[k] is not None))
''']

    def testLazyImportPortageBaseline(self):
        """
        Check what modules are imported by a baseline module import.
        """
        env = os.environ.copy()
        pythonpath = env.get('PYTHONPATH')
        # Normalize PYTHONPATH, then prepend the portage source dir so the
        # child imports the code under test rather than any installed copy.
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is None:
            pythonpath = ''
        else:
            pythonpath = ':' + pythonpath
        pythonpath = PORTAGE_PYM_PATH + pythonpath
        env['PYTHONPATH'] = pythonpath

        # If python is patched to insert the path of the
        # currently installed portage module into sys.path,
        # then the above PYTHONPATH override doesn't help.
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH

        scheduler = global_event_loop()
        master_fd, slave_fd = os.pipe()
        master_file = os.fdopen(master_fd, 'rb', 0)
        slave_file = os.fdopen(slave_fd, 'wb')
        # Child writes its module list to fd 1, which maps to the pipe's
        # write end; the parent keeps only the read end open.
        producer = SpawnProcess(
            args=self._baseline_import_cmd,
            env=env, fd_pipes={1:slave_fd},
            scheduler=scheduler)
        producer.start()
        slave_file.close()

        # Drain the pipe via the event loop until the child closes it.
        consumer = PipeReader(
            input_files={"producer" : master_file},
            scheduler=scheduler)
        consumer.start()
        consumer.wait()
        self.assertEqual(producer.wait(), os.EX_OK)
        self.assertEqual(consumer.wait(), os.EX_OK)

        output = consumer.getvalue().decode('ascii', 'replace').split()

        # Any portage-namespace module outside the whitelist is a failure.
        unexpected_modules = " ".join(sorted(x for x in output \
            if self._module_re.match(x) is not None and \
            x not in self._baseline_imports))

        self.assertEqual("", unexpected_modules)
| gpl-2.0 |
Fullbiter/EECS-293 | pa3/serial_number.py | 2 | 1766 | # Kevin Nash (kjn33)
# EECS 293
# Assignment 3
from fractions import gcd
from product_error import ProductError
class SerialNumber:
    """Store the serial number of an Orange product"""

    def __init__(self, serial_number):
        """Remember the wrapped numeric serial value."""
        self.serial_number = serial_number

    def __cmp__(self, other):
        """Order SerialNumber objects ascending by numeric value."""
        return cmp(self.serial_number, other.serial_number)

    def __str__(self):
        """Render the numeric value as a string."""
        return str(self.serial_number)

    def gcd(self, other):
        """Greatest common divisor of this and an other SerialNumber."""
        return gcd(self.serial_number, other.serial_number)

    def mod(self, other):
        """Remainder of this SerialNumber divided by an other one."""
        return self.serial_number % other.serial_number

    def test_bit(self, bit):
        """Report whether the bit at the given index is set.

        Example: 12 is 0b1100, so indices 2 and 3 are set while
        indices 0 and 1 are not.
        """
        mask = 1 << bit
        return bool(self.serial_number & mask)

    def is_zero(self):
        """Report whether this SerialNumber is exactly zero."""
        return self.serial_number == 0

    def is_even(self):
        """Report whether the value is even (zero counts as neither)."""
        return self.serial_number % 2 == 0 and not self.is_zero()

    def is_odd(self):
        """Report whether the value is odd (zero counts as neither)."""
        return not (self.is_zero() or self.is_even())
| gpl-3.0 |
grengojbo/st2 | st2common/st2common/models/api/actionrunner.py | 13 | 1356 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common import log as logging
from st2common.models.api.base import BaseAPI
# Public names exported by this module.
__all__ = ['ActionRunnerAPI']

# Module-level logger.
LOG = logging.getLogger(__name__)
class ActionRunnerAPI(BaseAPI):
    """The system entity that represents an ActionRunner environment in the system.
    This entity is used internally to manage and scale-out the StackStorm services.
    Attribute:
        ...
    """

    # Schema describing this API model.
    # NOTE(review): standard JSON-schema nests field definitions under
    # 'properties', not 'parameters' -- confirm BaseAPI expects this key.
    schema = {
        'type': 'object',
        'parameters': {
            'id': {
                'type': 'string'
            }
        },
        'additionalProperties': False
    }
| apache-2.0 |
Salat-Cx65/python-for-android | python3-alpha/extra_modules/gdata/analytics/__init__.py | 261 | 6995 | #!/usr/bin/python
#
# Original Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that this module will not function without specifically adding
# 'analytics': [ #Google Analytics
# 'https://www.google.com/analytics/feeds/'],
# to CLIENT_LOGIN_SCOPES in the gdata/service.py file
"""Contains extensions to Atom objects used with Google Analytics."""
__author__ = 'api.suryasev (Sal Uryasev)'
import atom
import gdata
GAN_NAMESPACE = 'http://schemas.google.com/analytics/2009'
class TableId(gdata.GDataEntry):
    """tableId element."""
    # XML tag and namespace in the Google Analytics (GAN) schema.
    _tag = 'tableId'
    _namespace = GAN_NAMESPACE
class Property(gdata.GDataEntry):
    # A generic name/value pair attached to an Analytics account entry.
    _tag = 'property'
    _namespace = GAN_NAMESPACE

    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()

    # Map XML attributes onto same-named Python attributes.
    _attributes['name'] = 'name'
    _attributes['value'] = 'value'

    def __init__(self, name=None, value=None, *args, **kwargs):
        self.name = name
        self.value = value
        super(Property, self).__init__(*args, **kwargs)

    def __str__(self):
        # A property renders as its bare value.
        return self.value

    def __repr__(self):
        return self.value
class AccountListEntry(gdata.GDataEntry):
    """The Google Documents version of an Atom Entry"""
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    # Child elements parse into lists of TableId and Property objects.
    _children['{%s}tableId' % GAN_NAMESPACE] = ('tableId',
                                                [TableId])
    _children['{%s}property' % GAN_NAMESPACE] = ('property',
                                                 [Property])

    def __init__(self, tableId=None, property=None,
                 *args, **kwargs):
        self.tableId = tableId
        self.property = property
        super(AccountListEntry, self).__init__(*args, **kwargs)
def AccountListEntryFromString(xml_string):
    """Converts an XML string into an AccountListEntry object.

    Args:
        xml_string: string The XML describing a Document List feed entry.

    Returns:
        A AccountListEntry object corresponding to the given XML.
    """
    return atom.CreateClassFromXMLString(AccountListEntry, xml_string)
class AccountListFeed(gdata.GDataFeed):
    """A feed containing a list of Google Documents Items"""
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    # Feed entries parse into AccountListEntry objects.
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                    [AccountListEntry])
def AccountListFeedFromString(xml_string):
    """Converts an XML string into an AccountListFeed object.

    Args:
        xml_string: string The XML describing an AccountList feed.

    Returns:
        An AccountListFeed object corresponding to the given XML.
        All properties are also linked to with a direct reference
        from each entry object for convenience. (e.g. entry.AccountName)
    """
    feed = atom.CreateClassFromXMLString(AccountListFeed, xml_string)
    for entry in feed.entry:
        # Expose each 'ga:xxx' property as entry.xxx for convenience.
        for pro in entry.property:
            entry.__dict__[pro.name.replace('ga:','')] = pro
        # Mirror each tableId's text content onto a .value attribute.
        for td in entry.tableId:
            td.__dict__['value'] = td.text
    return feed
class Dimension(gdata.GDataEntry):
    # A single ga: dimension (name/value pair) on a data-feed entry.
    _tag = 'dimension'
    _namespace = GAN_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _attributes['name'] = 'name'
    _attributes['value'] = 'value'
    _attributes['type'] = 'type'
    # XML camelCase attribute maps to snake_case Python attribute.
    _attributes['confidenceInterval'] = 'confidence_interval'

    def __init__(self, name=None, value=None, type=None,
                 confidence_interval = None, *args, **kwargs):
        self.name = name
        self.value = value
        self.type = type
        self.confidence_interval = confidence_interval
        super(Dimension, self).__init__(*args, **kwargs)

    def __str__(self):
        # A dimension renders as its bare value.
        return self.value

    def __repr__(self):
        return self.value
class Metric(gdata.GDataEntry):
    # A single ga: metric; structurally identical to Dimension except
    # for the XML tag name.
    _tag = 'metric'
    _namespace = GAN_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _attributes['name'] = 'name'
    _attributes['value'] = 'value'
    _attributes['type'] = 'type'
    # XML camelCase attribute maps to snake_case Python attribute.
    _attributes['confidenceInterval'] = 'confidence_interval'

    def __init__(self, name=None, value=None, type=None,
                 confidence_interval = None, *args, **kwargs):
        self.name = name
        self.value = value
        self.type = type
        self.confidence_interval = confidence_interval
        super(Metric, self).__init__(*args, **kwargs)

    def __str__(self):
        # A metric renders as its bare value.
        return self.value

    def __repr__(self):
        return self.value
class AnalyticsDataEntry(gdata.GDataEntry):
    """The Google Analytics version of an Atom Entry"""
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    # Child elements parse into lists of Dimension and Metric objects.
    _children['{%s}dimension' % GAN_NAMESPACE] = ('dimension',
                                                  [Dimension])
    _children['{%s}metric' % GAN_NAMESPACE] = ('metric',
                                               [Metric])

    def __init__(self, dimension=None, metric=None, *args, **kwargs):
        self.dimension = dimension
        self.metric = metric
        super(AnalyticsDataEntry, self).__init__(*args, **kwargs)
class AnalyticsDataFeed(gdata.GDataFeed):
    """A feed containing a list of Google Analytics Data Feed"""
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    # Feed entries parse into AnalyticsDataEntry objects.
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                    [AnalyticsDataEntry])
    # NOTE(review): stray class-body string below (not a docstring, since
    # it does not open the class body); it has no runtime effect.
    """
    Data Feed
    """
def AnalyticsDataFeedFromString(xml_string):
    """Converts an XML string into an AccountListFeed object.

    Args:
        xml_string: string The XML describing an AccountList feed.

    Returns:
        An AccountListFeed object corresponding to the given XML.
        Each metric and dimension is also referenced directly from
        the entry for easier access. (e.g. entry.keyword.value)
    """
    feed = atom.CreateClassFromXMLString(AnalyticsDataFeed, xml_string)
    if feed.entry:
        for entry in feed.entry:
            # Expose metrics/dimensions as entry.<name>, ga: prefix stripped.
            for met in entry.metric:
                entry.__dict__[met.name.replace('ga:','')] = met
            if entry.dimension is not None:
                for dim in entry.dimension:
                    entry.__dict__[dim.name.replace('ga:','')] = dim
    return feed
| apache-2.0 |
alexteodor/odoo | addons/stock_invoice_directly/stock_invoice_directly.py | 337 | 2132 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import osv
from openerp.tools.translate import _
class stock_picking(osv.osv):
    # Extend stock.picking so that completing a transfer can chain
    # directly into invoice creation.
    _inherit = 'stock.picking'

    @api.cr_uid_ids_context
    def do_transfer(self, cr, uid, picking_ids, context=None):
        """Launch Create invoice wizard if invoice state is To be Invoiced,
        after processing the picking.
        """
        if context is None:
            context = {}
        res = super(stock_picking, self).do_transfer(cr, uid, picking_ids, context=context)
        # Collect pickings still flagged "to be invoiced" after transfer.
        pick_ids = [p.id for p in self.browse(cr, uid, picking_ids, context) if p.invoice_state == '2binvoiced']
        if pick_ids:
            # Open the invoice-creation wizard on those pickings instead
            # of returning the plain transfer result.
            context = dict(context, active_model='stock.picking', active_ids=pick_ids)
            return {
                'name': _('Create Invoice'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'stock.invoice.onshipping',
                'type': 'ir.actions.act_window',
                'target': 'new',
                'context': context
            }
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
habnabit/pip | tests/functional/test_install_vcs_svn.py | 40 | 1055 | import pytest
from mock import patch
from pip.vcs.subversion import Subversion
@patch('pip.vcs.call_subprocess')
@pytest.mark.network
def test_obtain_should_recognize_auth_info_url(call_subprocess_mock, script):
    # obtain() must split user:password out of the URL and pass them
    # to 'svn checkout' as explicit --username/--password options.
    svn = Subversion(url='svn+http://username:password@svn.example.com/')
    svn.obtain(script.scratch_path / 'test')
    assert call_subprocess_mock.call_args[0][0] == [
        svn.name, 'checkout', '-q', '--username', 'username', '--password',
        'password', 'http://username:password@svn.example.com/',
        script.scratch_path / 'test',
    ]
@patch('pip.vcs.call_subprocess')
@pytest.mark.network
def test_export_should_recognize_auth_info_url(call_subprocess_mock, script):
    # Same credential handling as obtain(), but for 'svn export'.
    svn = Subversion(url='svn+http://username:password@svn.example.com/')
    svn.export(script.scratch_path / 'test')
    assert call_subprocess_mock.call_args[0][0] == [
        svn.name, 'export', '--username', 'username', '--password',
        'password', 'http://username:password@svn.example.com/',
        script.scratch_path / 'test',
    ]
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.5/Lib/distutils/tests/support.py | 147 | 1277 | """Support code for distutils test cases."""
import shutil
import tempfile
from distutils import log
class LoggingSilencer(object):
    # Mix-in that raises the distutils log threshold to FATAL for the
    # duration of each test and restores the previous level afterwards.

    def setUp(self):
        super(LoggingSilencer, self).setUp()
        # set_threshold() returns the old threshold so we can restore it.
        self.threshold = log.set_threshold(log.FATAL)

    def tearDown(self):
        log.set_threshold(self.threshold)
        super(LoggingSilencer, self).tearDown()
class TempdirManager(object):
    """Mix-in class that handles temporary directories for test cases.

    This is intended to be used with unittest.TestCase.
    """

    def setUp(self):
        super(TempdirManager, self).setUp()
        self.tempdirs = []

    def tearDown(self):
        super(TempdirManager, self).tearDown()
        # Remove every directory created during the test, newest first.
        while self.tempdirs:
            shutil.rmtree(self.tempdirs.pop())

    def mkdtemp(self):
        """Create a temporary directory that will be cleaned up.

        Returns the path of the directory.
        """
        path = tempfile.mkdtemp()
        self.tempdirs.append(path)
        return path
class DummyCommand:
    """Class to store options for retrieval via set_undefined_options()."""

    def __init__(self, **kwargs):
        # Every keyword argument becomes an attribute of the instance.
        for name, value in kwargs.items():
            setattr(self, name, value)

    def ensure_finalized(self):
        # Nothing to finalize; present only to mimic the Command API.
        pass
| mit |
denis-pitul/django | django/middleware/security.py | 510 | 1753 | import re
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
class SecurityMiddleware(object):
    # Applies the SECURE_* settings: optional HTTPS redirect on request,
    # and HSTS / nosniff / XSS-filter headers on response.

    def __init__(self):
        # Snapshot the settings once, at middleware construction time.
        self.sts_seconds = settings.SECURE_HSTS_SECONDS
        self.sts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS
        self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF
        self.xss_filter = settings.SECURE_BROWSER_XSS_FILTER
        self.redirect = settings.SECURE_SSL_REDIRECT
        self.redirect_host = settings.SECURE_SSL_HOST
        self.redirect_exempt = [re.compile(r) for r in settings.SECURE_REDIRECT_EXEMPT]

    def process_request(self, request):
        # Redirect plain-HTTP requests to HTTPS unless the path matches
        # one of the exemption patterns.
        path = request.path.lstrip("/")
        if (self.redirect and not request.is_secure() and
                not any(pattern.search(path)
                        for pattern in self.redirect_exempt)):
            host = self.redirect_host or request.get_host()
            return HttpResponsePermanentRedirect(
                "https://%s%s" % (host, request.get_full_path())
            )

    def process_response(self, request, response):
        # Add HSTS only on secure requests; never overwrite a header the
        # view has already set explicitly.
        if (self.sts_seconds and request.is_secure() and
                'strict-transport-security' not in response):
            sts_header = "max-age=%s" % self.sts_seconds

            if self.sts_include_subdomains:
                sts_header = sts_header + "; includeSubDomains"

            response["strict-transport-security"] = sts_header

        if self.content_type_nosniff and 'x-content-type-options' not in response:
            response["x-content-type-options"] = "nosniff"

        if self.xss_filter and 'x-xss-protection' not in response:
            response["x-xss-protection"] = "1; mode=block"
        return response
| bsd-3-clause |
python-fedex-devs/python-fedex | tests/test_rate_service.py | 2 | 2051 | """
Test module for the Fedex RateService WSDL.
"""
import unittest
import logging
import sys
sys.path.insert(0, '..')
from fedex.services.rate_service import FedexRateServiceRequest
# Common global config object for testing.
from tests.common import get_fedex_config

CONFIG_OBJ = get_fedex_config()

# Quiet the noisy suds SOAP client; keep fedex output at INFO.
logging.getLogger('suds').setLevel(logging.ERROR)
logging.getLogger('fedex').setLevel(logging.INFO)
@unittest.skipIf(not CONFIG_OBJ.account_number, "No credentials provided.")
class RateServiceTests(unittest.TestCase):
    """
    These tests verify that the rate service WSDL is in good shape.
    """

    def test_rate(self):
        # Build a minimal SC -> NC FedEx Ground rate request for a
        # single one-pound box and assert the service accepts it.
        rate = FedexRateServiceRequest(CONFIG_OBJ)

        rate.RequestedShipment.DropoffType = 'REGULAR_PICKUP'
        rate.RequestedShipment.ServiceType = 'FEDEX_GROUND'
        rate.RequestedShipment.PackagingType = 'YOUR_PACKAGING'
        rate.RequestedShipment.Shipper.Address.StateOrProvinceCode = 'SC'
        rate.RequestedShipment.Shipper.Address.PostalCode = '29631'
        rate.RequestedShipment.Shipper.Address.CountryCode = 'US'
        rate.RequestedShipment.Recipient.Address.StateOrProvinceCode = 'NC'
        rate.RequestedShipment.Recipient.Address.PostalCode = '27577'
        rate.RequestedShipment.Recipient.Address.CountryCode = 'US'
        # No estimated-duties calculation needed for a domestic shipment.
        rate.RequestedShipment.EdtRequestType = 'NONE'
        rate.RequestedShipment.ShippingChargesPayment.PaymentType = 'SENDER'

        package1_weight = rate.create_wsdl_object_of_type('Weight')
        package1_weight.Value = 1.0
        package1_weight.Units = "LB"
        package1 = rate.create_wsdl_object_of_type('RequestedPackageLineItem')
        package1.Weight = package1_weight
        package1.PhysicalPackaging = 'BOX'
        package1.GroupPackageCount = 1
        rate.add_package(package1)

        rate.send_request()
        # On failure, surface the service's first notification message.
        assert rate.response.HighestSeverity == 'SUCCESS', rate.response.Notifications[0].Message
# Allow running this test module directly.
if __name__ == "__main__":
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    unittest.main()
| bsd-3-clause |
chainer/chainer | chainer/functions/connection/deformable_convolution_2d_sampler.py | 6 | 5710 | import numpy
from chainer import backend
from chainer.functions.array import broadcast
from chainer.functions.array import concat
from chainer.functions.array import pad as pad_module
from chainer.functions.array import spatial_transformer_sampler
from chainer.functions.math import matmul
def deformable_convolution_2d_sampler(x, offset, W, b=None, stride=1, pad=0):
    """Two-dimensional deformable convolution function using computed offset.

    This is an implementation of two-dimensional deformable convolution from
    `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

    It takes four variables: the input image ``x``, the offset image
    ``offset``, the filter weight ``W``, and the bias vector ``b``.

    Notation: here is the notation for the dimensionalities.

    - :math:`n` is the batch size.
    - :math:`c_I` and :math:`c_O` are the number of the input and output,
      respectively.
    - :math:`h` and :math:`w` are the height and width of the input image,
      respectively.
    - :math:`k_H` and :math:`k_W` are the height and width of the filters,
      respectively.
    - :math:`s_Y` and :math:`s_X` are the strides of the filter.
    - :math:`p_H` and :math:`p_W` are the spatial padding sizes.

    The output size :math:`(h_O, w_O)` is determined by the following
    equations:

    .. math::

       h_O &= (h + 2p_H - k_H) / s_Y + 1,\\\\
       w_O &= (w + 2p_W - k_W) / s_X + 1.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable of shape :math:`(n, c_I, h, w)`.
        offset (:class:`~chainer.Variable` or :ref:`ndarray`):
            Offset variable of shape
            :math:`(n, 2 \\cdot k_H \\cdot k_W, h_O, w_O)`. The first
            :math:`k_H \\cdot k_W` index of the second axis corresponds to
            the offsets in the horizontal direction. The last
            :math:`k_H \\cdot k_W` index of the second axis corresponds to
            the offsets in the vertical direction.
        W (:class:`~chainer.Variable` or :ref:`ndarray`):
            Weight variable of shape :math:`(c_O, c_I, k_H, k_W)`.
        b (:class:`~chainer.Variable` or :ref:`ndarray`):
            Bias variable of length :math:`c_O` (optional).
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.

    Returns:
        ~chainer.Variable: Output variable.

    Deformable convolution adds 2D offsets to the regular grid sampling
    locations in the standard convolution. It enables free form deformation of
    the sampling grid.

    See `Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu,
    Yichen Wei. Deformable Convolutional Networks
    <https://arxiv.org/abs/1703.06211>`_

    If the bias vector is given, then it is added to all spatial locations of
    the output of convolution.

    .. seealso::
        :class:`~chainer.links.DeformableConvolution2D` to manage the model
        parameters ``W`` and ``b``.

    .. admonition:: Example

        >>> x = np.random.uniform(0, 1, (2, 3, 4, 7)).astype(np.float32)
        >>> offset = np.random.uniform(
        ...     0, 1, (2, 2 * 3 * 3, 2, 5)).astype(np.float32)
        >>> W = np.random.uniform(0, 1, (4, 3, 3, 3)).astype(np.float32)
        >>> b = np.random.uniform(0, 1, (4,)).astype(np.float32)
        >>> y = F.deformable_convolution_2d_sampler(x, offset, W, b)
        >>> y.shape
        (2, 4, 2, 5)

    """
    sy, sx = _pair(stride)
    ph, pw = _pair(pad)
    out_c, _, kh, kw = W.shape
    n, c, h, w = x.shape
    _, khkw2, out_h, out_w = offset.shape

    if khkw2 != 2 * kh * kw:
        # IMPROVED: report the actual and expected channel counts so a
        # shape mistake is easy to diagnose from the traceback alone.
        raise ValueError(
            'The shape of the offset does not match the kernel size: '
            'offset.shape[1] == {}, expected {}'.format(khkw2, 2 * kh * kw))

    # Absolute, normalized sampling coordinates for every kernel tap at
    # every output location.
    grid = _offset2grid(offset, kh, kw, sy, sx, ph, pw, h, w)
    grid = grid.reshape(n, 2, kh * kw, out_h * out_w)

    # Sample the zero-padded input at the deformed locations.
    x_pad = pad_module.pad(x, ((0, 0), (0, 0), (ph, ph), (pw, pw)), 'constant')
    x_st = spatial_transformer_sampler.spatial_transformer_sampler(
        x_pad, grid)

    # Fold the sampled taps into an (n*out_h*out_w, c*kh*kw) matrix and
    # apply all filters with a single matrix multiplication.
    x_st = x_st.transpose(0, 3, 1, 2).reshape(n * out_h * out_w, c * kh * kw)
    W = W.transpose(1, 2, 3, 0).reshape(c * kh * kw, out_c)
    y = matmul.matmul(x_st, W)
    y = y.reshape(n, out_h, out_w, out_c).transpose(0, 3, 1, 2)

    if b is not None:
        b = broadcast.broadcast_to(b[None, :, None, None], y.shape)
        y += b
    return y
def _offset2grid(offset, kh, kw, sy, sx, ph, pw, h, w):
    # Convert per-output-pixel offsets into absolute sampling coordinates
    # for spatial_transformer_sampler, normalized over the padded input of
    # size (h + 2*ph, w + 2*pw).
    n, khkw2, out_h, out_w = offset.shape
    khkw = int(khkw2 / 2)
    xp = backend.get_array_module(offset)
    # Top-left origin of each output pixel's sampling window, in padded
    # input coordinates (stride spacing along each axis).
    ys, xs = xp.meshgrid(
        xp.arange(0, sy * out_h, sy, dtype=numpy.float32),
        xp.arange(0, sx * out_w, sx, dtype=numpy.float32), indexing='ij',
        copy=False
    )
    # Regular-grid displacement of each of the kh*kw kernel taps within
    # the window (x varies fastest, matching row-major tap order).
    filter_offset_x = xp.tile(xp.arange(kw, dtype=numpy.float32), kh)
    filter_offset_y = xp.repeat(xp.arange(kh, dtype=numpy.float32), kw)
    # Absolute sampling location = window origin + tap position + learned
    # offset.  The first khkw channels of `offset` are horizontal, the
    # remaining khkw vertical.
    x_coord = (offset[:, :khkw] + xs[None, None] +
               filter_offset_x[None, :, None, None])
    y_coord = (offset[:, khkw:] + ys[None, None] +
               filter_offset_y[None, :, None, None])
    # The values of this variable is clipped in range [-1, 1].
    # The coordinate (-1, -1) corresponds to the upper-left
    # corner of the input image.
    x_coord = (x_coord / (w + 2 * pw - 1) - 0.5) * 2
    y_coord = (y_coord / (h + 2 * ph - 1) - 0.5) * 2
    # Shape of `coord` is (n, 2 * kh * kw, out_h, out_w)
    coord = concat.concat([x_coord, y_coord], axis=1)
    return coord
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
| mit |
ooici/coi-services | ion/services/eoi/table_loader.py | 1 | 13500 | #!/usr/bin/python
"""
A Service to load data products into PostgreSQL and Geoserver
"""
__author__ = 'abird'
from pyon.util.breakpoint import breakpoint
from pyon.ion.resource import LCS, LCE, PRED
from pyon.util.file_sys import FileSystem, FS
import psycopg2
import requests
import os
from pyon.public import CFG
from pyon.util.log import log
from pyon.container.cc import Container
from pyon.public import BadRequest
import logging
REAL = "real"
DOUBLE = "double precision"
INT = "int"
TIMEDATE = "timestamp"
REQUIRED_PARAMS = ["time","lat","lon"]
class ResourceParser(object):
    """
    Processes the Resource Registry CRUD requests into PostgreSQL and
    ImporterService calls.

    On construction this connects to PostgreSQL and pings the importer
    service; geoservices are only used when both are reachable.
    """

    def __init__(self):
        self.container = Container.instance
        self.using_eoi_services = CFG.get_safe('eoi.meta.use_eoi_services', False)
        self.latitude = CFG.get_safe('eoi.meta.lat_field', 'lat')
        self.longitude = CFG.get_safe('eoi.meta.lon_field', 'lon')

        # Importer-service endpoint names and address.
        self.resetstore = CFG.get_safe('eoi.importer_service.reset_store', 'resetstore')
        self.removelayer = CFG.get_safe('eoi.importer_service.remove_layer', 'removelayer')
        self.addlayer = CFG.get_safe('eoi.importer_service.add_layer', 'addlayer')
        self.server = CFG.get_safe('eoi.importer_service.server', "localhost") + ":" + str(CFG.get_safe('eoi.importer_service.port', 8844))

        # PostgreSQL connection settings.
        self.database = CFG.get_safe('eoi.postgres.database', 'postgres')
        self.host = CFG.get_safe('eoi.postgres.host', 'localhost')
        # BUG FIX: the port was read from the misspelled key
        # 'eoi.postgres.post'.  Read the intended key first, but keep the
        # misspelled one as a fallback so existing configs keep working.
        self.port = CFG.get_safe(
            'eoi.postgres.port', CFG.get_safe('eoi.postgres.post', 5432))
        self.db_user = CFG.get_safe('eoi.postgres.user_name', 'postgres')
        self.db_pass = CFG.get_safe('eoi.postgres.password', '')
        self.table_prefix = CFG.get_safe('eoi.postgres.table_prefix', '_')
        self.view_suffix = CFG.get_safe('eoi.postgres.table_suffix', '_view')
        self.coverage_fdw_sever = CFG.get_safe('eoi.fdw.server', 'cov_srv')

        log.debug("TableLoader:Using geoservices=" + str(self.using_eoi_services))
        if not self.using_eoi_services:
            raise BadRequest("Eoi services not enabled")

        self.con = None
        self.postgres_db_available = False
        self.importer_service_available = False
        self.use_geo_services = False
        try:
            self.con = psycopg2.connect(port=self.port, host=self.host, database=self.database, user=self.db_user, password=self.db_pass)
            self.cur = self.con.cursor()
            # checks the connection
            self.cur.execute('SELECT version()')
            ver = self.cur.fetchone()
            self.postgres_db_available = True
            self.importer_service_available = self.check_for_importer_service()
            log.debug(str(ver))
        except Exception as e:
            # error setting up connection
            log.warn('Error %s', e)

        if self.postgres_db_available and self.importer_service_available:
            self.use_geo_services = True
            log.debug("TableLoader:Using geoservices...")
        else:
            log.warn("TableLoader:NOT using geoservices...")

    def get_eoi_service_available(self):
        """
        returns the current status of the eoi services
        """
        return self.use_geo_services

    def check_for_importer_service(self):
        """Ping the importer service's alive endpoint; True iff it returns 200."""
        try:
            r = requests.get(self.server + '/service=alive&name=ooi&id=ooi')
            log.debug("importer service available, status code: %s", str(r.status_code))
            # alive service returned ok
            return r.status_code == 200
        except Exception as e:
            # SERVICE IS REALLY NOT AVAILABLE
            log.warn("importer service is really not available...%s", e)
            return False

    def close(self):
        """Close the PostgreSQL connection if one was established."""
        if self.con:
            self.con.close()

    def send_geonode_request(self, request, resource_id, prim_types=None):
        """Issue the given layer operation against the importer service.

        ``prim_types``, when given, is passed along as the ``params``
        query argument (used when adding a layer).
        """
        # NOTE: the pointless try/except that re-raised the exception was
        # removed; errors now propagate with their original traceback.
        url = self.server + '/service=' + request + '&name=' + resource_id + '&id=' + resource_id
        if prim_types is not None:
            url += "&params=" + str(prim_types)
        r = requests.get(url)
        self.process_status_code(r.status_code)

    def reset(self):
        """
        Reset all data and rows, and layers
        """
        if self.get_eoi_service_available():
            # remove all foreign data tables from the DB
            self.cur.execute(self.drop_all_fdt())
            self.con.commit()
            list_rows = self.cur.fetchall()
            for row in list_rows:
                self.drop_existing_table(row[0], use_cascade=True)
            # reset the layer information on geoserver
            self.send_geonode_request(self.resetstore, "ooi")
        else:
            log.debug("services not available...")

    def process_status_code(self, status_code):
        """Log the outcome of an importer-service call."""
        if status_code == 200:
            log.debug("SUCCESS!")
        else:
            log.debug("Error Processing layer")

    @staticmethod
    def _get_coverage_path(dataset_id):
        """Return the filesystem path of the cached coverage for a dataset."""
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        return os.path.join(file_root, '%s' % dataset_id)

    def remove_single_resource(self, resource_id):
        """
        Removes a single resource
        """
        if self.does_table_exist(resource_id):
            self.drop_existing_table(resource_id, use_cascade=True)
        else:
            log.debug("could not remove,does not exist")
        # try and remove it from geoserver
        self.send_geonode_request(self.removelayer, resource_id)

    def create_single_resource(self, new_resource_id, param_dict):
        """
        Creates a single resource
        """
        # only go forward if there are params available
        coverage_path = self._get_coverage_path(new_resource_id)
        # generate table from params and id
        [success, prim_types] = self.generate_sql_table(new_resource_id, param_dict, coverage_path)
        if success:
            # generate geoserver layer
            self.send_geonode_request(self.addlayer, new_resource_id, prim_types)

    def get_value_encoding(self, name, value_encoding):
        """
        Map a parameter name/encoding to a SQL column definition and a
        primitive type tag.  Returns (None, None) for unsupported
        parameters (e.g. secondary time-like columns).
        """
        encoding_string = None
        prim_type = None
        if name == "time":
            encoding_string = "\"" + name + "\" " + TIMEDATE
            prim_type = "time"
        elif name.find('time') >= 0:
            # ignore other time-like columns
            encoding_string = None
            prim_type = None
        elif value_encoding.startswith('int'):
            # int
            encoding_string = "\"" + name + "\" " + INT
            prim_type = "int"
        elif value_encoding.find('i8') > -1:
            # int
            encoding_string = "\"" + name + "\" " + INT
            prim_type = "int"
        elif value_encoding.startswith('float'):
            # float
            encoding_string = "\"" + name + "\" " + DOUBLE
            prim_type = DOUBLE
        elif value_encoding.find('f4') > -1:
            # float
            encoding_string = "\"" + name + "\" " + DOUBLE
            prim_type = DOUBLE
        elif value_encoding.find('f8') > -1:
            # float
            encoding_string = "\"" + name + "\" " + DOUBLE
            prim_type = DOUBLE
        log.debug('encoding_string: %s', str(encoding_string))
        log.debug('prim_type: %s', str(prim_type))
        return encoding_string, prim_type

    def required_fields_satisfied(self, param_list):
        """
        Verifies that the required params (time, lat, lon) are in the resource.

        NOTE(review): the length guard requires *more* than 3 params, i.e.
        at least one data parameter beyond the required three -- confirm
        that a dataset with only time/lat/lon should indeed be rejected.
        """
        # should always contain atleast 3 params
        try:
            if len(param_list) > 3:
                return set(REQUIRED_PARAMS).issubset(set(param_list))
            else:
                return False
        except Exception:
            return False

    def generate_sql_table(self, dataset_id, params, coverage_path):
        """
        Generates Foreign data table for used with postgres

        Returns a pair [success, prim_types]; prim_types maps parameter
        names to their primitive type tags when the table was created.
        """
        # check table exists
        if not self.does_table_exist(dataset_id):
            valid_types = {}
            # NOTE: identifiers are interpolated directly into the SQL;
            # dataset ids come from the resource registry, not user input.
            create_table_string = 'create foreign table "%s" (' % dataset_id
            # loop through the params
            encodings = []
            if self.required_fields_satisfied(params.keys()):
                for p_name, p_def in params.iteritems():
                    cm_type = p_def["parameter_type"]
                    # not supporting anything other than quantity or sparse
                    if cm_type not in ("quantity", "sparse"):
                        continue
                    # get the information
                    units = p_def["units"] or "1"
                    value_encoding = p_def['value_encoding']
                    fill_value = p_def["fill_value"]
                    std_name = p_def['standard_name']
                    # only use things that have a valid value
                    [encoding, prim_type] = self.get_value_encoding(p_name, value_encoding)
                    if encoding is not None:
                        encodings.append(encoding)
                        valid_types[p_name] = prim_type
                create_table_string += ','.join(encodings)
                log.debug("coverage path:" + coverage_path)
                create_table_string = self.add_server_info(create_table_string, coverage_path, dataset_id)
                try:
                    log.debug(create_table_string)
                    self.cur.execute(create_table_string)
                    self.con.commit()
                    # should always be lat and lon
                    self.cur.execute(self.generate_table_view(dataset_id, self.latitude, self.longitude))
                    self.con.commit()
                    return self.does_table_exist(dataset_id), valid_types
                except Exception as e:
                    # error setting up connection
                    log.debug('Error %s', e)
                    raise
            else:
                log.warn('resource skipped, it does not contain all of the required params:')
                return [False, None]
        else:
            log.debug('table is already there dropping it')
            self.drop_existing_table(dataset_id, use_cascade=True)
            return [False, None]

    def generate_table_view(self, dataset_id, lat_field, lon_field):
        """
        Generate table view including geom
        """
        sqlquery = """
        CREATE or replace VIEW "%s%s%s" as SELECT ST_SetSRID(ST_MakePoint(%s, %s),4326) as
        geom, * from "%s";
        """ % (self.table_prefix, dataset_id, self.view_suffix, lon_field, lat_field, dataset_id)
        return sqlquery

    def add_server_info(self, sqlquery, coverage_path, coverage_id):
        """
        Add the server info to the sql create table request
        """
        sqlquery += ") server " + self.coverage_fdw_sever + " options(k \'1\',cov_path \'" + coverage_path + "\',cov_id \'" + coverage_id + "\');"
        return sqlquery

    def modify_sql_table(self, dataset_id, params):
        log.debug('Not Implemented')

    def remove_sql_table(self, dataset_id):
        log.debug('Not Implemented')

    def drop_existing_table(self, dataset_id, use_cascade=False):
        """Drop the foreign table for a dataset and commit."""
        self.cur.execute(self.get_table_drop_cmd(dataset_id, use_cascade))
        self.con.commit()

    def does_table_exist(self, dataset_id):
        """
        Checks to see if the table already exists before we add it
        """
        self.cur.execute(self.get_table_exist_cmd(dataset_id))
        out = self.cur.fetchone()
        # check table exist
        if out is None:
            # BUG FIX: typo 'cound' corrected; use lazy %s formatting.
            log.error('could not find table that was created: %s', dataset_id)
            return False
        else:
            return True

    def get_table_exist_cmd(self, dataset_id):
        """
        Looks in the psql catalog for the table, therefore is quick and does not hit the table itself
        """
        # check table exists
        sqlcmd = "SELECT 1 FROM pg_catalog.pg_class WHERE relname = \'" + dataset_id + "\';"
        return sqlcmd

    def get_table_drop_cmd(self, dataset_id, use_cascade=False):
        """Build the DROP FOREIGN TABLE statement (optionally CASCADE)."""
        # drop table
        if use_cascade:
            sqlcmd = "drop foreign table \"" + dataset_id + "\" cascade;"
        else:
            sqlcmd = "drop foreign table \"" + dataset_id + "\";"
        return sqlcmd

    def drop_all_fdt(self):
        """Build the query listing every foreign table in the catalog."""
        sqlcmd = "SELECT relname FROM pg_catalog.pg_class where relkind ='foreign table';"
        return sqlcmd
| bsd-2-clause |
yglazko/socorro | socorro/unittest/app/test_socorro_app.py | 10 | 10471 | import mock
from nose.tools import eq_, ok_, assert_raises
from socorro.unittest.testbase import TestCase
from socorro.processor.processor_app import ProcessorApp
from configman import (
class_converter,
Namespace,
command_line,
ConfigFileFutureProxy,
)
from configman.dotdict import DotDict
from socorro.app.socorro_app import (
SocorroApp,
SocorroWelcomeApp,
main,
klass_to_pypath,
)
from socorro.app.for_application_defaults import ApplicationDefaultsProxy
tag = ''
#==============================================================================
# used in tests below
class MyProcessor(ProcessorApp):
    # Stand-in application used by TestSocorroWelcomeApp.test_app_replacement:
    # its main() records that it was invoked by mutating the module-level
    # ``tag`` variable.
    def main(self):
        global tag
        tag = 'lars was here'
        return "I'm a dummy main"
#==============================================================================
class TestSocorroApp(TestCase):

    def test_instantiation(self):
        """A bare SocorroApp has empty defaults and abstract entry points."""
        config = DotDict()
        sa = SocorroApp(config)
        eq_(sa.get_application_defaults(), {})
        assert_raises(NotImplementedError, sa.main)
        assert_raises(NotImplementedError, sa._do_run)

    def test_run(self):
        """run() must delegate to _do_run with no config path."""
        class SomeOtherApp(SocorroApp):
            @classmethod
            def _do_run(klass, config_path=None, values_source_list=None):
                klass.config_path = config_path
                return 17
        eq_(SomeOtherApp._do_run(), 17)
        ok_(SomeOtherApp.config_path is None)
        x = SomeOtherApp.run()
        eq_(x, 17)

    def test_run_with_alternate_config_path(self):
        """run(path) must forward the config path to _do_run."""
        class SomeOtherApp(SocorroApp):
            @classmethod
            def _do_run(klass, config_path=None, values_source_list=None):
                klass.values_source_list = values_source_list
                klass.config_path = config_path
                return 17
        eq_(SomeOtherApp._do_run('my/path'), 17)
        eq_(SomeOtherApp.config_path, 'my/path')
        x = SomeOtherApp.run('my/other/path')
        eq_(x, 17)
        eq_(SomeOtherApp.config_path, 'my/other/path')

    def test_run_with_alternate_values_source_list(self):
        """run(path, sources) must forward both arguments to _do_run."""
        class SomeOtherApp(SocorroApp):
            @classmethod
            def _do_run(klass, config_path=None, values_source_list=None):
                klass.values_source_list = values_source_list
                klass.config_path = config_path
                return 17
        eq_(SomeOtherApp._do_run('my/path', [{}, {}]), 17)
        eq_(SomeOtherApp.config_path, 'my/path')
        eq_(SomeOtherApp.values_source_list, [{}, {}])
        x = SomeOtherApp.run('my/other/path', [])
        eq_(x, 17)
        eq_(SomeOtherApp.config_path, 'my/other/path')
        eq_(SomeOtherApp.values_source_list, [])

    def test_do_run(self):
        """main(app) builds a ConfigurationManager with the default config path."""
        with mock.patch('socorro.app.socorro_app.ConfigurationManager') as cm:
            cm.return_value.context.return_value = mock.MagicMock()
            # Patch signal so the app cannot install real handlers.
            with mock.patch('socorro.app.socorro_app.signal'):
                class SomeOtherApp(SocorroApp):
                    app_name = 'SomeOtherApp'
                    # BUG FIX: this attribute was misspelled 'app_verision',
                    # silently leaving the base class's app_version in use.
                    app_version = '1.2.3'
                    app_description = 'a silly app'

                    def main(self):
                        ok_(
                            self.config
                            is cm.return_value.context.return_value.__enter__
                            .return_value
                        )
                        return 17

                result = main(SomeOtherApp)
                args = cm.call_args_list
                args, kwargs = args[0]
                ok_(isinstance(args[0], Namespace))
                ok_(isinstance(kwargs['values_source_list'], list))
                eq_(kwargs['app_name'], SomeOtherApp.app_name)
                eq_(kwargs['app_version'], SomeOtherApp.app_version)
                eq_(kwargs['app_description'], SomeOtherApp.app_description)
                eq_(kwargs['config_pathname'], './config')
                ok_(kwargs['values_source_list'][-1], command_line)
                ok_(isinstance(kwargs['values_source_list'][-2], DotDict))
                ok_(kwargs['values_source_list'][-3] is ConfigFileFutureProxy)
                ok_(isinstance(
                    kwargs['values_source_list'][0],
                    ApplicationDefaultsProxy
                ))
                eq_(result, 17)

    def test_do_run_with_alternate_class_path(self):
        """main(app, path) must pass the alternate config pathname through."""
        with mock.patch('socorro.app.socorro_app.ConfigurationManager') as cm:
            cm.return_value.context.return_value = mock.MagicMock()
            with mock.patch('socorro.app.socorro_app.signal'):
                class SomeOtherApp(SocorroApp):
                    app_name = 'SomeOtherApp'
                    # BUG FIX: was misspelled 'app_verision'.
                    app_version = '1.2.3'
                    app_description = 'a silly app'

                    def main(self):
                        ok_(
                            self.config
                            is cm.return_value.context.return_value.__enter__
                            .return_value
                        )
                        return 17

                result = main(SomeOtherApp, 'my/other/path')
                args = cm.call_args_list
                args, kwargs = args[0]
                ok_(isinstance(args[0], Namespace))
                ok_(isinstance(kwargs['values_source_list'], list))
                eq_(kwargs['app_name'], SomeOtherApp.app_name)
                eq_(kwargs['app_version'], SomeOtherApp.app_version)
                eq_(kwargs['app_description'], SomeOtherApp.app_description)
                eq_(kwargs['config_pathname'], 'my/other/path')
                ok_(kwargs['values_source_list'][-1], command_line)
                ok_(isinstance(kwargs['values_source_list'][-2], DotDict))
                ok_(kwargs['values_source_list'][-3] is ConfigFileFutureProxy)
                ok_(isinstance(
                    kwargs['values_source_list'][0],
                    ApplicationDefaultsProxy
                ))
                eq_(result, 17)

    def test_do_run_with_alternate_values_source_list(self):
        """main(app, path, sources) must append the extra value sources."""
        with mock.patch('socorro.app.socorro_app.ConfigurationManager') as cm:
            cm.return_value.context.return_value = mock.MagicMock()
            with mock.patch('socorro.app.socorro_app.signal'):
                class SomeOtherApp(SocorroApp):
                    app_name = 'SomeOtherApp'
                    # BUG FIX: was misspelled 'app_verision'.
                    app_version = '1.2.3'
                    app_description = 'a silly app'

                    def main(self):
                        ok_(
                            self.config
                            is cm.return_value.context.return_value.__enter__
                            .return_value
                        )
                        return 17

                result = main(
                    SomeOtherApp,
                    config_path='my/other/path',
                    values_source_list=[{"a": 1}, {"b": 2}]
                )
                args = cm.call_args_list
                args, kwargs = args[0]
                ok_(isinstance(args[0], Namespace))
                eq_(kwargs['app_name'], SomeOtherApp.app_name)
                eq_(kwargs['app_version'], SomeOtherApp.app_version)
                eq_(kwargs['app_description'], SomeOtherApp.app_description)
                eq_(kwargs['config_pathname'], 'my/other/path')
                ok_(isinstance(kwargs['values_source_list'], list))
                ok_(isinstance(
                    kwargs['values_source_list'][0],
                    ApplicationDefaultsProxy
                ))
                eq_(kwargs['values_source_list'][1], {"a": 1})
                eq_(kwargs['values_source_list'][2], {"b": 2})
                eq_(result, 17)
#==============================================================================
class TestSocorroWelcomeApp(TestCase):

    def test_instantiation(self):
        """SocorroWelcomeApp keeps its config; 'application' defaults to None."""
        config = DotDict()
        sa = SocorroWelcomeApp(config)
        eq_(sa.config, config)
        eq_(
            sa.required_config.application.default,
            None
        )

    def test_app_replacement(self):
        """main() must run the app class named in config.application."""
        config = DotDict()
        config.application = MyProcessor
        # Neutralize command-line value sources so only ``config`` drives
        # the application selection.  (Dropped the unused 'as' alias.)
        with mock.patch(
            'socorro.app.socorro_app.command_line',
            new={}
        ):
            sa = SocorroWelcomeApp(config)
            sa.main()
            # MyProcessor.main sets the module-global ``tag`` when it runs.
            eq_(tag, 'lars was here')
#--------------------------------------------------------------------------
def test_klass_to_pypath_various_modules():
    # klass_to_pypath should reconstruct the dotted import path of a class
    # from an ordinary (non-__main__) module.
    from socorro.processor.processor_app import ProcessorApp
    eq_(
        klass_to_pypath(ProcessorApp),
        'socorro.processor.processor_app.ProcessorApp'
    )
#--------------------------------------------------------------------------
def test_klass_to_pypath_a_faked_out_main():
    """Verify klass_to_pypath resolves a class whose __module__ is '__main__'."""
    # since we can't really get a class that reports its __module__ as
    # being '__main__', we have to fake it with mocks.
    fake_sys_modules = {
        '__main__': '/some/bogus/path/socoro/processor/processor_app.py',
    }
    fake_sys_path = [
        '', '/my/home/path', '/your/home/path', '/some/bogus/path',
        '/elsewhere/'
    ]
    MockedMainClass = mock.Mock()
    MockedMainClass.__module__ = '__main__'
    MockedMainClass.__name__ = 'ProcessorApp'
    with mock.patch(
        'socorro.app.socorro_app.sys.modules',
        new=fake_sys_modules
    ):
        with mock.patch(
            'socorro.app.socorro_app.sys.path',
            new=fake_sys_path
        ):
            # BUG FIX: the original called klass_to_pypath(ProcessorApp),
            # leaving MockedMainClass -- the entire point of this fixture --
            # unused, so the '__main__' resolution path was never exercised.
            eq_(
                klass_to_pypath(MockedMainClass),
                'socorro.processor.processor_app.ProcessorApp'
            )
| mpl-2.0 |
trungkiendt9/PlaformIO | Blynk_SIM808/.pio/libdeps/nanoatmega328/Maker PlayGround Device_ID1529/src/Blynk/tests/pseudo-library.py | 6 | 6163 | #!/usr/bin/python
'''
This is a pseudo-library implementation
Example:
./pseudo-library.py -t b168ccc8c8734fad98323247afbc1113 --dump
Author: Volodymyr Shymanskyy
License: The MIT license
'''
import select, socket, struct
import os, sys, time, getopt
from threading import Thread
# Configuration options
# Parse command line options
try:
    opts, args = getopt.getopt(
        sys.argv[1:],
        "hs:p:t:",
        ["help", "server=", "port=", "token=", "sndbuf=", "rcvbuf=",
         "nodelay=", "dump"])
except getopt.GetoptError:
    # Unknown option: show the module docstring (usage text) and exit.
    print >>sys.stderr, __doc__
    sys.exit(2)

# Default options
SERVER = "blynk-cloud.com"   # Blynk cloud hostname
PORT = 8442                  # plain-TCP hardware port
NODELAY = 1                  # TCP_NODELAY
SNDBUF = 0                   # No SNDBUF override
RCVBUF = 0                   # No RCVBUF override
TOKEN = "YourAuthToken"      # project auth token (override with -t)
DUMP = 0                     # 1 = trace every protocol message

# Apply command-line overrides on top of the defaults above.
for o, v in opts:
    if o in ("-h", "--help"):
        print __doc__
        sys.exit()
    elif o in ("-s", "--server"):
        SERVER = v
    elif o in ("-p", "--port"):
        PORT = int(v)
    elif o in ("-t", "--token"):
        TOKEN = v
    elif o in ("--sndbuf",):
        SNDBUF = int(v)
    elif o in ("--rcvbuf",):
        RCVBUF = int(v)
    elif o in ("--nodelay",):
        NODELAY = int(v)
    elif o in ("--dump",):
        DUMP = 1
# Blynk protocol helpers
hdr = struct.Struct("!BHH")
class MsgType:
    """Blynk wire-protocol command identifiers used by this script."""
    RSP    = 0
    LOGIN  = 2
    PING   = 6
    BRIDGE = 15
    HW     = 20
class MsgStatus:
    """Blynk response status codes (200 means success)."""
    OK = 200
def hw(*args):
    # Build a serialized HARDWARE message: the arguments become
    # NUL-separated strings preceded by the 5-byte protocol header
    # (type, message id, payload length).
    # Convert params to string and join using \0
    data = "\0".join(map(str, args))
    dump("< " + " ".join(map(str, args)))
    # Prepend HW command header
    return hdr.pack(MsgType.HW, genMsgId(), len(data)) + data
def handle_hw(data):
    # Dispatch one incoming HARDWARE command.  The payload is a
    # NUL-separated list whose first element is the command name.
    params = data.split("\0")
    cmd = params.pop(0)
    if cmd == 'info':
        pass

    ### DIRECT pin operations
    elif cmd == 'pm':
        # Pin-mode command: payload is alternating (pin, mode) pairs.
        pairs = zip(params[0::2], params[1::2])
        for (pin, mode) in pairs:
            pin = int(pin)
            if mode == 'in':
                log("Pin %d mode INPUT" % pin)
            elif mode == 'out':
                log("Pin %d mode OUTPUT" % pin)
            elif mode == 'pu':
                log("Pin %d mode INPUT_PULLUP" % pin)
            elif mode == 'pd':
                log("Pin %d mode INPUT_PULLDOWN" % pin)
            else:
                log("Unknown pin %d mode: %s" % (pin, mode))
    elif cmd == 'dw':
        pin = int(params.pop(0))
        val = params.pop(0)
        log("Digital write pin %d, value %s" % (pin, val))
    elif cmd == 'aw':
        pin = int(params.pop(0))
        val = params.pop(0)
        log("Analog write pin %d, value %s" % (pin, val))
    elif cmd == 'dr':  # This should read digital pin
        pin = int(params.pop(0))
        log("Digital read pin %d" % pin)
        conn.sendall(hw("dw", pin, 1))  # Send value
    elif cmd == 'ar':  # This should do ADC read
        pin = int(params.pop(0))
        log("Analog read pin %d" % pin)
        conn.sendall(hw("aw", pin, 123))  # Send value

    ### VIRTUAL pin operations
    elif cmd == 'vw':  # This should call user handler
        pin = int(params.pop(0))
        val = params.pop(0)
        log("Virtual write pin %d, value %s" % (pin, val))
    elif cmd == 'vr':  # This should call user handler
        pin = int(params.pop(0))
        log("Virtual read pin %d" % pin)
        conn.sendall(hw("vw", pin, "hello"))  # Send value
    else:
        log("Unknown HW cmd: %s" % cmd)
# Shared counter for protocol message ids; the first genMsgId() call
# returns 2.
static_msg_id = 1


def genMsgId():
    """Increment and return the module-wide message-id counter."""
    global static_msg_id
    next_id = static_msg_id + 1
    static_msg_id = next_id
    return next_id
# Other utilities
# Reference point for the elapsed-seconds prefix printed by log().
start_time = time.time()


def log(msg):
    """Print *msg* prefixed with the seconds elapsed since module start."""
    elapsed = float(time.time() - start_time)
    print("[{:7.3f}] {:}".format(elapsed, msg))
def dump(msg):
    # Verbose protocol trace: only emitted when --dump was given.
    if DUMP:
        log(msg)
def receive(sock, length):
    # Read exactly *length* bytes from *sock*, retrying on socket
    # timeouts.  Returns '' if the peer closes the connection before
    # enough data has arrived.
    d = []
    l = 0
    while l < length:
        r = ''
        try:
            r = sock.recv(length - l)
        except socket.timeout:
            # Transient timeout: keep waiting for the remainder.
            continue
        if not r:
            # Peer closed the connection.
            return ''
        d.append(r)
        l += len(r)
    return ''.join(d)
# Threads
def readthread(conn):
    # Receive loop: read one header at a time and dispatch by message
    # type until the connection closes.
    while (True):
        data = receive(conn, hdr.size)
        if not data:
            # Connection closed by the server.
            break
        msg_type, msg_id, msg_len = hdr.unpack(data)
        dump("Got {0}, {1}, {2}".format(msg_type, msg_id, msg_len))
        if msg_type == MsgType.RSP:
            # Status responses carry no payload; nothing to do here.
            pass
        elif msg_type == MsgType.PING:
            log("Got ping")
            # Send Pong
            conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
        elif msg_type == MsgType.HW or msg_type == MsgType.BRIDGE:
            # Hardware/bridge commands carry a NUL-separated payload of
            # msg_len bytes.
            data = receive(conn, msg_len)
            # Print HW message
            dump("> " + " ".join(data.split("\0")))
            handle_hw(data)
        else:
            log("Unknown msg type")
            break
def writethread(conn):
    # Keep-alive loop: ping the server every 10 seconds so the
    # connection is not dropped as idle.
    while (True):
        time.sleep(10)
        log("Sending heartbeat...")
        conn.sendall(hdr.pack(MsgType.PING, genMsgId(), 0))
# Main code
log('Connecting to %s:%d' % (SERVER, PORT))
try:
    conn = socket.create_connection((SERVER, PORT), 3)
except socket.error:
    # BUG FIX: this was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; only socket errors mean "can't connect".
    log("Can't connect")
    sys.exit(1)

# Apply optional socket tuning requested on the command line.
if NODELAY != 0:
    conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if SNDBUF != 0:
    sndbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
    log('Default SNDBUF %s changed to %s' % (sndbuf, SNDBUF))
    conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SNDBUF)
if RCVBUF != 0:
    rcvbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
    log('Default RCVBUF %s changed to %s' % (rcvbuf, RCVBUF))
    conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, RCVBUF)

# Authenticate: a LOGIN message whose payload is the auth token, then
# wait for a single status response.
conn.sendall(hdr.pack(MsgType.LOGIN, genMsgId(), len(TOKEN)))
conn.sendall(TOKEN)
data = receive(conn, hdr.size)
if not data:
    log("Auth timeout")
    sys.exit(1)
msg_type, msg_id, status = hdr.unpack(data)
dump("Got {0}, {1}, {2}".format(msg_type, msg_id, status))
if status != MsgStatus.OK:
    log("Auth failed: %d" % status)
    sys.exit(1)

# BUG FIX (naming): the reader thread was previously named `wt` and the
# writer `rt`, inverting their meanings.  Behavior is unchanged.
reader = Thread(target=readthread, args=(conn,))
writer = Thread(target=writethread, args=(conn,))
reader.start()
writer.start()
reader.join()
writer.join()
conn.close()
note35/sinon | sinon/test/TestSinonMatcher.py | 1 | 5492 | import sys
sys.path.insert(0, '../')
import unittest
import lib.base as sinon
from lib.matcher import SinonMatcher
from lib.spy import SinonSpy
from lib.stub import SinonStub
"""
======================================================
FOR TEST ONLY START
======================================================
"""
# build-in module
import os
# customized class
class A_object(object):
    # Minimal fixture class for the matcher tests: provides a single
    # instance method with no behavior.
    # class function
    def A_func(self):
        pass
# global function
def B_func():
    """Module-level fixture function; does nothing and returns None."""
    return None
from TestClass import ForTestOnly
"""
======================================================
FOR TEST ONLY END
======================================================
"""
class TestSinonMatcher(unittest.TestCase):

    def setUp(self):
        # Re-bind sinon's global registry to this module before each test.
        sinon.g = sinon.init(globals())

    def test001_constructor_number(self):
        """A value matcher compares by equality."""
        m = SinonMatcher(1)
        self.assertTrue(m.mtest(1))
        self.assertFalse(m.mtest(2))

    def test002_constructor_strcmp_string(self):
        """strcmp="default" matches substrings of the target string."""
        m = SinonMatcher("match string", strcmp="default")
        self.assertTrue(m.mtest("match"))
        self.assertTrue(m.mtest("ch st"))
        self.assertTrue(m.mtest("match string"))
        self.assertFalse(m.mtest("match string++"))
        self.assertFalse(m.mtest("match strig"))

    def test003_constructor_strcmp_regex(self):
        """strcmp="regex" matches values against the given pattern."""
        m = SinonMatcher(r"(\w*) (\w*)", strcmp="regex")
        self.assertFalse(m.mtest("match"))
        self.assertTrue(m.mtest("ch st"))
        self.assertTrue(m.mtest("match string"))
        self.assertTrue(m.mtest("match string++"))
        self.assertTrue(m.mtest("match strig"))

    def test004_constructor_func(self):
        """is_custom_func=True makes mtest delegate to the callable."""
        def custom_test_func(a, b, c):
            return a + b + c
        m = SinonMatcher(custom_test_func, is_custom_func=True)
        self.assertEqual(m.mtest(1, 2, 3), 6)
        # BUG FIX (cleanup): removed a copy/paste leftover that rebuilt
        # the regex matcher from test003 and repeated one of its
        # assertions here -- unrelated to custom-function matchers.

    def test005_constructor_func_invalid(self):
        """A non-callable with is_custom_func=True raises."""
        something = "Not Function"
        with self.assertRaises(Exception):
            SinonMatcher(something, is_custom_func=True)

    def test006_constructor_strcmp_invalid(self):
        """A non-string target combined with strcmp raises."""
        something = 123
        with self.assertRaises(Exception):
            SinonMatcher(something, strcmp="default")

    def test020_any(self):
        """Matcher.any accepts every argument, including none at all."""
        m = SinonMatcher.any
        self.assertTrue(m.mtest())
        self.assertTrue(m.mtest(123))
        self.assertTrue(m.mtest(self))
        self.assertTrue(m.mtest("asd"))

    def test021_defined(self):
        """Matcher.defined rejects only a missing or None argument."""
        m = SinonMatcher.defined
        self.assertFalse(m.mtest())
        self.assertFalse(m.mtest(None))
        self.assertTrue(m.mtest([]))
        self.assertTrue(m.mtest(['1']))
        self.assertTrue(m.mtest(""))
        self.assertTrue(m.mtest("1"))

    def test022_truthy(self):
        """Matcher.truthy accepts True but not other truthy values."""
        m = SinonMatcher.truthy
        self.assertFalse(m.mtest())
        self.assertTrue(m.mtest(True))
        self.assertFalse(m.mtest(False))
        self.assertFalse(m.mtest("asd"))

    def test023_falsy(self):
        """Matcher.falsy accepts False but not other falsy values."""
        m = SinonMatcher.falsy
        self.assertFalse(m.mtest())
        self.assertFalse(m.mtest(True))
        self.assertTrue(m.mtest(False))
        self.assertFalse(m.mtest("asd"))

    def test024_bool(self):
        """Matcher.bool accepts both booleans and nothing else."""
        m = SinonMatcher.bool
        self.assertFalse(m.mtest())
        self.assertTrue(m.mtest(True))
        self.assertTrue(m.mtest(False))
        self.assertFalse(m.mtest("asd"))

    def test30_same(self):
        """Matcher.same matches the identical value or object."""
        m = SinonMatcher.same("100")
        self.assertTrue(m.mtest("100"))
        m = SinonMatcher.same(100)
        self.assertTrue(m.mtest(100))
        m = SinonMatcher.same(os.system)
        self.assertTrue(m.mtest(os.system))

    def test40_typeOf_class(self):
        """typeOf(type) matches classes but not their instances."""
        # This is a silly test, normal condition will not use this kinda cases.
        fto = ForTestOnly()
        m = SinonMatcher.typeOf(type)
        self.assertTrue(m.mtest(ForTestOnly))  # class is a type
        self.assertFalse(m.mtest(fto))  # instance is not a type

    def test41_typeOf_instance(self):
        """typeOf(cls) matches instances of cls but not cls itself."""
        fto = ForTestOnly()
        m = SinonMatcher.typeOf(ForTestOnly)
        self.assertFalse(m.mtest(ForTestOnly))
        self.assertTrue(m.mtest(fto))

    def test42_typeOf_value(self):
        """typeOf(int) distinguishes numbers from strings."""
        m = SinonMatcher.typeOf(int)
        self.assertFalse(m.mtest("1"))  # string is not a number
        self.assertTrue(m.mtest(1))  # number is a number

    def test43_typeOf_invalid_type(self):
        """typeOf rejects a non-type argument."""
        with self.assertRaises(Exception):
            SinonMatcher.typeOf(123)

    def test50_instanceOf_class(self):
        """instanceOf rejects a class argument (an instance is required)."""
        fto = ForTestOnly()
        with self.assertRaises(Exception):
            SinonMatcher.instanceOf(ForTestOnly)

    def test51_instanceOf_instance(self):
        """instanceOf(spy instance) matches spy and stub instances."""
        spy = SinonSpy()
        stub = SinonStub()
        m = SinonMatcher.instanceOf(spy)
        self.assertTrue(m.mtest(spy))
        self.assertTrue(m.mtest(stub))

    def test060_and_match(self):
        """and_match requires both matchers to accept the value."""
        spy = SinonSpy()
        stub = SinonStub()
        m = SinonMatcher.instanceOf(spy).and_match(SinonMatcher.instanceOf(stub))
        self.assertFalse(m.mtest(spy))
        self.assertTrue(m.mtest(stub))

    def test061_or_match(self):
        """or_match requires either matcher to accept the value."""
        m = SinonMatcher.typeOf(int).or_match(SinonMatcher.typeOf(str))
        self.assertTrue(m.mtest("1"))
        self.assertTrue(m.mtest(1))
        self.assertFalse(m.mtest())
        self.assertFalse(m.mtest([1, "1"]))
| bsd-2-clause |
sparkslabs/kamaelia_ | Sketches/MPS/Examples/LUGRadio/likefile.py | 3 | 13538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""
=================================================
LikeFile - file-like interaction with components.
=================================================
THE OUTSIDE WORLD
+----------------------------------+
| LikeFile |
+----------------------------------+
| / \
| |
InQueues OutQueues
| |
+---------+-----------------------+---------+
| \ / | |
| +---------+ +--------+ |
| | Input | Shutdown | Output | |
| | Wrapper |-------------->| | |
| | (thread)| Message |Wrapper | |
| +---------+ +--------+ |
| | / \ |
| | | |
| Inboxes Outboxes |
| | | |
| \ / | |
| +----------------------------------+ |
| | the wrapped component | |
| +----------------------------------+ |
| |
| |
| AXON SCHEDULED COMPONENTS |
+-------------------------------------------+
Note 1: Threadsafeness of activate().
when a component is activated, it calls the method inherited from microprocess, which calls _addThread(self)
on an appropriate scheduler. _addThread calls wakeThread, which places the request on a threadsafe queue.
"""
from Axon.Scheduler import scheduler
from Axon.AxonExceptions import noSpaceInBox
from Axon.Ipc import producerFinished, shutdownMicroprocess
import Queue, threading, time, copy, Axon, warnings
queuelengths = 0
def addBox(names, boxMap, addBox):
    """Add an extra wrapped box called name, using the addBox function provided
    (either self.addInbox or self.addOutbox), and adding it to the box mapping
    which is used to coordinate message routing within component wrappers.

    names  -- a single box name, or a tuple of box names.
    boxMap -- dict mapping wrapped (public) box names to real box names;
              mutated in place.
    addBox -- callable that creates the real box and returns its name.

    Raises ValueError if a requested name is already in the mapping.
    """
    if not isinstance(names, tuple):
        names = (names,)
    for boxname in names:
        if boxname in boxMap:
            # Bug fix: the original formatted this message with an undefined
            # name "direction", so it raised NameError instead of ValueError.
            raise ValueError("box %s already exists!" % boxname)
        realboxname = addBox(boxname)
        boxMap[boxname] = realboxname
class dummyComponent(Axon.Component.component):
    """A dummy component. Functionality: None. Prevents the scheduler from dying immediately."""
    def main(self):
        # Axon generator main loop: pause (sleep until woken by the scheduler)
        # and yield control back, forever.  Never terminates, which keeps the
        # background scheduler alive even when no real component is active.
        while True:
            self.pause()
            yield 1
class schedulerThread(threading.Thread):
    """A python thread which runs the (single, global) Axon scheduler.
    Only one instance may exist at a time: the class-level lock is taken on
    construction and released when the scheduler run finishes."""
    lock = threading.Lock()
    def __init__(self,slowmo=0):
        # Non-blocking acquire: if the lock is already held, a scheduler
        # thread already exists.
        if not schedulerThread.lock.acquire(False):
            # Bug fix: the original raised a bare string, which is illegal
            # (string exceptions were removed from Python) and produced a
            # TypeError at runtime.  Raise a proper exception type instead.
            raise RuntimeError("only one scheduler for now can be run!")
        self.slowmo = slowmo
        threading.Thread.__init__(self)
        self.daemon = True # Die when the caller dies (modern spelling of setDaemon(True))
    def run(self):
        dummyComponent().activate() # to keep the scheduler from exiting immediately.
        scheduler.run.runThreads(slowmo = self.slowmo)
        schedulerThread.lock.release()
class componentWrapperInput(Axon.ThreadedComponent.threadedadaptivecommscomponent):
"""A wrapper that takes a child component and waits on an event from the foreground, to signal that there is
queued data to be placed on the child's inboxes."""
def __init__(self, child, extraInboxes = None):
super(componentWrapperInput, self).__init__()
self.child = child
# This is a map from the name of the wrapped inbox on the child, to the
# Queue used to convey data into it.
self.inQueues = dict()
# This queue is used by the foreground to tell us what queue it has sent us
# data on, so that we do not need to check all our input queues,
# and also so that we can block on reading it.
self.whatInbox = Queue.Queue()
self.isDead = threading.Event()
# This sets up the linkages between us and our child, avoiding extra
# box creation by connecting the "basic two" in the same way as, e.g. a pipeline.
self.childInboxMapping = { "inbox": "outbox", "control": "signal" }
if extraInboxes:
addBox(extraInboxes, self.childInboxMapping, self.addOutbox)
for childSink, parentSource in self.childInboxMapping.iteritems():
self.inQueues[childSink] = Queue.Queue(self.queuelengths)
self.link((self, parentSource),(self.child, childSink))
# This outbox is used to tell the output wrapper when to shut down.
self.deathbox = self.addOutbox(str(id(self)))
def main(self):
while True:
whatInbox = self.whatInbox.get()
if not self.pollQueue(whatInbox):
# a False return indicates that we should shut down.
self.isDead.set()
# tells the foreground object that we've successfully processed a shutdown message.
# unfortunately, whether the child honours it or not is a matter of debate.
self.send(object, self.deathbox)
return
def pollQueue(self, whatInbox):
"""This method checks all the queues from the outside world, and forwards any waiting data
to the child component. Returns False if we propogated a shutdown signal, true otherwise."""
parentSource = self.childInboxMapping[whatInbox]
queue = self.inQueues[whatInbox]
while not queue.empty():
if not self.outboxes[parentSource].isFull():
msg = queue.get_nowait() # won't fail, we're the only one reading from the queue.
try:
self.send(msg, parentSource)
except noSpaceInBox, e:
raise "Box delivery failed despite box (earlier) reporting being not full. Is more than one thread directly accessing boxes?"
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return False
else:
# if the component's inboxes are full, do something here. Preferably not succeed.
break
return True
class componentWrapperOutput(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
    """A component which takes a child component and connects its outboxes to queues, which communicate
    with the LikeFile component."""
    def __init__(self, child, inputHandler, extraOutboxes = None):
        # child -- the component whose outboxes are being wrapped.
        # inputHandler -- the matching componentWrapperInput; its death
        # notification drives our own shutdown.
        # extraOutboxes -- optional name or tuple of names of further child
        # outboxes to wrap beyond the standard "outbox"/"signal" pair.
        super(componentWrapperOutput, self).__init__()
        self.queuelengths = queuelengths
        self.child = child
        self.addChildren(self.child)
        # This queue maps from the name of the outbox on the child which is to be wrapped,
        # to the Queue which conveys that data to the foreground thread.
        self.outQueues = dict()
        # set up notification from the input handler to kill us when appropriate.
        # we cannot rely on shutdown messages being propagated through the child.
        self.isDead = inputHandler.isDead
        self.deathbox = self.addInbox(str(id(self)))
        self.link((inputHandler, inputHandler.deathbox), (self, self.deathbox))
        # This sets up the linkages between us and our child, avoiding extra
        # box creation by connecting the "basic two" in the same way as, e.g. a pipeline.
        self.childOutboxMapping = { "outbox": "inbox", "signal": "control" }
        if extraOutboxes:
            addBox(extraOutboxes, self.childOutboxMapping, self.addInbox)
        for childSource, parentSink in self.childOutboxMapping.iteritems():
            self.outQueues[childSource] = Queue.Queue(self.queuelengths)
            self.link((self.child, childSource),(self, parentSink))
    def main(self):
        # Activate the wrapped child, then loop: drain any pending child
        # output into the foreground queues, and exit once the input wrapper
        # signals death via our private deathbox.
        self.child.activate()
        while True:
            self.pause()
            yield 1
            self.sendPendingOutput()
            if self.dataReady(self.deathbox):
                return
    def sendPendingOutput(self):
        """This method will take any outgoing data sent to us from a child component and stick it on a queue
        to the outside world."""
        for childSource, parentSink in self.childOutboxMapping.iteritems():
            queue = self.outQueues[childSource]
            while self.dataReady(parentSink):
                if not queue.full():
                    msg = self.recv(parentSink)
                    # TODO - what happens when the wrapped component terminates itself? We keep on going. Not optimal.
                    queue.put_nowait(msg)
                else:
                    break
                    # permit a horrible backlog to build up inside our boxes. What could go wrong?
class LikeFile(object):
alive = False
"""An interface to the message queues from a wrapped component, which is activated on a backgrounded scheduler."""
def __init__(self, componenttowrap, extrainboxes = None, extraoutboxes = None):
if schedulerThread.lock.acquire(False):
schedulerThread.lock.release()
raise AttributeError, "no running scheduler found."
try: inputComponent = componentWrapperInput(componenttowrap, extrainboxes)
except KeyError, e:
raise KeyError, 'component to wrap has no such inbox: %s' % e
try: outputComponent = componentWrapperOutput(componenttowrap, inputComponent, extraoutboxes)
except KeyError, e:
del inputComponent
raise KeyError, 'component to wrap has no such outbox: %s' % e
self.inQueues = copy.copy(inputComponent.inQueues)
self.outQueues = copy.copy(outputComponent.outQueues)
# reaching into the component and its child like this is threadsafe since it has not been activated yet.
self.inputComponent = inputComponent
self.outputComponent = outputComponent
def activate(self):
"""Activates the component on the backgrounded scheduler and permits IO."""
if self.alive:
return
self.inputComponent.activate() # threadsafe, see note 1
self.outputComponent.activate()
self.alive = True
def recv(self, boxname = "outbox"):
"""Performs a blocking read on the queue corresponding to the named outbox on the wrapped component.
raises AttributeError if the LikeFile is not alive."""
if self.alive:
return self.outQueues[boxname].get()
else: raise AttributeError, "shutdown was previously called, or we were never activated."
get = recv # alias for backwards compatibility.
def send(self, msg, boxname = "inbox"):
"""Places an object on a queue which will be directed to a named inbox on the wrapped component."""
if self.alive:
queue = self.inQueues[boxname]
queue.put_nowait(msg)
self.inputComponent.whatInbox.put_nowait(boxname)
else: raise AttributeError, "shutdown was previously called, or we were never activated."
put = send # alias for backwards compatibility
def shutdown(self):
"""Sends terminatory signals to the wrapped component, and shut down the componentWrapper.
will warn if the shutdown took too long to confirm in action."""
if self.alive:
self.send(Axon.Ipc.shutdown(), "control") # legacy support.
self.send(Axon.Ipc.producerFinished(), "control") # some components only honour this one
self.send(Axon.Ipc.shutdownMicroprocess(), "control") # should be last, this is what we honour
else:
raise AttributeError, "shutdown was previously called, or we were never activated."
self.inputComponent.isDead.wait(1)
if not self.inputComponent.isDead.isSet(): # we timed out instead of someone else setting the flag
warnings.warn("Timed out waiting on shutdown confirmation, may not be dead.")
self.alive = False
def __del__(self):
if self.alive:
self.shutdown()
if __name__ == "__main__":
    # Demo: run the scheduler in the background and drive an HTTP client
    # through the LikeFile interface.
    # NOTE(review): Thread.start() returns None, so "background" is always
    # None; the variable only documents intent.
    background = schedulerThread(slowmo=0.01).start()
    time.sleep(0.1)
    from Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
    import time  # redundant: time is already imported at module level
    p = LikeFile(SimpleHTTPClient())
    p.activate()
    p.send("http://google.com")
    p.send("http://slashdot.org")
    p.send("http://whatismyip.org")
    # recv() blocks until each response arrives, in request order.
    google = p.recv()
    slashdot = p.recv()
    whatismyip = p.recv()
    print "google is", len(google), "bytes long, and slashdot is", len(slashdot), "bytes long. Also, our IP address is:", whatismyip
nasimrahaman/tracky-mctrackface | tmtf/net.py | 1 | 4380 | import theano as th
import theano.tensor as T
import Antipasti.netkit as nk
import Antipasti.netarchs as na
import Antipasti.archkit as ak
import Antipasti.netools as ntl
import Antipasti.netrain as nt
import Antipasti.backend as A
__doc__ = """Model Zoo"""
# Define shortcuts (thin lambda wrappers around Antipasti layer factories)
# Convlayer with ELU
cl = lambda fmapsin, fmapsout, kersize: nk.convlayer(fmapsin=fmapsin, fmapsout=fmapsout, kersize=kersize,
                                                     activation=ntl.elu())
# Convlayer with ELU and 'valid' convolution mode (no padding)
clv = lambda fmapsin, fmapsout, kersize: nk.convlayer(fmapsin=fmapsin, fmapsout=fmapsout, kersize=kersize,
                                                      activation=ntl.elu(), convmode='valid')
# Convlayer without activation
cll = lambda fmapsin, fmapsout, kersize: nk.convlayer(fmapsin=fmapsin, fmapsout=fmapsout, kersize=kersize)
# Strided convlayer with ELU (with autopad)
scl = lambda fmapsin, fmapsout, kersize, padding=None: nk.convlayer(fmapsin=fmapsin, fmapsout=fmapsout,
                                                                    kersize=kersize,
                                                                    stride=[2, 2], activation=ntl.elu(),
                                                                    padding=padding)
# Strided 3x3 pool layer
spl = lambda: nk.poollayer(ds=[3, 3], stride=[2, 2], padding=[1, 1])
# Batch-norm layer
bn = lambda: nk.batchnormlayer(2, 0.9)
# Softmax
sml = lambda: nk.softmax(dim=2)
# Dropout layer
drl = lambda p=0.5: nk.noiselayer(noisetype='binomial', p=p)
def _build_simple(modelconfig=None):
    """Build the plain convolutional network used by `simple`.

    :type modelconfig: dict or None
    :param modelconfig: Optional model configuration.  The only recognised
                        key is 'numout' (number of output feature maps of
                        the final 1x1 convolution; defaults to 5).

    :return: The assembled network with its feedforward graph already built.
    """
    if modelconfig is None:
        numout = 5
    else:
        # dict.get is clearer and cheaper than "'numout' in modelconfig.keys()".
        numout = modelconfig.get('numout', 5)
    # Build
    network = spl() + scl(5, 32, [9, 9]) + drl() + scl(32, 64, [9, 9]) + spl() + cl(64, 128, [5, 5]) + \
              drl() + scl(128, 256, [5, 5]) + \
              cl(256, 512, [5, 5]) + spl() + cl(512, 512, [3, 3]) + drl() + scl(512, 512, [3, 3]) + \
              clv(512, 1024, [8, 8]) + drl() + cl(1024, 512, [1, 1]) + drl() + cl(512, 256, [1, 1]) + \
              drl() + cl(256, 128, [1, 1]) + \
              cl(128, 64, [1, 1]) + drl() + cl(64, 16, [1, 1]) + drl() + cll(16, numout, [1, 1])
    # Build graph
    network.feedforward()
    # Return
    return network
def simple(modelconfig=None):
    """
    Build a simple model.

    Returns a (network, targetnetwork) pair: the trainable network (with a
    compiled classifier and trainer) and a target network whose parameters
    are blended towards the network's via its baggage["updatetargetparams"].

    :type modelconfig: dict
    :param modelconfig: Model configuration.
    """
    # Build network
    network = _build_simple(modelconfig)
    # Build target network
    targetnetwork = _build_simple(modelconfig)
    # Compile inference function for network
    network.classifier = A.function(inputs=[network.x], outputs=network.y, allow_input_downcast=True)
    # Compile inference function for target network
    targetnetwork.classifier = A.function(inputs=[targetnetwork.x], outputs=targetnetwork.y, allow_input_downcast=True)
    # Compile trainer for network
    # Define the target as a vector (one scalar target per batch element)
    network.yt = T.vector('model-yt:{}'.format(id(network)))
    # Compute loss and cost
    # network.y.shape = (bs, numout, 1, 1). Compute mean along all axes.
    network.L = ((T.max(T.flatten(network.y, outdim=2), axis=1) - network.yt)**2).mean()
    # L2 regularizer with weight 0.0, i.e. currently a no-op kept as a hook.
    network.baggage["l2"] = nt.lp(network.params, [(2, 0.0)])
    network.C = network.L + network.baggage["l2"]
    # Compute gradients
    network.dC = T.grad(network.C, wrt=network.params, disconnected_inputs='warn')
    # Get updates
    network.getupdates(method='rmsprop', learningrate=0.0005, rho=0.9)
    # Compile trainer
    network.classifiertrainer = A.function(inputs=[network.x, network.yt], outputs={'C': network.C, 'L': network.L},
                                           updates=network.updates, allow_input_downcast=True)
    # Make update function for targetnetwork and add to its baggage
    def updatetargetparams(params, decay=0.9):
        # Exponential moving average: new = decay * current + (1-decay) * given.
        curparams, newparams = targetnetwork.params, params
        for curparam, newparam in zip(curparams, newparams):
            paramupdate = decay * curparam.get_value() + (1 - decay) * newparam.get_value()
            curparam.set_value(paramupdate)
    targetnetwork.baggage["updatetargetparams"] = updatetargetparams
    # Set target networks param
    # NOTE(review): after this assignment targetnetwork.params aliases
    # network.params, so updatetargetparams blends values into the shared
    # parameter list -- confirm this aliasing is intended.
    targetnetwork.params = network.params
    return network, targetnetwork
| mit |
collinjackson/mojo | build/win/reorder-imports.py | 103 | 1807 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os
import shutil
import subprocess
import sys
def reorder_imports(input_dir, output_dir, architecture):
  """Run swapimports.exe on the initial chrome.exe, and write to the output
  directory. Also copy over any related files that might be needed
  (pdbs, manifests etc.).

  Returns swapimport.exe's exit code (0 on success), so a failed swap is no
  longer silently reported as success.
  """
  input_image = os.path.join(input_dir, 'chrome.exe')
  output_image = os.path.join(output_dir, 'chrome.exe')
  # The '..' components are resolved by the OS when the path is used; the
  # backslashes match this script's Windows-only purpose.
  swap_exe = os.path.join(
    __file__,
    '..\\..\\..\\third_party\\syzygy\\binaries\\exe\\swapimport.exe')
  args = [swap_exe, '--input-image=%s' % input_image,
          '--output-image=%s' % output_image, '--overwrite', '--no-logo']
  if architecture == 'x64':
    args.append('--x64')
    args.append('chrome_elf.dll')
  # Bug fix: propagate the tool's exit code instead of always returning 0.
  ret = subprocess.call(args)
  # Copy sibling artifacts (pdbs, manifests, ...) next to the new image.
  for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
    shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
  return ret
def main(argv):
  """Parse command-line flags from *argv* and run the reordering step.

  Returns the exit status for sys.exit()."""
  usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
                    metavar='DIR')
  parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
                    metavar='DIR')
  parser.add_option('-a', '--arch', help='architecture of build (optional)',
                    default='ia32')
  # Bug fix: parse the argv that was passed in; the original called
  # parse_args() with no arguments, silently ignoring *argv* and reading
  # the global sys.argv instead.
  opts, args = parser.parse_args(argv)
  if not opts.input or not opts.output:
    # Also fixed the typo in this message ("and input" -> "an input").
    parser.error('Please provide an input and output directory')
  return reorder_imports(opts.input, opts.output, opts.arch)
# Script entry point: exit with the status returned by main().
if __name__ == "__main__":
  sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
noldor/CodeIgniter4 | user_guide_src/source/conf.py | 4 | 8558 | # -*- coding: utf-8 -*-
#
# CodeIgniter documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 28 07:24:38 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.ifconfig', 'sphinxcontrib.phpdomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CodeIgniter4'
copyright = u'2014 - 2017, British Columbia Institute of Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0.0-dev'
# The full version, including alpha/beta/rc tags.
release = '4.0.0-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :php:func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. php:function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
highlight_language = 'ci'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Specifying a few options; just a starting point & we can play with it.
html_theme_options = {
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/ci-icon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CodeIgniterdoc'
html_copy_source = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CodeIgniter.tex', u'CodeIgniter4 Documentation',
u'British Columbia Institute of Technology', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'codeigniter', u'CodeIgniter4 Documentation',
[u'British Columbia Institute of Technology'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'CodeIgniter4'
epub_author = u'British Columbia Institute of Technology'
epub_publisher = u'British Columbia Institute of Technology'
epub_copyright = u'2014 - 2017, British Columbia Institute of Technology'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| mit |
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/test/integration_tests/tests/test_dashboard_help_redirection.py | 12 | 1235 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
class TestDashboardHelp(helpers.TestCase):
    """Integration test for the dashboard's Help link."""
    def test_dashboard_help_redirection(self):
        """Verifies Help link redirects to the right URL."""
        self.home_pg.go_to_help_page()
        # The Help link opens a separate browser window; walk every window
        # handle, assert it shows the configured help URL, and close it.
        # NOTE(review): the assertion runs for *every* handle -- presumably
        # go_to_help_page leaves only help window(s) among the handles at
        # this point; confirm against the helpers.TestCase implementation.
        for handle in self.driver.window_handles:
            self.driver.switch_to_window(handle)
            self.assertEqual(self.conf.dashboard.help_url,
                             self.driver.current_url,
                             "help link did not redirect to the right URL")
            self.driver.close()
        # Re-attach the driver to whatever window remains open.
        for handle in self.driver.window_handles:
            self.driver.switch_to_window(handle)
| gpl-2.0 |
VitalPet/odoo | addons/mail/tests/__init__.py | 52 | 1280 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_mail_message, test_mail_features, test_mail_gateway, test_message_read, test_invite
# Test modules executed by the OpenERP test runner for the mail addon;
# kept as an ordered list so they run in this sequence.
checks = [
    test_mail_message,
    test_mail_features,
    test_mail_gateway,
    test_message_read,
    test_invite,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
keerts/home-assistant | homeassistant/components/notify/command_line.py | 12 | 1583 | """
Support for command line notification services.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.command_line/
"""
import logging
import subprocess
import voluptuous as vol
from homeassistant.const import (CONF_COMMAND, CONF_NAME)
from homeassistant.components.notify import (
BaseNotificationService, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
# Module-level logger for this notify platform.
_LOGGER = logging.getLogger(__name__)
# Extend the base notify platform schema: the shell 'command' is required,
# the friendly 'name' is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_COMMAND): cv.string,
    vol.Optional(CONF_NAME): cv.string,
})
def get_service(hass, config, discovery_info=None):
    """Get the Command Line notification service."""
    return CommandLineNotificationService(config[CONF_COMMAND])
class CommandLineNotificationService(BaseNotificationService):
    """Implement the notification service for the Command Line service."""

    def __init__(self, command):
        """Initialize the service."""
        self.command = command

    def send_message(self, message="", **kwargs):
        """Send a message to a command line."""
        try:
            # Pipe the message into the configured shell command's stdin;
            # communicate() waits for the process to finish.
            with subprocess.Popen(self.command, universal_newlines=True,
                                  stdin=subprocess.PIPE, shell=True) as process:
                process.communicate(input=message)
                if process.returncode != 0:
                    _LOGGER.error('Command failed: %s', self.command)
        except subprocess.SubprocessError:
            _LOGGER.error('Error trying to exec Command: %s', self.command)
| apache-2.0 |
Edraak/edx-ora2 | openassessment/xblock/test/test_save_files_descriptions.py | 3 | 2828 | # -*- coding: utf-8 -*-
"""
Test that the student can save a files descriptions.
"""
import json
import mock
from .base import XBlockHandlerTestCase, scenario
class SaveFilesDescriptionsTest(XBlockHandlerTestCase):
    """
    Group of tests to check ability to save files descriptions
    """
    @scenario('data/save_scenario.xml', user_id="Daniels")
    def test_save_files_descriptions_blank(self, xblock):
        """
        Checks ability to call handler without descriptions.
        """
        # An empty payload must be rejected with an explanatory message.
        resp = self.request(xblock, 'save_files_descriptions', json.dumps({}))
        self.assertIn('descriptions were not submitted', resp)
    @scenario('data/save_scenario.xml', user_id="Perleman")
    def test_save_files_descriptions(self, xblock):
        """
        Checks ability to call handler with descriptions and then saved texts should be available after xblock render.
        """
        # Save the response (non-ASCII descriptions exercise unicode handling)
        descriptions = [u"Ѕраѓтаиѕ! ГоиіБЂт, Щэ ↁіиэ іи Нэll!", u"Ѕраѓтаиѕ! ГоиіБЂт, Щэ ↁіиэ іи Нэll!"]
        payload = json.dumps({'descriptions': descriptions})
        resp = self.request(xblock, 'save_files_descriptions', payload, response_format="json")
        self.assertTrue(resp['success'])
        self.assertEqual(resp['msg'], u'')
        # Reload the submission UI; stub the download URL so rendering
        # does not hit real file storage.
        xblock._get_download_url = mock.MagicMock(side_effect=lambda i: "https://img-url/%d" % i)
        resp = self.request(xblock, 'render_submission', json.dumps({}))
        self.assertIn(descriptions[0], resp.decode('utf-8'))
        self.assertIn(descriptions[1], resp.decode('utf-8'))
    @scenario('data/save_scenario.xml', user_id="Valchek")
    def test_overwrite_files_descriptions(self, xblock):
        """
        Checks ability to overwrite existed files descriptions.
        """
        # First save one set of descriptions ...
        descriptions1 = [u"Ѕраѓтаиѕ! ГоиіБЂт, Щэ ↁіиэ іи Нэll!", u"Ѕраѓтаиѕ! ГоиіБЂт, Щэ ↁіиэ іи Нэll!"]
        payload = json.dumps({'descriptions': descriptions1})
        self.request(xblock, 'save_files_descriptions', payload, response_format="json")
        # ... then overwrite them with a second set.
        descriptions2 = [u"test1", u"test2"]
        payload = json.dumps({'descriptions': descriptions2})
        self.request(xblock, 'save_files_descriptions', payload, response_format="json")
        # Reload the submission UI: only the second set must be rendered.
        xblock._get_download_url = mock.MagicMock(side_effect=lambda i: "https://img-url/%d" % i)
        resp = self.request(xblock, 'render_submission', json.dumps({}))
        self.assertNotIn(descriptions1[0], resp.decode('utf-8'))
        self.assertNotIn(descriptions1[1], resp.decode('utf-8'))
        self.assertIn(descriptions2[0], resp.decode('utf-8'))
        self.assertIn(descriptions2[1], resp.decode('utf-8'))
| agpl-3.0 |
SNeuhausen/training_management | models/teaching_selection_wizard/teaching_selection_wizard.py | 1 | 3023 | # -*- coding: utf-8 -*-
from openerp.api import depends, multi
from openerp.fields import Many2one, Many2many, Boolean
from openerp.models import TransientModel
from openerp.addons.training_management.models.model_names import ModelNames
from openerp.addons.training_management.utils.action_utils import ActionUtils
from openerp.addons.training_management.utils.model_utils import ModelUtils
class TeachingSelectionWizard(TransientModel):
    """ A wizard for selecting a course, module and lecture. When clicking its proceed button, the grade wizard
        will be launched.
    """
    _name = ModelNames.TEACHING_SELECTION_WIZARD

    # Course in which the grade is given; supplied by the caller, read-only here.
    course_id = Many2one(
        comodel_name=ModelNames.TEACHING,
        readonly=True,
        string="Kurs",
        help="Kurs, in dem die Note vergeben wird."
    )
    # Module selection; the domain restricts choices to the computed
    # permitted modules (the [0][2] indexing unpacks the (6, 0, ids) command).
    module_id = Many2one(
        comodel_name=ModelNames.TEACHING,
        domain="[('id', 'in', permitted_module_ids and permitted_module_ids[0][2])]",
        string="Modul",
        help="Modul, in dem die Note vergeben wird."
    )
    lecture_id = Many2one(
        comodel_name=ModelNames.TEACHING,
        domain="[('id', 'in', permitted_lecture_ids and permitted_lecture_ids[0][2])]",
        string="Fachinhalt",
        help="Fachinhalt, in dem die Note vergeben wird."
    )
    # When enabled (default), only children of the selected course are offered.
    restrict_selection = Boolean(default=True, string=u"Auswahl einschränken")
    permitted_module_ids = Many2many(
        comodel_name=ModelNames.TEACHING,
        compute="_compute__permitted_module_ids",
        readonly=True,
    )
    permitted_lecture_ids = Many2many(
        comodel_name=ModelNames.TEACHING,
        compute="_compute__permitted_lecture_ids",
        readonly=True,
    )

    @depends("course_id", "restrict_selection")
    def _compute__permitted_module_ids(self):
        """Compute the modules that may be chosen in ``module_id``."""
        teaching_model = self.env[ModelNames.TEACHING]
        # Compute methods are invoked on a recordset; iterate so the method
        # is also correct when called on more than one wizard record at once
        # (assigning on a multi-record ``self`` would raise a singleton error).
        for record in self:
            if record.restrict_selection:
                record.permitted_module_ids = record.course_id.module_children_ids
            else:
                record.permitted_module_ids = teaching_model.get_all_modules()

    @depends("course_id", "restrict_selection")
    def _compute__permitted_lecture_ids(self):
        """Compute the lectures that may be chosen in ``lecture_id``."""
        teaching_model = self.env[ModelNames.TEACHING]
        # Same per-record pattern as _compute__permitted_module_ids, kept
        # structurally identical for consistency.
        for record in self:
            if record.restrict_selection:
                record.permitted_lecture_ids = record.course_id.lecture_children_ids
            else:
                record.permitted_lecture_ids = teaching_model.get_all_lectures()

    @multi
    def action_button__proceed(self):
        """Open the grade wizard form pre-filled with the current selection."""
        wizard_view = self.env.ref("training_management.view_form__grade_wizard__default")
        return ActionUtils.create_action_for_view(
            model_name=ModelNames.GRADE_WIZARD,
            view=wizard_view,
            view_mode="form",
            title="Noten für Kursteilnehmer eintragen",
            context={
                # Pre-seed the grade wizard's defaults from this selection.
                "default_course_id": ModelUtils.get_id(self.course_id),
                "default_module_id": ModelUtils.get_id(self.module_id),
                "default_lecture_id": ModelUtils.get_id(self.lecture_id),
            }
        )
| gpl-3.0 |
TeamBasedLearning/Service | pgtbl/accounts/tests/test_model.py | 1 | 2437 | from rest_framework.test import APITestCase
from accounts.models import User
class UserTestCase(APITestCase):
    """
    Unit test case to test user features.
    """

    def setUp(self):
        """
        This method will run before any test.
        """
        # One superuser plus three regular accounts with varying profile data.
        self.superuser = User.objects.create_superuser(
            name='Victor Arnaud',
            email='victorhad@gmail.com',
            password='victorhad123456'
        )
        self.user1 = User.objects.create(
            name='Pedro',
            email='pedro@gmail.com',
            password='pedro123456'
        )
        self.user2 = User.objects.create(
            name='Maria de Fatima',
            email='maria@gmail.com',
            password='maria123456'
        )
        # The only teacher account, with the optional profile fields filled in.
        self.user3 = User.objects.create(
            name='Jose da Silva Pereira',
            email='jose@gmail.com',
            is_teacher=True,
            password='jose123456',
            institution='UnB',
            course='Software Engineering',
            photo='img/photo01.png'
        )

    def tearDown(self):
        """
        This method will run after any test.
        """
        self.superuser.delete()
        self.user1.delete()
        self.user2.delete()
        self.user3.delete()

    def test_full_name(self):
        """
        Test to get the full name of user
        """
        # assertEqual is used throughout: assertEquals is a deprecated alias.
        self.assertEqual(self.superuser.get_full_name(), self.superuser.name)
        self.assertEqual(self.user1.get_full_name(), self.user1.name)
        self.assertEqual(self.user2.get_full_name(), self.user2.name)
        self.assertEqual(self.user3.get_full_name(), self.user3.name)

    def test_short_name(self):
        """
        Test to get the short name of user, the first name with the last name
        """
        # Single-word names are returned unchanged; multi-word names collapse
        # to "<first> <last>".
        self.assertEqual(self.superuser.get_short_name(), 'Victor Arnaud')
        self.assertEqual(self.user1.get_short_name(), self.user1.name)
        self.assertEqual(self.user2.get_short_name(), 'Maria Fatima')
        self.assertEqual(self.user3.get_short_name(), 'Jose Pereira')

    def test_is_teacher_or_students(self):
        """
        Test to verify if user is a teacher or a student
        """
        self.assertEqual(self.superuser.is_teacher, False)
        self.assertEqual(self.user1.is_teacher, False)
        self.assertEqual(self.user2.is_teacher, False)
        self.assertEqual(self.user3.is_teacher, True)
| gpl-3.0 |
nwinter/bantling | src/lib/werkzeug/debug/__init__.py | 81 | 7867 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
    """Adapter that mimics a traceback frame so the standalone console
    can reuse the per-frame console machinery.
    """

    def __init__(self, namespace):
        # the standalone console is always registered under frame id 0
        self.id = 0
        self.console = Console(namespace)
class DebuggedApplication(object):
    """Enables debugging support for a given application::

        from werkzeug.debug import DebuggedApplication
        from myapp import app
        app = DebuggedApplication(app, evalex=True)

    The `evalex` keyword argument allows evaluating expressions in a
    traceback's frame context.

    .. versionadded:: 0.7
       The `lodgeit_url` parameter was added.

    :param app: the WSGI application to run debugged.
    :param evalex: enable exception evaluation feature (interactive
                   debugging).  This requires a non-forking server.
    :param request_key: The key that points to the request object in ths
                        environment.  This parameter is ignored in current
                        versions.
    :param console_path: the URL for a general purpose console.
    :param console_init_func: the function that is executed before starting
                              the general purpose console.  The return value
                              is used as initial namespace.
    :param show_hidden_frames: by default hidden traceback frames are skipped.
                               You can show them by setting this parameter
                               to `True`.
    :param lodgeit_url: the base URL of the LodgeIt instance to use for
                        pasting tracebacks.
    """

    # this class is public
    __module__ = 'werkzeug'

    def __init__(self, app, evalex=False, request_key='werkzeug.request',
                 console_path='/console', console_init_func=None,
                 show_hidden_frames=False,
                 lodgeit_url='http://paste.pocoo.org/'):
        if not console_init_func:
            console_init_func = dict
        self.app = app
        self.evalex = evalex
        # frame id -> traceback frame (or _ConsoleFrame for the shell)
        self.frames = {}
        # traceback id -> captured traceback object
        self.tracebacks = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        self.lodgeit_url = lodgeit_url
        # random per-process token that debugger requests must echo back
        self.secret = gen_salt(20)

    def debug_application(self, environ, start_response):
        """Run the application and conserve the traceback frames."""
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            for item in app_iter:
                yield item
            if hasattr(app_iter, 'close'):
                app_iter.close()
        except Exception:
            if hasattr(app_iter, 'close'):
                app_iter.close()
            traceback = get_current_traceback(skip=1, show_hidden_frames=
                                              self.show_hidden_frames,
                                              ignore_system_exceptions=True)
            # remember frames/tracebacks so debugger requests can look them up
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback

            try:
                start_response('500 INTERNAL SERVER ERROR', [
                    ('Content-Type', 'text/html; charset=utf-8')
                ])
            except Exception:
                # if we end up here there has been output but an error
                # occurred.  in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ['wsgi.errors'].write(
                    'Debugging middleware caught exception in streamed '
                    'response at a point where response headers were already '
                    'sent.\n')
            else:
                yield traceback.render_full(evalex=self.evalex,
                                            lodgeit_url=self.lodgeit_url,
                                            secret=self.secret) \
                    .encode('utf-8', 'replace')
            traceback.log(environ['wsgi.errors'])

    def execute_command(self, request, command, frame):
        """Execute a command in a console."""
        return Response(frame.console.eval(command), mimetype='text/html')

    def display_console(self, request):
        """Display a standalone shell."""
        if 0 not in self.frames:
            self.frames[0] = _ConsoleFrame(self.console_init_func())
        return Response(render_console_html(secret=self.secret),
                        mimetype='text/html')

    def paste_traceback(self, request, traceback):
        """Paste the traceback and return a JSON response."""
        paste_id = traceback.paste(self.lodgeit_url)
        return Response('{"url": "%sshow/%s/", "id": "%s"}'
                        % (self.lodgeit_url, paste_id, paste_id),
                        mimetype='application/json')

    def get_source(self, request, frame):
        """Render the source viewer."""
        return Response(frame.render_source(), mimetype='text/html')

    def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        # basename() prevents path traversal out of the shared directory
        filename = join(dirname(__file__), 'shared', basename(filename))
        if isfile(filename):
            mimetype = mimetypes.guess_type(filename)[0] \
                or 'application/octet-stream'
            # ``file()`` does not exist on Python 3; ``open`` works on both
            # and the ``with`` block guarantees the handle is closed.
            with open(filename, 'rb') as f:
                return Response(f.read(), mimetype=mimetype)
        return Response('Not Found', status=404)

    def __call__(self, environ, start_response):
        """Dispatch the requests."""
        # important: don't ever access a function here that reads the incoming
        # form data!  Otherwise the application won't have access to that data
        # any more!
        request = Request(environ)
        response = self.debug_application
        if request.args.get('__debugger__') == 'yes':
            cmd = request.args.get('cmd')
            arg = request.args.get('f')
            secret = request.args.get('s')
            traceback = self.tracebacks.get(request.args.get('tb', type=int))
            frame = self.frames.get(request.args.get('frm', type=int))
            # every privileged command requires the per-process secret;
            # only the static-resource endpoint is unauthenticated.
            if cmd == 'resource' and arg:
                response = self.get_resource(request, arg)
            elif cmd == 'paste' and traceback is not None and \
                 secret == self.secret:
                response = self.paste_traceback(request, traceback)
            elif cmd == 'source' and frame and self.secret == secret:
                response = self.get_source(request, frame)
            elif self.evalex and cmd is not None and frame is not None and \
                 self.secret == secret:
                response = self.execute_command(request, cmd, frame)
        elif self.evalex and self.console_path is not None and \
             request.path == self.console_path:
            response = self.display_console(request)
        return response(environ, start_response)
| mit |
l8orre/nxtBridge | werkzeug/http.py | 317 | 33404 | # -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
# NOTE: latin1 is strictly speaking incorrect for real-world cookie values,
# but it is kept here for backwards compatibility.
_cookie_charset = 'latin1'
# matches one Accept-* item plus its optional ``;q=`` quality parameter
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
# characters allowed unquoted in an HTTP token (RFC 2616 section 2.2)
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                         '^_`abcdefghijklmnopqrstuvwxyz|~')
# one (optionally weak, ``W/`` prefixed) entity tag in an ETag header
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
# characters that force a header parameter value to be quoted when dumped
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
# a quoted-string with backslash escapes, as used in header parameters
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
# one ``;key=value`` piece of an options header (key and value may be quoted)
_option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' %
                                     (_quoted_string_re, _quoted_string_re))
# entity headers as listed by RFC 2616 section 7.1 (lowercase for lookup)
_entity_headers = frozenset([
    'allow', 'content-encoding', 'content-language', 'content-length',
    'content-location', 'content-md5', 'content-range', 'content-type',
    'expires', 'last-modified'
])
# hop-by-hop headers as listed by RFC 2616 section 13.5.1 (lowercase)
_hop_by_hop_headers = frozenset([
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
    'upgrade'
])
# mapping of HTTP status codes to their default reason phrases
HTTP_STATUS_CODES = {
    100:    'Continue',
    101:    'Switching Protocols',
    102:    'Processing',
    200:    'OK',
    201:    'Created',
    202:    'Accepted',
    203:    'Non Authoritative Information',
    204:    'No Content',
    205:    'Reset Content',
    206:    'Partial Content',
    207:    'Multi Status',
    226:    'IM Used',              # see RFC 3229
    300:    'Multiple Choices',
    301:    'Moved Permanently',
    302:    'Found',
    303:    'See Other',
    304:    'Not Modified',
    305:    'Use Proxy',
    307:    'Temporary Redirect',
    400:    'Bad Request',
    401:    'Unauthorized',
    402:    'Payment Required',     # unused
    403:    'Forbidden',
    404:    'Not Found',
    405:    'Method Not Allowed',
    406:    'Not Acceptable',
    407:    'Proxy Authentication Required',
    408:    'Request Timeout',
    409:    'Conflict',
    410:    'Gone',
    411:    'Length Required',
    412:    'Precondition Failed',
    413:    'Request Entity Too Large',
    414:    'Request URI Too Long',
    415:    'Unsupported Media Type',
    416:    'Requested Range Not Satisfiable',
    417:    'Expectation Failed',
    418:    'I\'m a teapot',        # see RFC 2324
    422:    'Unprocessable Entity',
    423:    'Locked',
    424:    'Failed Dependency',
    426:    'Upgrade Required',
    428:    'Precondition Required', # see RFC 6585
    429:    'Too Many Requests',
    431:    'Request Header Fields Too Large',
    449:    'Retry With',           # proprietary MS extension
    500:    'Internal Server Error',
    501:    'Not Implemented',
    502:    'Bad Gateway',
    503:    'Service Unavailable',
    504:    'Gateway Timeout',
    505:    'HTTP Version Not Supported',
    507:    'Insufficient Storage',
    510:    'Not Extended'
}
def wsgi_to_bytes(data):
    """coerce wsgi unicode represented bytes to real ones

    Byte strings pass through untouched; text is encoded as latin1,
    the byte-transparent encoding WSGI mandates.
    """
    # XXX: utf8 fallback?
    return data if isinstance(data, bytes) else data.encode('latin1')
def bytes_to_wsgi(data):
    """Convert raw bytes into the native-string form WSGI expects."""
    assert isinstance(data, bytes), 'data must be bytes'
    # On Python 2 ``bytes is str`` so the value is already native;
    # on Python 3 decode as latin1 (the WSGI transfer encoding).
    if not isinstance(data, str):
        return data.decode('latin1')
    return data
def quote_header_value(value, extra_chars='', allow_token=True):
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
                        unchanged.
    """
    if isinstance(value, bytes):
        value = bytes_to_wsgi(value)
    value = str(value)
    # pure tokens may be emitted without surrounding quotes
    if allow_token and set(value).issubset(_token_chars | set(extra_chars)):
        return value
    escaped = value.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    """
    if not (value and value[0] == value[-1] == '"'):
        # not a quoted-string: return unchanged
        return value
    # this is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well.  IE for example is
    # uploading files with "C:\foo\bar.txt" as filename
    inner = value[1:-1]
    # Leave UNC filenames (``\\server\share``) untouched: collapsing the
    # escape sequences would eat the leading double backslash.  See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    segments = []
    if header is not None:
        segments.append(header)
    for key, value in iteritems(options):
        if value is None:
            # a valueless option renders as a bare key without '='
            segments.append(key)
        else:
            # values are quoted only when they are not plain tokens
            segments.append('%s=%s' % (key, quote_header_value(value)))
    return '; '.join(segments)
def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again.  This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`.  This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.

    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    """
    if isinstance(iterable, dict):
        # dict input: render ``key=value`` pairs (or bare keys for None)
        items = []
        for key, value in iteritems(iterable):
            if value is None:
                items.append(key)
            else:
                items.append('%s=%s' % (
                    key,
                    quote_header_value(value, allow_token=allow_token)
                ))
    else:
        # plain iterable: each element becomes one (possibly quoted) item
        items = [quote_header_value(x, allow_token=allow_token)
                 for x in iterable]
    return ', '.join(items)
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    result = []
    # the stdlib parser keeps the surrounding quotes; strip them here
    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result
def parse_dict_header(value, cls=dict):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict (or any other mapping object created from
    the type with a dict like interface provided by the `cls` arugment):

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    .. versionchanged:: 0.9
       Added support for `cls` argument.

    :param value: a string with a dict header.
    :param cls: callable to use for storage of parsed results.
    :return: an instance of `cls`
    """
    result = cls()
    if not isinstance(value, text_type):
        #XXX: validate
        value = bytes_to_wsgi(value)
    for item in _parse_list_header(value):
        if '=' not in item:
            # valueless parameter: store with None as the value
            result[item] = None
            continue
        name, value = item.split('=', 1)
        if value[:1] == value[-1:] == '"':
            # strip the quotes of a quoted-string value
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result
def parse_options_header(value):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('text/html; charset=utf8')
    ('text/html', {'charset': 'utf8'})

    This should not be used to parse ``Cache-Control`` like headers that use
    a slightly different format.  For these headers use the
    :func:`parse_dict_header` function.

    .. versionadded:: 0.5

    :param value: the header to parse.
    :return: (str, options)
    """
    def _tokenize(string):
        # yields (key, value) pairs; value is None for valueless options
        for match in _option_header_piece_re.finditer(string):
            key, value = match.groups()
            key = unquote_header_value(key)
            if value is not None:
                # filename values get the special UNC-preserving unquoting
                value = unquote_header_value(value, key == 'filename')
            yield key, value

    if not value:
        return '', {}

    # prefixing ';' makes the main content type parse as the first piece
    parts = _tokenize(';' + value)
    name = next(parts)[0]
    extra = dict(parts)
    return name, extra
def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header.  This does not implement a complete
    valid algorithm but one that supports at least value and quality
    extraction.

    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
    tuples sorted by the quality with some additional accessor methods).

    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.

    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                         :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = Accept

    if not value:
        return cls(None)

    result = []
    for match in _accept_re.finditer(value):
        quality = match.group(2)
        if not quality:
            # a missing ``;q=`` parameter means full quality
            quality = 1
        else:
            # clamp the parsed quality into the valid [0, 1] range
            quality = max(min(float(quality), 1), 0)
        result.append((match.group(1), quality))
    return cls(result)
def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header.  The RFC differs between response and
    request cache control, this method does not.  It's your responsibility
    to not use the wrong control statements.

    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = RequestCacheControl
    if not value:
        # an absent header still yields a (empty) cache control object
        return cls(None, on_update)
    return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:

    >>> hs = parse_set_header('token, "quoted value"')

    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:

    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])

    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    if not value:
        # missing header: empty set that still notifies on mutation
        return HeaderSet(None, on_update)
    return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.  The return value is either `None` if the header was invalid or
    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
    object.

    :param value: the authorization header to parse.
    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
    """
    if not value:
        return
    value = wsgi_to_bytes(value)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        # header did not contain "<scheme> <credentials>"
        return
    if auth_type == b'basic':
        try:
            username, password = base64.b64decode(auth_info).split(b':', 1)
        except Exception:
            # malformed base64 or missing colon separator: treat as invalid
            return
        return Authorization('basic', {'username': bytes_to_wsgi(username),
                                       'password': bytes_to_wsgi(password)})
    elif auth_type == b'digest':
        auth_map = parse_dict_header(auth_info)
        # all of these parameters are required for a valid digest header
        for key in 'username', 'realm', 'nonce', 'uri', 'response':
            if key not in auth_map:
                return
        if 'qop' in auth_map:
            # the qop extension additionally requires nonce counting values
            if not auth_map.get('nc') or not auth_map.get('cnonce'):
                return
        return Authorization('digest', auth_map)
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a
    :class:`~werkzeug.datastructures.WWWAuthenticate` object.

    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    """
    if not value:
        return WWWAuthenticate(on_update=on_update)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except (ValueError, AttributeError):
        # a scheme without parameters (e.g. just "Negotiate")
        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
    return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
                           on_update)
def parse_if_range_header(value):
    """Parses an if-range header which can be an etag or a date.  Returns
    a :class:`~werkzeug.datastructures.IfRange` object.

    .. versionadded:: 0.7
    """
    if not value:
        return IfRange()
    # try to interpret the value as an HTTP date first
    date = parse_date(value)
    if date is not None:
        return IfRange(date=date)
    # drop weakness information
    return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
    object.  If the header is missing or malformed `None` is returned.
    `ranges` is a list of ``(start, stop)`` tuples where the ranges are
    non-inclusive.

    .. versionadded:: 0.7
    """
    if not value or '=' not in value:
        return None

    ranges = []
    # tracks the previous range's end so overlapping/descending ranges are
    # rejected; -1 marks "a suffix range was seen" (nothing may follow it)
    last_end = 0
    units, rng = value.split('=', 1)
    units = units.strip().lower()

    for item in rng.split(','):
        item = item.strip()
        if '-' not in item:
            return None
        if item.startswith('-'):
            # suffix range like ``-500`` (the last 500 bytes)
            if last_end < 0:
                return None
            begin = int(item)
            end = None
            last_end = -1
        elif '-' in item:
            # always true here thanks to the guard above; kept for clarity
            begin, end = item.split('-', 1)
            begin = int(begin)
            if begin < last_end or last_end < 0:
                return None
            if end:
                # stop is exclusive in the returned tuples, hence the +1
                end = int(end) + 1
                if begin >= end:
                    return None
            else:
                # open-ended range like ``500-``
                end = None
            # NOTE(review): end may be None here; a subsequent range would
            # compare int < None above — presumably relies on Python 2
            # ordering semantics, verify before running on Python 3.
            last_end = end
        ranges.append((begin, end))

    return Range(units, ranges)
def parse_content_range_header(value, on_update=None):
    """Parses a range header into a
    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
    parsing is not possible.

    .. versionadded:: 0.7

    :param value: a content range header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.ContentRange`
                      object is changed.
    """
    if value is None:
        return None
    try:
        units, rangedef = (value or '').strip().split(None, 1)
    except ValueError:
        return None

    if '/' not in rangedef:
        return None
    rng, length = rangedef.split('/', 1)
    if length == '*':
        # total length unknown
        length = None
    elif length.isdigit():
        length = int(length)
    else:
        return None

    if rng == '*':
        # unsatisfied range: only the total length is known
        return ContentRange(units, None, None, length, on_update=on_update)
    elif '-' not in rng:
        return None

    start, stop = rng.split('-', 1)
    try:
        start = int(start)
        # header carries an inclusive end; internal representation is exclusive
        stop = int(stop) + 1
    except ValueError:
        return None

    if is_byte_range_valid(start, stop, length):
        return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
    """Quote an etag.

    :param etag: the etag to quote.
    :param weak: set to `True` to tag it "weak".
    """
    # embedded quotes cannot be represented in a quoted etag
    if '"' in etag:
        raise ValueError('invalid etag')
    quoted = '"%s"' % etag
    return 'w/' + quoted if weak else quoted
def unquote_etag(etag):
    """Unquote a single etag:

    >>> unquote_etag('w/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None
    tag = etag.strip()
    # a ``w/`` or ``W/`` prefix marks a weak validator
    weak = tag[:2] in ('w/', 'W/')
    if weak:
        tag = tag[2:]
    if tag[:1] == tag[-1:] == '"':
        tag = tag[1:-1]
    return tag, weak
def parse_etags(value):
    """Parse an etag header.

    :param value: the tag header to parse
    :return: an :class:`~werkzeug.datastructures.ETags` object.
    """
    if not value:
        return ETags()
    strong = []
    weak = []
    end = len(value)
    pos = 0
    # scan the header tag by tag with the module-level etag regex
    while pos < end:
        match = _etag_re.match(value, pos)
        if match is None:
            break
        is_weak, quoted, raw = match.groups()
        if raw == '*':
            # ``*`` matches anything; no need to look further
            return ETags(star_tag=True)
        elif quoted:
            raw = quoted
        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)
        pos = match.end()
    return ETags(strong, weak)
def generate_etag(data):
    """Generate an etag for some data."""
    # the hex-encoded MD5 digest is short and deterministic across runs
    digest = md5(data)
    return digest.hexdigest()
def parse_date(value):
    """Parse one of the following date formats into a datetime object:

    .. sourcecode:: text

        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

    If parsing fails the return value is `None`.

    :param value: a string with a supported date format.
    :return: a :class:`datetime.datetime` object.
    """
    if not value:
        return None
    t = parsedate_tz(value.strip())
    if t is None:
        return None
    # unfortunately that function does not tell us if two digit
    # years were part of the string, or if they were prefixed
    # with two zeroes.  So what we do is to assume that 69-99
    # refer to 1900, and everything below to 2000
    year = t[0]
    if 0 <= year <= 68:
        year += 2000
    elif 69 <= year <= 99:
        year += 1900
    try:
        dt = datetime(year, *t[1:7])
        # normalize to UTC by removing the parsed timezone offset
        return dt - timedelta(seconds=t[-1] or 0)
    except (ValueError, OverflowError):
        return None
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def cookie_date(expires=None):
    """Formats the time to ensure compatibility with Netscape's cookie
    standard.

    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.

    :param expires: If provided that date is used, otherwise the current.
    """
    # the Netscape cookie format separates date parts with '-'
    return _dump_date(expires, '-')
def http_date(timestamp=None):
    """Formats the time to match the RFC1123 date format.

    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.

    :param timestamp: If provided that date is used, otherwise the current.
    """
    # RFC 1123 dates separate the parts with spaces
    return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :return: `True` if the resource was modified, otherwise `False`.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError('both data and etag given')
    # conditional requests only apply to safe methods
    if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
        return False

    unmodified = False
    if isinstance(last_modified, string_types):
        last_modified = parse_date(last_modified)

    # ensure that microsecond is zero because the HTTP spec does not transmit
    # that either and we might have some false positives.  See issue #39
    if last_modified is not None:
        last_modified = last_modified.replace(microsecond=0)

    modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True
    if etag:
        # If-None-Match takes precedence when an etag is available
        if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
        if if_none_match:
            unmodified = if_none_match.contains_raw(etag)

    return not unmodified
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
    """Strip all entity headers from a list or :class:`Headers` object,
    in-place.

    ``Expires`` and ``Content-Location`` survive by default because
    :rfc:`2616` section 10.3.5 specifies some entity headers that
    should be sent.

    .. versionchanged:: 0.5
       added `allowed` parameter.

    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    keep = set(name.lower() for name in allowed)
    headers[:] = [
        (key, value) for key, value in headers
        if not is_entity_header(key) or key.lower() in keep
    ]
def remove_hop_by_hop_headers(headers):
    """Strip all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object, in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    headers[:] = [
        item for item in headers if not is_hop_by_hop_header(item[0])
    ]
def is_entity_header(header):
    """Check whether *header* names an entity header.

    .. versionadded:: 0.5

    :param header: the header name to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
    """Check whether *header* names an HTTP/1.1 "Hop-by-Hop" header.

    .. versionadded:: 0.5

    :param header: the header name to test.
    :return: `True` if it's a hop-by-hop header, `False` otherwise.
    """
    # Fix: the docstring previously claimed this returns True for an
    # "entity header" -- a copy-paste from is_entity_header above.
    return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
    """Parse a cookie.  Either from a string or WSGI environ.

    Per default encoding errors are replaced (the `errors` default is
    ``'replace'``).  If you want a different behavior you can set
    `errors` to ``'ignore'`` or ``'strict'``.  In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       This function now returns a :class:`TypeConversionDict` instead of a
       regular dict.  The `cls` parameter was added.

    :param header: the header to be used to parse the cookie.  Alternatively
                   this can be a WSGI environment.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`TypeConversionDict` is
                used.
    """
    # A WSGI environ dict carries the raw cookie header as 'HTTP_COOKIE'.
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    elif header is None:
        header = ''
    # If the value is an unicode string it's mangled through latin1. This
    # is done because on PEP 3333 on Python 3 all headers are assumed latin1
    # which however is incorrect for cookies, which are sent in page encoding.
    # As a result we recover the raw bytes here and decode each key/value
    # pair below with the charset the caller actually expects.
    if isinstance(header, text_type):
        header = header.encode('latin1', 'replace')
    if cls is None:
        cls = TypeConversionDict
    def _parse_pairs():
        # Decode every raw pair with the requested charset before handing
        # it to the result dict class.
        for key, val in _cookie_parse_impl(header):
            key = to_unicode(key, charset, errors, allow_none_charset=True)
            val = to_unicode(val, charset, errors, allow_none_charset=True)
            yield try_coerce_native(key), val
    return cls(_parse_pairs())
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix.
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string. In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee. If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters. This is technically against the specification but
    happens in the wild. It's strongly recommended to not use
    non-ASCII values for the keys.

    :param key: the name of the cookie.
    :param value: the value of the cookie.
    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session. Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie. This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    # Everything is serialized as bytes first; on Python 3 the joined
    # result is decoded back through latin1 at the very end.
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)
    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        # Normalize a timedelta to whole seconds.
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        # No explicit expiry given: derive one from max_age.
        expires = to_bytes(cookie_date(time() + max_age))
    buf = [key + b'=' + _cookie_quote(value)]
    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted. Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False)):
        # q is None marks a boolean attribute emitted without a value;
        # otherwise q says whether the value needs cookie-quoting.
        if q is None:
            if v:
                buf.append(k)
            continue
        if v is None:
            continue
        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))
    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')
    return rv
def is_byte_range_valid(start, stop, length):
    """Tell whether a byte content range is valid for a content length.

    ``start`` and ``stop`` must either both be `None` (no range at all)
    or both be set with ``start < stop``.  When a ``length`` is known
    the range additionally has to begin inside the content.

    .. versionadded:: 0.7
    """
    if (start is None) != (stop is None):
        # Exactly one boundary given: malformed range.
        return False
    if start is None:
        # No range requested: valid unless the length itself is negative.
        return length is None or length >= 0
    if length is None:
        return 0 <= start < stop
    if start >= stop:
        return False
    return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
LanguageAccept, Headers
from werkzeug.urls import iri_to_uri
| mit |
jeremiah-c-leary/vhdl-style-guide | vsg/rules/generate/rule_400.py | 1 | 1308 |
from vsg.rules import align_tokens_in_region_between_tokens_when_between_tokens_unless_between_tokens
from vsg import token
# Identifier tokens of the declarations whose names must line up inside
# the generate declarative region.
lAlign = [
    token.full_type_declaration.identifier,
    token.incomplete_type_declaration.identifier,
    token.file_declaration.identifier,
    token.constant_declaration.identifier,
    token.signal_declaration.identifier,
    token.subtype_declaration.identifier,
    token.variable_declaration.identifier,
]
# The declarative region runs from the *generate* keyword up to the
# *begin* keyword of the generate statement body.
oStartToken = token.for_generate_statement.generate_keyword
oEndToken = token.generate_statement_body.begin_keyword
# Only apply inside a for-generate statement ...
lBetweenTokens = [
    [token.for_generate_statement.for_keyword, token.for_generate_statement.end_keyword],
]
# ... but never inside a subprogram body declared there.
lUnless = [
    [token.subprogram_body.is_keyword, token.subprogram_body.begin_keyword],
]
class rule_400(align_tokens_in_region_between_tokens_when_between_tokens_unless_between_tokens):
    '''
    Checks the alignment of declaration identifiers in the generate
    declarative region of a for-generate statement.
    '''
    def __init__(self):
        align_tokens_in_region_between_tokens_when_between_tokens_unless_between_tokens.__init__(self, 'generate', '400', lAlign, oStartToken, oEndToken, lBetweenTokens, lUnless)
        # Fix: the user-facing message previously read 'Align identifer.'
        self.solution = 'Align identifier.'
| gpl-3.0 |
shitolepriya/test-erp | erpnext/hr/doctype/employee/test_employee.py | 59 | 1178 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import unittest
import frappe.utils
test_records = frappe.get_test_records('Employee')
class TestEmployee(unittest.TestCase):
	def test_birthday_reminders(self):
		"""Birthday reminder emails are queued for employees born today."""
		# Take any existing test employee and force today's month/day as
		# the date of birth so the reminder query matches.
		employee = frappe.get_doc("Employee", frappe.db.sql_list("select name from tabEmployee limit 1")[0])
		employee.date_of_birth = "1990" + frappe.utils.nowdate()[4:]
		employee.company_email = "test@example.com"
		employee.save()
		from erpnext.hr.doctype.employee.employee import get_employees_who_are_born_today, send_birthday_reminders
		self.assertTrue(employee.name in [e.name for e in get_employees_who_are_born_today()])
		# Start from an empty outbox so the assertion below only sees the
		# reminder generated by this test run.
		frappe.db.sql("delete from `tabBulk Email`")
		hr_settings = frappe.get_doc("HR Settings", "HR Settings")
		hr_settings.stop_birthday_reminders = 0
		hr_settings.save()
		send_birthday_reminders()
		bulk_mails = frappe.db.sql("""select * from `tabBulk Email`""", as_dict=True)
		self.assertTrue("Subject: Birthday Reminder for {0}".format(employee.employee_name) \
			in bulk_mails[0].message)
| agpl-3.0 |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/python/lib/python3.3/stringprep.py | 205 | 12917 | # This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
    """Return True if *code* is unassigned in Unicode 3.2 (RFC 3454 A.1)."""
    cp = ord(code)
    # Noncharacter code points are excluded from the unassigned table.
    noncharacter = 0xFDD0 <= cp < 0xFDF0 or (cp & 0xFFFF) in (0xFFFE, 0xFFFF)
    return unicodedata.category(code) == 'Cn' and not noncharacter
# RFC 3454 table B.1: characters commonly mapped to nothing.
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279])
b1_set.update(range(65024, 65040))
def in_table_b1(code):
    """Return True if *code* is commonly mapped to nothing (RFC 3454 B.1)."""
    return ord(code) in b1_set
b3_exceptions = {
0xb5:'\u03bc', 0xdf:'ss', 0x130:'i\u0307', 0x149:'\u02bcn',
0x17f:'s', 0x1f0:'j\u030c', 0x345:'\u03b9', 0x37a:' \u03b9',
0x390:'\u03b9\u0308\u0301', 0x3b0:'\u03c5\u0308\u0301', 0x3c2:'\u03c3', 0x3d0:'\u03b2',
0x3d1:'\u03b8', 0x3d2:'\u03c5', 0x3d3:'\u03cd', 0x3d4:'\u03cb',
0x3d5:'\u03c6', 0x3d6:'\u03c0', 0x3f0:'\u03ba', 0x3f1:'\u03c1',
0x3f2:'\u03c3', 0x3f5:'\u03b5', 0x587:'\u0565\u0582', 0x1e96:'h\u0331',
0x1e97:'t\u0308', 0x1e98:'w\u030a', 0x1e99:'y\u030a', 0x1e9a:'a\u02be',
0x1e9b:'\u1e61', 0x1f50:'\u03c5\u0313', 0x1f52:'\u03c5\u0313\u0300', 0x1f54:'\u03c5\u0313\u0301',
0x1f56:'\u03c5\u0313\u0342', 0x1f80:'\u1f00\u03b9', 0x1f81:'\u1f01\u03b9', 0x1f82:'\u1f02\u03b9',
0x1f83:'\u1f03\u03b9', 0x1f84:'\u1f04\u03b9', 0x1f85:'\u1f05\u03b9', 0x1f86:'\u1f06\u03b9',
0x1f87:'\u1f07\u03b9', 0x1f88:'\u1f00\u03b9', 0x1f89:'\u1f01\u03b9', 0x1f8a:'\u1f02\u03b9',
0x1f8b:'\u1f03\u03b9', 0x1f8c:'\u1f04\u03b9', 0x1f8d:'\u1f05\u03b9', 0x1f8e:'\u1f06\u03b9',
0x1f8f:'\u1f07\u03b9', 0x1f90:'\u1f20\u03b9', 0x1f91:'\u1f21\u03b9', 0x1f92:'\u1f22\u03b9',
0x1f93:'\u1f23\u03b9', 0x1f94:'\u1f24\u03b9', 0x1f95:'\u1f25\u03b9', 0x1f96:'\u1f26\u03b9',
0x1f97:'\u1f27\u03b9', 0x1f98:'\u1f20\u03b9', 0x1f99:'\u1f21\u03b9', 0x1f9a:'\u1f22\u03b9',
0x1f9b:'\u1f23\u03b9', 0x1f9c:'\u1f24\u03b9', 0x1f9d:'\u1f25\u03b9', 0x1f9e:'\u1f26\u03b9',
0x1f9f:'\u1f27\u03b9', 0x1fa0:'\u1f60\u03b9', 0x1fa1:'\u1f61\u03b9', 0x1fa2:'\u1f62\u03b9',
0x1fa3:'\u1f63\u03b9', 0x1fa4:'\u1f64\u03b9', 0x1fa5:'\u1f65\u03b9', 0x1fa6:'\u1f66\u03b9',
0x1fa7:'\u1f67\u03b9', 0x1fa8:'\u1f60\u03b9', 0x1fa9:'\u1f61\u03b9', 0x1faa:'\u1f62\u03b9',
0x1fab:'\u1f63\u03b9', 0x1fac:'\u1f64\u03b9', 0x1fad:'\u1f65\u03b9', 0x1fae:'\u1f66\u03b9',
0x1faf:'\u1f67\u03b9', 0x1fb2:'\u1f70\u03b9', 0x1fb3:'\u03b1\u03b9', 0x1fb4:'\u03ac\u03b9',
0x1fb6:'\u03b1\u0342', 0x1fb7:'\u03b1\u0342\u03b9', 0x1fbc:'\u03b1\u03b9', 0x1fbe:'\u03b9',
0x1fc2:'\u1f74\u03b9', 0x1fc3:'\u03b7\u03b9', 0x1fc4:'\u03ae\u03b9', 0x1fc6:'\u03b7\u0342',
0x1fc7:'\u03b7\u0342\u03b9', 0x1fcc:'\u03b7\u03b9', 0x1fd2:'\u03b9\u0308\u0300', 0x1fd3:'\u03b9\u0308\u0301',
0x1fd6:'\u03b9\u0342', 0x1fd7:'\u03b9\u0308\u0342', 0x1fe2:'\u03c5\u0308\u0300', 0x1fe3:'\u03c5\u0308\u0301',
0x1fe4:'\u03c1\u0313', 0x1fe6:'\u03c5\u0342', 0x1fe7:'\u03c5\u0308\u0342', 0x1ff2:'\u1f7c\u03b9',
0x1ff3:'\u03c9\u03b9', 0x1ff4:'\u03ce\u03b9', 0x1ff6:'\u03c9\u0342', 0x1ff7:'\u03c9\u0342\u03b9',
0x1ffc:'\u03c9\u03b9', 0x20a8:'rs', 0x2102:'c', 0x2103:'\xb0c',
0x2107:'\u025b', 0x2109:'\xb0f', 0x210b:'h', 0x210c:'h',
0x210d:'h', 0x2110:'i', 0x2111:'i', 0x2112:'l',
0x2115:'n', 0x2116:'no', 0x2119:'p', 0x211a:'q',
0x211b:'r', 0x211c:'r', 0x211d:'r', 0x2120:'sm',
0x2121:'tel', 0x2122:'tm', 0x2124:'z', 0x2128:'z',
0x212c:'b', 0x212d:'c', 0x2130:'e', 0x2131:'f',
0x2133:'m', 0x213e:'\u03b3', 0x213f:'\u03c0', 0x2145:'d',
0x3371:'hpa', 0x3373:'au', 0x3375:'ov', 0x3380:'pa',
0x3381:'na', 0x3382:'\u03bca', 0x3383:'ma', 0x3384:'ka',
0x3385:'kb', 0x3386:'mb', 0x3387:'gb', 0x338a:'pf',
0x338b:'nf', 0x338c:'\u03bcf', 0x3390:'hz', 0x3391:'khz',
0x3392:'mhz', 0x3393:'ghz', 0x3394:'thz', 0x33a9:'pa',
0x33aa:'kpa', 0x33ab:'mpa', 0x33ac:'gpa', 0x33b4:'pv',
0x33b5:'nv', 0x33b6:'\u03bcv', 0x33b7:'mv', 0x33b8:'kv',
0x33b9:'mv', 0x33ba:'pw', 0x33bb:'nw', 0x33bc:'\u03bcw',
0x33bd:'mw', 0x33be:'kw', 0x33bf:'mw', 0x33c0:'k\u03c9',
0x33c1:'m\u03c9', 0x33c3:'bq', 0x33c6:'c\u2215kg', 0x33c7:'co.',
0x33c8:'db', 0x33c9:'gy', 0x33cb:'hp', 0x33cd:'kk',
0x33ce:'km', 0x33d7:'ph', 0x33d9:'ppm', 0x33da:'pr',
0x33dc:'sv', 0x33dd:'wb', 0xfb00:'ff', 0xfb01:'fi',
0xfb02:'fl', 0xfb03:'ffi', 0xfb04:'ffl', 0xfb05:'st',
0xfb06:'st', 0xfb13:'\u0574\u0576', 0xfb14:'\u0574\u0565', 0xfb15:'\u0574\u056b',
0xfb16:'\u057e\u0576', 0xfb17:'\u0574\u056d', 0x1d400:'a', 0x1d401:'b',
0x1d402:'c', 0x1d403:'d', 0x1d404:'e', 0x1d405:'f',
0x1d406:'g', 0x1d407:'h', 0x1d408:'i', 0x1d409:'j',
0x1d40a:'k', 0x1d40b:'l', 0x1d40c:'m', 0x1d40d:'n',
0x1d40e:'o', 0x1d40f:'p', 0x1d410:'q', 0x1d411:'r',
0x1d412:'s', 0x1d413:'t', 0x1d414:'u', 0x1d415:'v',
0x1d416:'w', 0x1d417:'x', 0x1d418:'y', 0x1d419:'z',
0x1d434:'a', 0x1d435:'b', 0x1d436:'c', 0x1d437:'d',
0x1d438:'e', 0x1d439:'f', 0x1d43a:'g', 0x1d43b:'h',
0x1d43c:'i', 0x1d43d:'j', 0x1d43e:'k', 0x1d43f:'l',
0x1d440:'m', 0x1d441:'n', 0x1d442:'o', 0x1d443:'p',
0x1d444:'q', 0x1d445:'r', 0x1d446:'s', 0x1d447:'t',
0x1d448:'u', 0x1d449:'v', 0x1d44a:'w', 0x1d44b:'x',
0x1d44c:'y', 0x1d44d:'z', 0x1d468:'a', 0x1d469:'b',
0x1d46a:'c', 0x1d46b:'d', 0x1d46c:'e', 0x1d46d:'f',
0x1d46e:'g', 0x1d46f:'h', 0x1d470:'i', 0x1d471:'j',
0x1d472:'k', 0x1d473:'l', 0x1d474:'m', 0x1d475:'n',
0x1d476:'o', 0x1d477:'p', 0x1d478:'q', 0x1d479:'r',
0x1d47a:'s', 0x1d47b:'t', 0x1d47c:'u', 0x1d47d:'v',
0x1d47e:'w', 0x1d47f:'x', 0x1d480:'y', 0x1d481:'z',
0x1d49c:'a', 0x1d49e:'c', 0x1d49f:'d', 0x1d4a2:'g',
0x1d4a5:'j', 0x1d4a6:'k', 0x1d4a9:'n', 0x1d4aa:'o',
0x1d4ab:'p', 0x1d4ac:'q', 0x1d4ae:'s', 0x1d4af:'t',
0x1d4b0:'u', 0x1d4b1:'v', 0x1d4b2:'w', 0x1d4b3:'x',
0x1d4b4:'y', 0x1d4b5:'z', 0x1d4d0:'a', 0x1d4d1:'b',
0x1d4d2:'c', 0x1d4d3:'d', 0x1d4d4:'e', 0x1d4d5:'f',
0x1d4d6:'g', 0x1d4d7:'h', 0x1d4d8:'i', 0x1d4d9:'j',
0x1d4da:'k', 0x1d4db:'l', 0x1d4dc:'m', 0x1d4dd:'n',
0x1d4de:'o', 0x1d4df:'p', 0x1d4e0:'q', 0x1d4e1:'r',
0x1d4e2:'s', 0x1d4e3:'t', 0x1d4e4:'u', 0x1d4e5:'v',
0x1d4e6:'w', 0x1d4e7:'x', 0x1d4e8:'y', 0x1d4e9:'z',
0x1d504:'a', 0x1d505:'b', 0x1d507:'d', 0x1d508:'e',
0x1d509:'f', 0x1d50a:'g', 0x1d50d:'j', 0x1d50e:'k',
0x1d50f:'l', 0x1d510:'m', 0x1d511:'n', 0x1d512:'o',
0x1d513:'p', 0x1d514:'q', 0x1d516:'s', 0x1d517:'t',
0x1d518:'u', 0x1d519:'v', 0x1d51a:'w', 0x1d51b:'x',
0x1d51c:'y', 0x1d538:'a', 0x1d539:'b', 0x1d53b:'d',
0x1d53c:'e', 0x1d53d:'f', 0x1d53e:'g', 0x1d540:'i',
0x1d541:'j', 0x1d542:'k', 0x1d543:'l', 0x1d544:'m',
0x1d546:'o', 0x1d54a:'s', 0x1d54b:'t', 0x1d54c:'u',
0x1d54d:'v', 0x1d54e:'w', 0x1d54f:'x', 0x1d550:'y',
0x1d56c:'a', 0x1d56d:'b', 0x1d56e:'c', 0x1d56f:'d',
0x1d570:'e', 0x1d571:'f', 0x1d572:'g', 0x1d573:'h',
0x1d574:'i', 0x1d575:'j', 0x1d576:'k', 0x1d577:'l',
0x1d578:'m', 0x1d579:'n', 0x1d57a:'o', 0x1d57b:'p',
0x1d57c:'q', 0x1d57d:'r', 0x1d57e:'s', 0x1d57f:'t',
0x1d580:'u', 0x1d581:'v', 0x1d582:'w', 0x1d583:'x',
0x1d584:'y', 0x1d585:'z', 0x1d5a0:'a', 0x1d5a1:'b',
0x1d5a2:'c', 0x1d5a3:'d', 0x1d5a4:'e', 0x1d5a5:'f',
0x1d5a6:'g', 0x1d5a7:'h', 0x1d5a8:'i', 0x1d5a9:'j',
0x1d5aa:'k', 0x1d5ab:'l', 0x1d5ac:'m', 0x1d5ad:'n',
0x1d5ae:'o', 0x1d5af:'p', 0x1d5b0:'q', 0x1d5b1:'r',
0x1d5b2:'s', 0x1d5b3:'t', 0x1d5b4:'u', 0x1d5b5:'v',
0x1d5b6:'w', 0x1d5b7:'x', 0x1d5b8:'y', 0x1d5b9:'z',
0x1d5d4:'a', 0x1d5d5:'b', 0x1d5d6:'c', 0x1d5d7:'d',
0x1d5d8:'e', 0x1d5d9:'f', 0x1d5da:'g', 0x1d5db:'h',
0x1d5dc:'i', 0x1d5dd:'j', 0x1d5de:'k', 0x1d5df:'l',
0x1d5e0:'m', 0x1d5e1:'n', 0x1d5e2:'o', 0x1d5e3:'p',
0x1d5e4:'q', 0x1d5e5:'r', 0x1d5e6:'s', 0x1d5e7:'t',
0x1d5e8:'u', 0x1d5e9:'v', 0x1d5ea:'w', 0x1d5eb:'x',
0x1d5ec:'y', 0x1d5ed:'z', 0x1d608:'a', 0x1d609:'b',
0x1d60a:'c', 0x1d60b:'d', 0x1d60c:'e', 0x1d60d:'f',
0x1d60e:'g', 0x1d60f:'h', 0x1d610:'i', 0x1d611:'j',
0x1d612:'k', 0x1d613:'l', 0x1d614:'m', 0x1d615:'n',
0x1d616:'o', 0x1d617:'p', 0x1d618:'q', 0x1d619:'r',
0x1d61a:'s', 0x1d61b:'t', 0x1d61c:'u', 0x1d61d:'v',
0x1d61e:'w', 0x1d61f:'x', 0x1d620:'y', 0x1d621:'z',
0x1d63c:'a', 0x1d63d:'b', 0x1d63e:'c', 0x1d63f:'d',
0x1d640:'e', 0x1d641:'f', 0x1d642:'g', 0x1d643:'h',
0x1d644:'i', 0x1d645:'j', 0x1d646:'k', 0x1d647:'l',
0x1d648:'m', 0x1d649:'n', 0x1d64a:'o', 0x1d64b:'p',
0x1d64c:'q', 0x1d64d:'r', 0x1d64e:'s', 0x1d64f:'t',
0x1d650:'u', 0x1d651:'v', 0x1d652:'w', 0x1d653:'x',
0x1d654:'y', 0x1d655:'z', 0x1d670:'a', 0x1d671:'b',
0x1d672:'c', 0x1d673:'d', 0x1d674:'e', 0x1d675:'f',
0x1d676:'g', 0x1d677:'h', 0x1d678:'i', 0x1d679:'j',
0x1d67a:'k', 0x1d67b:'l', 0x1d67c:'m', 0x1d67d:'n',
0x1d67e:'o', 0x1d67f:'p', 0x1d680:'q', 0x1d681:'r',
0x1d682:'s', 0x1d683:'t', 0x1d684:'u', 0x1d685:'v',
0x1d686:'w', 0x1d687:'x', 0x1d688:'y', 0x1d689:'z',
0x1d6a8:'\u03b1', 0x1d6a9:'\u03b2', 0x1d6aa:'\u03b3', 0x1d6ab:'\u03b4',
0x1d6ac:'\u03b5', 0x1d6ad:'\u03b6', 0x1d6ae:'\u03b7', 0x1d6af:'\u03b8',
0x1d6b0:'\u03b9', 0x1d6b1:'\u03ba', 0x1d6b2:'\u03bb', 0x1d6b3:'\u03bc',
0x1d6b4:'\u03bd', 0x1d6b5:'\u03be', 0x1d6b6:'\u03bf', 0x1d6b7:'\u03c0',
0x1d6b8:'\u03c1', 0x1d6b9:'\u03b8', 0x1d6ba:'\u03c3', 0x1d6bb:'\u03c4',
0x1d6bc:'\u03c5', 0x1d6bd:'\u03c6', 0x1d6be:'\u03c7', 0x1d6bf:'\u03c8',
0x1d6c0:'\u03c9', 0x1d6d3:'\u03c3', 0x1d6e2:'\u03b1', 0x1d6e3:'\u03b2',
0x1d6e4:'\u03b3', 0x1d6e5:'\u03b4', 0x1d6e6:'\u03b5', 0x1d6e7:'\u03b6',
0x1d6e8:'\u03b7', 0x1d6e9:'\u03b8', 0x1d6ea:'\u03b9', 0x1d6eb:'\u03ba',
0x1d6ec:'\u03bb', 0x1d6ed:'\u03bc', 0x1d6ee:'\u03bd', 0x1d6ef:'\u03be',
0x1d6f0:'\u03bf', 0x1d6f1:'\u03c0', 0x1d6f2:'\u03c1', 0x1d6f3:'\u03b8',
0x1d6f4:'\u03c3', 0x1d6f5:'\u03c4', 0x1d6f6:'\u03c5', 0x1d6f7:'\u03c6',
0x1d6f8:'\u03c7', 0x1d6f9:'\u03c8', 0x1d6fa:'\u03c9', 0x1d70d:'\u03c3',
0x1d71c:'\u03b1', 0x1d71d:'\u03b2', 0x1d71e:'\u03b3', 0x1d71f:'\u03b4',
0x1d720:'\u03b5', 0x1d721:'\u03b6', 0x1d722:'\u03b7', 0x1d723:'\u03b8',
0x1d724:'\u03b9', 0x1d725:'\u03ba', 0x1d726:'\u03bb', 0x1d727:'\u03bc',
0x1d728:'\u03bd', 0x1d729:'\u03be', 0x1d72a:'\u03bf', 0x1d72b:'\u03c0',
0x1d72c:'\u03c1', 0x1d72d:'\u03b8', 0x1d72e:'\u03c3', 0x1d72f:'\u03c4',
0x1d730:'\u03c5', 0x1d731:'\u03c6', 0x1d732:'\u03c7', 0x1d733:'\u03c8',
0x1d734:'\u03c9', 0x1d747:'\u03c3', 0x1d756:'\u03b1', 0x1d757:'\u03b2',
0x1d758:'\u03b3', 0x1d759:'\u03b4', 0x1d75a:'\u03b5', 0x1d75b:'\u03b6',
0x1d75c:'\u03b7', 0x1d75d:'\u03b8', 0x1d75e:'\u03b9', 0x1d75f:'\u03ba',
0x1d760:'\u03bb', 0x1d761:'\u03bc', 0x1d762:'\u03bd', 0x1d763:'\u03be',
0x1d764:'\u03bf', 0x1d765:'\u03c0', 0x1d766:'\u03c1', 0x1d767:'\u03b8',
0x1d768:'\u03c3', 0x1d769:'\u03c4', 0x1d76a:'\u03c5', 0x1d76b:'\u03c6',
0x1d76c:'\u03c7', 0x1d76d:'\u03c8', 0x1d76e:'\u03c9', 0x1d781:'\u03c3',
0x1d790:'\u03b1', 0x1d791:'\u03b2', 0x1d792:'\u03b3', 0x1d793:'\u03b4',
0x1d794:'\u03b5', 0x1d795:'\u03b6', 0x1d796:'\u03b7', 0x1d797:'\u03b8',
0x1d798:'\u03b9', 0x1d799:'\u03ba', 0x1d79a:'\u03bb', 0x1d79b:'\u03bc',
0x1d79c:'\u03bd', 0x1d79d:'\u03be', 0x1d79e:'\u03bf', 0x1d79f:'\u03c0',
0x1d7a0:'\u03c1', 0x1d7a1:'\u03b8', 0x1d7a2:'\u03c3', 0x1d7a3:'\u03c4',
0x1d7a4:'\u03c5', 0x1d7a5:'\u03c6', 0x1d7a6:'\u03c7', 0x1d7a7:'\u03c8',
0x1d7a8:'\u03c9', 0x1d7bb:'\u03c3', }
def map_table_b3(code):
    """Case-fold *code* per RFC 3454 table B.3.

    Exceptional code points come from the generated ``b3_exceptions``
    mapping; everything else falls back to a plain lowercase.
    """
    special = b3_exceptions.get(ord(code))
    return special if special is not None else code.lower()
def map_table_b2(a):
    """Case-fold *a* for use with NFKC normalization (RFC 3454 table B.2).

    Folds, normalizes, re-folds and re-normalizes; the extra round trip
    is only kept when normalization actually changed the folded form.
    """
    folded = map_table_b3(a)
    once = unicodedata.normalize("NFKC", folded)
    twice = unicodedata.normalize(
        "NFKC", "".join(map_table_b3(ch) for ch in once))
    return twice if once != twice else folded
def in_table_c11(code):
    """ASCII space character (RFC 3454 table C.1.1)."""
    return code == " "
def in_table_c12(code):
    """Non-ASCII space characters (RFC 3454 table C.1.2)."""
    return code != " " and unicodedata.category(code) == "Zs"
def in_table_c11_c12(code):
    """Any space character (union of tables C.1.1 and C.1.2)."""
    return unicodedata.category(code) == "Zs"
def in_table_c21(code):
    """ASCII control characters (RFC 3454 table C.2.1)."""
    return ord(code) < 128 and unicodedata.category(code) == "Cc"
# Non-Cc code points that RFC 3454 nevertheless lists as controls
# (joiners, bidi separators, interlinear annotation, tags, ...).
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279])
c22_specials.update(range(8288, 8292))
c22_specials.update(range(8298, 8304))
c22_specials.update(range(65529, 65533))
c22_specials.update(range(119155, 119163))
def in_table_c22(code):
    """Non-ASCII control characters (RFC 3454 table C.2.2)."""
    cp = ord(code)
    if cp < 128:
        return False
    return unicodedata.category(code) == "Cc" or cp in c22_specials
def in_table_c21_c22(code):
    """Any control character (union of tables C.2.1 and C.2.2)."""
    return unicodedata.category(code) == "Cc" or ord(code) in c22_specials
def in_table_c3(code):
    """Private use characters (RFC 3454 table C.3)."""
    return unicodedata.category(code) == "Co"
def in_table_c4(code):
    """Non-character code points (RFC 3454 table C.4)."""
    cp = ord(code)
    if cp < 0xFDD0:
        return False
    # The FDD0..FDEF block plus the xxFFFE/xxFFFF code point of every plane.
    return cp < 0xFDF0 or (cp & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
    """Surrogate code points (RFC 3454 table C.5)."""
    return unicodedata.category(code) == "Cs"
# RFC 3454 table C.6: inappropriate for plain text (specials block).
c6_set = set(range(65529, 65534))
def in_table_c6(code):
    """Characters inappropriate for plain text (RFC 3454 table C.6)."""
    return ord(code) in c6_set
# RFC 3454 table C.7: ideographic description characters.
c7_set = set(range(12272, 12284))
def in_table_c7(code):
    """Characters inappropriate for canonical representation (C.7)."""
    return ord(code) in c7_set
# RFC 3454 table C.8: display-modifying and deprecated characters.
c8_set = set([832, 833, 8206, 8207])
c8_set.update(range(8234, 8239))
c8_set.update(range(8298, 8304))
def in_table_c8(code):
    """Characters that change display properties or are deprecated (C.8)."""
    return ord(code) in c8_set
# RFC 3454 table C.9: tagging characters (plane 14).
c9_set = set([917505])
c9_set.update(range(917536, 917632))
def in_table_c9(code):
    """Tagging characters (RFC 3454 table C.9)."""
    return ord(code) in c9_set
def in_table_d1(code):
    """Characters with bidirectional property R or AL (RFC 3454 D.1)."""
    return unicodedata.bidirectional(code) in ("R", "AL")
def in_table_d2(code):
    """Characters with bidirectional property L (RFC 3454 D.2)."""
    return unicodedata.bidirectional(code) == "L"
| gpl-3.0 |
pla93/django-mantis-actionables | mantis_actionables/migrations/0010_auto_20150217_2021.py | 2 | 1120 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds `Source.processing` and redefines the choice sets of
    # `Source.origin` and `Source.tlp`.  Migration files are frozen
    # history: do not edit the operations retroactively.
    dependencies = [
        ('mantis_actionables', '0009_auto_20150217_0902'),
    ]
    operations = [
        migrations.AddField(
            model_name='source',
            name='processing',
            # Default 0 = "Processing uncertain" for pre-existing rows.
            field=models.SmallIntegerField(default=0, choices=[(0, b'Processing uncertain'), (10, b'Automatically processed'), (20, b'Manually processed')]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='source',
            name='origin',
            # NOTE(review): two choices share the value 10 ('Origin external,
            # but provenance uncertain' and 'Origin public'), so stored rows
            # cannot distinguish them -- confirm whether 'Origin public' was
            # meant to get its own value in a follow-up migration.
            field=models.SmallIntegerField(choices=[(0, b'Origin not external'), (10, b'Origin external, but provenance uncertain'), (10, b'Origin public'), (20, b'Provided by vendor'), (30, b'Provided by partner')]),
        ),
        migrations.AlterField(
            model_name='source',
            name='tlp',
            field=models.SmallIntegerField(default=0, choices=[(0, b'Unknown'), (40, b'White'), (30, b'Green'), (20, b'Amber'), (10, b'Red')]),
        ),
    ]
| gpl-2.0 |
DavidLKing/w2v-demo | venv/lib/python3.5/site-packages/pip/commands/list.py | 339 | 11369 | from __future__ import absolute_import
import json
import logging
import warnings
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from pip._vendor import six
from pip.basecommand import Command
from pip.exceptions import CommandError
from pip.index import PackageFinder
from pip.utils import (
get_installed_distributions, dist_is_editable)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
    """
    List installed packages, including editables.

    Packages are listed in a case-insensitive sorted order.
    """
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'
    def __init__(self, *args, **kw):
        """Register all command line options for ``pip list``."""
        super(ListCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help=('If in a virtualenv that has global access, do not list '
                  'globally-installed packages.'),
        )
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )
        cmd_opts.add_option(
            '--format',
            action='store',
            dest='list_format',
            choices=('legacy', 'columns', 'freeze', 'json'),
            help="Select the output format among: legacy (default), columns, "
                 "freeze or json.",
        )
        cmd_opts.add_option(
            '--not-required',
            action='store_true',
            dest='not_required',
            help="List packages that are not dependencies of "
                 "installed packages.",
        )
        index_opts = make_option_group(index_group, self.parser)
        # Index options come first so they appear before the list-specific
        # options in --help output.
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            allow_all_prereleases=options.pre,
            trusted_hosts=options.trusted_hosts,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
    def run(self, options, args):
        """Warn about deprecated flags, collect the matching installed
        distributions and hand them to the output routine."""
        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.list_format is None:
            warnings.warn(
                "The default format will switch to columns in the future. "
                "You can use --format=(legacy|columns) (or define a "
                "format=(legacy|columns) in your pip.conf under the [list] "
                "section) to disable this warning.",
                RemovedInPip10Warning,
            )
        if options.outdated and options.uptodate:
            raise CommandError(
                "Options --outdated and --uptodate cannot be combined.")
        packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            editables_only=options.editable,
        )
        # --outdated/--uptodate both require querying the index for the
        # latest available version of every package.
        if options.outdated:
            packages = self.get_outdated(packages, options)
        elif options.uptodate:
            packages = self.get_uptodate(packages, options)
        if options.not_required:
            packages = self.get_not_required(packages, options)
        self.output_package_listing(packages, options)
    def get_outdated(self, packages, options):
        """Return the subset of *packages* with a newer version on the index."""
        return [
            dist for dist in self.iter_packages_latest_infos(packages, options)
            if dist.latest_version > dist.parsed_version
        ]
    def get_uptodate(self, packages, options):
        """Return the subset of *packages* already at the latest version."""
        return [
            dist for dist in self.iter_packages_latest_infos(packages, options)
            if dist.latest_version == dist.parsed_version
        ]
    def get_not_required(self, packages, options):
        """Return the packages no other installed package depends on."""
        dep_keys = set()
        for dist in packages:
            dep_keys.update(requirement.key for requirement in dist.requires())
        return set(pkg for pkg in packages if pkg.key not in dep_keys)
    def iter_packages_latest_infos(self, packages, options):
        """Yield each dist annotated with its latest index version.

        Annotates ``dist.latest_version`` and ``dist.latest_filetype``
        in place; dists with no index candidates are skipped.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        dependency_links = []
        for dist in packages:
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)
            for dist in packages:
                typ = 'unknown'
                all_candidates = finder.find_all_candidates(dist.key)
                if not options.pre:
                    # Remove prereleases
                    all_candidates = [candidate for candidate in all_candidates
                                      if not candidate.version.is_prerelease]
                if not all_candidates:
                    continue
                best_candidate = max(all_candidates,
                                     key=finder._candidate_sort_key)
                remote_version = best_candidate.version
                if best_candidate.location.is_wheel:
                    typ = 'wheel'
                else:
                    typ = 'sdist'
                # This is dirty but makes the rest of the code much cleaner
                dist.latest_version = remote_version
                dist.latest_filetype = typ
                yield dist
    def output_legacy(self, dist):
        """Render one dist in the legacy ``name (version)`` format."""
        if dist_is_editable(dist):
            return '%s (%s, %s)' % (
                dist.project_name,
                dist.version,
                dist.location,
            )
        else:
            return '%s (%s)' % (dist.project_name, dist.version)
    def output_legacy_latest(self, dist):
        """Legacy format plus the latest version found on the index."""
        return '%s - Latest: %s [%s]' % (
            self.output_legacy(dist),
            dist.latest_version,
            dist.latest_filetype,
        )
    def output_package_listing(self, packages, options):
        """Print *packages* in the format selected by ``--format``."""
        packages = sorted(
            packages,
            key=lambda dist: dist.project_name.lower(),
        )
        if options.list_format == 'columns' and packages:
            data, header = format_for_columns(packages, options)
            self.output_package_listing_columns(data, header)
        elif options.list_format == 'freeze':
            for dist in packages:
                logger.info("%s==%s", dist.project_name, dist.version)
        elif options.list_format == 'json':
            logger.info(format_for_json(packages, options))
        else:  # legacy
            for dist in packages:
                if options.outdated:
                    logger.info(self.output_legacy_latest(dist))
                else:
                    logger.info(self.output_legacy(dist))
    def output_package_listing_columns(self, data, header):
        """Log *data* rows aligned under *header* column titles."""
        # insert the header first: we need to know the size of column names
        if len(data) > 0:
            data.insert(0, header)
        pkg_strings, sizes = tabulate(data)
        # Create and add a separator.
        if len(data) > 0:
            pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
        for val in pkg_strings:
            logger.info(val)
def tabulate(vals):
    """Format rows of values into aligned columns.

    Returns ``(lines, sizes)`` where ``lines`` are the rendered rows and
    ``sizes`` the computed column widths.  Rows may have different
    lengths; missing trailing cells are rendered as empty.

    From pfmoore on GitHub:
    https://github.com/pypa/pip/issues/3651#issuecomment-216932564
    """
    assert len(vals) > 0
    sizes = [0] * max(len(x) for x in vals)
    for row in vals:
        # Fix: cells missing from a short row arrive as None from
        # zip_longest and must contribute no width; measuring them as
        # len(str(None)) inflated a ragged column to at least 4 chars.
        sizes = [max(s, len(str(c)) if c is not None else 0)
                 for s, c in zip_longest(sizes, row)]
    result = []
    for row in vals:
        display = " ".join([str(c).ljust(s) if c is not None else ''
                            for s, c in zip_longest(sizes, row)])
        result.append(display)
    return result, sizes
def format_for_columns(pkgs, options):
    """
    Convert the package data into something usable
    by output_package_listing_columns.
    """
    running_outdated = options.outdated
    header = ["Package", "Version"]
    if running_outdated:
        # `pip list --outdated` shows two extra columns.
        header += ["Latest", "Type"]
    # A Location column only appears when at least one editable install
    # is present.
    if any(dist_is_editable(x) for x in pkgs):
        header.append("Location")
    data = []
    for proj in pkgs:
        row = [proj.project_name, proj.version]
        if running_outdated:
            row.append(proj.latest_version)
            row.append(proj.latest_filetype)
        if dist_is_editable(proj):
            row.append(proj.location)
        data.append(row)
    return data, header
def format_for_json(packages, options):
    """Serialize `packages` to a JSON array of {name, version, ...} objects,
    including latest-version info when `options.outdated` is set."""
    def _serialize(dist):
        info = {
            'name': dist.project_name,
            'version': six.text_type(dist.version),
        }
        if options.outdated:
            info['latest_version'] = six.text_type(dist.latest_version)
            info['latest_filetype'] = dist.latest_filetype
        return info

    return json.dumps([_serialize(dist) for dist in packages])
| gpl-3.0 |
nuuuboo/odoo | addons/account/res_config.py | 200 | 25453 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from dateutil.relativedelta import relativedelta
import openerp
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from openerp.osv import fields, osv
class account_config_settings(osv.osv_memory):
    """Accounting configuration wizard (Settings > Configuration > Accounting).

    A transient model: most fields are related fields on the selected
    company, plus booleans that install optional accounting modules.
    """
    _name = 'account.config.settings'
    _inherit = 'res.config.settings'

    _columns = {
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'has_default_company': fields.boolean('Has default company', readonly=True),
        'expects_chart_of_accounts': fields.related('company_id', 'expects_chart_of_accounts', type='boolean',
            string='This company has its own chart of accounts',
            help="""Check this box if this company is a legal entity."""),
        'currency_id': fields.related('company_id', 'currency_id', type='many2one', relation='res.currency', required=True,
            string='Default company currency', help="Main currency of the company."),
        'paypal_account': fields.related('company_id', 'paypal_account', type='char', size=128,
            string='Paypal account', help="Paypal account (email) for receiving online payments (credit card, etc.) If you set a paypal account, the customer  will be able to pay your invoices or quotations with a button \"Pay with  Paypal\" in automated emails or through the Odoo portal."),
        'company_footer': fields.related('company_id', 'rml_footer', type='text', readonly=True,
            string='Bank accounts footer preview', help="Bank accounts as printed in the footer of each printed document"),
        'has_chart_of_accounts': fields.boolean('Company has a chart of accounts'),
        'chart_template_id': fields.many2one('account.chart.template', 'Template', domain="[('visible','=', True)]"),
        'code_digits': fields.integer('# of Digits', help="No. of digits to use for account code"),
        'tax_calculation_rounding_method': fields.related('company_id',
            'tax_calculation_rounding_method', type='selection', selection=[
            ('round_per_line', 'Round per line'),
            ('round_globally', 'Round globally'),
            ], string='Tax calculation rounding method',
            help="If you select 'Round per line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
        'sale_tax': fields.many2one("account.tax.template", "Default sale tax"),
        'purchase_tax': fields.many2one("account.tax.template", "Default purchase tax"),
        'sale_tax_rate': fields.float('Sales tax (%)'),
        'purchase_tax_rate': fields.float('Purchase tax (%)'),
        'complete_tax_set': fields.boolean('Complete set of taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete'),
        'has_fiscal_year': fields.boolean('Company has a fiscal year'),
        'date_start': fields.date('Start date', required=True),
        'date_stop': fields.date('End date', required=True),
        'period': fields.selection([('month', 'Monthly'), ('3months','3 Monthly')], 'Periods', required=True),
        'sale_journal_id': fields.many2one('account.journal', 'Sale journal'),
        'sale_sequence_prefix': fields.related('sale_journal_id', 'sequence_id', 'prefix', type='char', string='Invoice sequence'),
        'sale_sequence_next': fields.related('sale_journal_id', 'sequence_id', 'number_next', type='integer', string='Next invoice number'),
        'sale_refund_journal_id': fields.many2one('account.journal', 'Sale refund journal'),
        'sale_refund_sequence_prefix': fields.related('sale_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Credit note sequence'),
        'sale_refund_sequence_next': fields.related('sale_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next credit note number'),
        'purchase_journal_id': fields.many2one('account.journal', 'Purchase journal'),
        'purchase_sequence_prefix': fields.related('purchase_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier invoice sequence'),
        'purchase_sequence_next': fields.related('purchase_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier invoice number'),
        'purchase_refund_journal_id': fields.many2one('account.journal', 'Purchase refund journal'),
        'purchase_refund_sequence_prefix': fields.related('purchase_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier credit note sequence'),
        'purchase_refund_sequence_next': fields.related('purchase_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier credit note number'),
        'module_account_check_writing': fields.boolean('Pay your suppliers by check',
            help='This allows you to check writing and printing.\n'
                 '-This installs the module account_check_writing.'),
        'module_account_accountant': fields.boolean('Full accounting features: journals, legal statements, chart of accounts, etc.',
            help="""If you do not check this box, you will be able to do invoicing & payments, but not accounting (Journal Items, Chart of  Accounts, ...)"""),
        'module_account_asset': fields.boolean('Assets management',
            help='This allows you to manage the assets owned by a company or a person.\n'
                 'It keeps track of the depreciation occurred on those assets, and creates account move for those depreciation lines.\n'
                 '-This installs the module account_asset. If you do not check this box, you will be able to do invoicing & payments, '
                 'but not accounting (Journal Items, Chart of Accounts, ...)'),
        'module_account_budget': fields.boolean('Budget management',
            help='This allows accountants to manage analytic and crossovered budgets. '
                 'Once the master budgets and the budgets are defined, '
                 'the project managers can set the planned amount on each analytic account.\n'
                 '-This installs the module account_budget.'),
        'module_account_payment': fields.boolean('Manage payment orders',
            help='This allows you to create and manage your payment orders, with purposes to \n'
                 '* serve as base for an easy plug-in of various automated payment mechanisms, and \n'
                 '* provide a more efficient way to manage invoice payments.\n'
                 '-This installs the module account_payment.' ),
        'module_account_voucher': fields.boolean('Manage customer payments',
            help='This includes all the basic requirements of voucher entries for bank, cash, sales, purchase, expense, contra, etc.\n'
                 '-This installs the module account_voucher.'),
        'module_account_followup': fields.boolean('Manage customer payment follow-ups',
            help='This allows to automate letters for unpaid invoices, with multi-level recalls.\n'
                 '-This installs the module account_followup.'),
        'module_product_email_template': fields.boolean('Send products tools and information at the invoice confirmation',
            help='With this module, link your products to a template to send complete information and tools to your customer.\n'
                 'For instance when invoicing a training, the training agenda and materials will automatically be send to your customers.'),
        'group_proforma_invoices': fields.boolean('Allow pro-forma invoices',
            implied_group='account.group_proforma_invoices',
            help="Allows you to put invoices in pro-forma state."),
        'default_sale_tax': fields.many2one('account.tax', 'Default sale tax',
            help="This sale tax will be assigned by default on new products."),
        'default_purchase_tax': fields.many2one('account.tax', 'Default purchase tax',
            help="This purchase tax will be assigned by default on new products."),
        'decimal_precision': fields.integer('Decimal precision on journal entries',
            help="""As an example, a decimal precision of 2 will allow journal entries like: 9.99 EUR, whereas a decimal precision of 4 will allow journal entries like: 0.0231 EUR."""),
        'group_multi_currency': fields.boolean('Allow multi currencies',
            implied_group='base.group_multi_currency',
            help="Allows you multi currency environment"),
        'group_analytic_accounting': fields.boolean('Analytic accounting',
            implied_group='analytic.group_analytic_accounting',
            help="Allows you to use the analytic accounting."),
        'group_check_supplier_invoice_total': fields.boolean('Check the total of supplier invoices',
            implied_group="account.group_supplier_inv_check_total"),
        # BUGFIX: the two domains below previously ended in "]]" (an extra
        # closing bracket), producing an invalid domain string on the client.
        'income_currency_exchange_account_id': fields.related(
            'company_id', 'income_currency_exchange_account_id',
            type='many2one',
            relation='account.account',
            string="Gain Exchange Rate Account",
            domain="[('type', '=', 'other'), ('company_id', '=', company_id)]"),
        'expense_currency_exchange_account_id': fields.related(
            'company_id', 'expense_currency_exchange_account_id',
            type="many2one",
            relation='account.account',
            string="Loss Exchange Rate Account",
            domain="[('type', '=', 'other'), ('company_id', '=', company_id)]"),
    }
def _check_account_gain(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.income_currency_exchange_account_id.company_id and obj.company_id != obj.income_currency_exchange_account_id.company_id:
return False
return True
def _check_account_loss(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.expense_currency_exchange_account_id.company_id and obj.company_id != obj.expense_currency_exchange_account_id.company_id:
return False
return True
_constraints = [
(_check_account_gain, 'The company of the gain exchange rate account must be the same than the company selected.', ['income_currency_exchange_account_id']),
(_check_account_loss, 'The company of the loss exchange rate account must be the same than the company selected.', ['expense_currency_exchange_account_id']),
]
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.id
def _default_has_default_company(self, cr, uid, context=None):
count = self.pool.get('res.company').search_count(cr, uid, [], context=context)
return bool(count == 1)
    def _get_default_fiscalyear_data(self, cr, uid, company_id, context=None):
        """Compute default period, starting and ending date for fiscalyear
        - if in a fiscal year, use its period, starting and ending date
        - if past fiscal year, use its period, and new dates [ending date of the latest +1 day ; ending date of the latest +1 year]
        - if no fiscal year, use monthly, 1st jan, 31th dec of this year
        :return: (date_start, date_stop, period) at format DEFAULT_SERVER_DATETIME_FORMAT
        """
        # Look for a fiscal year that contains today's date.
        fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid,
                [('date_start', '<=', time.strftime(DF)), ('date_stop', '>=', time.strftime(DF)),
                 ('company_id', '=', company_id)])
        if fiscalyear_ids:
            # is in a current fiscal year, use this one
            fiscalyear = self.pool.get('account.fiscalyear').browse(cr, uid, fiscalyear_ids[0], context=context)
            if len(fiscalyear.period_ids) == 5:  # 4 periods of 3 months + opening period
                period = '3months'
            else:
                period = 'month'
            return (fiscalyear.date_start, fiscalyear.date_stop, period)
        else:
            # No current year: fall back to the most recent past fiscal year.
            past_fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid,
                [('date_stop', '<=', time.strftime(DF)), ('company_id', '=', company_id)])
            if past_fiscalyear_ids:
                # use the latest fiscal, sorted by (start_date, id)
                latest_year = self.pool.get('account.fiscalyear').browse(cr, uid, past_fiscalyear_ids[-1], context=context)
                latest_stop = datetime.datetime.strptime(latest_year.date_stop, DF)
                if len(latest_year.period_ids) == 5:
                    period = '3months'
                else:
                    period = 'month'
                # Propose the year following the latest one: starts the day
                # after the last stop date, runs for exactly one year.
                return ((latest_stop+datetime.timedelta(days=1)).strftime(DF), latest_stop.replace(year=latest_stop.year+1).strftime(DF), period)
            else:
                # No fiscal year at all: monthly periods over the calendar year.
                return (time.strftime('%Y-01-01'), time.strftime('%Y-12-31'), 'month')
_defaults = {
'company_id': _default_company,
'has_default_company': _default_has_default_company,
}
    def create(self, cr, uid, values, context=None):
        """Create the wizard record, then explicitly write related fields.

        Hack: to avoid some nasty bug, related fields are not written upon
        record creation.  Hence we write on those fields here.
        """
        id = super(account_config_settings, self).create(cr, uid, values, context)
        # Hack: to avoid some nasty bug, related fields are not written upon record creation.
        # Hence we write on those fields here.
        vals = {}
        for fname, field in self._columns.iteritems():
            # Only re-write related fields that the caller actually provided.
            if isinstance(field, fields.related) and fname in values:
                vals[fname] = values[fname]
        self.write(cr, uid, [id], vals, context)
        return id
    def onchange_company_id(self, cr, uid, ids, company_id, context=None):
        """Refresh all company-dependent wizard fields when the company
        selection changes: related fields, journals/sequences, default
        taxes and the gain/loss exchange-rate accounts."""
        # update related fields
        values = {}
        values['currency_id'] = False
        if company_id:
            company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
            # A company is "configured" once it no longer appears in the
            # installer's list of unconfigured companies.
            has_chart_of_accounts = company_id not in self.pool.get('account.installer').get_unconfigured_cmp(cr, uid)
            fiscalyear_count = self.pool.get('account.fiscalyear').search_count(cr, uid,
                [('date_start', '<=', time.strftime('%Y-%m-%d')), ('date_stop', '>=', time.strftime('%Y-%m-%d')),
                 ('company_id', '=', company_id)])
            date_start, date_stop, period = self._get_default_fiscalyear_data(cr, uid, company_id, context=context)
            values = {
                'expects_chart_of_accounts': company.expects_chart_of_accounts,
                'currency_id': company.currency_id.id,
                'paypal_account': company.paypal_account,
                'company_footer': company.rml_footer,
                'has_chart_of_accounts': has_chart_of_accounts,
                'has_fiscal_year': bool(fiscalyear_count),
                'chart_template_id': False,
                'tax_calculation_rounding_method': company.tax_calculation_rounding_method,
                'date_start': date_start,
                'date_stop': date_stop,
                'period': period,
            }
            # update journals and sequences
            for journal_type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'):
                for suffix in ('_journal_id', '_sequence_prefix', '_sequence_next'):
                    values[journal_type + suffix] = False
            journal_obj = self.pool.get('account.journal')
            journal_ids = journal_obj.search(cr, uid, [('company_id', '=', company_id)])
            for journal in journal_obj.browse(cr, uid, journal_ids):
                if journal.type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'):
                    values.update({
                        journal.type + '_journal_id': journal.id,
                        journal.type + '_sequence_prefix': journal.sequence_id.prefix,
                        journal.type + '_sequence_next': journal.sequence_id.number_next,
                    })
            # update taxes
            ir_values = self.pool.get('ir.values')
            taxes_id = ir_values.get_default(cr, uid, 'product.template', 'taxes_id', company_id=company_id)
            supplier_taxes_id = ir_values.get_default(cr, uid, 'product.template', 'supplier_taxes_id', company_id=company_id)
            # ir.values may return either a list of ids or a single id.
            values.update({
                'default_sale_tax': isinstance(taxes_id, list) and taxes_id[0] or taxes_id,
                'default_purchase_tax': isinstance(supplier_taxes_id, list) and supplier_taxes_id[0] or supplier_taxes_id,
            })
            # update gain/loss exchange rate accounts
            values.update({
                'income_currency_exchange_account_id': company.income_currency_exchange_account_id and company.income_currency_exchange_account_id.id or False,
                'expense_currency_exchange_account_id': company.expense_currency_exchange_account_id and company.expense_currency_exchange_account_id.id or False
            })
        return {'value': values}
    def onchange_chart_template_id(self, cr, uid, ids, chart_template_id, context=None):
        """When the chart template changes, propose its default sale and
        purchase taxes (or reset to 15% rates when no template)."""
        tax_templ_obj = self.pool.get('account.tax.template')
        res = {'value': {
            'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False,
            'sale_tax_rate': 15, 'purchase_tax_rate': 15,
        }}
        if chart_template_id:
            # update complete_tax_set, sale_tax and purchase_tax
            chart_template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
            res['value'].update({'complete_tax_set': chart_template.complete_tax_set})
            if chart_template.complete_tax_set:
                # default tax is given by the lowest sequence. For same sequence we will take the latest created as it will be the case for tax created while isntalling the generic chart of account
                sale_tax_ids = tax_templ_obj.search(cr, uid,
                    [("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('sale','all'))],
                    order="sequence, id desc")
                purchase_tax_ids = tax_templ_obj.search(cr, uid,
                    [("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('purchase','all'))],
                    order="sequence, id desc")
                res['value']['sale_tax'] = sale_tax_ids and sale_tax_ids[0] or False
                res['value']['purchase_tax'] = purchase_tax_ids and purchase_tax_ids[0] or False
            if chart_template.code_digits:
                res['value']['code_digits'] = chart_template.code_digits
        return res
    def onchange_tax_rate(self, cr, uid, ids, rate, context=None):
        """Mirror the sale tax rate onto the purchase tax rate by default."""
        return {'value': {'purchase_tax_rate': rate or False}}
def onchange_multi_currency(self, cr, uid, ids, group_multi_currency, context=None):
res = {}
if not group_multi_currency:
res['value'] = {'income_currency_exchange_account_id': False, 'expense_currency_exchange_account_id': False}
return res
def onchange_start_date(self, cr, uid, id, start_date):
if start_date:
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1)
return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}}
return {}
def open_company_form(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
return {
'type': 'ir.actions.act_window',
'name': 'Configure your Company',
'res_model': 'res.company',
'res_id': config.company_id.id,
'view_mode': 'form',
}
    def set_default_taxes(self, cr, uid, ids, context=None):
        """ set default sale and purchase taxes for products """
        # Only settings managers may persist global defaults.
        if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
        ir_values = self.pool.get('ir.values')
        config = self.browse(cr, uid, ids[0], context)
        # Defaults are written as SUPERUSER so they apply company-wide.
        ir_values.set_default(cr, SUPERUSER_ID, 'product.template', 'taxes_id',
            config.default_sale_tax and [config.default_sale_tax.id] or False, company_id=config.company_id.id)
        ir_values.set_default(cr, SUPERUSER_ID, 'product.template', 'supplier_taxes_id',
            config.default_purchase_tax and [config.default_purchase_tax.id] or False, company_id=config.company_id.id)
    def set_chart_of_accounts(self, cr, uid, ids, context=None):
        """ install a chart of accounts for the given company (if required) """
        config = self.browse(cr, uid, ids[0], context)
        if config.chart_template_id:
            # A template may only be applied to a legal entity that does not
            # already have a chart of accounts.
            assert config.expects_chart_of_accounts and not config.has_chart_of_accounts
            # Delegate the actual installation to the multi-chart wizard.
            wizard = self.pool.get('wizard.multi.charts.accounts')
            wizard_id = wizard.create(cr, uid, {
                'company_id': config.company_id.id,
                'chart_template_id': config.chart_template_id.id,
                'code_digits': config.code_digits or 6,
                'sale_tax': config.sale_tax.id,
                'purchase_tax': config.purchase_tax.id,
                'sale_tax_rate': config.sale_tax_rate,
                'purchase_tax_rate': config.purchase_tax_rate,
                'complete_tax_set': config.complete_tax_set,
                'currency_id': config.currency_id.id,
            }, context)
            wizard.execute(cr, uid, [wizard_id], context)
    def set_fiscalyear(self, cr, uid, ids, context=None):
        """ create a fiscal year for the given company (if necessary) """
        config = self.browse(cr, uid, ids[0], context)
        if config.has_chart_of_accounts or config.chart_template_id:
            fiscalyear = self.pool.get('account.fiscalyear')
            # Skip creation when a fiscal year already covers the range.
            fiscalyear_count = fiscalyear.search_count(cr, uid,
                [('date_start', '<=', config.date_start), ('date_stop', '>=', config.date_stop),
                 ('company_id', '=', config.company_id.id)],
                context=context)
            if not fiscalyear_count:
                # Name/code from the year(s) spanned, e.g. "2013" or "2013-2014".
                name = code = config.date_start[:4]
                if int(name) != int(config.date_stop[:4]):
                    name = config.date_start[:4] +'-'+ config.date_stop[:4]
                    code = config.date_start[2:4] +'-'+ config.date_stop[2:4]
                vals = {
                    'name': name,
                    'code': code,
                    'date_start': config.date_start,
                    'date_stop': config.date_stop,
                    'company_id': config.company_id.id,
                }
                fiscalyear_id = fiscalyear.create(cr, uid, vals, context=context)
                if config.period == 'month':
                    fiscalyear.create_period(cr, uid, [fiscalyear_id])
                elif config.period == '3months':
                    fiscalyear.create_period3(cr, uid, [fiscalyear_id])
def get_default_dp(self, cr, uid, fields, context=None):
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account')
return {'decimal_precision': dp.digits}
def set_default_dp(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account')
dp.write({'digits': config.decimal_precision})
def onchange_analytic_accounting(self, cr, uid, ids, analytic_accounting, context=None):
if analytic_accounting:
return {'value': {
'module_account_accountant': True,
}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ntt-pf-lab/backup_keystone | keystone/logic/types/fault.py | 4 | 4840 | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from lxml import etree
class IdentityFault(Exception):
    """Base Exception type for all auth exceptions.

    Carries an HTTP-style status ``code``, a short ``msg`` and optional
    ``details``, and serializes itself to the XML/JSON fault formats.
    Subclasses override ``key`` to set the fault element/field name.
    """

    def __init__(self, msg, details=None, code=500):
        # Set args explicitly so repr()/pickling expose all fault data.
        self.args = (code, msg, details)
        self.code = code
        self.msg = msg
        self.details = details
        self.key = "IdentityFault"

    @property
    def message(self):
        # Convenience alias kept for callers that expect a .message attribute.
        return self.msg

    def to_xml(self):
        """Serialize the fault to an XML string."""
        dom = etree.Element(self.key,
                        xmlns="http://docs.openstack.org/identity/api/v2.0")
        dom.set("code", str(self.code))
        msg = etree.Element("message")
        msg.text = self.msg
        dom.append(msg)
        # Use an identity check: 'is not None' is the idiomatic (and safe)
        # null test, unlike '!= None' which invokes custom __ne__.
        if self.details is not None:
            desc = etree.Element("details")
            desc.text = self.details
            dom.append(desc)
        return etree.tostring(dom)

    def to_json(self):
        """Serialize the fault to a JSON string keyed by ``self.key``."""
        fault = {}
        fault["message"] = self.msg
        fault["code"] = str(self.code)
        if self.details is not None:
            fault["details"] = self.details
        ret = {}
        ret[self.key] = fault
        return json.dumps(ret)
class ServiceUnavailableFault(IdentityFault):
    """The auth service is unavailable (HTTP 503)."""
    def __init__(self, msg, details=None, code=503):
        super(ServiceUnavailableFault, self).__init__(msg, details, code)
        self.key = "serviceUnavailable"
class BadRequestFault(IdentityFault):
    """The request was malformed or invalid (HTTP 400)."""
    def __init__(self, msg, details=None, code=400):
        super(BadRequestFault, self).__init__(msg, details, code)
        self.key = "badRequest"
class UnauthorizedFault(IdentityFault):
    """The user is not authenticated (HTTP 401)."""
    def __init__(self, msg, details=None, code=401):
        super(UnauthorizedFault, self).__init__(msg, details, code)
        self.key = "unauthorized"
class ForbiddenFault(IdentityFault):
    """The user is authenticated but not allowed (HTTP 403)."""
    def __init__(self, msg, details=None, code=403):
        super(ForbiddenFault, self).__init__(msg, details, code)
        self.key = "forbidden"
class ItemNotFoundFault(IdentityFault):
    """The requested item was not found (HTTP 404)."""
    def __init__(self, msg, details=None, code=404):
        super(ItemNotFoundFault, self).__init__(msg, details, code)
        self.key = "itemNotFound"
class TenantDisabledFault(IdentityFault):
    """The tenant is disabled (HTTP 403)."""
    def __init__(self, msg, details=None, code=403):
        super(TenantDisabledFault, self).__init__(msg, details, code)
        self.key = "tenantDisabled"
class TenantConflictFault(IdentityFault):
    """A tenant with the same identifier already exists (HTTP 409)."""
    def __init__(self, msg, details=None, code=409):
        super(TenantConflictFault, self).__init__(msg, details, code)
        self.key = "tenantConflict"
class OverlimitFault(IdentityFault):
    """A rate or quota limit has been exceeded (HTTP 409 by default).

    `retry_at` optionally indicates when the client may retry.
    """
    def __init__(self, msg, details=None, code=409, retry_at=None):
        super(OverlimitFault, self).__init__(msg, details, code)
        # Re-set args so retry_at is exposed alongside the base fault data.
        self.args = (code, msg, details, retry_at)
        self.retry_at = retry_at
        self.key = "overLimit"
class UserConflictFault(IdentityFault):
    """A user with the same identifier already exists (HTTP 409)."""
    def __init__(self, msg, details=None, code=409):
        super(UserConflictFault, self).__init__(msg, details, code)
        self.key = "userConflict"
class UserDisabledFault(IdentityFault):
    """The user is disabled (HTTP 403)."""
    def __init__(self, msg, details=None, code=403):
        super(UserDisabledFault, self).__init__(msg, details, code)
        self.key = "userDisabled"
class EmailConflictFault(IdentityFault):
    """The email address is already in use (HTTP 409)."""
    def __init__(self, msg, details=None, code=409):
        super(EmailConflictFault, self).__init__(msg, details, code)
        self.key = "emailConflict"
class RoleConflictFault(IdentityFault):
    """A role with the same identifier already exists (HTTP 409)."""
    def __init__(self, msg, details=None, code=409):
        super(RoleConflictFault, self).__init__(msg, details, code)
        self.key = "roleConflict"
class ServiceConflictFault(IdentityFault):
    """A service with the same identifier already exists (HTTP 409)."""
    def __init__(self, msg, details=None, code=409):
        super(ServiceConflictFault, self).__init__(msg, details, code)
        self.key = "serviceConflict"
| apache-2.0 |
pandegroup/osprey | osprey/strategies.py | 2 | 12612 | from __future__ import print_function, absolute_import, division
import sys
import inspect
import socket
import numpy as np
from sklearn.utils import check_random_state
from sklearn.model_selection import ParameterGrid
try:
from hyperopt import (Trials, tpe, fmin, STATUS_OK, STATUS_RUNNING,
STATUS_FAIL)
except ImportError:
# hyperopt is optional, but required for hyperopt_tpe()
pass
from .search_space import EnumVariable
from .acquisition_functions import AcquisitionFunction
from .surrogate_models import (MaximumLikelihoodGaussianProcess,
GaussianProcessKernel)
DEFAULT_TIMEOUT = socket._GLOBAL_DEFAULT_TIMEOUT
class BaseStrategy(object):
short_name = None
def suggest(self, history, searchspace):
"""
Parameters
----------
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
searchspace : SearchSpace
Instance of search_space.SearchSpace
random_state :i nteger or numpy.RandomState, optional
The random seed for sampling. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Returns
-------
new_params : dict
"""
raise NotImplementedError()
@staticmethod
def is_repeated_suggestion(params, history):
"""
Parameters
----------
params : dict
Trial param set
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
Returns
-------
is_repeated_suggestion : bool
"""
if any(params == hparams and hstatus == 'SUCCEEDED'
for hparams, hscore, hstatus in history):
return True
else:
return False
class RandomSearch(BaseStrategy):
    """Strategy that samples parameter sets uniformly at random."""
    short_name = 'random'

    def __init__(self, seed=None):
        # Seed for reproducible draws; None delegates to global numpy state.
        self.seed = seed

    def suggest(self, history, searchspace):
        """Draw a random point from `searchspace`; `history` is ignored."""
        return searchspace.rvs(self.seed)
class HyperoptTPE(BaseStrategy):
    """Strategy suggesting points with hyperopt's Tree of Parzen Estimators.

    Requires the optional `hyperopt` package.
    """
    short_name = 'hyperopt_tpe'

    def __init__(self, seed=None, gamma=0.25, seeds=20):
        # seed: RNG seed; gamma: TPE good/bad quantile split;
        # seeds: number of random startup trials before TPE takes over.
        self.seed = seed
        self.gamma = gamma
        self.seeds = seeds

    def suggest(self, history, searchspace):
        """
        Suggest params to maximize an objective function based on the
        function evaluation history using a tree of Parzen estimators (TPE),
        as implemented in the hyperopt package.

        Use of this function requires that hyperopt be installed.
        """
        # This function is very odd, because as far as I can tell there's
        # no real documented API for any of the internals of hyperopt. Its
        # execution model is that hyperopt calls your objective function
        # (instead of merely providing you with suggested points, and then
        # you calling the function yourself), and its very tricky (for me)
        # to use the internal hyperopt data structures to get these predictions
        # out directly.

        # so the path we take in this function is to construct a synthetic
        # hyperopt.Trials database from the `history`, and then call
        # hyperopt.fmin with a dummy objective function that logs the value
        # used, and then return that value to our client.

        # The form of the hyperopt.Trials database isn't really documented in
        # the code -- most of this comes from reverse engineering it, by
        # running fmin() on a simple function and then inspecting the form of
        # the resulting trials object.
        if 'hyperopt' not in sys.modules:
            raise ImportError('No module named hyperopt')

        random = check_random_state(self.seed)
        hp_searchspace = searchspace.to_hyperopt()

        trials = Trials()
        for i, (params, scores, status) in enumerate(history):
            if status == 'SUCCEEDED':
                # we're doing maximization, hyperopt.fmin() does minimization,
                # so we need to swap the sign
                result = {'loss': -np.mean(scores), 'status': STATUS_OK}
            elif status == 'PENDING':
                result = {'status': STATUS_RUNNING}
            elif status == 'FAILED':
                result = {'status': STATUS_FAIL}
            else:
                raise RuntimeError('unrecognized status: %s' % status)

            # the vals key in the trials dict is basically just the params
            # dict, but enum variables (hyperopt hp.choice() nodes) are
            # different, because the index of the parameter is specified
            # in vals, not the parameter itself.
            vals = {}
            for var in searchspace:
                if isinstance(var, EnumVariable):
                    # Get the index in the choices of the parameter.  The
                    # comprehension variables are deliberately NOT named `i`
                    # so they cannot clobber the trial index under Python 2
                    # list-comprehension scoping.
                    matches = [
                        idx for idx, choice in enumerate(var.choices)
                        if choice == params[var.name]
                    ]
                    assert len(matches) == 1
                    vals[var.name] = matches
                else:
                    # the other big difference is that all of the param values
                    # are wrapped in length-1 lists.
                    vals[var.name] = [params[var.name]]

            trials.insert_trial_doc({
                'misc': {
                    'cmd': ('domain_attachment', 'FMinIter_Domain'),
                    'idxs': dict((k, [i]) for k in hp_searchspace.keys()),
                    'tid': i,
                    'vals': vals,
                    'workdir': None
                },
                'result': result,
                'tid': i,
                # bunch of fixed fields that hyperopt seems to require
                'owner': None,
                'spec': None,
                'state': 2,
                'book_time': None,
                'exp_key': None,
                'refresh_time': None,
                'version': 0
            })
        trials.refresh()

        chosen_params_container = []

        def wrapped_suggest(*args, **kwargs):
            # Thread our configured gamma / startup-job count through to TPE.
            return tpe.suggest(*args,
                               gamma=self.gamma,
                               n_startup_jobs=self.seeds,
                               **kwargs)

        def mock_fn(x):
            # http://stackoverflow.com/a/3190783/1079728
            # to get around no nonlocal keywork in python2
            chosen_params_container.append(x)
            return 0

        # BUGFIX: pass the wrapper (previously bare tpe.suggest was passed,
        # silently ignoring the gamma and seeds settings).
        fmin(fn=mock_fn,
             algo=wrapped_suggest,
             space=hp_searchspace,
             trials=trials,
             max_evals=len(trials.trials) + 1,
             **self._hyperopt_fmin_random_kwarg(random))
        chosen_params = chosen_params_container[0]

        return chosen_params

    @staticmethod
    def _hyperopt_fmin_random_kwarg(random):
        """Build the RNG kwarg for fmin(), whose name varies by hyperopt version."""
        if 'rstate' in inspect.getargspec(fmin).args:
            # 0.0.3-dev version uses this argument
            kwargs = {'rstate': random, 'allow_trials_fmin': False}
        elif 'rseed' in inspect.getargspec(fmin).args:
            # 0.0.2 version uses different argument
            kwargs = {'rseed': random.randint(2**32 - 1)}
        return kwargs
class Bayes(BaseStrategy):
    """Bayesian optimization with a Gaussian-process surrogate model.

    Successful trials are fit with a GP; an acquisition function is then
    maximized to pick the next point to evaluate. Falls back to random
    search until ``seeds`` trials exist, or when the proposed point is
    (nearly) a duplicate of a pending or already-evaluated point.
    """
    short_name = 'bayes'

    def __init__(self,
                 acquisition=None,
                 surrogate=None,
                 kernels=None,
                 seed=None,
                 seeds=1,
                 max_feval=5E4,
                 max_iter=1E5,
                 n_iter=50):
        self.seed = seed
        self.seeds = seeds
        self.max_feval = max_feval  # max function evaluations for the GP fit
        self.max_iter = max_iter    # max iterations of the acquisition search
        self.n_iter = n_iter
        self.n_dims = None          # set lazily in suggest()
        if surrogate is None:
            surrogate = 'gp'
        self.surrogate = surrogate
        if kernels is None:
            kernels = [{
                'name': 'GPy.kern.Matern52',
                'params': {
                    'ARD': True
                },
                'options': {
                    'independent': False
                }
            }]
        self.kernel_params = kernels
        if acquisition is None:
            acquisition = {'name': 'osprey', 'params': {}}
        self.acquisition_params = acquisition

    def _get_data(self, history, searchspace):
        """Split ``history`` into ``(X, Y, V, ignore)`` arrays in the GP unit cube.

        ``X``/``Y``/``V`` hold the points, mean scores and score variances of
        SUCCEEDED trials; ``ignore`` holds the points of PENDING trials.
        FAILED trials are dropped entirely.
        """
        X = []
        Y = []
        V = []
        ignore = []
        for param_dict, scores, status in history:
            # transform points into the GP domain. This involves bringing
            # int and enum variables to floating point, etc.
            if status == 'FAILED':
                # not sure how to deal with these yet
                continue
            point = searchspace.point_to_unit(param_dict)
            if status == 'SUCCEEDED':
                X.append(point)
                Y.append(np.mean(scores))
                V.append(np.var(scores))
            elif status == 'PENDING':
                ignore.append(point)
            else:
                raise RuntimeError('unrecognized status: %s' % status)
        return (np.array(X).reshape(-1, self.n_dims),
                np.array(Y).reshape(-1, 1), np.array(V).reshape(-1, 1),
                np.array(ignore).reshape(-1, self.n_dims))

    def _from_unit(self, result, searchspace):
        """Map a point in the GP unit cube back to a parameter dict.

        The GP only deals with float-valued variables, so int and enum
        valued variables are transformed before calling the GP, and the
        suggestion must be reverse-transformed here.
        """
        out = {}
        for gpvalue, var in zip(result, searchspace):
            out[var.name] = var.point_from_unit(float(gpvalue))
        return out

    def _is_within(self, point, X, tol=1E-2):
        """Return True if ``point`` lies within ``tol`` (Euclidean) of any row of ``X``."""
        if len(X) == 0:
            return False
        # BUG FIX: distances must be computed per sample (axis=1). The
        # previous ``sum(axis=0)`` aggregated across samples and therefore
        # never measured a true point-to-point distance.
        dists = np.sqrt(((point - X) ** 2).sum(axis=1))
        return bool(np.any(dists <= tol))

    def suggest(self, history, searchspace, max_tries=5):
        """Suggest the next parameter dict to evaluate given the trial history."""
        if len(history) < self.seeds:
            return RandomSearch().suggest(history, searchspace)
        self.n_dims = searchspace.n_dims
        X, Y, V, ignore = self._get_data(history, searchspace)
        # TODO make _create_kernel accept optional args.
        # Define and fit model
        if self.surrogate == 'gp':
            kernel = GaussianProcessKernel(self.kernel_params, self.n_dims)
            model = MaximumLikelihoodGaussianProcess(X=X,
                                                     Y=Y,
                                                     kernel=kernel.kernel,
                                                     max_feval=self.max_feval)
        else:
            raise NotImplementedError(
                'Surrogate model not recognised. Please choose from: gp')
        model.fit()
        # Define acquisition function and get best candidate
        af = AcquisitionFunction(surrogate=model,
                                 acquisition_params=self.acquisition_params,
                                 n_dims=self.n_dims,
                                 n_iter=self.n_iter,
                                 max_iter=self.max_iter)
        suggestion = af.get_best_candidate()
        # BUG FIX: ``suggestion in ignore`` relied on numpy's elementwise
        # __contains__, which is True as soon as *any single coordinate*
        # matches anywhere in ``ignore``; use the same row-distance test
        # as for X instead.
        if self._is_within(suggestion, ignore) or self._is_within(suggestion, X):
            return RandomSearch().suggest(history, searchspace)
        return self._from_unit(suggestion, searchspace)
class GridSearch(BaseStrategy):
    """Exhaustive search over an all-enum search space, visited in grid order."""
    short_name = 'grid'

    def __init__(self):
        self.param_grid = None  # built lazily on the first suggest() call
        self.current = -1       # index of the most recently returned point

    def suggest(self, history, searchspace):
        """Return the next parameter dict from the grid, wrapping around."""
        if self.param_grid is None:
            # Every variable must be enumerable for a grid to make sense.
            if not all(isinstance(v, EnumVariable) for v in searchspace):
                raise RuntimeError(
                    "GridSearchStrategy is defined only for all-enum search space"
                )
            self.param_grid = ParameterGrid(
                {v.name: v.choices for v in searchspace})
        # NOTE: there is no way of signaling end of parameters to be searched against
        # so user should pick correctly number of evaluations
        self.current += 1
        return self.param_grid[self.current % len(self.param_grid)]
| apache-2.0 |
BorisJeremic/Real-ESSI-Examples | parallel/test_cases/4NodeANDES/circular_plate_clamped/side_length_10/compare_txt.py | 637 | 2094 | #!/usr/bin/python
# Python 2 comparison script: loads a single analytic value and a single
# numeric value from text files and prints the relative error in a
# color-coded table (helpers come from the project's compare_function dir).
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
# Each file is expected to contain exactly one scalar (float() below
# would raise otherwise).
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
rel_error = abs_error/analytic_sol
# Convert the 0-d numpy arrays to plain floats for formatting.
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
# print the results
# NOTE(review): the header says "error[%]" but rel_error is a plain ratio
# (never multiplied by 100) — confirm the intended units.
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error[%]')
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
    print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
| cc0-1.0 |
spcui/tp-qemu | qemu/tests/getfd.py | 16 | 2195 | import os
from autotest.client.shared import error
def run(test, params, env):
    """
    Test QEMU's getfd command

    1) Boot up a guest
    2) Pass file descriptors via getfd
    3) Check if qemu process has a copy of the file descriptor

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def has_fd(pid, filepath):
        """
        Returns true if process has a file descriptor pointing to filepath

        :param pid: the process id
        :param filepath: the full path for the file
        """
        dirname = "/proc/%s/fd" % pid
        fd_links = [os.path.join(dirname, f) for f in os.listdir(dirname)]
        # Resolve every fd symlink and check whether any points at filepath.
        pathlist = [os.readlink(f) for f in fd_links if os.path.islink(f)]
        return filepath in pathlist

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()
    if pid is None:
        raise error.TestError("Fail to get process id for VM")

    # directory for storing temporary files
    fdfiles_dir = os.path.join(test.tmpdir, 'fdfiles')
    if not os.path.isdir(fdfiles_dir):
        os.mkdir(fdfiles_dir)

    # number of files
    nofiles = int(params.get("number_of_files", "900"))
    for n in range(nofiles):
        name = "fdfile-%s" % n
        path = os.path.join(fdfiles_dir, name)
        fd = os.open(path, os.O_RDWR | os.O_CREAT)
        try:
            # QEMU dup()s the descriptor on getfd, so ours can be closed
            # unconditionally afterwards.
            response = vm.monitor.getfd(fd, name)
        finally:
            # BUG FIX: previously the descriptor leaked whenever
            # vm.monitor.getfd() raised.
            os.close(fd)
        # getfd is supposed to generate no output
        if response:
            raise error.TestError("getfd returned error: %s" % response)
        # check if qemu process has a copy of the fd
        if not has_fd(pid, path):
            raise error.TestError("QEMU process does not seem to have a file "
                                  "descriptor pointing to file %s" % path)

    # clean up files (best-effort; missing files are ignored)
    for n in range(nofiles):
        path = os.path.join(fdfiles_dir, "fdfile-%s" % n)
        try:
            os.unlink(path)
        except OSError:
            pass
| gpl-2.0 |
pschmitt/home-assistant | homeassistant/components/rpi_gpio/__init__.py | 8 | 1332 | """Support for controlling GPIO pins of a Raspberry Pi."""
import logging
from RPi import GPIO # pylint: disable=import-error
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
DOMAIN = "rpi_gpio"
def setup(hass, config):
    """Set up the Raspberry PI GPIO component."""

    def _release_pins(event):
        """Free every claimed GPIO pin when Home Assistant shuts down."""
        GPIO.cleanup()

    def _arm_cleanup(event):
        """Once started, make sure pins are released again on stop."""
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _release_pins)

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _arm_cleanup)
    # Pin numbers used by this integration follow the Broadcom (BCM) scheme.
    GPIO.setmode(GPIO.BCM)
    return True
def setup_output(port):
    """Set up a GPIO as output."""
    # Claim the BCM-numbered pin for writing (mode set globally in setup()).
    GPIO.setup(port, GPIO.OUT)
def setup_input(port, pull_mode):
    """Set up a GPIO as input."""
    # Any pull_mode other than "DOWN" selects the internal pull-up resistor.
    GPIO.setup(port, GPIO.IN, GPIO.PUD_DOWN if pull_mode == "DOWN" else GPIO.PUD_UP)
def write_output(port, value):
    """Write a value to a GPIO."""
    # value is passed straight through to RPi.GPIO (truthy -> high).
    GPIO.output(port, value)
def read_input(port):
    """Read a value from a GPIO."""
    # Returns the current logic level of the pin as reported by RPi.GPIO.
    return GPIO.input(port)
def edge_detect(port, event_callback, bounce):
    """Add detection for RISING and FALLING events."""
    # GPIO.BOTH fires the callback on either edge; ``bounce`` is the
    # debounce time (milliseconds, per the RPi.GPIO API).
    GPIO.add_event_detect(port, GPIO.BOTH, callback=event_callback, bouncetime=bounce)
| apache-2.0 |
40223234/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/locale.py | 624 | 1918 | def getdefaultlocale():
return __BRYTHON__.language,None
def localeconv():
    """ localeconv() -> dict.

        Returns numeric and monetary locale-specific parameters.
    """
    # 'C' locale default values.
    # BUG FIX: the original literal listed 'decimal_point', 'negative_sign',
    # 'positive_sign' and 'p_sep_by_space' twice; the duplicates (dead keys)
    # have been removed. The resulting dict is unchanged.
    return {'grouping': [127],
            'currency_symbol': '',
            'n_sign_posn': 127,
            'p_cs_precedes': 127,
            'n_cs_precedes': 127,
            'mon_grouping': [],
            'n_sep_by_space': 127,
            'decimal_point': '.',
            'negative_sign': '',
            'positive_sign': '',
            'p_sep_by_space': 127,
            'int_curr_symbol': '',
            'p_sign_posn': 127,
            'thousands_sep': '',
            'mon_thousands_sep': '',
            'frac_digits': 127,
            'mon_decimal_point': '',
            'int_frac_digits': 127}
def setlocale(category, value=None):
    """ setlocale(integer,string=None) -> string.

        Activates/queries locale processing.
    """
    # Only a query (None/''), or the "C" locale itself, is supported by
    # this emulation; anything else is rejected.
    if value in (None, '', 'C'):
        return 'C'
    raise Error('_locale emulation only supports "C" locale')
# Locale category constants (values appear to mirror CPython's _locale
# module on glibc platforms — TODO confirm).
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
# The stdlib defines a dedicated locale.Error class; this emulation simply
# aliases ValueError, which setlocale() raises for unsupported locales.
Error = ValueError
def getlocale(category=LC_CTYPE):
    """ Returns the current setting for the given locale category as
        tuple (language code, encoding).

        category may be one of the LC_* value except LC_ALL. It
        defaults to LC_CTYPE.

        Except for the code 'C', the language code corresponds to RFC
        1766. code and encoding can be None in case the values cannot
        be determined.
    """
    # This emulation cannot determine either value, so both are None.
    return None, None
| gpl-3.0 |
kevinjqiu/btsync.py | docs/conf.py | 1 | 7788 | # -*- coding: utf-8 -*-
#
# Sphinx configuration for the btsync.py documentation.
#
# Originally generated by sphinx-quickstart; only values that differ from
# the Sphinx defaults are set here.

import sys, os

# If extensions (or modules to document with autodoc) live in another
# directory, add it to sys.path here (relative paths must be made absolute):
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# Sphinx extension modules used when building these docs.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Paths (relative to this directory) that contain templates.
templates_path = ['_templates']

# Suffix of reST source files.
source_suffix = '.rst'

# The document that holds the root toctree.
master_doc = 'index'

# Project metadata.
project = u'btsync.py'
copyright = u'2013, Kevin Jing Qiu'

# The short X.Y version and the full release string.
version = '0.9'
release = '0.9'

# Patterns (relative to the source dir) to ignore when looking for sources.
exclude_patterns = ['_build']

# Pygments style used for syntax highlighting.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

html_theme = 'default'

# Custom static files (e.g. style sheets), copied after the builtin ones.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'btsyncpydoc'

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX knobs (papersize, pointsize, preamble, ...) are left at their
# defaults.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
  ('index', 'btsyncpy.tex', u'btsync.py Documentation',
   u'Kevin Jing Qiu', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'btsyncpy', u'btsync.py Documentation',
     [u'Kevin Jing Qiu'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
  ('index', 'btsyncpy', u'btsync.py Documentation',
   u'Kevin Jing Qiu', 'btsyncpy', 'One line description of project.',
   'Miscellaneous'),
]
| mit |
kwikadi/orange3 | Orange/tests/test_instance.py | 1 | 14396 | from math import isnan
import warnings
import unittest
from unittest.mock import MagicMock
import numpy as np
from numpy.testing import assert_array_equal
from Orange.data import \
Instance, Domain, Unknown, Value, \
DiscreteVariable, ContinuousVariable, StringVariable
class TestInstance(unittest.TestCase):
    """Unit tests for Orange.data.Instance: construction from arrays, lists
    and other instances; item get/set; string forms; equality; ids."""
    # Shared fixtures: plain attribute/class names plus three typed metas.
    attributes = ["Feature %i" % i for i in range(10)]
    class_vars = ["Class %i" % i for i in range(1)]
    metas = [DiscreteVariable("Meta 1", values="XYZ"),
             ContinuousVariable("Meta 2"),
             StringVariable("Meta 3")]
    def mock_domain(self, with_classes=False, with_metas=False):
        """Return a MagicMock Domain built from the fixture name lists."""
        attributes = self.attributes
        class_vars = self.class_vars if with_classes else []
        metas = self.metas if with_metas else []
        variables = attributes + class_vars
        return MagicMock(Domain,
                         attributes=attributes,
                         class_vars=class_vars,
                         metas=metas,
                         variables=variables)
    def create_domain(self, attributes=(), classes=(), metas=()):
        """Return a real Domain; plain strings become Continuous (attrs,
        classes) or 5-valued Discrete (metas) variables."""
        attr_vars = [ContinuousVariable(name=a) if isinstance(a, str) else a
                     for a in attributes]
        class_vars = [ContinuousVariable(name=c) if isinstance(c, str) else c
                      for c in classes]
        meta_vars = [DiscreteVariable(name=m, values=map(str, range(5)))
                     if isinstance(m, str) else m
                     for m in metas]
        domain = Domain(attr_vars, class_vars, meta_vars)
        return domain
    # --- construction without data: all values start unknown (NaN) ---
    def test_init_x_no_data(self):
        domain = self.mock_domain()
        inst = Instance(domain)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (len(self.attributes), ))
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
        self.assertTrue(all(isnan(x) for x in inst._x))
    def test_init_xy_no_data(self):
        domain = self.mock_domain(with_classes=True)
        inst = Instance(domain)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (len(self.attributes), ))
        self.assertEqual(inst._y.shape, (len(self.class_vars), ))
        self.assertEqual(inst._metas.shape, (0, ))
        self.assertTrue(all(isnan(x) for x in inst._x))
        self.assertTrue(all(isnan(x) for x in inst._y))
    def test_init_xym_no_data(self):
        domain = self.mock_domain(with_classes=True, with_metas=True)
        inst = Instance(domain)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (len(self.attributes), ))
        self.assertEqual(inst._y.shape, (len(self.class_vars), ))
        self.assertEqual(inst._metas.shape, (3, ))
        self.assertTrue(all(isnan(x) for x in inst._x))
        self.assertTrue(all(isnan(x) for x in inst._y))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            assert_array_equal(inst._metas,
                               np.array([var.Unknown for var in domain.metas],
                                        dtype=object))
    # --- construction from arrays / lists / other instances ---
    def test_init_x_arr(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
        vals = np.array([42, 0])
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals)
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
        domain = self.create_domain()
        inst = Instance(domain, np.empty((0,)))
        self.assertEqual(inst._x.shape, (0, ))
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_x_list(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
        lst = [42, 0]
        vals = np.array(lst)
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals)
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
        domain = self.create_domain()
        inst = Instance(domain, [])
        self.assertEqual(inst._x.shape, (0, ))
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_xy_arr(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")])
        vals = np.array([42, 0, 1])
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals[:2])
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._y[0], 1)
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_xy_list(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")])
        lst = [42, "M", "C"]
        vals = np.array([42, 0, 2])
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals[:2])
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._y[0], 2)
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_xym_arr(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = np.array([42, "M", "B", "X", 43, "Foo"], dtype=object)
        inst = Instance(domain, vals)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (2, ))
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._metas.shape, (3, ))
        assert_array_equal(inst._x, np.array([42, 0]))
        self.assertEqual(inst._y[0], 1)
        assert_array_equal(inst._metas, np.array([0, 43, "Foo"], dtype=object))
    def test_init_xym_list(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (2, ))
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._metas.shape, (3, ))
        assert_array_equal(inst._x, np.array([42, 0]))
        self.assertEqual(inst._y[0], 1)
        assert_array_equal(inst._metas, np.array([0, 43, "Foo"], dtype=object))
    def test_init_inst(self):
        # Converting an instance into a second domain maps matching
        # variables across and leaves the rest Unknown.
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        inst2 = Instance(domain, inst)
        assert_array_equal(inst2._x, np.array([42, 0]))
        self.assertEqual(inst2._y[0], 1)
        assert_array_equal(inst2._metas, np.array([0, 43, "Foo"], dtype=object))
        domain2 = self.create_domain(["z", domain[1], self.metas[1]],
                                     domain.class_vars,
                                     [self.metas[0], "w", domain[0]])
        inst2 = Instance(domain2, inst)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            assert_array_equal(inst2._x, np.array([Unknown, 0, 43]))
            self.assertEqual(inst2._y[0], 1)
            assert_array_equal(inst2._metas, np.array([0, Unknown, 42],
                                                      dtype=object))
    # --- indexing by position, name and variable object ---
    def test_get_item(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        val = inst[0]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[0], 42)
        self.assertEqual(inst["x"], 42)
        self.assertEqual(inst[domain[0]], 42)
        val = inst[1]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[1], "M")
        self.assertEqual(inst["g"], "M")
        self.assertEqual(inst[domain[1]], "M")
        val = inst[2]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[2], "B")
        self.assertEqual(inst["y"], "B")
        self.assertEqual(inst[domain.class_var], "B")
        val = inst[-2]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[-2], 43)
        self.assertEqual(inst["Meta 2"], 43)
        self.assertEqual(inst[self.metas[1]], 43)
        with self.assertRaises(ValueError):
            inst["asdf"] = 42
        with self.assertRaises(ValueError):
            inst[ContinuousVariable("asdf")] = 42
    def test_list(self):
        # .list flattens attributes, classes and metas into one Python list.
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        l = inst.list
        self.assertIsInstance(l, list)
        self.assertEqual(l, [42, "M", "B", "X", 43, "Foo"])
        self.assertGreater(len(l), len(inst))
        self.assertEqual(len(l), 6)
    def test_set_item(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        inst[0] = 43
        self.assertEqual(inst[0], 43)
        inst["x"] = 44
        self.assertEqual(inst[0], 44)
        inst[domain[0]] = 45
        self.assertEqual(inst[0], 45)
        inst[1] = "F"
        self.assertEqual(inst[1], "F")
        inst["g"] = "M"
        self.assertEqual(inst[1], "M")
        with self.assertRaises(ValueError):
            inst[1] = "N"
        with self.assertRaises(ValueError):
            inst["asdf"] = 42
        inst[2] = "C"
        self.assertEqual(inst[2], "C")
        inst["y"] = "A"
        self.assertEqual(inst[2], "A")
        inst[domain.class_var] = "B"
        self.assertEqual(inst[2], "B")
        inst[-1] = "Y"
        self.assertEqual(inst[-1], "Y")
        inst["Meta 1"] = "Z"
        self.assertEqual(inst[-1], "Z")
        inst[domain.metas[0]] = "X"
        self.assertEqual(inst[-1], "X")
    # --- string representations ---
    def test_str(self):
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
        inst = Instance(domain, [42, 0])
        self.assertEqual(str(inst), "[42.000, M]")
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")])
        inst = Instance(domain, [42, "M", "B"])
        self.assertEqual(str(inst), "[42.000, M | B]")
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        inst = Instance(domain, [42, "M", "B", "X", 43, "Foo"])
        self.assertEqual(str(inst), "[42.000, M | B] {X, 43.000, Foo}")
        domain = self.create_domain([],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        inst = Instance(domain, ["B", "X", 43, "Foo"])
        self.assertEqual(str(inst), "[ | B] {X, 43.000, Foo}")
        domain = self.create_domain([],
                                    [],
                                    self.metas)
        inst = Instance(domain, ["X", 43, "Foo"])
        self.assertEqual(str(inst), "[] {X, 43.000, Foo}")
        domain = self.create_domain(self.attributes)
        inst = Instance(domain, range(len(self.attributes)))
        self.assertEqual(
            str(inst),
            "[{}]".format(", ".join("{:.3f}".format(x)
                                    for x in range(len(self.attributes)))))
        for attr in domain:
            attr.number_of_decimals = 0
        self.assertEqual(
            str(inst),
            "[{}]".format(", ".join("{}".format(x)
                                    for x in range(len(self.attributes)))))
    def test_repr(self):
        # repr truncates after the first five values.
        domain = self.create_domain(self.attributes)
        inst = Instance(domain, range(len(self.attributes)))
        self.assertEqual(repr(inst), "[0.000, 1.000, 2.000, 3.000, 4.000, ...]")
        for attr in domain:
            attr.number_of_decimals = 0
        self.assertEqual(repr(inst), "[0, 1, 2, 3, 4, ...]")
    def test_eq(self):
        # Equality must cover attributes, classes and metas alike.
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        inst2 = Instance(domain, vals)
        self.assertTrue(inst == inst2)
        self.assertTrue(inst2 == inst)
        inst2[0] = 43
        self.assertFalse(inst == inst2)
        inst2[0] = Unknown
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[2] = "C"
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[-1] = "Y"
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[-2] = "33"
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[-3] = "Bar"
        self.assertFalse(inst == inst2)
    def test_instance_id(self):
        # Explicit ids are honored; auto-generated ids are unique.
        domain = self.create_domain(["x"])
        vals = [42]
        inst = Instance(domain, vals, id=42)
        self.assertEqual(inst.id, 42)
        inst2 = Instance(domain, vals)
        inst3 = Instance(domain, vals)
        self.assertNotEqual(inst2.id, inst3.id)
| bsd-2-clause |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/tensorflow/contrib/labeled_tensor/python/ops/_typecheck.py | 52 | 10165 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
import re
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
    """Base class for runtime type-checker types.

    These mirror the types in the standard library's ``typing`` module
    (Python 3.5), except that custom types here are ordinary instances —
    ``List(int)`` rather than ``List[int]``. Subclasses implement
    ``__instancecheck__``; every constructor argument must be a type or a
    ``Type`` instance, and the arguments are stored as a tuple in ``_types``.
    """

    def __init__(self, *types):
        self._types = types

    def __repr__(self):
        joined = ", ".join(repr(t) for t in self._types)
        return "typecheck.%s(%s)" % (type(self).__name__, joined)
class _SingleArgumentType(Type):
    """Use this subclass for parametric types that accept only one argument."""

    def __init__(self, tpe):
        super(_SingleArgumentType, self).__init__(tpe)

    @property
    def _type(self):
        # ``_types`` always holds exactly one entry for this subclass; the
        # one-element unpacking asserts that invariant at access time.
        tpe, = self._types  # pylint: disable=unbalanced-tuple-unpacking
        return tpe
class _TwoArgumentType(Type):
    """Use this subclass for parametric types that accept two arguments."""

    def __init__(self, first_type, second_type):
        # Subclasses read the pair back out of ``self._types``.
        super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
    """A sum type.

    A correct type is any of the types provided.
    """

    def __instancecheck__(self, instance):
        # isinstance natively accepts a tuple of types, which implements
        # the union semantics directly.
        return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
    """An optional type.

    A correct type is either the provided type or NoneType (i.e. ``None``
    is always accepted).
    """

    def __instancecheck__(self, instance):
        # types.NoneType does not exist in Python 3
        return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
  """A typed list: a list in which every element has the provided type."""

  def __instancecheck__(self, instance):
    if not isinstance(instance, list):
      return False
    return all(isinstance(item, self._type) for item in instance)
class Sequence(_SingleArgumentType):
  """A typed sequence.

  A correct type is a sequence where each element has the single provided type.
  """
  def __instancecheck__(self, instance):
    # collections.Sequence moved to collections.abc in Python 3.3 and the
    # top-level alias was removed in Python 3.10; fall back for Python 2.
    abc = getattr(collections, 'abc', collections)
    return (isinstance(instance, abc.Sequence)
            and all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
  """A sized, iterable container.

  A correct type is an iterable and container with known size where each element
  has the single provided type.

  We use this in preference to Iterable because we check each instance of the
  iterable at runtime, and hence need to avoid iterables that could be
  exhausted.
  """
  def __instancecheck__(self, instance):
    # The ABCs moved to collections.abc in Python 3.3 and the top-level
    # aliases were removed in Python 3.10; fall back for Python 2.
    abc = getattr(collections, 'abc', collections)
    return (isinstance(instance, abc.Iterable)
            and isinstance(instance, abc.Sized)
            and isinstance(instance, abc.Container)
            and all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
  """A typed tuple: correct length and a matching type at every position."""

  def __instancecheck__(self, instance):
    if not isinstance(instance, tuple):
      return False
    if len(instance) != len(self._types):
      return False
    return all(isinstance(item, expected)
               for item, expected in zip(instance, self._types))
class Mapping(_TwoArgumentType):
  """A typed mapping.

  A correct type has the correct parametric types for keys and values.
  """
  def __instancecheck__(self, instance):
    key_type, value_type = self._types  # pylint: disable=unbalanced-tuple-unpacking
    # collections.Mapping moved to collections.abc in Python 3.3 and the
    # top-level alias was removed in Python 3.10; fall back for Python 2.
    abc = getattr(collections, 'abc', collections)
    return (isinstance(instance, abc.Mapping)
            and all(isinstance(k, key_type) for k in instance.keys())
            and all(isinstance(v, value_type) for v in instance.values()))
class Dict(Mapping):
  """A typed dict: a real dict with the given key and value types."""

  def __instancecheck__(self, instance):
    return (isinstance(instance, dict) and
            super(Dict, self).__instancecheck__(instance))
def _replace_forward_references(t, context):
  """Replace forward references (strings) in the given type with real types."""
  if isinstance(t, str):
    # A string is a forward reference: resolve it in the caller's namespace.
    return context[t]
  if isinstance(t, Type):
    # Recurse into parametric types, rebuilding the same Type subclass.
    resolved = [_replace_forward_references(sub, context)
                for sub in t._types]  # pylint: disable=protected-access
    return type(t)(*resolved)
  return t
def register_type_abbreviation(name, alias):
  """Register an abbreviation for a type in typecheck tracebacks.

  This makes otherwise very long typecheck errors much more readable.

  Example:
    typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')

  Args:
    name: type or class to abbreviate.
    alias: string alias to substitute.
  """
  # Consumed by _type_repr() below when rendering error messages.
  _TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
  """A more succinct repr for typecheck tracebacks."""
  string = repr(t)
  # Apply user-registered abbreviations first, then strip the noisy
  # "<class '...'>" / "<type '...'>" and "typecheck." wrappers.
  for known_type, alias in _TYPE_ABBREVIATIONS.items():
    string = string.replace(repr(known_type), alias)
  string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
  string = re.sub(r"typecheck\.(\w+)", r"\1", string)
  return string
# Subclasses TypeError so callers can distinguish typecheck failures from
# unrelated type errors while still catching them as TypeError.
class Error(TypeError):
  """Exception for typecheck failures."""
def accepts(*types):
  """A decorator which checks the input types of a function.

  Based on:
  http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
  The above draws from:
  https://www.python.org/dev/peps/pep-0318/

  Args:
    *types: A list of Python types, one per positional argument of the
      decorated function.

  Returns:
    A function to use as a decorator.

  Raises:
    Error: (at decoration time) if the number of types does not match the
      function's argument count, or a default value fails its check.
  """
  def check_accepts(f):
    """Check the types."""
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # and fall back for Python 2.
    get_spec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = get_spec(f)
    num_function_arguments = len(spec.args)
    if len(types) != num_function_arguments:
      raise Error(
          "Function %r has %d arguments but only %d types were provided in the "
          "annotation." % (f, num_function_arguments, len(types)))
    if spec.defaults:
      # Defaults align with the trailing arguments; validate them eagerly so
      # a bad default is caught at decoration time, not first call.
      num_defaults = len(spec.defaults)
      for (name, a, t) in zip(spec.args[-num_defaults:],
                              spec.defaults,
                              types[-num_defaults:]):
        allowed_type = _replace_forward_references(t, f.__globals__)
        if not isinstance(a, allowed_type):
          raise Error("default argument value %r of type %r is not an instance "
                      "of the allowed type %s for the %s argument to %r"
                      % (a, type(a), _type_repr(allowed_type), name, f))
    @functools.wraps(f)
    def new_f(*args, **kwds):
      """Checks each positional argument against its declared type, then calls f."""
      for (a, t) in zip(args, types):
        allowed_type = _replace_forward_references(t, f.__globals__)
        if not isinstance(a, allowed_type):
          raise Error("%r of type %r is not an instance of the allowed type %s "
                      "for %r" % (a, type(a), _type_repr(allowed_type), f))
      return f(*args, **kwds)
    return new_f
  return check_accepts
def returns(*types):
  """A decorator which checks the return types of a function.
  Based on:
  http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
  The above draws from:
  https://www.python.org/dev/peps/pep-0318/
  Args:
    *types: A list of Python types.
    A list of one element corresponds to a single return value.
    A list of several elements corresponds to several return values.
    Note that a function with no explicit return value has an implicit
    NoneType return and should be annotated correspondingly.
  Returns:
    A function to use as a decorator.
  """
  def check_returns(f):
    """Check the types."""
    if not types:
      # Annotating with zero types is a programming error in the annotation
      # itself, hence a plain TypeError rather than the typecheck Error.
      raise TypeError("A return type annotation must contain at least one type")
    @functools.wraps(f)
    def new_f(*args, **kwds):
      """A helper function."""
      return_value = f(*args, **kwds)
      if len(types) == 1:
        # The function has a single return value.
        allowed_type = _replace_forward_references(types[0], f.__globals__)
        if not isinstance(return_value, allowed_type):
          raise Error("%r of type %r is not an instance of the allowed type %s "
                      "for %r"
                      % (return_value, type(return_value),
                         _type_repr(allowed_type), f))
      else:
        # Multiple annotated types: the return value is expected to be a
        # sized iterable (e.g. a tuple) of matching length.
        if len(return_value) != len(types):
          raise Error(
              "Function %r has %d return values but only %d types were "
              "provided in the annotation." %
              (f, len(return_value), len(types)))
        for (r, t) in zip(return_value, types):
          allowed_type = _replace_forward_references(t, f.__globals__)
          if not isinstance(r, allowed_type):
            raise Error("%r of type %r is not an instance of allowed type %s "
                        "for %r" % (r, type(r), _type_repr(allowed_type), f))
      return return_value
    return new_f
  return check_returns
| gpl-3.0 |
iradul/qtwebkit | Tools/QueueStatusServer/model/warninglog.py | 122 | 2147 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from time import time
from datetime import datetime
from google.appengine.ext import db
class WarningLog(db.Model):
    """Datastore entity recording one warning event emitted by a queue/bot."""
    date = db.DateTimeProperty(auto_now_add=True)  # set automatically on first put()
    event = db.StringProperty()
    message = db.StringProperty()
    attachment_id = db.IntegerProperty()
    queue_name = db.StringProperty()
    bot_id = db.StringProperty()

    @classmethod
    def record(cls, event, message=None, attachment_id=None, queue_name=None, bot_id=None):
        """Create, persist, and return a new WarningLog entity."""
        entity = cls(event=event, message=message, queue_name=queue_name, bot_id=bot_id, attachment_id=attachment_id)
        entity.put()
        return entity
| gpl-2.0 |
vadimtk/chrome4sdp | tools/git/mass-rename.py | 170 | 1344 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
mass-rename: update source files (gyp lists, #includes) to reflect
a rename. Expects "git diff --cached -M" to list a bunch of renames.
To use:
1) git mv foo1 bar1; git mv foo2 bar2; etc.
2) *without committing*, ./tools/git/mass-rename.py
3) look at git diff (without --cached) to see what the damage is
"""
import os
import subprocess
import sys
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
def main():
  """Rewrites source references for every rename staged in the git index.

  Returns 0 on success, 1 if `git diff` itself fails.  (Python 2 script.)
  """
  # -M asks git to detect renames; --raw emits one tab-separated record
  # per file: "<attrs>\t<from>\t<to>".
  popen = subprocess.Popen('git diff --cached --raw -M',
                           shell=True, stdout=subprocess.PIPE)
  out, _ = popen.communicate()
  if popen.returncode != 0:
    return 1
  for line in out.splitlines():
    parts = line.split('\t')
    if len(parts) != 3:
      print 'Skipping: %s -- not a rename?' % parts
      continue
    attrs, fro, to = parts
    # Field 5 of the raw attrs is the status, e.g. "R100" for a rename.
    if attrs.split()[4].startswith('R'):
      subprocess.check_call([
          sys.executable,
          os.path.join(BASE_DIR, 'move_source_file.py'),
          '--already_moved',
          '--no_error_for_non_source_file',
          fro, to])
    else:
      print 'Skipping: %s -- not a rename?' % fro
  return 0


if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
yulbryn/armory-leap | python/lib/ws4py/server/geventserver.py | 2 | 3779 | # -*- coding: utf-8 -*-
__doc__ = """
WSGI entities to support WebSocket from within gevent.
Its usage is rather simple:
.. code-block: python
from gevent import monkey; monkey.patch_all()
from ws4py.websocket import EchoWebSocket
from ws4py.server.geventserver import WSGIServer
from ws4py.server.wsgiutils import WebSocketWSGIApplication
server = WSGIServer(('localhost', 9000), WebSocketWSGIApplication(handler_cls=EchoWebSocket))
server.serve_forever()
"""
import logging
import gevent
from gevent.pywsgi import WSGIHandler, WSGIServer as _WSGIServer
from gevent.pool import Pool
from ws4py import format_addresses
from ws4py.server.wsgiutils import WebSocketWSGIApplication
logger = logging.getLogger('ws4py')
__all__ = ['WebSocketWSGIHandler', 'WSGIServer',
'GEventWebSocketPool']
class WebSocketWSGIHandler(WSGIHandler):
    """
    A WSGI handler that will perform the :rfc:`6455`
    upgrade and handshake before calling the WSGI application.

    If the incoming request doesn't have a `'Upgrade'` header,
    the handler will simply fallback to the gevent builtin's handler
    and process it as per usual.
    """
    def run_application(self):
        upgrade_header = self.environ.get('HTTP_UPGRADE', '').lower()
        if upgrade_header:
            # Build and start the HTTP response.  Expose the raw socket to
            # the WSGI app so ws4py can take ownership of it.
            self.environ['ws4py.socket'] = self.socket or self.environ['wsgi.input'].rfile._sock
            self.result = self.application(self.environ, self.start_response) or []
            self.process_result()
            del self.environ['ws4py.socket']
            # Detach the socket from this handler: the websocket owns it now,
            # so the handler must not close it during its own teardown.
            self.socket = None
            self.rfile.close()

            ws = self.environ.pop('ws4py.websocket', None)
            if ws:
                ws_greenlet = self.server.pool.track(ws)
                # issue #170
                # in gevent 1.1 socket will be closed once application returns
                # so let's wait for websocket handler to finish
                ws_greenlet.join()
        else:
            # Not a websocket upgrade: delegate to the stock gevent handler.
            gevent.pywsgi.WSGIHandler.run_application(self)
class GEventWebSocketPool(Pool):
    """
    Simple pool of bound websockets.
    Internally it uses a gevent group to track
    the websockets. The server should call the ``clear``
    method to initiate the closing handshake when the
    server is shutdown.
    """
    def track(self, websocket):
        """Spawn the websocket's run loop in this pool and return the greenlet."""
        logger.info("Managing websocket %s" % format_addresses(websocket))
        return self.spawn(websocket.run)

    def clear(self):
        """Send a going-away close (1001) to every tracked websocket."""
        logger.info("Terminating server and all connected websockets")
        for greenlet in self:
            try:
                # NOTE(review): ``im_self`` is the Python 2 bound-method
                # attribute (``__self__`` in Python 3); recovering the
                # websocket this way only works on Python 2 -- confirm
                # before porting.
                websocket = greenlet._run.im_self
                if websocket:
                    websocket.close(1001, 'Server is shutting down')
            except:
                # Best effort: the greenlet may already be dead or may not
                # wrap a websocket at all.
                pass
            finally:
                self.discard(greenlet)
class WSGIServer(_WSGIServer):
    # Use the websocket-aware handler instead of gevent's default.
    handler_class = WebSocketWSGIHandler

    def __init__(self, *args, **kwargs):
        """
        WSGI server that simply tracks websockets
        and send them a proper closing handshake
        when the server terminates.

        Other than that, the server is the same
        as its :class:`gevent.pywsgi.WSGIServer`
        base.
        """
        _WSGIServer.__init__(self, *args, **kwargs)
        self.pool = GEventWebSocketPool()

    def stop(self, *args, **kwargs):
        # Close all tracked websockets (1001 going-away) before the
        # underlying server shuts down.
        self.pool.clear()
        _WSGIServer.stop(self, *args, **kwargs)
if __name__ == '__main__':
    # Demo: run an echoing websocket server on localhost:9000.
    from ws4py import configure_logger
    configure_logger()

    from ws4py.websocket import EchoWebSocket
    server = WSGIServer(('127.0.0.1', 9000),
                        WebSocketWSGIApplication(handler_cls=EchoWebSocket))
    server.serve_forever()
| gpl-3.0 |
nginnever/zogminer | contrib/devtools/security-check.py | 28 | 6511 | #!/usr/bin/python2
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
def check_ELF_PIE(executable):
    '''
    Check for position independent executable (PIE), allowing for address space randomization.
    '''
    proc = subprocess.Popen([READELF_CMD, '-h', '-W', executable],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')

    # readelf reports a PIE binary as a shared object: "Type: DYN".
    for raw_line in out.split(b'\n'):
        fields = raw_line.split()
        if len(fields) >= 2 and fields[0] == b'Type:' and fields[1] == b'DYN':
            return True
    return False
def get_ELF_program_headers(executable):
    '''Return type and flags for ELF program headers'''
    p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    in_headers = False
    count = 0
    headers = []
    # Parse readelf's fixed-width "Program Headers:" table.  The column
    # offsets are learned from the header row, then used to slice each
    # subsequent data row.
    for line in stdout.split(b'\n'):
        if line.startswith(b'Program Headers:'):
            in_headers = True
        if line == b'':
            # A blank line terminates the table.
            in_headers = False
        if in_headers:
            if count == 1: # header line
                ofs_typ = line.find(b'Type')
                ofs_offset = line.find(b'Offset')
                ofs_flags = line.find(b'Flg')
                ofs_align = line.find(b'Align')
                if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse elfread -lW output')
            elif count > 1:
                # Slice the Type and Flg columns using the learned offsets.
                typ = line[ofs_typ:ofs_offset].rstrip()
                flags = line[ofs_flags:ofs_align].rstrip()
                headers.append((typ, flags))
            count += 1
    return headers
def check_ELF_NX(executable):
    '''
    Check that no sections are writable and executable (including the stack)
    '''
    gnu_stack_present = False
    writable_and_exec = False
    for (seg_type, seg_flags) in get_ELF_program_headers(executable):
        if seg_type == b'GNU_STACK':
            gnu_stack_present = True
        if b'W' in seg_flags and b'E' in seg_flags:
            # A segment that is both writable and executable defeats NX.
            writable_and_exec = True
    return gnu_stack_present and not writable_and_exec
def check_ELF_RELRO(executable):
    '''
    Check for read-only relocations.
    GNU_RELRO program header must exist
    Dynamic section must have BIND_NOW flag
    '''
    have_gnu_relro = False
    for (typ, flags) in get_ELF_program_headers(executable):
        # Note: not checking flags == 'R': here as linkers set the permission differently
        # This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
        # However, the dynamic linker need to write to this area so these are RW.
        # Glibc itself takes care of mprotecting this area R after relocations are finished.
        # See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
        if typ == b'GNU_RELRO':
            have_gnu_relro = True

    have_bindnow = False
    # Scan the dynamic section for BIND_NOW, either as its own entry or as
    # a bit inside the FLAGS entry.
    p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    for line in stdout.split(b'\n'):
        tokens = line.split()
        if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
            have_bindnow = True
    # Full RELRO requires both the GNU_RELRO segment and BIND_NOW.
    return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
    '''
    Check for use of stack canary
    '''
    proc = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    # __stack_chk_fail in the dynamic symbols means the compiler
    # instrumented the binary with stack-protector canaries.
    return any(b'__stack_chk_fail' in line for line in out.split(b'\n'))
def get_PE_dll_characteristics(executable):
    '''
    Get PE DllCharacteristics bits.

    Returns 0 if no DllCharacteristics line is found.
    '''
    p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    # Popen output is bytes, so split and compare with bytes literals
    # (matching the ELF checks above); the str literals used previously
    # raised TypeError on Python 3 because of unicode_literals.
    for line in stdout.split(b'\n'):
        tokens = line.split()
        if len(tokens) >= 2 and tokens[0] == b'DllCharacteristics':
            return int(tokens[1], 16)
    return 0
def check_PE_PIE(executable):
    '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
    return bool(get_PE_dll_characteristics(executable) & 0x40)
def check_PE_NX(executable):
    '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
    return bool(get_PE_dll_characteristics(executable) & 0x100)
# Maps executable format -> list of (check name, check function) to run.
CHECKS = {
'ELF': [
    ('PIE', check_ELF_PIE),
    ('NX', check_ELF_NX),
    ('RELRO', check_ELF_RELRO),
    ('Canary', check_ELF_Canary)
],
'PE': [
    ('PIE', check_PE_PIE),
    ('NX', check_PE_NX)
]
}
def identify_executable(executable):
    '''Return 'PE', 'ELF', or None based on the file's magic bytes.'''
    # Bug fix: this previously opened the global `filename` (set by the
    # __main__ loop) instead of its own `executable` parameter, so it only
    # worked by accident when called from that loop.
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    elif magic.startswith(b'\x7fELF'):
        return 'ELF'
    return None
if __name__ == '__main__':
    # Exit status 0 means all checks passed on all files; 1 otherwise.
    retval = 0
    for filename in sys.argv[1:]:
        try:
            etype = identify_executable(filename)
            if etype is None:
                print('%s: unknown format' % filename)
                retval = 1
                continue
            failed = []
            # Run every check registered for this executable format.
            for (name, func) in CHECKS[etype]:
                if not func(filename):
                    failed.append(name)
            if failed:
                print('%s: failed %s' % (filename, ' '.join(failed)))
                retval = 1
        except IOError:
            print('%s: cannot open' % filename)
            retval = 1
    exit(retval)
| mit |
bugobliterator/ardupilot-chibios | Tools/scripts/decode_devid.py | 8 | 1865 | #!/usr/bin/env python
'''
decode a device ID, such as used for COMPASS_DEV_ID, INS_ACC_ID etc
To understand the devtype you should look at the backend headers for
the sensor library, such as libraries/AP_Compass/AP_Compass_Backend.h
'''
import sys
import optparse
parser = optparse.OptionParser("decode_devid.py")
parser.add_option("-C", "--compass", action='store_true', help='decode compass IDs')
parser.add_option("-I", "--imu", action='store_true', help='decode IMU IDs')

opts, args = parser.parse_args()

if len(args) == 0:
    print("Please supply a device ID")
    sys.exit(1)

devid=int(args[0])

# Device ID bit layout (low to high):
#   bits 0-2  : bus type (I2C/SPI/UAVCAN)
#   bits 3-7  : bus number
#   bits 8-15 : device address on the bus
#   bits 16+  : backend-specific device type
bus_type=devid & 0x07
bus=(devid>>3) & 0x1F
address=(devid>>8)&0xFF
devtype=(devid>>16)

bustypes = {
    1: "I2C",
    2: "SPI",
    3: "UAVCAN"
}

# devtype values as defined in the sensor backend headers
# (e.g. libraries/AP_Compass/AP_Compass_Backend.h).
compass_types = {
    0x01 : "DEVTYPE_HMC5883_OLD",
    0x07 : "DEVTYPE_HMC5883",
    0x02 : "DEVTYPE_LSM303D",
    0x04 : "DEVTYPE_AK8963 ",
    0x05 : "DEVTYPE_BMM150 ",
    0x06 : "DEVTYPE_LSM9DS1",
    0x08 : "DEVTYPE_LIS3MDL",
    0x09 : "DEVTYPE_AK09916",
    0x0A : "DEVTYPE_IST8310",
    0x0B : "DEVTYPE_ICM20948",
    0x0C : "DEVTYPE_MMC3416",
    0x0D : "DEVTYPE_QMC5883L"
}

imu_types = {
    0x09 : "DEVTYPE_BMI160",
    0x10 : "DEVTYPE_L3G4200D",
    0x11 : "DEVTYPE_ACC_LSM303D",
    0x12 : "DEVTYPE_ACC_BMA180",
    0x13 : "DEVTYPE_ACC_MPU6000",
    0x16 : "DEVTYPE_ACC_MPU9250",
    0x17 : "DEVTYPE_ACC_IIS328DQ",
    0x21 : "DEVTYPE_GYR_MPU6000",
    0x22 : "DEVTYPE_GYR_L3GD20",
    0x24 : "DEVTYPE_GYR_MPU9250",
    0x25 : "DEVTYPE_GYR_I3G4250D"
}

# The devtype namespace overlaps between sensor libraries, so the user
# must say which table to decode against.
decoded_devname = ""

if opts.compass:
    decoded_devname = compass_types.get(devtype, "UNKNOWN")

if opts.imu:
    decoded_devname = imu_types.get(devtype, "UNKNOWN")

print("bus_type:%s(%u)  bus:%u address:%u devtype:%u(0x%x) %s" % (
    bustypes.get(bus_type,"UNKNOWN"), bus_type,
    bus, address, devtype, devtype, decoded_devname))
| gpl-3.0 |
google-research/sloe-logistic | sloe_experiments/p_values.py | 1 | 2329 | # coding=utf-8
# Copyright 2021 The SLOE Logistic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run experiment to understand uniformity of p-values generated by SLOE.
Tests the SLOE estimator empirically by computing it
over a bunch of different seeds, and storing in csv files to be analyzed in a
colab.
"""
from absl import app
from absl import flags
import apache_beam as beam
from apache_beam.options import pipeline_options
import numpy as np
import sloe_logistic.sloe_experiments.experiment_helpers as exp_helper
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_sims', 100, 'number of simulations to run')
flags.DEFINE_string('output_path', '/tmp/counts.txt', 'The output file path')
flags.DEFINE_string(
'coverage_target', 'true_preds',
'Which value to check coverage in prediction intervals? Options '
'`true_preds` or `calib_ests`'
)
def run_sim(seed):
  """Runs simulation and computes estimated p-values to compare to uniform.

  Args:
    seed: integer offset mixed into a fixed base seed so each Beam worker
      gets a distinct, reproducible simulation.

  Returns:
    Sorted numpy array of the p-values at the truly-null coefficients
    (only these should be uniform under a correct procedure).
  """
  # Model parameters
  sim_params = exp_helper.SimulationParams.create_from_flags()
  sim_params.seed = 201216 + seed

  sim = exp_helper.Simulation(sim_params)
  x1, y1 = sim.sample()

  logit_model = exp_helper.create_inference_model()
  logit_model_fit = logit_model.fit(x1, y1)
  p_values = logit_model_fit.p_values()

  # Sorting makes downstream ECDF/uniformity analysis straightforward.
  return np.sort(p_values[sim.null_indices()])
def main(unused_argv):
  """Fans run_sim out over seeds with Beam; writes p-values as CSV shards."""
  # If you have custom beam options add them here.
  beam_options = pipeline_options.PipelineOptions()
  with beam.Pipeline(options=beam_options) as pipe:
    _ = (
        pipe
        | beam.Create(range(FLAGS.num_sims))
        | beam.Map(run_sim)
        | beam.Map(exp_helper.numpy_array_to_csv)
        # Reshuffle breaks fusion so the writes are distributed evenly.
        | beam.Reshuffle()
        |
        'WriteToText' >> beam.io.WriteToText(FLAGS.output_path, num_shards=5))


if __name__ == '__main__':
  app.run(main)
| apache-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/selenium/webdriver/ie/service.py | 17 | 3917 | #!/usr/bin/python
#
# Copyright 2012 Webdriver_name committers
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from subprocess import PIPE
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
    """
    Object that manages the starting and stopping of the IEDriver
    """

    def __init__(self, executable_path, port=0, host=None, log_level=None, log_file=None):
        """
        Creates a new instance of the Service

        :Args:
         - executable_path : Path to the IEDriver
         - port : Port the service is running on (0 picks a free port)
         - host : IP address the service port is bound
         - log_level : Level of logging of service, may be "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE".
           Default is "FATAL".
         - log_file : Target of logging of service, may be "stdout", "stderr" or file path.
           Default is "stdout"."""
        self.port = port
        self.path = executable_path
        if self.port == 0:
            self.port = utils.free_port()
        self.host = host
        self.log_level = log_level
        self.log_file = log_file
        # Initialize so stop() is safe to call even if start() never ran
        # (previously this raised AttributeError).
        self.process = None

    def start(self):
        """
        Starts the IEDriver Service.

        :Exceptions:
         - WebDriverException : Raised either when it can't start the service
           or when it can't connect to the service
        """
        try:
            cmd = [self.path, "--port=%d" % self.port]
            if self.host is not None:
                cmd.append("--host=%s" % self.host)
            if self.log_level is not None:
                cmd.append("--log-level=%s" % self.log_level)
            if self.log_file is not None:
                cmd.append("--log-file=%s" % self.log_file)
            self.process = subprocess.Popen(cmd,
                                            stdout=PIPE, stderr=PIPE)
        except TypeError:
            # Bad argument types are a caller bug; don't mask them.
            raise
        except:
            raise WebDriverException(
                "IEDriver executable needs to be available in the path. \
                Please download from http://selenium-release.storage.googleapis.com/index.html\
                and read up at http://code.google.com/p/selenium/wiki/InternetExplorerDriver")
        # Poll for up to ~30s until the driver's HTTP endpoint accepts connections.
        count = 0
        while not utils.is_url_connectable(self.port):
            count += 1
            time.sleep(1)
            if count == 30:
                raise WebDriverException("Can not connect to the IEDriver")

    def stop(self):
        """
        Tells the IEDriver to stop and cleans up the process
        """
        # If its dead dont worry
        if self.process is None:
            return

        # Tell the Server to die!
        try:
            from urllib import request as url_request
        except ImportError:
            import urllib2 as url_request

        url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
        # Give the server up to ~30s to release the port on its own.
        count = 0
        while utils.is_connectable(self.port):
            if count == 30:
                break
            count += 1
            time.sleep(1)

        # Tell the Server to properly die in case
        try:
            if self.process:
                self.process.kill()
                self.process.wait()
        except OSError:
            # Bug fix: the previous `except WindowsError:` raised NameError on
            # non-Windows Python 3 when the handler was reached; OSError covers
            # WindowsError on Windows and ProcessLookupError everywhere.
            # (kill may not be available under windows environment)
            pass
| agpl-3.0 |
callen/Alky-Reborn | Convertor/Targets/Linux/LinuxLinker.py | 2 | 4128 | # Copyright 2006 Falling Leaf Systems, LLC.
# Refer to LICENSE in the root directory.
import os
loweralpha = 'abcdefghijklmnopqrstuvwxyz'
class LinuxLinker:
    """Links converted object code into a native Linux ELF executable.

    Generates a custom GNU ld linker script (derived from ld's builtin one)
    that pins each converted section at its original address, then drives ld.
    (Python 2 code: uses the `file` builtin and print statements.)
    """
    def __init__(self, source, dest, linkwrapper, converted):
        # linkwrapper and converted are paths to object files produced
        # earlier in the conversion pipeline; dest carries the section
        # list (name/addr) of the target image.
        self.source = source
        self.dest = dest
        self.linkwrapper = linkwrapper
        self.converted = converted

    def createLinkerscript(self):
        """Return the text of a linker script based on ld's default one.

        The default script is annotated with PHDRS assignments per known
        output section, and one extra PT_LOAD segment ("alkyN") is added
        per section of self.dest, pinned at that section's address.
        """
        # Ask ld to dump its builtin linker script; it is delimited by a
        # line of '=' characters on both sides.
        ldscript = os.popen('ld -zcombreloc --verbose', 'r').read()
        ldscript = ldscript.split('==================================================')[1]
        ldscript = ldscript[:ldscript.rfind('}')] # Chop off last curly brace
        phdrs = []
        phdrs.append('headers PT_PHDR PHDRS;')
        phdrs.append('interp PT_INTERP;')
        phdrs.append('text PT_LOAD FILEHDR PHDRS;')
        phdrs.append('data PT_LOAD;')
        phdrs.append('dynamic PT_DYNAMIC;')
        # Maps segment name -> output sections that belong in that segment.
        seg_map = dict(
            interp=('.interp',),
            text=(
                '.interp', '.note.ABI-tag', '.hash', '.dynsym',
                '.dynstr', '.gnu.version', '.gnu.version_r',
                '.rel.got', '.rel.bss', '.rel.plt', '.init',
                '.plt', '.text', '.fini', '.rodata'
            ),
            data=('.dynamic', '.got', '.got.plt', '.data', '.bss'),
            dynamic=('.dynamic',),
        )
        # Walk every "{...}" group inside SECTIONS by raw string indices,
        # recover the section name preceding the ':' and, if it is in
        # seg_map, append the matching ":segment" assignment after the '}'.
        prev = ldscript.find('SECTIONS')
        while True:
            curly = ldscript.find('}', prev)
            if curly < 0:
                break
            lcurly = ldscript.rfind('{', 0, curly)
            colon = ldscript.rfind(':', 0, curly)
            dot = ldscript.rfind('.', 0, lcurly)
            slash = ldscript.rfind('/', 0, lcurly)
            semi = ldscript.rfind(';', 0, curly)
            curly += 1
            prev = curly
            if slash > dot:
                # The '.' belongs to a file path, not a section name.
                continue
            # Extend the match leftwards over dotted name components
            # (e.g. ".gnu.version_r") while they are purely alphabetic.
            while True:
                newdot = ldscript.rfind('.', 0, dot)
                cut = ldscript[newdot+1:dot].lower()
                right = True
                for char in cut:
                    if char in loweralpha:
                        continue
                    else:
                        right = False
                        break
                if not right:
                    break
                dot = newdot
            if not (curly > lcurly > colon > dot > semi):
                # Indices out of order: this '}' is not a section definition.
                continue
            segname = ldscript[dot:colon].strip().split(' ', 1)[0]
            segstr = ''
            for seg in seg_map:
                seglist = seg_map[seg]
                if segname in seglist:
                    segstr += ' :%s' % seg
            if segstr:
                ldscript = ldscript[:curly] + segstr + ldscript[curly:]
                # NOTE(review): skips ahead by a fixed 6 chars, presumably
                # past the inserted " :text"-style text -- confirm this is
                # long enough for multi-segment insertions.
                prev += 6
        # One dedicated loadable segment per converted section, pinned at
        # the section's original address.
        segs = []
        i = 0
        for section in self.dest.sections:
            phdrs.append('alky%i PT_LOAD;' % i)
            segs.append(' %s 0x%08x : { *(%s) } :alky%i' % (section.name, section.addr, section.name, i))
            i += 1
        ldscript += '\n'.join(segs)
        ldscript += '\n}\n'
        # Inject the PHDRS block just before SECTIONS.
        ldscript = list(ldscript.split('SECTIONS', 1))
        ldscript[0] += '\nPHDRS {\n %s\n}\n' % '\n '.join(phdrs)
        ldscript = 'SECTIONS'.join(ldscript)
        return ldscript

    def output(self, filename):
        """Write the linker script and invoke ld; True on success."""
        file('/tmp/linkerscript.ld', 'w').write(self.createLinkerscript())
        ld = []
        ld.append('ld')
        ld.append('-o ' + filename)
        # Define linker, output type and that we want the header frame
        ld.append('-eh-frame-hdr -m elf_i386 -dynamic-linker /lib/ld-linux.so.2')
        # Basic stdlib stuff
        ld.append('/usr/lib/crt1.o')
        ld.append('/usr/lib/crti.o')
        # Ask gcc where gcc.a is located, and get the dirname from that
        ld.append('-L`dirname \`gcc -print-libgcc-file-name\``')
        ld.append('-L/usr/lib')
        # Use our linkerscript
        ld.append('-T /tmp/linkerscript.ld')
        ld.append('-lgcc --as-needed -lgcc_s --no-as-needed -lc -lgcc')
        ld.append('--as-needed -lgcc_s --no-as-needed ')
        ld.append('/usr/lib/crtn.o -ldl')
        ld.append('--export-dynamic')
        ld.append(self.converted)
        ld.append(self.linkwrapper)
        print ' '.join(ld)
        if os.system(' '.join(ld)) != 0:
            return False
        return True
"""
def output_gcc(self, filename):
file('/tmp/linkerscript.ld', 'w').write(self.createLinkerscript())
ld = []
ld.append('gcc')
ld.append('-o ' + filename)
ld.append(self.linkwrapper)
ld.append(self.converted)
#for section in self.dest.sections:
# ld.append('-Wl,--section-start=%s=0x%08X' % (section.name, section.addr))
ld.append('-ldl')
ld.append('-Xlinker -T/tmp/linkerscript.ld')
# ld.append('-e _start')
print ' '.join(ld)
if os.system(' '.join(ld)) != 0:
return False
return True
"""
| lgpl-3.0 |
arrabito/DIRAC | ResourceStatusSystem/Utilities/CSHelpers.py | 1 | 11101 | """ CSHelpers
Module containing functions interacting with the CS and useful for the RSS
modules.
"""
import errno
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getGOCSiteName
from DIRAC.ResourceStatusSystem.Utilities import Utils
from DIRAC.Resources.Storage.StorageElement import StorageElement
__RCSID__ = '$Id$'
def warmUp():
  """
  gConfig has its own dark side, it needs some warm up phase.

  Forces a refresh of the CS configuration before it is queried.
  """
  # NOTE(review): imported locally, presumably to defer/avoid import
  # cycles at module load time -- confirm.
  from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
  gRefresher.refreshConfigurationIfNeeded()
def getSites():
  """
  Gets all sites from /Resources/Sites
  """
  basePath = 'Resources/Sites'
  domainsRes = gConfig.getSections(basePath)
  if not domainsRes['OK']:
    return domainsRes

  allSites = []
  for domain in domainsRes['Value']:
    sitesRes = gConfig.getSections('%s/%s' % (basePath, domain))
    if not sitesRes['OK']:
      return sitesRes
    allSites.extend(sitesRes['Value'])

  # De-duplicate, just in case a site shows up under two domains
  return S_OK(list(set(allSites)))
def getGOCSites(diracSites=None):
  """Convert DIRAC site names to unique GOCDB site names.

  :param diracSites: optional list of DIRAC site names; defaults to every
                     site defined in the CS.
  :return: S_OK( list of GOC site names ) or S_ERROR
  """
  if diracSites is None:
    diracSites = getSites()
    if not diracSites['OK']:
      return diracSites
    diracSites = diracSites['Value']

  gocSites = []

  for diracSite in diracSites:
    gocSite = getGOCSiteName(diracSite)
    if not gocSite['OK']:
      # Sites without a GOCDB mapping are silently skipped.
      continue
    gocSites.append(gocSite['Value'])

  return S_OK(list(set(gocSites)))
def getDomainSites():
  """
  Gets all sites from /Resources/Sites, keyed by their domain name
  """
  basePath = 'Resources/Sites'
  domainsRes = gConfig.getSections(basePath)
  if not domainsRes['OK']:
    return domainsRes

  sitesPerDomain = {}
  for domain in domainsRes['Value']:
    sitesRes = gConfig.getSections('%s/%s' % (basePath, domain))
    if not sitesRes['OK']:
      return sitesRes
    sitesPerDomain[domain] = sitesRes['Value']

  return S_OK(sitesPerDomain)
def getResources():
  """
  Gets all resources: SEs, FTS servers, file catalogs and computing elements
  """
  resources = []

  # Results from failing getters are silently skipped (best effort).
  for getter in (getStorageElements, getFTS, getFileCatalogs, getComputingElements):
    res = getter()
    if res['OK']:
      resources = resources + res['Value']

  return S_OK(resources)
def getNodes():
"""
Gets all nodes
"""
nodes = []
queues = getQueues()
if queues['OK']:
nodes = nodes + queues['Value']
return S_OK(nodes)
################################################################################
def getStorageElements():
"""
Gets all storage elements from /Resources/StorageElements
"""
_basePath = 'Resources/StorageElements'
seNames = gConfig.getSections(_basePath)
return seNames
def getStorageElementsHosts(seNames=None):
""" Get the hosts of the Storage Elements
"""
seHosts = []
if seNames is None:
seNames = getStorageElements()
if not seNames['OK']:
return seNames
seNames = seNames['Value']
for seName in seNames:
seHost = getSEHost(seName)
if not seHost['OK']:
gLogger.warn("Could not get SE Host", "SE: %s" % seName)
continue
if seHost['Value']:
seHosts.append(seHost['Value'])
return S_OK(list(set(seHosts)))
def _getSEParameters(seName):
se = StorageElement(seName, hideExceptions=True)
seParameters = S_ERROR(errno.ENODATA, 'No SE parameters obtained')
pluginsList = se.getPlugins()
if not pluginsList['OK']:
gLogger.warn(pluginsList['Message'], "SE: %s" % seName)
return pluginsList
pluginsList = pluginsList['Value']
# Put the srm capable protocol first, but why doing that is a
# mystery that will eventually need to be sorted out...
for plugin in ('GFAL2_SRM2', 'SRM2'):
if plugin in pluginsList:
pluginsList.remove(plugin)
pluginsList.insert(0, plugin)
for plugin in pluginsList:
seParameters = se.getStorageParameters(plugin)
if seParameters['OK']:
break
return seParameters
def getSEToken(seName):
""" Get StorageElement token
"""
seParameters = _getSEParameters(seName)
if not seParameters['OK']:
gLogger.warn("Could not get SE parameters", "SE: %s" % seName)
return seParameters
return S_OK(seParameters['Value']['SpaceToken'])
def getSEHost(seName):
""" Get StorageElement host name
"""
seParameters = _getSEParameters(seName)
if not seParameters['OK']:
gLogger.warn("Could not get SE parameters", "SE: %s" % seName)
return seParameters
return S_OK(seParameters['Value']['Host'])
def getStorageElementEndpoint(seName):
""" Get endpoint as combination of host, port, wsurl
"""
seParameters = _getSEParameters(seName)
if not seParameters['OK']:
gLogger.warn("Could not get SE parameters", "SE: %s" % seName)
return seParameters
if seParameters['Value']['Protocol'].lower() == 'srm':
# we need to construct the URL with httpg://
host = seParameters['Value']['Host']
port = seParameters['Value']['Port']
wsurl = seParameters['Value']['WSUrl']
# MAYBE wusrl is not defined
if host and port:
url = 'httpg://%s:%s%s' % (host, port, wsurl)
url = url.replace('?SFN=', '')
return S_OK(url)
else:
return S_OK(seParameters['Value']['URLBase'])
return S_ERROR((host, port, wsurl))
def getStorageElementEndpoints(storageElements=None):
""" get the endpoints of the Storage ELements
"""
if storageElements is None:
storageElements = getStorageElements()
if not storageElements['OK']:
return storageElements
storageElements = storageElements['Value']
storageElementEndpoints = []
for se in storageElements:
seEndpoint = getStorageElementEndpoint(se)
if not seEndpoint['OK']:
continue
storageElementEndpoints.append(seEndpoint['Value'])
return S_OK(list(set(storageElementEndpoints)))
def getFTS():
"""
Gets all FTS endpoints
"""
# FIXME: FTS2 will be deprecated (first 2 lines that follow)
ftsEndpoints = gConfig.getValue('Resources/FTSEndpoints/Default/FTSEndpoint', [])
ftsEndpoints += _getFTSEndpoints('Resources/FTSEndpoints/FTS2')
ftsEndpoints += _getFTSEndpoints()
return S_OK(ftsEndpoints)
def _getFTSEndpoints(basePath='Resources/FTSEndpoints/FTS3'):
"""
Gets all FTS endpoints that are in CS
"""
result = gConfig.getOptions(basePath)
if result['OK']:
return result['Value']
return []
def getSpaceTokenEndpoints():
""" Get Space Token Endpoints """
return Utils.getCSTree('Shares/Disk')
def getFileCatalogs():
"""
Gets all storage elements from /Resources/FileCatalogs
"""
_basePath = 'Resources/FileCatalogs'
fileCatalogs = gConfig.getSections(_basePath)
return fileCatalogs
def getComputingElements():
"""
Gets all computing elements from /Resources/Sites/<>/<>/CE
"""
_basePath = 'Resources/Sites'
ces = []
domainNames = gConfig.getSections(_basePath)
if not domainNames['OK']:
return domainNames
domainNames = domainNames['Value']
for domainName in domainNames:
domainSites = gConfig.getSections('%s/%s' % (_basePath, domainName))
if not domainSites['OK']:
return domainSites
domainSites = domainSites['Value']
for site in domainSites:
siteCEs = gConfig.getSections('%s/%s/%s/CEs' % (_basePath, domainName, site))
if not siteCEs['OK']:
# return siteCEs
gLogger.error(siteCEs['Message'])
continue
siteCEs = siteCEs['Value']
ces.extend(siteCEs)
# Remove duplicated ( just in case )
ces = list(set(ces))
return S_OK(ces)
# #
# Quick functions implemented for Andrew
def getSiteComputingElements(siteName):
"""
Gets all computing elements from /Resources/Sites/<>/<siteName>/CE
"""
_basePath = 'Resources/Sites'
domainNames = gConfig.getSections(_basePath)
if not domainNames['OK']:
return domainNames
domainNames = domainNames['Value']
for domainName in domainNames:
ces = gConfig.getValue('%s/%s/%s/CE' % (_basePath, domainName, siteName), '')
if ces:
return ces.split(', ')
return []
def getSiteStorageElements(siteName):
"""
Gets all computing elements from /Resources/Sites/<>/<siteName>/SE
"""
_basePath = 'Resources/Sites'
domainNames = gConfig.getSections(_basePath)
if not domainNames['OK']:
return domainNames
domainNames = domainNames['Value']
for domainName in domainNames:
ses = gConfig.getValue('%s/%s/%s/SE' % (_basePath, domainName, siteName), '')
if ses:
return ses.split(', ')
return []
def getSiteElements(siteName):
"""
Gets all the computing and storage elements for a given site
"""
resources = []
ses = getSiteStorageElements(siteName)
resources = resources + ses
ce = getSiteComputingElements(siteName)
resources = resources + ce
return S_OK(resources)
def getQueues():
"""
Gets all computing elements from /Resources/Sites/<>/<>/CE/Queues
"""
_basePath = 'Resources/Sites'
queues = []
domainNames = gConfig.getSections(_basePath)
if not domainNames['OK']:
return domainNames
domainNames = domainNames['Value']
for domainName in domainNames:
domainSites = gConfig.getSections('%s/%s' % (_basePath, domainName))
if not domainSites['OK']:
return domainSites
domainSites = domainSites['Value']
for site in domainSites:
siteCEs = gConfig.getSections('%s/%s/%s/CEs' % (_basePath, domainName, site))
if not siteCEs['OK']:
# return siteCEs
gLogger.error(siteCEs['Message'])
continue
siteCEs = siteCEs['Value']
for siteCE in siteCEs:
siteQueue = gConfig.getSections('%s/%s/%s/CEs/%s/Queues' % (_basePath, domainName, site, siteCE))
if not siteQueue['OK']:
# return siteQueue
gLogger.error(siteQueue['Message'])
continue
siteQueue = siteQueue['Value']
queues.extend(siteQueue)
# Remove duplicated ( just in case )
queues = list(set(queues))
return S_OK(queues)
## /Registry ###################################################################
def getRegistryUsers():
"""
Gets all users from /Registry/Users
"""
_basePath = 'Registry/Users'
registryUsers = {}
userNames = gConfig.getSections(_basePath)
if not userNames['OK']:
return userNames
userNames = userNames['Value']
for userName in userNames:
# returns { 'Email' : x, 'DN': y, 'CA' : z }
userDetails = gConfig.getOptionsDict('%s/%s' % (_basePath, userName))
if not userDetails['OK']:
return userDetails
registryUsers[userName] = userDetails['Value']
return S_OK(registryUsers)
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
yufengg/tensorflow | tensorflow/python/ops/distributions/kullback_leibler.py | 29 | 5401 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration and usage mechanisms for KL-divergences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_inspect
_DIVERGENCES = {}
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
def kl_divergence(distribution_a, distribution_b,
allow_nan_stats=True, name=None):
"""Get the KL-divergence KL(distribution_a || distribution_b).
If there is no KL method registered specifically for `type(distribution_a)`
and `type(distribution_b)`, then the class hierarchies of these types are
searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(distribution_a)`).
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Returns:
A Tensor with the batchwise KL-divergence between `distribution_a`
and `distribution_b`.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of `distribution_a` and `distribution_b`.
"""
kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(distribution_a || distribution_b) registered for distribution_a "
"type %s and distribution_b type %s"
% (type(distribution_a).__name__, type(distribution_b).__name__))
with ops.name_scope("KullbackLeibler"):
kl_t = kl_fn(distribution_a, distribution_b, name=name)
if allow_nan_stats:
return kl_t
# Check KL for NaNs
kl_t = array_ops.identity(kl_t, name="kl")
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_not(
math_ops.reduce_any(math_ops.is_nan(kl_t))),
["KL calculation between %s and %s returned NaN values "
"(and was called with allow_nan_stats=False). Values:"
% (distribution_a.name, distribution_b.name), kl_t])]):
return array_ops.identity(kl_t, name="checked_kl")
class RegisterKL(object):
"""Decorator to register a KL divergence implementation function.
Usage:
@distributions.RegisterKL(distributions.Normal, distributions.Normal)
def _kl_normal_mvn(norm_a, norm_b):
# Return KL(norm_a || norm_b)
"""
def __init__(self, dist_cls_a, dist_cls_b):
"""Initialize the KL registrar.
Args:
dist_cls_a: the class of the first argument of the KL divergence.
dist_cls_b: the class of the second argument of the KL divergence.
"""
self._key = (dist_cls_a, dist_cls_b)
def __call__(self, kl_fn):
"""Perform the KL registration.
Args:
kl_fn: The function to use for the KL divergence.
Returns:
kl_fn
Raises:
TypeError: if kl_fn is not a callable.
ValueError: if a KL divergence function has already been registered for
the given argument classes.
"""
if not callable(kl_fn):
raise TypeError("kl_fn must be callable, received: %s" % kl_fn)
if self._key in _DIVERGENCES:
raise ValueError("KL(%s || %s) has already been registered to: %s"
% (self._key[0].__name__, self._key[1].__name__,
_DIVERGENCES[self._key]))
_DIVERGENCES[self._key] = kl_fn
return kl_fn
| apache-2.0 |
fkorotkov/pants | tests/python/pants_test/java/jar/test_manifest.py | 33 | 1127 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.java.jar.manifest import Manifest
class TestManifest(unittest.TestCase):
def test_isempty(self):
manifest = Manifest()
self.assertTrue(manifest.is_empty())
manifest.addentry('Header', 'value')
self.assertFalse(manifest.is_empty())
def test_addentry(self):
manifest = Manifest()
manifest.addentry('Header', 'value')
self.assertEquals(
'Header: value\n', manifest.contents())
def test_too_long_entry(self):
manifest = Manifest()
with self.assertRaises(ValueError):
manifest.addentry(
'1234567890123456789012345678901234567890'
'12345678901234567890123456789', 'value')
def test_nonascii_char(self):
manifest = Manifest()
with self.assertRaises(UnicodeEncodeError):
manifest.addentry('X-Copyright', '© 2015')
| apache-2.0 |
40123148/w17b | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/surface.py | 603 | 3844 | from browser import document, html, window
from javascript import console, JSConstructor
from .rect import Rect
#import pygame.rect
canvas_ID=1
_canvas_id=None
class Surface:
def __init__(self, dim=[], depth=16, surf=None):
if surf is None:
self._depth=depth
self._canvas=html.CANVAS(width=dim[0], height=dim[1])
elif isinstance(surf, Surface):
self._canvas=surf.copy()
#self._width=surf.get_width()
#self._height=surf.get_height()
elif isinstance(surf, html.CANVAS):
self._canvas=surf
#self._width=surf.style.width
#self._height=surf.style.height
self._context=self._canvas.getContext('2d')
self._canvas.id='layer_%s' % canvas_ID
#setattr(self._canvas.style, 'z-index',canvas_ID)
#setattr(self._canvas.style, 'position', 'relative')
#setattr(self._canvas.style, 'left', '0px')
#setattr(self._canvas.style, 'top', '0px')
canvas_ID+=1
#document['pydiv'] <= self._canvas
def blit(self, source, dest, area=None, special_flags=0):
#if area is None and isinstance(source, str):
# _img = JSConstructor(window.Image)()
# _img.src = source
# def img_onload(*args):
# self._context.drawImage(_img, dest[0], dest[1])
# _img.onload=img_onload
# _img.width, _img.height
global _canvas_id
if _canvas_id is None:
try:
_canvas_id=document.get(selector='canvas')[0].getAttribute('id')
except:
pass
if self._canvas.id == _canvas_id:
self._canvas.width=self._canvas.width
if area is None:
#lets set area to the size of the source
if isinstance(source, Surface):
area=[(0, 0), (source.canvas.width, source.canvas.height)]
if isinstance(source, Surface):
_ctx=source.canvas.getContext('2d')
_subset=_ctx.getImageData(area[0][0],area[0][1], area[1][0], area[1][1])
# we want just a subset of the source image copied
self._context.putImageData(_subset, dest[0], dest[1])
#print(dest[0], dest[1], _subset.width, _subset.height)
return Rect(dest[0], dest[1], dest[0]+_subset.width, dest[1]+_subset.height)
def convert(self, surface=None):
## fix me...
return self
def copy(self):
_imgdata=self._context.toDataURL('image/png')
_canvas=html.CANVAS(width=self._canvas.width,height=self._canvas.height)
_ctx=_canvas.getContext('2d')
_ctx.drawImage(_imgdata, 0, 0)
return _canvas
def fill(self, color):
""" fill canvas with this color """
self._context.fillStyle="rgb(%s,%s,%s)" % color
#console.log(self._canvas.width, self._canvas.height, self._context.fillStyle)
self._context.fillRect(0,0,self._canvas.width,self._canvas.height)
#self._context.fill()
@property
def height(self):
return int(self._canvas.height)
@property
def width(self):
return int(self._canvas.width)
@property
def canvas(self):
return self._canvas
def scroll(self, dx=0, dy=0):
_imgdata=self._context.toDataURL('image/png')
self._context.drawImage(_imgdata, dx, dy)
def get_at(self, pos):
#returns rgb
return self._context.getImageData(pos[0], pos[1],1,1).data
def set_at(self, pos, color):
self._context.fillStyle='rgb(%s,%s,%s)' % color
self._context.fillRect(pos[0], pos[1], 1, 1)
def get_size(self):
return self._canvas.width, self._canvas.height
def get_width(self):
return self._canvas.width
def get_height(self):
return self._canvas.height
def get_rect(self, centerx=None, centery=None):
return Rect(0, 0, self._canvas.width, self._canvas.height)
def set_colorkey(self, key, val):
pass
| agpl-3.0 |
cneill/designate-testing | designate/objects/zone_transfer_request.py | 5 | 2204 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Graham Hayes <graham.hayes@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects import base
class ZoneTransferRequest(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject,):
FIELDS = {
'key': {
'schema': {
"type": "string",
"maxLength": 160
},
},
'domain_id': {
'schema': {
"type": "string",
"description": "Zone identifier",
"format": "uuid"
},
"immutable": True
},
'description': {
'schema': {
"type": ["string", "null"],
"maxLength": 160
}
},
'tenant_id': {
'schema': {
'type': 'string',
},
'read_only': True
},
'target_tenant_id': {
'schema': {
'type': ['string', 'null'],
},
'immutable': True
},
'status': {
'schema': {
"type": "string",
"enum": ["ACTIVE", "PENDING", "DELETED", "ERROR", "COMPLETE"],
}
},
'domain_name': {
'schema': {
"type": ["string", "null"],
"maxLength": 255,
},
'read_only': True
},
}
STRING_KEYS = [
'id', 'domain_id', 'domain_name', 'target_tenant_id'
]
class ZoneTransferRequestList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = ZoneTransferRequest
| apache-2.0 |
rhoscanner-team/pcd-plotter | delaunay_example.py | 1 | 1435 | import numpy as np
from scipy.spatial import Delaunay
points = np.random.rand(30, 2) # 30 points in 2-d
tri = Delaunay(points)
# Make a list of line segments:
# edge_points = [ ((x1_1, y1_1), (x2_1, y2_1)),
# ((x1_2, y1_2), (x2_2, y2_2)),
# ... ]
edge_points = []
edges = set()
def add_edge(i, j):
"""Add a line between the i-th and j-th points, if not in the list already"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(points[ [i, j] ])
# loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.vertices:
add_edge(ia, ib)
add_edge(ib, ic)
add_edge(ic, ia)
# plot it: the LineCollection is just a (maybe) faster way to plot lots of
# lines at once
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
lines = LineCollection(edge_points)
plt.figure()
plt.title('Delaunay triangulation')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o', hold=1)
plt.xlim(-1, 2)
plt.ylim(-1, 2)
# -- the same stuff for the convex hull
edges = set()
edge_points = []
for ia, ib in tri.convex_hull:
add_edge(ia, ib)
lines = LineCollection(edge_points)
plt.figure()
plt.title('Convex hull')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o', hold=1)
plt.xlim(-1, 2)
plt.ylim(-1, 2)
plt.show()
| gpl-2.0 |
bobthekingofegypt/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_handshake.py | 452 | 7134 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake._base module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.common import ExtensionParameter
from mod_pywebsocket.common import ExtensionParsingException
from mod_pywebsocket.common import format_extensions
from mod_pywebsocket.common import parse_extensions
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import validate_subprotocol
class ValidateSubprotocolTest(unittest.TestCase):
"""A unittest for validate_subprotocol method."""
def test_validate_subprotocol(self):
# Should succeed.
validate_subprotocol('sample')
validate_subprotocol('Sample')
validate_subprotocol('sample\x7eprotocol')
# Should fail.
self.assertRaises(HandshakeException,
validate_subprotocol,
'')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x09protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x20protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
_TEST_TOKEN_EXTENSION_DATA = [
('foo', [('foo', [])]),
('foo; bar', [('foo', [('bar', None)])]),
('foo; bar=baz', [('foo', [('bar', 'baz')])]),
('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
('foo; bar=baz, car; cdr',
[('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
('a, b, c, d',
[('a', []), ('b', []), ('c', []), ('d', [])]),
]
_TEST_QUOTED_EXTENSION_DATA = [
('foo; bar=""', [('foo', [('bar', '')])]),
('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
]
_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
('foo \t ', [('foo', [])]),
('foo; \r\n bar', [('foo', [('bar', None)])]),
('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
]
_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
]
class ExtensionsParserTest(unittest.TestCase):
def _verify_extension_list(self, expected_list, actual_list):
"""Verifies that ExtensionParameter objects in actual_list have the
same members as extension definitions in expected_list. Extension
definition used in this test is a pair of an extension name and a
parameter dictionary.
"""
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
(name, parameters) = expected
self.assertEqual(name, actual._name)
self.assertEqual(parameters, actual._parameters)
def test_parse(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_quoted_data(self):
for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_quoted_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_bad_data(self):
_TEST_BAD_EXTENSION_DATA = [
('foo; ; '),
('foo; a a'),
('foo foo'),
(',,,'),
('foo; bar='),
('foo; bar="hoge'),
('foo; bar="a\r"'),
('foo; bar="\\\xff"'),
('foo; bar=\ra'),
]
for formatted_string in _TEST_BAD_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions, formatted_string)
class FormatExtensionsTest(unittest.TestCase):
def test_format_extensions(self):
for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
extensions = []
for definition in definitions:
(name, parameters) = definition
extension = ExtensionParameter(name)
extension._parameters = parameters
extensions.append(extension)
self.assertEqual(
formatted_string, format_extensions(extensions))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
StefanRijnhart/odoo | addons/lunch/tests/__init__.py | 172 | 1107 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_lunch
checks = [
test_lunch,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mathieugouin/txtWeb | mailer.py | 1 | 4448 | # User libs
import mgouinlib as mgl
import sendmail as sendmail
# System libs
import webapp2
#*******************************************************************************
# http://localhost:8080/mailer?from=me@acme.com&to=mgouin@gmail.com&message=hello&subject=test&name=Mat%20Gouin&redirect=redir
# http://mgouin.appspot.com/mailer?from=me@acme.com&to=mgouin@gmail.com&message=hello&subject=test&name=Mat%20Gouin&redirect=redir
#
# NOTES:
# Need post form method
#*******************************************************************************
def validateInput(args):
ret = True
requiredArgs = [
"to",
"from"
]
for a in requiredArgs:
if not a in args.keys() or len(args[a]) == 0:
ret = False
break
return ret
class MainPage(webapp2.RequestHandler):
#********************************************************************************
def commonHandler(self):
mgl.myLog("commonHandler()")
lines = []
args = {}
for argKey in self.request.arguments():
argVal = self.request.get(argKey)
args[argKey] = argVal
lines.append(argKey + " = " + argVal)
mgl.myLog(args)
if not validateInput(args):
lines.extend([
"Bad attributes: email is not sent",
])
# Good attributes
else:
#Real email sending
sendmail.sendMail(
self.request.get("from"),
self.request.get("to"),
self.request.get("subject"),
self.request.get("message"),
self.request.get("name"),
#args["from"], args["to"], args["subject"], args["message"], args["name"]
)
#TBD test
#sendmail.sendMailTest()
redirectURL = self.request.get("redirect").encode('ascii', 'ignore')
if len(redirectURL) > 0:
mgl.myLog("Performing redirection to " + redirectURL)
self.redirect(redirectURL)
else:
# Normal page display
self.response.headers["Content-Type"] = "text/html; charset=utf-8"
self.response.write('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"\n')
self.response.write(' "http://www.w3.org/TR/html4/loose.dtd">\n')
self.response.write('<html>\n' +
'<head>\n' +
'<title>Mailer</title>\n' +
'<meta http-equiv="Content-type" content="text/html;charset=UTF-8">\n' +
'</head>\n' +
'<body>\n\n')
for l in lines:
mgl.myLog(l)
self.response.write(mgl.processLine(l))
self.response.write('\n')
self.response.write('<div><img alt="Powered by Google" src="powered-by-google-on-white.png" width="104" height="16"></div>\n')
self.response.write('\n')
self.response.write(r"""<script type="text/javascript">""" + "\n")
self.response.write(r""" (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){""" + "\n")
self.response.write(r""" (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),""" + "\n")
self.response.write(r""" m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)""" + "\n")
self.response.write(r""" })(window,document,'script','//www.google-analytics.com/analytics.js','ga');""" + "\n")
self.response.write(r""" ga('create', 'UA-1787000-3', 'mgouin.appspot.com');""" + "\n")
self.response.write(r""" ga('send', 'pageview');""" + "\n")
self.response.write(r"""</script>""" + "\n")
self.response.write("\n</body>\n</html>\n")
#********************************************************************************
def post(self):
mgl.myLog("********************************************************************************")
mgl.myLog("post()")
self.commonHandler()
#********************************************************************************
def get(self):
mgl.myLog("********************************************************************************")
mgl.myLog("get()")
self.commonHandler()
app = webapp2.WSGIApplication([(r'/.*', MainPage)], debug=True)
| gpl-2.0 |
nozuono/calibre-webserver | src/calibre/ebooks/oeb/transforms/embed_fonts.py | 6 | 9738 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import logging
from collections import defaultdict
import cssutils
from lxml import etree
from calibre import guess_type
from calibre.ebooks.oeb.base import XPath, CSS_MIME, XHTML
from calibre.ebooks.oeb.transforms.subset import get_font_properties, find_font_face_rules, elem_style
from calibre.utils.filenames import ascii_filename
from calibre.utils.fonts.scanner import font_scanner, NoFonts
def used_font(style, embedded_fonts):
    """Decide whether *style* needs an embedded font and which one matches.

    :param style: dict of font properties for an element (font-family is a
        list of family names; font-stretch/font-style/font-weight optional).
    :param embedded_fonts: list of dicts describing available @font-face
        fonts (as produced by find_font_face_rules()).
    :return: tuple ``(has_font, matched)`` where ``has_font`` is True if the
        style references a non-generic family, and ``matched`` is the best
        matching font dict from *embedded_fonts* (or None if no match).
    """
    # Families explicitly requested by the style, ignoring generic families
    # which never need embedding.
    ff = [unicode(f) for f in style.get('font-family', []) if unicode(f).lower() not in {
        'serif', 'sansserif', 'sans-serif', 'fantasy', 'cursive', 'monospace'}]
    if not ff:
        return False, None
    lnames = {unicode(x).lower() for x in ff}
    matching_set = []
    # Filter on font-family
    for ef in embedded_fonts:
        flnames = {x.lower() for x in ef.get('font-family', [])}
        if not lnames.intersection(flnames):
            continue
        matching_set.append(ef)
    if not matching_set:
        return True, None
    # Filter on font-stretch (CSS font matching: nearer widths are better)
    widths = {x:i for i, x in enumerate(('ultra-condensed',
        'extra-condensed', 'condensed', 'semi-condensed', 'normal',
        'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'
        ))}
    width = widths[style.get('font-stretch', 'normal')]
    for f in matching_set:
        # BUG FIX: rank each candidate by its *own* font-stretch value. The
        # previous code assigned the style's stretch to every candidate,
        # which made this filter a no-op (min_dist was always 0 and
        # wrong-stretch fonts were never eliminated).
        f['width'] = widths[f.get('font-stretch', 'normal')]
    min_dist = min(abs(width-f['width']) for f in matching_set)
    if min_dist > 0:
        return True, None
    nearest = [f for f in matching_set if abs(width-f['width']) ==
               min_dist]
    # Per CSS rules: condensed-ish widths prefer narrower fonts, expanded
    # widths prefer wider ones.
    if width <= 4:
        lmatches = [f for f in nearest if f['width'] <= width]
    else:
        lmatches = [f for f in nearest if f['width'] >= width]
    matching_set = (lmatches or nearest)
    # Filter on font-style
    fs = style.get('font-style', 'normal')
    matching_set = [f for f in matching_set if f.get('font-style', 'normal') == fs]
    # Filter on font weight
    fw = int(style.get('font-weight', '400'))
    matching_set = [f for f in matching_set if f.get('weight', 400) == fw]
    if not matching_set:
        return True, None
    return True, matching_set[0]
class EmbedFonts(object):
    '''
    Embed all referenced fonts, if found on system. Must be called after CSS flattening.
    '''

    def __call__(self, oeb, log, opts):
        # Entry point: index existing style/@font-face rules, then scan every
        # spine item's linked stylesheets for font usage that needs embedding.
        self.oeb, self.log, self.opts = oeb, log, opts
        self.sheet_cache = {}
        self.find_style_rules()
        self.find_embedded_fonts()
        self.parser = cssutils.CSSParser(loglevel=logging.CRITICAL, log=logging.getLogger('calibre.css'))
        # warned: families we failed to find; warned2: invalid weight values.
        self.warned = set()
        self.warned2 = set()
        for item in oeb.spine:
            if not hasattr(item.data, 'xpath'):
                continue
            sheets = []
            for href in XPath('//h:link[@href and @type="text/css"]/@href')(item.data):
                sheet = self.oeb.manifest.hrefs.get(item.abshref(href), None)
                if sheet is not None:
                    sheets.append(sheet)
            if sheets:
                self.process_item(item, sheets)

    def find_embedded_fonts(self):
        '''
        Find all @font-face rules and extract the relevant info from them.
        '''
        self.embedded_fonts = []
        for item in self.oeb.manifest:
            if not hasattr(item.data, 'cssRules'):
                continue
            self.embedded_fonts.extend(find_font_face_rules(item, self.oeb))

    def find_style_rules(self):
        '''
        Extract all font related style information from all stylesheets into a
        dict mapping classes to font properties specified by that class. All
        the heavy lifting has already been done by the CSS flattening code.
        '''
        rules = defaultdict(dict)
        for item in self.oeb.manifest:
            if not hasattr(item.data, 'cssRules'):
                continue
            for i, rule in enumerate(item.data.cssRules):
                if rule.type != rule.STYLE_RULE:
                    continue
                props = {k:v for k,v in
                         get_font_properties(rule).iteritems() if v}
                if not props:
                    continue
                for sel in rule.selectorList:
                    sel = sel.selectorText
                    if sel and sel.startswith('.'):
                        # We dont care about pseudo-selectors as the worst that
                        # can happen is some extra characters will remain in
                        # the font
                        sel = sel.partition(':')[0]
                        rules[sel[1:]].update(props)
        self.style_rules = dict(rules)

    def get_page_sheet(self):
        # Return the stylesheet that holds generated @font-face rules for the
        # current item, creating it and linking it into <head> on first use.
        if self.page_sheet is None:
            manifest = self.oeb.manifest
            id_, href = manifest.generate('page_css', 'page_styles.css')
            self.page_sheet = manifest.add(id_, href, CSS_MIME, data=self.parser.parseString('', validate=False))
            head = self.current_item.xpath('//*[local-name()="head"][1]')
            if head:
                href = self.current_item.relhref(href)
                l = etree.SubElement(head[0], XHTML('link'),
                                     rel='stylesheet', type=CSS_MIME, href=href)
                l.tail = '\n'
            else:
                self.log.warn('No <head> cannot embed font rules')
        return self.page_sheet

    def process_item(self, item, sheets):
        # Collect @font-face rules already applying to this item from any
        # previously generated page_css sheet, then walk each <body> looking
        # for styles that need font embedding.
        ff_rules = []
        self.current_item = item
        self.page_sheet = None
        for sheet in sheets:
            if 'page_css' in sheet.id:
                ff_rules.extend(find_font_face_rules(sheet, self.oeb))
                self.page_sheet = sheet
        # Root style inherited by everything in <body>.
        base = {'font-family':['serif'], 'font-weight': '400',
                'font-style':'normal', 'font-stretch':'normal'}
        for body in item.data.xpath('//*[local-name()="body"]'):
            self.find_usage_in(body, base, ff_rules)

    def find_usage_in(self, elem, inherited_style, ff_rules):
        # Depth-first walk: resolve the effective style of elem, recurse into
        # children, then ensure a matching @font-face rule exists for elem.
        style = elem_style(self.style_rules, elem.get('class', '') or '', inherited_style)
        for child in elem:
            self.find_usage_in(child, style, ff_rules)
        has_font, existing = used_font(style, ff_rules)
        if not has_font:
            return
        if existing is None:
            # No rule for this item yet; maybe the book already embeds it.
            in_book = used_font(style, self.embedded_fonts)[1]
            if in_book is None:
                # Try to find the font in the system
                added = self.embed_font(style)
                if added is not None:
                    ff_rules.append(added)
                    self.embedded_fonts.append(added)
            else:
                # TODO: Create a page rule from the book rule (cannot use it
                # directly as paths might be different)
                item = in_book['item']
                sheet = self.parser.parseString(in_book['rule'].cssText, validate=False)
                rule = sheet.cssRules[0]
                page_sheet = self.get_page_sheet()
                href = page_sheet.abshref(item.href)
                rule.style.setProperty('src', 'url(%s)' % href)
                ff_rules.append(find_font_face_rules(sheet, self.oeb)[0])
                page_sheet.data.insertRule(rule, len(page_sheet.data.cssRules))

    def embed_font(self, style):
        # Locate a system font matching style, add its data to the manifest
        # and write a corresponding @font-face rule into the page sheet.
        # Returns the parsed rule info dict, or None if nothing was embedded.
        ff = [unicode(f) for f in style.get('font-family', []) if unicode(f).lower() not in {
            'serif', 'sansserif', 'sans-serif', 'fantasy', 'cursive', 'monospace'}]
        if not ff:
            return
        ff = ff[0]
        if ff in self.warned or ff == 'inherit':
            return
        try:
            fonts = font_scanner.fonts_for_family(ff)
        except NoFonts:
            self.log.warn('Failed to find fonts for family:', ff, 'not embedding')
            self.warned.add(ff)
            return
        try:
            weight = int(style.get('font-weight', '400'))
        except (ValueError, TypeError, AttributeError):
            w = style['font-weight']
            if w not in self.warned2:
                self.log.warn('Invalid weight in font style: %r' % w)
                self.warned2.add(w)
            return
        for f in fonts:
            if f['weight'] == weight and f['font-style'] == style.get('font-style', 'normal') and f['font-stretch'] == style.get('font-stretch', 'normal'):
                self.log('Embedding font %s from %s' % (f['full_name'], f['path']))
                data = font_scanner.get_font_data(f)
                name = f['full_name']
                ext = 'otf' if f['is_otf'] else 'ttf'
                # Sanitize the name for use as a filename.
                name = ascii_filename(name).replace(' ', '-').replace('(', '').replace(')', '')
                fid, href = self.oeb.manifest.generate(id=u'font', href=u'fonts/%s.%s'%(name, ext))
                item = self.oeb.manifest.add(fid, href, guess_type('dummy.'+ext)[0], data=data)
                item.unload_data_from_memory()
                page_sheet = self.get_page_sheet()
                href = page_sheet.relhref(item.href)
                css = '''@font-face { font-family: "%s"; font-weight: %s; font-style: %s; font-stretch: %s; src: url(%s) }''' % (
                    f['font-family'], f['font-weight'], f['font-style'], f['font-stretch'], href)
                sheet = self.parser.parseString(css, validate=False)
                page_sheet.data.insertRule(sheet.cssRules[0], len(page_sheet.data.cssRules))
                return find_font_face_rules(sheet, self.oeb)[0]
| gpl-3.0 |
jakeshi/python-twitter | twitter/api.py | 1 | 202358 | #!/usr/bin/env python
#
#
# Copyright 2007-2016 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python interface to the Twitter API"""
from __future__ import division
from __future__ import print_function
import json
import sys
import gzip
import time
import base64
import re
import requests
from requests_oauthlib import OAuth1, OAuth2
import io
import warnings
from uuid import uuid4
import os
try:
# python 3
from urllib.parse import urlparse, urlunparse, urlencode, quote_plus
from urllib.request import urlopen
from urllib.request import __version__ as urllib_version
except ImportError:
from urlparse import urlparse, urlunparse
from urllib2 import urlopen
from urllib import urlencode, quote_plus
from urllib import __version__ as urllib_version
from twitter import (
__version__,
_FileCache,
Category,
DirectMessage,
List,
Status,
Trend,
User,
UserStatus,
)
from twitter.ratelimit import RateLimit
from twitter.twitter_utils import (
calc_expected_status_length,
is_url,
parse_media_file,
enf_type)
from twitter.error import (
TwitterError,
PythonTwitterDeprecationWarning330,
)
# Python 3 removed the ``long`` type; alias it to ``int`` so code referencing
# ``long`` keeps working on both major versions.
if sys.version_info > (3,):
    long = int
# Maximum tweet length in characters.
CHARACTER_LIMIT = 280
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
class Api(object):
"""A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print([s.text for s in statuses])
To use authentication, instantiate the twitter.Api class with a
consumer key and secret; and the oAuth key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print([u.name for u in users])
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print(status.text)
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetHomeTimeline()
>>> api.GetStatus(status_id)
>>> api.DestroyStatus(status_id)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.GetSentDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(message_id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.LookupFriendship(user)
>>> api.VerifyCredentials()
"""
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
             consumer_key=None,
             consumer_secret=None,
             access_token_key=None,
             access_token_secret=None,
             application_only_auth=False,
             input_encoding=None,
             request_headers=None,
             cache=DEFAULT_CACHE,
             base_url=None,
             stream_url=None,
             upload_url=None,
             chunk_size=1024 * 1024,
             use_gzip_compression=False,
             debugHTTP=False,
             timeout=None,
             sleep_on_rate_limit=False,
             tweet_mode='compat',
             proxies=None):
    """Instantiate a new twitter.Api object.

    Args:
      consumer_key (str):
        Your Twitter user's consumer_key.
      consumer_secret (str):
        Your Twitter user's consumer_secret.
      access_token_key (str):
        The oAuth access token key value you retrieved
        from running get_access_token.py.
      access_token_secret (str):
        The oAuth access token's secret, also retrieved
        from the get_access_token.py run.
      application_only_auth:
        Use Application-Only Auth instead of User Auth.
        Defaults to False [Optional]
      input_encoding (str, optional):
        The encoding used to encode input strings.
      request_headers (dict, optional):
        A dictionary of additional HTTP request headers.
      cache (object, optional):
        The cache instance to use. Defaults to DEFAULT_CACHE.
        Use None to disable caching.
      base_url (str, optional):
        The base URL to use to contact the Twitter API.
        Defaults to https://api.twitter.com.
      stream_url (str, optional):
        The base URL to use for streaming endpoints.
        Defaults to 'https://stream.twitter.com/1.1'.
      upload_url (str, optional):
        The base URL to use for uploads. Defaults to 'https://upload.twitter.com/1.1'.
      chunk_size (int, optional):
        Chunk size to use for chunked (multi-part) uploads of images/videos/gifs.
        Defaults to 1MB. Anything under 16KB and you run the risk of erroring out
        on 15MB files.
      use_gzip_compression (bool, optional):
        Set to True to tell enable gzip compression for any call
        made to Twitter. Defaults to False.
      debugHTTP (bool, optional):
        Set to True to enable debug output from urllib2 when performing
        any HTTP requests. Defaults to False.
      timeout (int, optional):
        Set timeout (in seconds) of the http/https requests. If None the
        requests lib default will be used. Defaults to None.
      sleep_on_rate_limit (bool, optional):
        Whether to sleep an appropriate amount of time if a rate limit is hit for
        an endpoint.
      tweet_mode (str, optional):
        Whether to use the new (as of Sept. 2016) extended tweet mode. See docs for
        details. Choices are ['compatibility', 'extended'].
      proxies (dict, optional):
        A dictionary of proxies for the request to pass through, if not specified
        allows requests lib to use environmental variables for proxy if any.

    Raises:
      TwitterError: if a consumer_key is given without the matching access
        token credentials (and application-only auth is not requested).
    """
    # check to see if the library is running on a Google App Engine instance
    # see GAE.rst for more information
    if os.environ:
        if 'APPENGINE_RUNTIME' in os.environ.keys():
            import requests_toolbelt.adapters.appengine  # Adapter ensures requests use app engine's urlfetch
            requests_toolbelt.adapters.appengine.monkeypatch()
            cache = None  # App Engine does not like this caching strategy, disable caching
    self.SetCache(cache)
    self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
    self._input_encoding = input_encoding
    self._use_gzip = use_gzip_compression
    self._debugHTTP = debugHTTP
    self._shortlink_size = 19
    if timeout and timeout < 30:
        # BUG FIX: this previously called the unqualified name ``warn``,
        # which is never imported (only ``import warnings`` exists), so a
        # short timeout raised NameError instead of warning.
        warnings.warn("Warning: The Twitter streaming API sends 30s keepalives, the given timeout is shorter!")
    self._timeout = timeout
    self.__auth = None

    self._InitializeRequestHeaders(request_headers)
    self._InitializeUserAgent()
    self._InitializeDefaultParameters()

    self.rate_limit = RateLimit()
    self.sleep_on_rate_limit = sleep_on_rate_limit
    self.tweet_mode = tweet_mode
    self.proxies = proxies

    if base_url is None:
        self.base_url = 'https://api.twitter.com/1.1'
    else:
        self.base_url = base_url

    if stream_url is None:
        self.stream_url = 'https://stream.twitter.com/1.1'
    else:
        self.stream_url = stream_url

    if upload_url is None:
        self.upload_url = 'https://upload.twitter.com/1.1'
    else:
        self.upload_url = upload_url

    self.chunk_size = chunk_size

    if self.chunk_size < 1024 * 16:
        warnings.warn((
            "A chunk size lower than 16384 may result in too many "
            "requests to the Twitter API when uploading videos. You are "
            "strongly advised to increase it above 16384"
        ))

    if (consumer_key and not
       (application_only_auth or all([access_token_key, access_token_secret]))):
        raise TwitterError({'message': "Missing oAuth Consumer Key or Access Token"})

    self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret,
                        application_only_auth)

    if debugHTTP:
        import logging
        try:
            import http.client as http_client  # python3
        except ImportError:
            import httplib as http_client  # python2

        http_client.HTTPConnection.debuglevel = 1

        logging.basicConfig()  # you need to initialize logging, otherwise you will not see anything from requests
        logging.getLogger().setLevel(logging.DEBUG)
        requests_log = logging.getLogger("requests.packages.urllib3")
        requests_log.setLevel(logging.DEBUG)
        requests_log.propagate = True
def GetAppOnlyAuthToken(self, consumer_key, consumer_secret):
    """Exchange the consumer credentials for an application-only bearer token.

    Args:
      consumer_key: The application's consumer key.
      consumer_secret: The application's consumer secret.

    Returns:
      dict: The bearer-token credentials as returned by Twitter's
      oauth2/token endpoint.
    """
    # Twitter requires "key:secret", URL-encoded then base64-encoded,
    # as the Basic auth payload.
    credential_pair = '{}:{}'.format(quote_plus(consumer_key),
                                     quote_plus(consumer_secret))
    encoded_creds = base64.b64encode(credential_pair.encode('utf8'))
    headers = {
        'Authorization': 'Basic {0}'.format(encoded_creds.decode('utf8')),
        'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
    }
    response = requests.post(url='https://api.twitter.com/oauth2/token',
                             data={'grant_type': 'client_credentials'},
                             headers=headers)
    return response.json()
def SetCredentials(self,
consumer_key,
consumer_secret,
access_token_key=None,
access_token_secret=None,
application_only_auth=False):
"""Set the consumer_key and consumer_secret for this instance
Args:
consumer_key:
The consumer_key of the twitter account.
consumer_secret:
The consumer_secret for the twitter account.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
application_only_auth:
Whether to generate a bearer token and use Application-Only Auth
"""
self._consumer_key = consumer_key
self._consumer_secret = consumer_secret
self._access_token_key = access_token_key
self._access_token_secret = access_token_secret
if application_only_auth:
self._bearer_token = self.GetAppOnlyAuthToken(consumer_key, consumer_secret)
self.__auth = OAuth2(token=self._bearer_token)
else:
auth_list = [consumer_key, consumer_secret,
access_token_key, access_token_secret]
if all(auth_list):
self.__auth = OAuth1(consumer_key, consumer_secret,
access_token_key, access_token_secret)
self._config = None
def GetHelpConfiguration(self):
"""Get basic help configuration details from Twitter.
Args:
None
Returns:
dict: Sets self._config and returns dict of help config values.
"""
if self._config is None:
url = '%s/help/configuration.json' % self.base_url
resp = self._RequestUrl(url, 'GET')
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
self._config = data
return self._config
def GetShortUrlLength(self, https=False):
"""Returns number of characters reserved per URL included in a tweet.
Args:
https (bool, optional):
If True, return number of characters reserved for https urls
or, if False, return number of character reserved for http urls.
Returns:
(int): Number of characters reserved per URL.
"""
config = self.GetHelpConfiguration()
if https:
return config['short_url_length_https']
else:
return config['short_url_length']
def ClearCredentials(self):
"""Clear any credentials for this instance
"""
self._consumer_key = None
self._consumer_secret = None
self._access_token_key = None
self._access_token_secret = None
self._bearer_token = None
self.__auth = None # for request upgrade
def GetSearch(self,
              term=None,
              raw_query=None,
              geocode=None,
              since_id=None,
              max_id=None,
              until=None,
              since=None,
              count=15,
              lang=None,
              locale=None,
              result_type="mixed",
              include_entities=None,
              return_json=False):
    """Return twitter search results for a given term. You must specify one
    of term, geocode, or raw_query.

    Args:
      term (str, optional):
        Term to search by. Optional if you include geocode.
      raw_query (str, optional):
        A raw query as a string. This should be everything after the "?" in
        the URL (i.e., the query parameters). You are responsible for all
        type checking and ensuring that the query string is properly
        formatted, as it will only be URL-encoded before be passed directly
        to Twitter with no other checks performed. For advanced usage only.
        *This will override any other parameters passed*
      since_id (int, optional):
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available.
      max_id (int, optional):
        Returns only statuses with an ID less than (that is, older
        than) or equal to the specified ID.
      until (str, optional):
        Returns tweets generated before the given date. Date should be
        formatted as YYYY-MM-DD.
      since (str, optional):
        Returns tweets generated since the given date. Date should be
        formatted as YYYY-MM-DD.
      geocode (str or list or tuple, optional):
        Geolocation within which to search for tweets. Can be either a
        string in the form of "latitude,longitude,radius" where latitude
        and longitude are floats and radius is a string such as "1mi" or
        "1km" ("mi" or "km" are the only units allowed). For example:
          >>> api.GetSearch(geocode="37.781157,-122.398720,1mi").
        Otherwise, you can pass a list of either floats or strings for
        lat/long and a string for radius:
          >>> api.GetSearch(geocode=[37.781157, -122.398720, "1mi"])
          >>> # or:
          >>> api.GetSearch(geocode=(37.781157, -122.398720, "1mi"))
          >>> # or:
          >>> api.GetSearch(geocode=("37.781157", "-122.398720", "1mi"))
      count (int, optional):
        Number of results to return. Default is 15 and maximum that
        Twitter returns is 100 irrespective of what you type in.
      lang (str, optional):
        Language for results as ISO 639-1 code. Default is None
        (all languages).
      locale (str, optional):
        Language of the search query. Currently only 'ja' is effective.
        This is intended for language-specific consumers and the default
        should work in the majority of cases.
      result_type (str, optional):
        Type of result which should be returned. Default is "mixed".
        Valid options are "mixed, "recent", and "popular".
      include_entities (bool, optional):
        If True, each tweet will include a node called "entities".
        This node offers a variety of metadata about the tweet in a
        discrete structure, including: user_mentions, urls, and
        hashtags.
      return_json (bool, optional):
        If True the raw JSON dict will be returned, instead of a list of
        twitter.Status instances.

    Returns:
      list: A sequence of twitter.Status instances, one for each message
      containing the term, within the bounds of the geocoded area, or
      given by the raw_query.
    """
    url = '%s/search/tweets.json' % self.base_url
    parameters = {}

    # Validate (and coerce) each supplied parameter; enf_type raises
    # TwitterError on the wrong type.
    if since_id:
        parameters['since_id'] = enf_type('since_id', int, since_id)

    if max_id:
        parameters['max_id'] = enf_type('max_id', int, max_id)

    if until:
        parameters['until'] = enf_type('until', str, until)

    if since:
        parameters['since'] = enf_type('since', str, since)

    if lang:
        parameters['lang'] = enf_type('lang', str, lang)

    if locale:
        parameters['locale'] = enf_type('locale', str, locale)

    # Without any of the three search specifiers there is nothing to query.
    if term is None and geocode is None and raw_query is None:
        return []

    if term is not None:
        parameters['q'] = term

    if geocode is not None:
        # Accept either a preformatted string or a (lat, long, radius)
        # sequence, which is joined into the string Twitter expects.
        if isinstance(geocode, list) or isinstance(geocode, tuple):
            parameters['geocode'] = ','.join([str(geo) for geo in geocode])
        else:
            parameters['geocode'] = enf_type('geocode', str, geocode)

    if include_entities:
        parameters['include_entities'] = enf_type('include_entities',
                                                  bool,
                                                  include_entities)

    parameters['count'] = enf_type('count', int, count)

    if result_type in ["mixed", "popular", "recent"]:
        parameters['result_type'] = result_type

    # raw_query overrides everything else and is appended to the URL as-is.
    if raw_query is not None:
        url = "{url}?{raw_query}".format(
            url=url,
            raw_query=raw_query)
        resp = self._RequestUrl(url, 'GET')
    else:
        resp = self._RequestUrl(url, 'GET', data=parameters)

    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    if return_json:
        return data
    else:
        return [Status.NewFromJsonDict(x) for x in data.get('statuses', '')]
def GetUsersSearch(self,
                   term=None,
                   page=1,
                   count=20,
                   include_entities=None):
    """Return twitter user search results for a given term.

    Args:
      term:
        Term to search by.
      page:
        Page of results to return. Default is 1
        [Optional]
      count:
        Number of results to return. Default is 20
        [Optional]
      include_entities:
        If True, each tweet will include a node called "entities,".
        This node offers a variety of metadata about the tweet in a
        discrete structure, including: user_mentions, urls, and hashtags.
        [Optional]

    Returns:
      A sequence of twitter.User instances, one for each message containing
      the term
    """
    # Assemble the request parameters, skipping defaults the API assumes.
    params = {}
    if term is not None:
        params['q'] = term

    if page != 1:
        params['page'] = page

    if include_entities:
        params['include_entities'] = 1

    try:
        params['count'] = int(count)
    except ValueError:
        raise TwitterError({'message': "count must be an integer"})

    # Issue the request and convert each result into a User instance.
    resp = self._RequestUrl('%s/users/search.json' % self.base_url,
                            'GET', data=params)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    return [User.NewFromJsonDict(x) for x in data]
def GetTrendsCurrent(self, exclude=None):
    """Get the current top trending topics (global)

    Args:
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with 10 entries. Each entry contains a trend.
    """
    # WOEID 1 is Twitter's special "worldwide" location.
    return self.GetTrendsWoeid(woeid=1, exclude=exclude)
def GetTrendsWoeid(self, woeid, exclude=None):
    """Return the top 10 trending topics for a specific WOEID, if trending
    information is available for it.

    Args:
      woeid:
        the Yahoo! Where On Earth ID for a location.
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with 10 entries. Each entry contains a trend.
    """
    params = {'id': woeid}
    if exclude:
        params['exclude'] = exclude

    resp = self._RequestUrl('%s/trends/place.json' % (self.base_url),
                            verb='GET', data=params)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    # The response is a one-element list; all trends share its timestamp.
    timestamp = data[0]['as_of']
    return [Trend.NewFromJsonDict(t, timestamp=timestamp)
            for t in data[0]['trends']]
def GetUserSuggestionCategories(self):
    """ Return the list of suggested user categories, this can be used in
        GetUserSuggestion function

    Returns:
      A list of categories
    """
    url = '%s/users/suggestions.json' % (self.base_url)
    resp = self._RequestUrl(url, verb='GET')
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    return [Category.NewFromJsonDict(entry) for entry in data]
def GetUserSuggestion(self, category):
    """ Returns a list of users in a category

    Args:
      category:
        The Category object to limit the search by

    Returns:
      A list of users in that category
    """
    url = '%s/users/suggestions/%s.json' % (self.base_url, category.slug)
    resp = self._RequestUrl(url, verb='GET')
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    return [User.NewFromJsonDict(entry) for entry in data['users']]
def GetHomeTimeline(self,
                    count=None,
                    since_id=None,
                    max_id=None,
                    trim_user=False,
                    exclude_replies=False,
                    contributor_details=False,
                    include_entities=True):
    """Fetch a collection of the most recent Tweets and retweets posted
    by the authenticating user and the users they follow.

    The home timeline is central to how most users interact with Twitter.

    Args:
      count:
        Specifies the number of statuses to retrieve. May not be
        greater than 200. Defaults to 20. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available. [Optional]
      max_id:
        Returns results with an ID less than (that is, older than) or
        equal to the specified ID. [Optional]
      trim_user:
        When True, each tweet returned in a timeline will include a user
        object including only the status authors numerical ID. Omit this
        parameter to receive the complete user object. [Optional]
      exclude_replies:
        This parameter will prevent replies from appearing in the
        returned timeline. Using exclude_replies with the count
        parameter will mean you will receive up-to count tweets -
        this is because the count parameter retrieves that many
        tweets before filtering out retweets and replies. [Optional]
      contributor_details:
        This parameter enhances the contributors element of the
        status response to include the screen_name of the contributor.
        By default only the user_id of the contributor is included. [Optional]
      include_entities:
        The entities node will be disincluded when set to false.
        This node offers a variety of metadata about the tweet in a
        discreet structure, including: user_mentions, urls, and
        hashtags. [Optional]

    Raises:
      TwitterError: if count/since_id/max_id are not integers or count
        exceeds 200.

    Returns:
      A sequence of twitter.Status instances, one for each message
    """
    url = '%s/statuses/home_timeline.json' % self.base_url

    parameters = {}
    if count is not None:
        try:
            # Twitter caps this endpoint at 200 statuses per request.
            if int(count) > 200:
                raise TwitterError({'message': "'count' may not be greater than 200"})
        except ValueError:
            raise TwitterError({'message': "'count' must be an integer"})
        parameters['count'] = count
    if since_id:
        try:
            parameters['since_id'] = int(since_id)
        except ValueError:
            raise TwitterError({'message': "'since_id' must be an integer"})
    if max_id:
        try:
            parameters['max_id'] = int(max_id)
        except ValueError:
            raise TwitterError({'message': "'max_id' must be an integer"})
    # Boolean flags are passed as 1 / 'false' per the API's expectations.
    if trim_user:
        parameters['trim_user'] = 1
    if exclude_replies:
        parameters['exclude_replies'] = 1
    if contributor_details:
        parameters['contributor_details'] = 1
    if not include_entities:
        parameters['include_entities'] = 'false'
    resp = self._RequestUrl(url, 'GET', data=parameters)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    return [Status.NewFromJsonDict(x) for x in data]
def GetUserTimeline(self,
                    user_id=None,
                    screen_name=None,
                    since_id=None,
                    max_id=None,
                    count=None,
                    include_rts=True,
                    trim_user=False,
                    exclude_replies=False):
    """Fetch the sequence of public Status messages for a single user.

    The twitter.Api instance must be authenticated if the user is private.

    Args:
      user_id (int, optional):
        Specifies the ID of the user for whom to return the
        user_timeline. Helpful for disambiguating when a valid user ID
        is also a valid screen name.
      screen_name (str, optional):
        Specifies the screen name of the user for whom to return the
        user_timeline. Helpful for disambiguating when a valid screen
        name is also a user ID.
      since_id (int, optional):
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available.
      max_id (int, optional):
        Returns only statuses with an ID less than (that is, older
        than) or equal to the specified ID.
      count (int, optional):
        Specifies the number of statuses to retrieve. May not be
        greater than 200.
      include_rts (bool, optional):
        If True, the timeline will contain native retweets (if they
        exist) in addition to the standard stream of tweets.
      trim_user (bool, optional):
        If True, statuses will only contain the numerical user ID only.
        Otherwise a full user object will be returned for each status.
      exclude_replies (bool, optional)
        If True, this will prevent replies from appearing in the returned
        timeline. Using exclude_replies with the count parameter will mean you
        will receive up-to count tweets - this is because the count parameter
        retrieves that many tweets before filtering out retweets and replies.
        This parameter is only supported for JSON and XML responses.

    Returns:
      A sequence of Status instances, one for each message up to count
    """
    url = '%s/statuses/user_timeline.json' % (self.base_url)
    parameters = {}

    # user_id takes precedence over screen_name when both are given.
    if user_id:
        parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name:
        parameters['screen_name'] = screen_name

    if since_id:
        parameters['since_id'] = enf_type('since_id', int, since_id)

    if max_id:
        parameters['max_id'] = enf_type('max_id', int, max_id)

    if count:
        parameters['count'] = enf_type('count', int, count)

    parameters['include_rts'] = enf_type('include_rts', bool, include_rts)
    parameters['trim_user'] = enf_type('trim_user', bool, trim_user)
    parameters['exclude_replies'] = enf_type('exclude_replies', bool, exclude_replies)

    resp = self._RequestUrl(url, 'GET', data=parameters)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    return [Status.NewFromJsonDict(x) for x in data]
def GetStatus(self,
status_id,
trim_user=False,
include_my_retweet=True,
include_entities=True,
include_ext_alt_text=True):
"""Returns a single status message, specified by the status_id parameter.
Args:
status_id:
The numeric ID of the status you are trying to retrieve.
trim_user:
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Omit this parameter to receive the complete user object. [Optional]
include_my_retweet:
When set to True, any Tweets returned that have been retweeted by
the authenticating user will include an additional
current_user_retweet node, containing the ID of the source status
for the retweet. [Optional]
include_entities:
If False, the entities node will be disincluded.
This node offers a variety of metadata about the tweet in a
discreet structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A twitter.Status instance representing that status message
"""
url = '%s/statuses/show.json' % (self.base_url)
parameters = {
'id': enf_type('status_id', int, status_id),
'trim_user': enf_type('trim_user', bool, trim_user),
'include_my_retweet': enf_type('include_my_retweet', bool, include_my_retweet),
'include_entities': enf_type('include_entities', bool, include_entities),
'include_ext_alt_text': enf_type('include_ext_alt_text', bool, include_ext_alt_text)
}
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetStatusOembed(self,
status_id=None,
url=None,
maxwidth=None,
hide_media=False,
hide_thread=False,
omit_script=False,
align=None,
related=None,
lang=None):
"""Returns information allowing the creation of an embedded representation of a
Tweet on third party sites.
Specify tweet by the id or url parameter.
Args:
status_id:
The numeric ID of the status you are trying to embed.
url:
The url of the status you are trying to embed.
maxwidth:
The maximum width in pixels that the embed should be rendered at.
This value is constrained to be between 250 and 550 pixels. [Optional]
hide_media:
Specifies whether the embedded Tweet should automatically expand images. [Optional]
hide_thread:
Specifies whether the embedded Tweet should automatically show the original
message in the case that the embedded Tweet is a reply. [Optional]
omit_script:
Specifies whether the embedded Tweet HTML should include a <script>
element pointing to widgets.js. [Optional]
align:
Specifies whether the embedded Tweet should be left aligned, right aligned,
or centered in the page. [Optional]
related:
A comma sperated string of related screen names. [Optional]
lang:
Language code for the rendered embed. [Optional]
Returns:
A dictionary with the response.
"""
request_url = '%s/statuses/oembed.json' % (self.base_url)
parameters = {}
if status_id is not None:
try:
parameters['id'] = int(status_id)
except ValueError:
raise TwitterError({'message': "'status_id' must be an integer."})
elif url is not None:
parameters['url'] = url
else:
raise TwitterError({'message': "Must specify either 'status_id' or 'url'"})
if maxwidth is not None:
parameters['maxwidth'] = maxwidth
if hide_media is True:
parameters['hide_media'] = 'true'
if hide_thread is True:
parameters['hide_thread'] = 'true'
if omit_script is True:
parameters['omit_script'] = 'true'
if align is not None:
if align not in ('left', 'center', 'right', 'none'):
raise TwitterError({'message': "'align' must be 'left', 'center', 'right', or 'none'"})
parameters['align'] = align
if related:
if not isinstance(related, str):
raise TwitterError({'message': "'related' should be a string of comma separated screen names"})
parameters['related'] = related
if lang is not None:
if not isinstance(lang, str):
raise TwitterError({'message': "'lang' should be string instance"})
parameters['lang'] = lang
resp = self._RequestUrl(request_url, 'GET', data=parameters, enforce_auth=False)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return data
def DestroyStatus(self, status_id, trim_user=False):
"""Destroys the status specified by the required ID parameter.
The authenticating user must be the author of the specified
status.
Args:
status_id (int):
The numerical ID of the status you're trying to destroy.
trim_user (bool, optional):
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Returns:
A twitter.Status instance representing the destroyed status message
"""
url = '%s/statuses/destroy/%s.json' % (self.base_url, status_id)
post_data = {
'id': enf_type('status_id', int, status_id),
'trim_user': enf_type('trim_user', bool, trim_user)
}
resp = self._RequestUrl(url, 'POST', data=post_data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
    def PostUpdate(self,
                   status,
                   media=None,
                   media_additional_owners=None,
                   media_category=None,
                   in_reply_to_status_id=None,
                   auto_populate_reply_metadata=False,
                   exclude_reply_user_ids=None,
                   latitude=None,
                   longitude=None,
                   place_id=None,
                   display_coordinates=False,
                   trim_user=False,
                   verify_status_length=True,
                   attachment_url=None):
        """Post a twitter status message from the authenticated user.
        https://dev.twitter.com/docs/api/1.1/post/statuses/update
        Args:
            status (str):
                The message text to be posted. Must be less than or equal to
                CHARACTER_LIMIT characters.
            media (int, str, fp, optional):
                A URL, a local file, or a file-like object (something with a
                read() method), or a list of any combination of the above.
            media_additional_owners (list, optional):
                A list of user ids representing Twitter users that should be able
                to use the uploaded media in their tweets. If you pass a list of
                media, then additional_owners will apply to each object. If you
                need more granular control, please use the UploadMedia* methods.
            media_category (str, optional):
                Only for use with the AdsAPI. See
                https://dev.twitter.com/ads/creative/promoted-video-overview if
                this applies to your application.
            in_reply_to_status_id (int, optional):
                The ID of an existing status that the status to be posted is
                in reply to. This implicitly sets the in_reply_to_user_id
                attribute of the resulting status to the user ID of the
                message being replied to. Invalid/missing status IDs will be
                ignored.
            auto_populate_reply_metadata (bool, optional):
                Automatically include the @usernames of the users mentioned or
                participating in the tweet to which this tweet is in reply.
            exclude_reply_user_ids (list, optional):
                Remove given user_ids (*not* @usernames) from the tweet's
                automatically generated reply metadata.
            attachment_url (str, optional):
                URL to an attachment resource: one to four photos, a GIF,
                video, Quote Tweet, or DM deep link. If not specified and
                media parameter is not None, we will attach the first media
                object as the attachment URL. If a bad URL is passed, Twitter
                will raise an error.
            latitude (float, optional):
                Latitude coordinate of the tweet in degrees. Will only work
                in conjunction with longitude argument. Both longitude and
                latitude will be ignored by twitter if the user has a false
                geo_enabled setting.
            longitude (float, optional):
                Longitude coordinate of the tweet in degrees. Will only work
                in conjunction with latitude argument. Both longitude and
                latitude will be ignored by twitter if the user has a false
                geo_enabled setting.
            place_id (int, optional):
                A place in the world. These IDs can be retrieved from
                GET geo/reverse_geocode.
            display_coordinates (bool, optional):
                Whether or not to put a pin on the exact coordinates a tweet
                has been sent from.
            trim_user (bool, optional):
                If True the returned payload will only contain the user IDs,
                otherwise the payload will contain the full user data item.
            verify_status_length (bool, optional):
                If True, api throws a hard error that the status is over
                CHARACTER_LIMIT characters. If False, Api will attempt to post
                the status.
        Returns:
            (twitter.Status) A twitter.Status instance representing the
            message posted.
        """
        url = '%s/statuses/update.json' % self.base_url
        # Normalize the status text to a str, decoding with the configured
        # input encoding when one is set and the input is not already str.
        if isinstance(status, str) or self._input_encoding is None:
            u_status = status
        else:
            u_status = str(status, self._input_encoding)
        # calc_expected_status_length accounts for Twitter's t.co URL
        # shortening, so the check matches what Twitter will actually count.
        if verify_status_length and calc_expected_status_length(u_status) > CHARACTER_LIMIT:
            raise TwitterError("Text must be less than or equal to CHARACTER_LIMIT characters.")
        if auto_populate_reply_metadata and not in_reply_to_status_id:
            raise TwitterError("If auto_populate_reply_metadata is True, you must set in_reply_to_status_id")
        parameters = {
            'status': u_status,
            'in_reply_to_status_id': in_reply_to_status_id,
            'auto_populate_reply_metadata': auto_populate_reply_metadata,
            'place_id': place_id,
            'display_coordinates': display_coordinates,
            'trim_user': trim_user,
            # Twitter expects a comma-separated string of numeric user IDs.
            'exclude_reply_user_ids': ','.join([str(u) for u in exclude_reply_user_ids or []]),
        }
        if attachment_url:
            parameters['attachment_url'] = attachment_url
        if media:
            # Media types that must always go through the chunked upload
            # endpoint regardless of file size.
            chunked_types = ['video/mp4', 'video/quicktime', 'image/gif']
            media_ids = []
            # NOTE(review): 'long' is a Python 2 builtin; this assumes the
            # module aliases long = int on Python 3 — confirm at module level.
            if isinstance(media, (int, long)):
                media_ids.append(media)
            elif isinstance(media, list):
                for media_file in media:
                    # If you want to pass just a media ID, it should be an int
                    if isinstance(media_file, (int, long)):
                        media_ids.append(media_file)
                        continue
                    _, _, file_size, media_type = parse_media_file(media_file)
                    # Twitter allows at most one GIF or video per Tweet.
                    if (media_type == 'image/gif' or media_type == 'video/mp4') and len(media) > 1:
                        raise TwitterError(
                            'You cannot post more than 1 GIF or 1 video in a single status.')
                    # Large files and chunked-only types use the multi-request
                    # upload flow; everything else uploads in one request.
                    if file_size > self.chunk_size or media_type in chunked_types:
                        media_id = self.UploadMediaChunked(
                            media=media_file,
                            additional_owners=media_additional_owners,
                            media_category=media_category)
                    else:
                        media_id = self.UploadMediaSimple(
                            media=media_file,
                            additional_owners=media_additional_owners,
                            media_category=media_category)
                    media_ids.append(media_id)
            else:
                # Single media object (URL, path, or file-like object).
                _, _, file_size, media_type = parse_media_file(media)
                if file_size > self.chunk_size or media_type in chunked_types:
                    media_ids.append(self.UploadMediaChunked(media, media_additional_owners))
                else:
                    media_ids.append(self.UploadMediaSimple(media, media_additional_owners))
            parameters['media_ids'] = ','.join([str(mid) for mid in media_ids])
        # Coordinates are only meaningful as a pair; ignore a lone value.
        if latitude is not None and longitude is not None:
            parameters['lat'] = str(latitude)
            parameters['long'] = str(longitude)
        resp = self._RequestUrl(url, 'POST', data=parameters)
        data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
        return Status.NewFromJsonDict(data)
def UploadMediaSimple(self,
media,
additional_owners=None,
media_category=None):
""" Upload a media file to Twitter in one request. Used for small file
uploads that do not require chunked uploads.
Args:
media:
File-like object to upload.
additional_owners: additional Twitter users that are allowed to use
The uploaded media. Should be a list of integers. Maximum
number of additional owners is capped at 100 by Twitter.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
media_id:
ID of the uploaded media returned by the Twitter API or 0.
"""
url = '%s/media/upload.json' % self.upload_url
parameters = {}
media_fp, _, _, _ = parse_media_file(media)
parameters['media'] = media_fp.read()
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
parameters['additional_owners'] = additional_owners
if media_category:
parameters['media_category'] = media_category
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
return data['media_id']
except KeyError:
raise TwitterError({'message': 'Media could not be uploaded.'})
def PostMediaMetadata(self,
media_id,
alt_text=None):
"""Provide addtional data for uploaded media.
Args:
media_id:
ID of a previously uploaded media item.
alt_text:
Image Alternate Text.
"""
url = '%s/media/metadata/create.json' % self.upload_url
parameters = {}
parameters['media_id'] = media_id
if alt_text:
parameters['alt_text'] = {"text": alt_text}
resp = self._RequestUrl(url, 'POST', json=parameters)
return resp
def _UploadMediaChunkedInit(self,
media,
additional_owners=None,
media_category=None):
"""Start a chunked upload to Twitter.
Args:
media:
File-like object to upload.
additional_owners: additional Twitter users that are allowed to use
The uploaded media. Should be a list of integers. Maximum
number of additional owners is capped at 100 by Twitter.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
tuple: media_id (returned from Twitter), file-handler object (i.e., has .read()
method), filename media file.
"""
url = '%s/media/upload.json' % self.upload_url
media_fp, filename, file_size, media_type = parse_media_file(media)
if not all([media_fp, filename, file_size, media_type]):
raise TwitterError({'message': 'Could not process media file'})
parameters = {}
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
parameters['additional_owners'] = additional_owners
if media_category:
parameters['media_category'] = media_category
# INIT doesn't read in any data. It's purpose is to prepare Twitter to
# receive the content in APPEND requests.
parameters['command'] = 'INIT'
parameters['media_type'] = media_type
parameters['total_bytes'] = file_size
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
media_id = data['media_id']
except KeyError:
raise TwitterError({'message': 'Media could not be uploaded'})
return (media_id, media_fp, filename)
    def _UploadMediaChunkedAppend(self,
                                  media_id,
                                  media_fp,
                                  filename):
        """Appends (i.e., actually uploads) media file to Twitter.
        Args:
            media_id (int):
                ID of the media file received from Init method.
            media_fp (file):
                File-like object representing media file (must have .read() method)
            filename (str):
                Filename of the media file being uploaded.
        Returns:
            True if successful. Raises otherwise.
        """
        url = '%s/media/upload.json' % self.upload_url
        # The multipart body is built by hand, so the boundary must be unique
        # per call; uuid4 makes collisions with the payload vanishingly rare.
        boundary = "--{0}".format(uuid4().hex).encode('utf-8')
        media_id_bytes = str(media_id).encode('utf-8')
        # The Content-Type header carries the boundary WITHOUT its leading
        # '--' prefix, per the multipart/form-data format.
        headers = {'Content-Type': 'multipart/form-data; boundary={0}'.format(
            boundary.decode('utf8')[2:]
        )}
        segment_id = 0
        # One APPEND request per chunk of self.chunk_size bytes.
        while True:
            try:
                data = media_fp.read(self.chunk_size)
            # NOTE(review): a ValueError here (e.g. read on a closed file) is
            # treated as end-of-input rather than an error — confirm intended.
            except ValueError:
                break
            if not data:
                break
            # Multipart form fields: command, media_id, segment_index, media.
            body = [
                boundary,
                b'Content-Disposition: form-data; name="command"',
                b'',
                b'APPEND',
                boundary,
                b'Content-Disposition: form-data; name="media_id"',
                b'',
                media_id_bytes,
                boundary,
                b'Content-Disposition: form-data; name="segment_index"',
                b'',
                str(segment_id).encode('utf-8'),
                boundary,
                # NOTE(review): '{0!r}' embeds repr() quotes inside the
                # filename field (filename="'name'") — confirm intended.
                'Content-Disposition: form-data; name="media"; filename="{0!r}"'.format(filename).encode('utf8'),
                b'Content-Type: application/octet-stream',
                b'',
                data,
                boundary + b'--'
            ]
            body_data = b'\r\n'.join(body)
            headers['Content-Length'] = str(len(body_data))
            resp = self._RequestChunkedUpload(url=url,
                                              headers=headers,
                                              data=body_data)
            # The body of the response should be blank, but the normal decoding
            # raises a JSONDecodeError, so we should only do error checking
            # if the response is not blank.
            if resp.content.decode('utf-8'):
                return self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
            segment_id += 1
        # Best-effort close; some file-like objects may not support close().
        try:
            media_fp.close()
        except Exception as e:
            pass
        return True
def _UploadMediaChunkedFinalize(self, media_id):
"""Finalize chunked upload to Twitter.
Args:
media_id (int):
ID of the media file for which to finalize the upload.
Returns:
json: JSON string of data from Twitter.
"""
url = '%s/media/upload.json' % self.upload_url
parameters = {
'command': 'FINALIZE',
'media_id': media_id
}
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return data
def UploadMediaChunked(self,
media,
additional_owners=None,
media_category=None):
"""Upload a media file to Twitter in multiple requests.
Args:
media:
File-like object to upload.
additional_owners: additional Twitter users that are allowed to use
The uploaded media. Should be a list of integers. Maximum
number of additional owners is capped at 100 by Twitter.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
media_id:
ID of the uploaded media returned by the Twitter API. Raises if
unsuccesful.
"""
media_id, media_fp, filename = self._UploadMediaChunkedInit(media=media,
additional_owners=additional_owners,
media_category=media_category)
append = self._UploadMediaChunkedAppend(media_id=media_id,
media_fp=media_fp,
filename=filename)
if not append:
TwitterError('Media could not be uploaded.')
data = self._UploadMediaChunkedFinalize(media_id)
try:
return data['media_id']
except KeyError:
raise TwitterError('Media could not be uploaded.')
def PostMedia(self,
status,
media,
possibly_sensitive=None,
in_reply_to_status_id=None,
latitude=None,
longitude=None,
place_id=None,
display_coordinates=False):
"""Post a twitter status message from the user with a picture attached.
Args:
status:
the text of your update
media:
This can be the location of media(PNG, JPG, GIF) on the local file
system or at an HTTP URL, it can also be a file-like object
possibly_sensitive:
set true if content is "advanced." [Optional]
in_reply_to_status_id:
ID of a status that this is in reply to. [Optional]
lat:
latitude of location. [Optional]
long:
longitude of location. [Optional]
place_id:
A place in the world identified by a Twitter place ID. [Optional]
display_coordinates:
Set true if you want to display coordinates. [Optional]
Returns:
A twitter.Status instance representing the message posted.
"""
warnings.warn((
"This endpoint has been deprecated by Twitter. Please use "
"PostUpdate() instead. Details of Twitter's deprecation can be "
"found at: "
"dev.twitter.com/rest/reference/post/statuses/update_with_media"),
PythonTwitterDeprecationWarning330)
url = '%s/statuses/update_with_media.json' % self.base_url
if isinstance(status, str) or self._input_encoding is None:
u_status = status
else:
u_status = str(status, self._input_encoding)
data = {'status': u_status}
if not hasattr(media, 'read'):
if media.startswith('http'):
data['media'] = urlopen(media).read()
else:
with open(str(media), 'rb') as f:
data['media'] = f.read()
else:
data['media'] = media.read()
if possibly_sensitive:
data['possibly_sensitive'] = 'true'
if in_reply_to_status_id:
data['in_reply_to_status_id'] = str(in_reply_to_status_id)
if latitude is not None and longitude is not None:
data['lat'] = str(latitude)
data['long'] = str(longitude)
if place_id is not None:
data['place_id'] = str(place_id)
if display_coordinates:
data['display_coordinates'] = 'true'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def PostMultipleMedia(self, status, media, possibly_sensitive=None,
in_reply_to_status_id=None, latitude=None,
longitude=None, place_id=None,
display_coordinates=False):
"""
Post a twitter status message from the authenticated user with
multiple pictures attached.
Args:
status:
the text of your update
media:
location of multiple media elements(PNG, JPG, GIF)
possibly_sensitive:
set true is content is "advanced"
in_reply_to_status_id:
ID of a status that this is in reply to
lat:
location in latitude
long:
location in longitude
place_id:
A place in the world identified by a Twitter place ID
display_coordinates:
Returns:
A twitter.Status instance representing the message posted.
"""
warnings.warn((
"This method is deprecated. Please use PostUpdate instead, "
"passing a list of media that you would like to associate "
"with the update."), PythonTwitterDeprecationWarning330)
if type(media) is not list:
raise TwitterError("Must by multiple media elements")
if media.__len__() > 4:
raise TwitterError("Maximum of 4 media elements can be allocated to a tweet")
url = '%s/media/upload.json' % self.upload_url
if isinstance(status, str) or self._input_encoding is None:
u_status = status
else:
u_status = str(status, self._input_encoding)
media_ids = ''
for m in range(0, len(media)):
data = {}
if not hasattr(media[m], 'read'):
if media[m].startswith('http'):
data['media'] = urlopen(media[m]).read()
else:
data['media'] = open(str(media[m]), 'rb').read()
else:
data['media'] = media[m].read()
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
media_ids += str(data['media_id_string'])
if m is not len(media) - 1:
media_ids += ","
data = {'status': u_status, 'media_ids': media_ids}
url = '%s/statuses/update.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def _TweetTextWrap(self,
status,
char_lim=CHARACTER_LIMIT):
if not self._config:
self.GetHelpConfiguration()
tweets = []
line = []
line_length = 0
words = re.split(r'\s', status)
if len(words) == 1 and not is_url(words[0]):
if len(words[0]) > CHARACTER_LIMIT:
raise TwitterError({"message": "Unable to split status into tweetable parts. Word was: {0}/{1}".format(len(words[0]), char_lim)})
else:
tweets.append(words[0])
return tweets
for word in words:
if len(word) > char_lim:
raise TwitterError({"message": "Unable to split status into tweetable parts. Word was: {0}/{1}".format(len(word), char_lim)})
new_len = line_length
if is_url(word):
new_len = line_length + self._config['short_url_length_https'] + 1
else:
new_len += len(word) + 1
if new_len > CHARACTER_LIMIT:
tweets.append(' '.join(line))
line = [word]
line_length = new_len - line_length
else:
line.append(word)
line_length = new_len
tweets.append(' '.join(line))
return tweets
def PostUpdates(self,
status,
continuation=None,
**kwargs):
"""Post one or more twitter status messages from the authenticated user.
Unlike api.PostUpdate, this method will post multiple status updates
if the message is longer than CHARACTER_LIMIT characters.
Args:
status:
The message text to be posted.
May be longer than CHARACTER_LIMIT characters.
continuation:
The character string, if any, to be appended to all but the
last message. Note that Twitter strips trailing '...' strings
from messages. Consider using the unicode \u2026 character
(horizontal ellipsis) instead. [Defaults to None]
**kwargs:
See api.PostUpdate for a list of accepted parameters.
Returns:
A of list twitter.Status instance representing the messages posted.
"""
results = list()
if continuation is None:
continuation = ''
char_limit = CHARACTER_LIMIT - len(continuation)
tweets = self._TweetTextWrap(status=status, char_lim=char_limit)
if len(tweets) == 1:
results.append(self.PostUpdate(status=tweets[0], **kwargs))
return results
for tweet in tweets[0:-1]:
results.append(self.PostUpdate(status=tweet + continuation, **kwargs))
results.append(self.PostUpdate(status=tweets[-1], **kwargs))
return results
def PostRetweet(self, status_id, trim_user=False):
"""Retweet a tweet with the Retweet API.
Args:
status_id:
The numerical id of the tweet that will be retweeted
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A twitter.Status instance representing the original tweet with retweet details embedded.
"""
try:
if int(status_id) <= 0:
raise TwitterError({'message': "'status_id' must be a positive number"})
except ValueError:
raise TwitterError({'message': "'status_id' must be an integer"})
url = '%s/statuses/retweet/%s.json' % (self.base_url, status_id)
data = {'id': status_id}
if trim_user:
data['trim_user'] = 'true'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetUserRetweets(self,
count=None,
since_id=None,
max_id=None,
trim_user=False):
"""Fetch the sequence of retweets made by the authenticated user.
Args:
count:
The number of status messages to retrieve. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each message up to count
"""
return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user,
exclude_replies=True, include_rts=True)
def GetReplies(self,
since_id=None,
count=None,
max_id=None,
trim_user=False):
"""Get a sequence of status messages representing the 20 most
recent replies (status updates prefixed with @twitterID) to the
authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each reply to the user.
"""
return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user,
exclude_replies=False, include_rts=False)
def GetRetweets(self,
statusid,
count=None,
trim_user=False):
"""Returns up to 100 of the first retweets of the tweet identified
by statusid
Args:
statusid (int):
The ID of the tweet for which retweets should be searched for
count (int, optional):
The number of status messages to retrieve.
trim_user (bool, optional):
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
Returns:
A list of twitter.Status instances, which are retweets of statusid
"""
url = '%s/statuses/retweets/%s.json' % (self.base_url, statusid)
parameters = {
'trim_user': enf_type('trim_user', bool, trim_user),
}
if count:
parameters['count'] = enf_type('count', int, count)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(s) for s in data]
    def GetRetweeters(self,
                      status_id,
                      cursor=None,
                      count=100,
                      stringify_ids=False):
        """Returns a collection of up to 100 user IDs belonging to users who have
        retweeted the tweet specified by the status_id parameter.
        Args:
            status_id:
                the tweet's numerical ID
            cursor:
                breaks the ids into pages of no more than 100.
            count:
                number of IDs to fetch per page.
                NOTE(review): this argument is never sent to the API — confirm
                whether it should be added to ``parameters``.
            stringify_ids:
                returns the IDs as unicode strings. [Optional]
        Returns:
            A list of user IDs
        """
        url = '%s/statuses/retweeters/ids.json' % (self.base_url)
        parameters = {
            'id': enf_type('id', int, status_id),
            'stringify_ids': enf_type('stringify_ids', bool, stringify_ids)
        }
        result = []
        # NOTE(review): total_count starts at 0 and is only ever decremented,
        # so the `total_count < 1` break below fires after the first page and
        # pagination beyond it never happens — confirm intended behavior.
        total_count = 0
        while True:
            if cursor:
                try:
                    parameters['cursor'] = int(cursor)
                except ValueError:
                    raise TwitterError({'message': "cursor must be an integer"})
            resp = self._RequestUrl(url, 'GET', data=parameters)
            data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
            result += [x for x in data['ids']]
            if 'next_cursor' in data:
                # Stop when the cursor stops advancing (end of results).
                if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                    break
                else:
                    cursor = data['next_cursor']
                    total_count -= len(data['ids'])
                    if total_count < 1:
                        break
            else:
                break
        return result
def GetRetweetsOfMe(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
include_entities=True,
include_user_entities=True):
"""Returns up to 100 of the most recent tweets of the user that have been
retweeted by others.
Args:
count:
The number of retweets to retrieve, up to 100.
Defaults to 20. [Optional]
since_id:
Returns results with an ID greater than
(newer than) this ID. [Optional]
max_id:
Returns results with an ID less than or equal
to this ID. [Optional]
trim_user:
When True, the user object for each tweet will
only be an ID. [Optional]
include_entities:
When True, the tweet entities will be included. [Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
"""
url = '%s/statuses/retweets_of_me.json' % self.base_url
parameters = {}
if count is not None:
try:
if int(count) > 100:
raise TwitterError({'message': "'count' may not be greater than 100"})
except ValueError:
raise TwitterError({'message': "'count' must be an integer"})
if count:
parameters['count'] = count
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if trim_user:
parameters['trim_user'] = trim_user
if not include_entities:
parameters['include_entities'] = include_entities
if not include_user_entities:
parameters['include_user_entities'] = include_user_entities
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(s) for s in data]
def _GetBlocksMutesPaged(self,
endpoint,
action,
cursor=-1,
skip_status=False,
include_entities=False,
stringify_ids=False):
""" Fetch a page of the users (as twitter.User instances)
blocked or muted by the currently authenticated user.
Args:
endpoint (str):
Either "mute" or "block".
action (str):
Either 'list' or 'ids' depending if you want to return fully-hydrated
twitter.User objects or a list of user IDs as ints.
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of users that you want to return.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
next_cursor, previous_cursor, list of twitter.User instances,
one for each user.
"""
urls = {
'mute': {
'list': '%s/mutes/users/list.json' % self.base_url,
'ids': '%s/mutes/users/ids.json' % self.base_url
},
'block': {
'list': '%s/blocks/list.json' % self.base_url,
'ids': '%s/blocks/ids.json' % self.base_url
}
}
url = urls[endpoint][action]
result = []
parameters = {}
if skip_status:
parameters['skip_status'] = True
if include_entities:
parameters['include_entities'] = True
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if action == 'ids':
result += data.get('ids')
else:
result += [User.NewFromJsonDict(x) for x in data['users']]
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, result
def GetBlocks(self,
skip_status=False,
include_entities=False):
""" Fetch the sequence of all users (as twitter.User instances),
blocked by the currently authenticated user.
Args:
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
A list of twitter.User instances, one for each blocked user.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, users = self.GetBlocksPaged(
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
result += users
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetBlocksPaged(self,
cursor=-1,
skip_status=False,
include_entities=False):
""" Fetch a page of the users (as twitter.User instances)
blocked by the currently authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of blocked users that you want to return.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
next_cursor, previous_cursor, list of twitter.User instances,
one for each blocked user.
"""
return self._GetBlocksMutesPaged(endpoint='block',
action='list',
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
def GetBlocksIDs(self,
stringify_ids=False):
"""Fetch the sequence of all user IDs blocked by the
currently authenticated user.
Args:
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
A list of user IDs for all blocked users.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, user_ids = self.GetBlocksIDsPaged(
cursor=cursor,
stringify_ids=stringify_ids)
result += user_ids
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetBlocksIDsPaged(self,
cursor=-1,
stringify_ids=False):
""" Fetch a page of the user IDs blocked by the currently
authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of blocked users that you want to return.
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
next_cursor, previous_cursor, list of user IDs of blocked users.
"""
return self._GetBlocksMutesPaged(endpoint='block',
action='ids',
cursor=cursor,
stringify_ids=False)
def GetMutes(self,
skip_status=False,
include_entities=False):
""" Fetch the sequence of all users (as twitter.User instances),
muted by the currently authenticated user.
Args:
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
A list of twitter.User instances, one for each muted user.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, users = self.GetMutesPaged(
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
result += users
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetMutesPaged(self,
cursor=-1,
skip_status=False,
include_entities=False):
""" Fetch a page of the users (as twitter.User instances)
muted by the currently authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of muted users that you want to return.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
next_cursor, previous_cursor, list of twitter.User instances,
one for each muted user.
"""
return self._GetBlocksMutesPaged(endpoint='mute',
action='list',
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
def GetMutesIDs(self,
stringify_ids=False):
"""Fetch the sequence of all user IDs muted by the
currently authenticated user.
Args:
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
A list of user IDs for all muted users.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, user_ids = self.GetMutesIDsPaged(
cursor=cursor,
stringify_ids=stringify_ids)
result += user_ids
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetMutesIDsPaged(self,
cursor=-1,
stringify_ids=False):
""" Fetch a page of the user IDs muted by the currently
authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of muted users that you want to return.
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
next_cursor, previous_cursor, list of user IDs of muted users.
"""
return self._GetBlocksMutesPaged(endpoint='mute',
action='ids',
cursor=cursor,
stringify_ids=stringify_ids)
def _BlockMute(self,
action,
endpoint,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Create or destroy a block or mute on behalf of the authenticated user.
Args:
action (str):
Either 'create' or 'destroy'.
endpoint (str):
Either 'block' or 'mute'.
user_id (int, optional)
The numerical ID of the user to block/mute.
screen_name (str, optional):
The screen name of the user to block/mute.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to False, the blocked User's statuses will not be included
with the returned User object.
Returns:
twitter.User: twitter.User object representing the blocked/muted user.
"""
urls = {
'block': {
'create': '%s/blocks/create.json' % (self.base_url),
'destroy': '%s/blocks/destroy.json' % (self.base_url),
},
'mute': {
'create': '%s/mutes/users/create.json' % (self.base_url),
'destroy': '%s/mutes/users/destroy.json' % (self.base_url)
}
}
url = urls[endpoint][action]
post_data = {}
if user_id:
post_data['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
post_data['screen_name'] = screen_name
else:
raise TwitterError("You must specify either a user_id or screen_name")
if include_entities:
post_data['include_entities'] = enf_type('include_entities', bool, include_entities)
if skip_status:
post_data['skip_status'] = enf_type('skip_status', bool, skip_status)
resp = self._RequestUrl(url, 'POST', data=post_data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def CreateBlock(self,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Blocks the user specified by either user_id or screen_name.
Args:
user_id (int, optional)
The numerical ID of the user to block.
screen_name (str, optional):
The screen name of the user to block.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to False, the blocked User's statuses will not be included
with the returned User object.
Returns:
A twitter.User instance representing the blocked user.
"""
return self._BlockMute(action='create',
endpoint='block',
user_id=user_id,
screen_name=screen_name,
include_entities=include_entities,
skip_status=skip_status)
    def DestroyBlock(self,
                     user_id=None,
                     screen_name=None,
                     include_entities=True,
                     skip_status=False):
        """Unblocks the user specified by either user_id or screen_name.

        Args:
            user_id (int, optional):
                The numerical ID of the user to unblock.
            screen_name (str, optional):
                The screen name of the user to unblock.
            include_entities (bool, optional):
                The entities node will not be included if set to False.
            skip_status (bool, optional):
                When set to False, the unblocked User's statuses will not be
                included with the returned User object.

        Returns:
            A twitter.User instance representing the unblocked user.
        """
        return self._BlockMute(action='destroy',
                               endpoint='block',
                               user_id=user_id,
                               screen_name=screen_name,
                               include_entities=include_entities,
                               skip_status=skip_status)
def CreateMute(self,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Mutes the user specified by either user_id or screen_name.
Args:
user_id (int, optional)
The numerical ID of the user to mute.
screen_name (str, optional):
The screen name of the user to mute.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to False, the muted User's statuses will not be included
with the returned User object.
Returns:
A twitter.User instance representing the muted user.
"""
return self._BlockMute(action='create',
endpoint='mute',
user_id=user_id,
screen_name=screen_name,
include_entities=include_entities,
skip_status=skip_status)
    def DestroyMute(self,
                    user_id=None,
                    screen_name=None,
                    include_entities=True,
                    skip_status=False):
        """Unmutes the user specified by either user_id or screen_name.

        Args:
            user_id (int, optional):
                The numerical ID of the user to unmute.
            screen_name (str, optional):
                The screen name of the user to unmute.
            include_entities (bool, optional):
                The entities node will not be included if set to False.
            skip_status (bool, optional):
                When set to False, the unmuted User's statuses will not be
                included with the returned User object.

        Returns:
            A twitter.User instance representing the unmuted user.
        """
        return self._BlockMute(action='destroy',
                               endpoint='mute',
                               user_id=user_id,
                               screen_name=screen_name,
                               include_entities=include_entities,
                               skip_status=skip_status)
def _GetIDsPaged(self,
url,
user_id,
screen_name,
cursor,
stringify_ids,
count):
"""
This is the lowest level paging logic for fetching IDs. It is used
solely by GetFollowerIDsPaged and GetFriendIDsPaged. It is not intended
for other use.
See GetFollowerIDsPaged or GetFriendIDsPaged for an explanation of the
input arguments.
"""
result = []
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if count is not None:
parameters['count'] = count
parameters['stringify_ids'] = stringify_ids
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if 'ids' in data:
result.extend([x for x in data['ids']])
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, result
def GetFollowerIDsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
stringify_ids=False,
count=5000):
"""Make a cursor driven call to return a list of one page followers.
The caller is responsible for handling the cursor value and looping
to gather all of the data
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user id's to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of user ids,
one for each follower
"""
url = '%s/followers/ids.json' % self.base_url
return self._GetIDsPaged(url=url,
user_id=user_id,
screen_name=screen_name,
cursor=cursor,
stringify_ids=stringify_ids,
count=count)
def GetFriendIDsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
stringify_ids=False,
count=5000):
"""Make a cursor driven call to return the list of all friends
The caller is responsible for handling the cursor value and looping
to gather all of the data
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user id's to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User instances,
one for each friend
"""
url = '%s/friends/ids.json' % self.base_url
return self._GetIDsPaged(url,
user_id,
screen_name,
cursor,
stringify_ids,
count)
def _GetFriendFollowerIDs(self,
url=None,
user_id=None,
screen_name=None,
cursor=None,
count=None,
stringify_ids=False,
total_count=None):
""" Common method for GetFriendIDs and GetFollowerIDs """
count = 5000
cursor = -1
result = []
if total_count:
total_count = enf_type('total_count', int, total_count)
if total_count and total_count < count:
count = total_count
while True:
if total_count is not None and len(result) + count > total_count:
break
next_cursor, previous_cursor, data = self._GetIDsPaged(
url=url,
user_id=user_id,
screen_name=screen_name,
cursor=cursor,
stringify_ids=stringify_ids,
count=count)
result.extend([x for x in data])
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetFollowerIDs(self,
user_id=None,
screen_name=None,
cursor=None,
stringify_ids=False,
count=None,
total_count=None):
"""Returns a list of twitter user id's for every person
that is following the specified user.
Args:
user_id:
The id of the user to retrieve the id list for. [Optional]
screen_name:
The screen_name of the user to retrieve the id list for. [Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits. [Optional]
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user id's to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many
followers and you don't want to get rate limited. The data returned
might contain more UIDs if total_count is not a multiple of count
(5000 by default). [Optional]
Returns:
A list of integers, one for each user id.
"""
url = '%s/followers/ids.json' % self.base_url
return self._GetFriendFollowerIDs(url=url,
user_id=user_id,
screen_name=screen_name,
cursor=cursor,
stringify_ids=stringify_ids,
count=count,
total_count=total_count)
def GetFriendIDs(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
stringify_ids=False,
total_count=None):
""" Fetch a sequence of user ids, one for each friend.
Returns a list of all the given user's friends' IDs. If no user_id or
screen_name is given, the friends will be those of the authenticated
user.
Args:
user_id:
The id of the user to retrieve the id list for. [Optional]
screen_name:
The screen_name of the user to retrieve the id list for. [Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits. [Optional]
stringify_ids:
if True then twitter will return the ids as strings instead of integers.
[Optional]
count:
The number of user id's to retrieve per API request. Please be aware that
this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many followers
and you don't want to get rate limited. The data returned might contain more
UIDs if total_count is not a multiple of count (5000 by default). [Optional]
Returns:
A list of integers, one for each user id.
"""
url = '%s/friends/ids.json' % self.base_url
return self._GetFriendFollowerIDs(url,
user_id,
screen_name,
cursor,
count,
stringify_ids,
total_count)
def _GetFriendsFollowersPaged(self,
url=None,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of 1 page of friends
or followers.
Args:
url:
Endpoint from which to get data. Either
base_url+'/followers/list.json' or base_url+'/friends/list.json'.
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each follower
"""
if user_id and screen_name:
warnings.warn(
"If both user_id and screen_name are specified, Twitter will "
"return the followers of the user specified by screen_name, "
"however this behavior is undocumented by Twitter and might "
"change without warning.", stacklevel=2)
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
parameters['skip_status'] = skip_status
parameters['include_user_entities'] = include_user_entities
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if 'users' in data:
users = [User.NewFromJsonDict(user) for user in data['users']]
else:
users = []
if 'next_cursor' in data:
next_cursor = data['next_cursor']
else:
next_cursor = 0
if 'previous_cursor' in data:
previous_cursor = data['previous_cursor']
else:
previous_cursor = 0
return next_cursor, previous_cursor, users
def GetFollowersPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of all followers
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each follower
"""
url = '%s/followers/list.json' % self.base_url
return self._GetFriendsFollowersPaged(url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
def GetFriendsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of all friends.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a current maximum of
200. Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each follower
"""
url = '%s/friends/list.json' % self.base_url
return self._GetFriendsFollowersPaged(url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
    def _GetFriendsFollowers(self,
                             url=None,
                             user_id=None,
                             screen_name=None,
                             cursor=None,
                             count=None,
                             total_count=None,
                             skip_status=False,
                             include_user_entities=True):
        """ Fetch the sequence of twitter.User instances, one for each friend
        or follower.

        Args:
            url:
                URL to get. Either base_url + ('/followers/list.json' or
                '/friends/list.json').
            user_id:
                The twitter id of the user whose friends you are fetching.
                If not specified, defaults to the authenticated user. [Optional]
            screen_name:
                The twitter name of the user whose friends you are fetching.
                If not specified, defaults to the authenticated user. [Optional]
            cursor:
                Deprecated as of python-twitter 3.0 and overridden internally;
                passing any non-None value only triggers a warning. [Optional]
            count:
                Deprecated as of python-twitter 3.0 and overridden internally;
                passing any non-None value only triggers a warning. [Optional]
            total_count:
                The upper bound of number of users to return, defaults to None.
            skip_status:
                If True the statuses will not be returned in the user items.
                [Optional]
            include_user_entities:
                When True, the user entities will be included. [Optional]

        Returns:
            A sequence of twitter.User instances, one for each friend or follower
        """
        # Warn on deprecated arguments, then unconditionally override them:
        # paging always starts at -1 with the API-maximum page size of 200.
        if cursor is not None or count is not None:
            warnings.warn(
                "Use of 'cursor' and 'count' parameters are deprecated as of "
                "python-twitter 3.0. Please use GetFriendsPaged or "
                "GetFollowersPaged instead.",
                PythonTwitterDeprecationWarning330)
        count = 200
        cursor = -1
        result = []
        if total_count:
            try:
                total_count = int(total_count)
            except ValueError:
                raise TwitterError({'message': "total_count must be an integer"})
            # A bound within one page shrinks the per-request count.
            if total_count <= 200:
                count = total_count
        while True:
            # Stop before a request that would exceed total_count.
            if total_count is not None and len(result) + count > total_count:
                break
            next_cursor, previous_cursor, data = self._GetFriendsFollowersPaged(
                url,
                user_id,
                screen_name,
                cursor,
                count,
                skip_status,
                include_user_entities)
            # Advance the cursor before testing for termination; a zero or
            # repeated cursor marks the final page.
            if next_cursor:
                cursor = next_cursor
            result.extend(data)
            if next_cursor == 0 or next_cursor == previous_cursor:
                break
        return result
def GetFollowers(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
"""Fetch the sequence of twitter.User instances, one for each follower.
If both user_id and screen_name are specified, this call will return
the followers of the user specified by screen_name, however this
behavior is undocumented by Twitter and may change without warning.
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items. [Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each follower
"""
url = '%s/followers/list.json' % self.base_url
return self._GetFriendsFollowers(url,
user_id,
screen_name,
cursor,
count,
total_count,
skip_status,
include_user_entities)
def GetFriends(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
"""Fetch the sequence of twitter.User instances, one for each friend.
If both user_id and screen_name are specified, this call will return
the followers of the user specified by screen_name, however this
behavior is undocumented by Twitter and may change without warning.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each friend
"""
url = '%s/friends/list.json' % self.base_url
return self._GetFriendsFollowers(url,
user_id,
screen_name,
cursor,
count,
total_count,
skip_status,
include_user_entities)
def UsersLookup(self,
user_id=None,
screen_name=None,
users=None,
include_entities=True,
return_json=False):
"""Fetch extended information for the specified users.
Users may be specified either as lists of either user_ids,
screen_names, or twitter.User objects. The list of users that
are queried is the union of all specified parameters.
Args:
user_id (int, list, optional):
A list of user_ids to retrieve extended information.
screen_name (str, optional):
A list of screen_names to retrieve extended information.
users (list, optional):
A list of twitter.User objects to retrieve extended information.
include_entities (bool, optional):
The entities node that may appear within embedded statuses will be
excluded when set to False.
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.User
Returns:
A list of twitter.User objects for the requested users
"""
if not any([user_id, screen_name, users]):
raise TwitterError("Specify at least one of user_id, screen_name, or users.")
url = '%s/users/lookup.json' % self.base_url
parameters = {
'include_entities': include_entities
}
uids = list()
if user_id:
uids.extend(user_id)
if users:
uids.extend([u.id for u in users])
if len(uids):
parameters['user_id'] = ','.join([str(u) for u in uids])
if screen_name:
parameters['screen_name'] = ','.join(screen_name)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [User.NewFromJsonDict(u) for u in data]
def GetUser(self,
user_id=None,
screen_name=None,
include_entities=True,
return_json=False):
"""Returns a single user.
Args:
user_id (int, optional):
The id of the user to retrieve.
screen_name (str, optional):
The screen name of the user for whom to return results for.
Either a user_id or screen_name is required for this method.
include_entities (bool, optional):
The entities node will be omitted when set to False.
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.User
Returns:
A twitter.User instance representing that user
"""
url = '%s/users/show.json' % (self.base_url)
parameters = {
'include_entities': include_entities
}
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return User.NewFromJsonDict(data)
def GetDirectMessages(self,
since_id=None,
max_id=None,
count=None,
include_entities=True,
skip_status=False,
full_text=False,
page=None,
return_json=False):
"""Returns a list of the direct messages sent to the authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
include_entities:
The entities node will be omitted when set to False.
[Optional]
skip_status:
When set to True statuses will not be included in the returned user
objects. [Optional]
full_text:
When set to True full message will be included in the returned message
object if message length is bigger than CHARACTER_LIMIT characters. [Optional]
page:
If you want more than 200 messages, you can use this and get 20 messages
each time. You must recall it and increment the page value until it
return nothing. You can't use count option with it. First value is 1 and
not 0.
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.User
Returns:
A sequence of twitter.DirectMessage instances
"""
url = '%s/direct_messages.json' % self.base_url
parameters = {}
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
if not include_entities:
parameters['include_entities'] = 'false'
if skip_status:
parameters['skip_status'] = 1
if full_text:
parameters['full_text'] = 'true'
if page:
parameters['page'] = page
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [DirectMessage.NewFromJsonDict(x) for x in data]
def GetSentDirectMessages(self,
since_id=None,
max_id=None,
count=None,
page=None,
include_entities=True,
return_json=False):
"""Returns a list of the direct messages sent by the authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occured since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
include_entities:
The entities node will be omitted when set to False.
[Optional]
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.User
Returns:
A sequence of twitter.DirectMessage instances
"""
url = '%s/direct_messages/sent.json' % self.base_url
parameters = {}
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
if max_id:
parameters['max_id'] = max_id
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
if not include_entities:
parameters['include_entities'] = 'false'
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [DirectMessage.NewFromJsonDict(x) for x in data]
def PostDirectMessage(self,
                      text,
                      user_id=None,
                      screen_name=None,
                      return_json=False):
    """Send a direct message from the authenticated user.

    Args:
        text: The message text to be posted.
        user_id: ID of the recipient. [Optional]
        screen_name: Screen name of the recipient. [Optional]
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        A twitter.DirectMessage instance representing the message posted
    """
    url = '%s/direct_messages/new.json' % self.base_url
    # Exactly one recipient identifier is required.
    if user_id:
        payload = {'text': text, 'user_id': user_id}
    elif screen_name:
        payload = {'text': text, 'screen_name': screen_name}
    else:
        raise TwitterError({'message': "Specify at least one of user_id or screen_name."})

    resp = self._RequestUrl(url, 'POST', data=payload)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return parsed if return_json else DirectMessage.NewFromJsonDict(parsed)
def DestroyDirectMessage(self, message_id, include_entities=True, return_json=False):
    """Delete the direct message with the given ID.

    The twitter.Api instance must be authenticated, and the
    authenticating user must be the recipient of the specified direct
    message.

    Args:
        message_id: The id of the direct message to be destroyed.
        include_entities: When False, omit the entities node.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        A twitter.DirectMessage instance representing the message destroyed
    """
    url = '%s/direct_messages/destroy.json' % self.base_url
    payload = {
        'id': enf_type('message_id', int, message_id),
        'include_entities': enf_type('include_entities', bool, include_entities),
    }

    resp = self._RequestUrl(url, 'POST', data=payload)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return parsed if return_json else DirectMessage.NewFromJsonDict(parsed)
def CreateFriendship(self, user_id=None, screen_name=None, follow=True):
    """Follow the user identified by user_id or screen_name.

    Args:
        user_id: A user_id to follow. [Optional]
        screen_name: A screen_name to follow. [Optional]
        follow: Set to False to disable notifications for the target user.

    Returns:
        A twitter.User instance representing the befriended user.
    """
    # Thin wrapper over the shared create/update implementation.
    return self._AddOrEditFriendship(user_id=user_id,
                                     screen_name=screen_name,
                                     follow=follow)
def _AddOrEditFriendship(self, user_id=None, screen_name=None, uri_end='create', follow_key='follow', follow=True):
    """Shared implementation behind CreateFriendship and UpdateFriendship.

    Posts to /friendships/<uri_end>.json with the follow flag serialized
    as JSON under the endpoint-specific key (``follow`` or ``device``).
    """
    url = '%s/friendships/%s.json' % (self.base_url, uri_end)
    if user_id:
        payload = {'user_id': user_id}
    elif screen_name:
        payload = {'screen_name': screen_name}
    else:
        raise TwitterError({'message': "Specify at least one of user_id or screen_name."})
    # Twitter expects a JSON boolean literal ("true"/"false") here.
    payload[follow_key] = json.dumps(follow)

    resp = self._RequestUrl(url, 'POST', data=payload)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return User.NewFromJsonDict(parsed)
def UpdateFriendship(self, user_id=None, screen_name=None, follow=True, **kwargs):  # api compat with Create
    """Update the friendship with the user given by user_id or screen_name.

    Args:
        user_id: A user_id to update. [Optional]
        screen_name: A screen_name to update. [Optional]
        follow: Set to False to disable notifications for the target user.
        device: Legacy alias for ``follow``; overrides it when supplied.

    Returns:
        A twitter.User instance representing the befriended user.
    """
    # Honor the historical 'device' keyword if the caller passed it.
    return self._AddOrEditFriendship(user_id=user_id,
                                     screen_name=screen_name,
                                     follow=kwargs.get('device', follow),
                                     follow_key='device',
                                     uri_end='update')
def DestroyFriendship(self, user_id=None, screen_name=None):
    """Unfollow the user given by user_id or screen_name.

    Args:
        user_id: A user_id to unfollow. [Optional]
        screen_name: A screen_name to unfollow. [Optional]

    Returns:
        A twitter.User instance representing the discontinued friend.
    """
    url = '%s/friendships/destroy.json' % self.base_url
    if user_id:
        payload = {'user_id': user_id}
    elif screen_name:
        payload = {'screen_name': screen_name}
    else:
        raise TwitterError({'message': "Specify at least one of user_id or screen_name."})

    resp = self._RequestUrl(url, 'POST', data=payload)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return User.NewFromJsonDict(parsed)
def ShowFriendship(self,
                   source_user_id=None,
                   source_screen_name=None,
                   target_user_id=None,
                   target_screen_name=None):
    """Return information on the relationship between two users.

    Args:
        source_user_id: The user_id of the subject user. [Optional]
        source_screen_name: The screen_name of the subject user. [Optional]
        target_user_id: The user_id of the target user. [Optional]
        target_screen_name: The screen_name of the target user. [Optional]

    Returns:
        A Twitter Json structure.
    """
    url = '%s/friendships/show.json' % self.base_url
    data = {}
    # Each side of the relationship needs either an id or a screen name.
    sides = (
        ('source', source_user_id, source_screen_name),
        ('target', target_user_id, target_screen_name),
    )
    for label, uid, sname in sides:
        if uid:
            data['%s_id' % label] = uid
        elif sname:
            data['%s_screen_name' % label] = sname
        else:
            raise TwitterError({'message': "Specify at least one of %s_user_id or %s_screen_name." % (label, label)})

    resp = self._RequestUrl(url, 'GET', data=data)
    return self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
def LookupFriendship(self,
                     user_id=None,
                     screen_name=None,
                     return_json=False):
    """Lookup friendship status for user to authed user.

    Users may be specified either as lists of either user_ids,
    screen_names, or twitter.User objects. The list of users that
    are queried is the union of all specified parameters.

    Up to 100 users may be specified.

    Args:
        user_id (int, User, or list of ints or Users, optional):
            A list of user_ids to retrieve extended information.
        screen_name (string, User, or list of strings or Users, optional):
            A list of screen_names to retrieve extended information.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        list: A list of twitter.UserStatus instance representing the
        friendship status between the specified users and the authenticated
        user.

    Raises:
        TwitterError: If neither user_id nor screen_name is given.
    """
    url = '%s/friendships/lookup.json' % (self.base_url)
    parameters = {}

    # Fail fast, and raise with a dict payload for consistency with
    # every other TwitterError raised by this class (the original
    # raised a bare string here, and only after parameter processing).
    if not user_id and not screen_name:
        raise TwitterError(
            {'message': "Specify at least one of user_id or screen_name."})

    if user_id:
        if isinstance(user_id, (list, tuple)):
            uids = []
            for user in user_id:
                if isinstance(user, User):
                    uids.append(user.id)
                else:
                    uids.append(enf_type('user_id', int, user))
            parameters['user_id'] = ",".join(str(uid) for uid in uids)
        elif isinstance(user_id, User):
            parameters['user_id'] = user_id.id
        else:
            parameters['user_id'] = enf_type('user_id', int, user_id)

    if screen_name:
        if isinstance(screen_name, (list, tuple)):
            sn_list = []
            for user in screen_name:
                if isinstance(user, User):
                    sn_list.append(user.screen_name)
                else:
                    sn_list.append(enf_type('screen_name', str, user))
            parameters['screen_name'] = ','.join(sn_list)
        elif isinstance(screen_name, User):
            parameters['screen_name'] = screen_name.screen_name
        else:
            parameters['screen_name'] = enf_type('screen_name', str, screen_name)

    resp = self._RequestUrl(url, 'GET', data=parameters)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    if return_json:
        return data
    else:
        return [UserStatus.NewFromJsonDict(x) for x in data]
def IncomingFriendship(self,
                       cursor=None,
                       stringify_ids=None):
    """Returns a collection of user IDs belonging to users who have
    pending request to follow the authenticated user.

    Args:
        cursor:
            breaks the ids into pages of no more than 5000.
        stringify_ids:
            returns the IDs as unicode strings. [Optional]

    Returns:
        A list of user IDs

    Raises:
        TwitterError: If cursor is not coercible to an integer.
    """
    url = '%s/friendships/incoming.json' % (self.base_url)
    parameters = {}
    if stringify_ids:
        parameters['stringify_ids'] = 'true'
    result = []

    while True:
        if cursor:
            try:
                # Bug fix: the cursor was previously sent as the
                # 'count' parameter, so Twitter never received it and
                # paging could not advance.
                parameters['cursor'] = int(cursor)
            except ValueError:
                raise TwitterError({'message': "cursor must be an integer"})
        resp = self._RequestUrl(url, 'GET', data=parameters)
        data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
        result += [x for x in data['ids']]
        # Bug fix: the old total_count bookkeeping started at zero and
        # went negative after the first page, aborting pagination
        # unconditionally; follow next_cursor until it is exhausted.
        if 'next_cursor' in data:
            if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                break
            cursor = data['next_cursor']
        else:
            break

    return result
def OutgoingFriendship(self,
                       cursor=None,
                       stringify_ids=None):
    """Returns a collection of user IDs for every protected user
    for whom the authenticated user has a pending follow request.

    Args:
        cursor:
            breaks the ids into pages of no more than 5000.
        stringify_ids:
            returns the IDs as unicode strings. [Optional]

    Returns:
        A list of user IDs

    Raises:
        TwitterError: If cursor is not coercible to an integer.
    """
    url = '%s/friendships/outgoing.json' % (self.base_url)
    parameters = {}
    if stringify_ids:
        parameters['stringify_ids'] = 'true'
    result = []

    while True:
        if cursor:
            try:
                # Bug fix: the cursor was previously sent as the
                # 'count' parameter, so Twitter never received it and
                # paging could not advance.
                parameters['cursor'] = int(cursor)
            except ValueError:
                raise TwitterError({'message': "cursor must be an integer"})
        resp = self._RequestUrl(url, 'GET', data=parameters)
        data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
        result += [x for x in data['ids']]
        # Bug fix: the old total_count bookkeeping started at zero and
        # went negative after the first page, aborting pagination
        # unconditionally; follow next_cursor until it is exhausted.
        if 'next_cursor' in data:
            if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                break
            cursor = data['next_cursor']
        else:
            break

    return result
def CreateFavorite(self,
                   status=None,
                   status_id=None,
                   include_entities=True):
    """Mark the given status as a favorite of the authenticating user.

    Args:
        status_id (int, optional):
            The id of the twitter status to mark as a favorite.
        status (twitter.Status, optional):
            The twitter.Status object to mark as a favorite.
        include_entities (bool, optional):
            The entities node will be omitted when set to False.

    Returns:
        A twitter.Status instance representing the newly-marked favorite.
    """
    url = '%s/favorites/create.json' % self.base_url
    # An explicit id takes precedence over a Status object.
    if status_id:
        target_id = status_id
    elif status:
        target_id = status.id
    else:
        raise TwitterError({'message': "Specify status_id or status"})

    payload = {
        'id': target_id,
        'include_entities': enf_type('include_entities', bool, include_entities),
    }
    resp = self._RequestUrl(url, 'POST', data=payload)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return Status.NewFromJsonDict(parsed)
def DestroyFavorite(self,
                    status=None,
                    status_id=None,
                    include_entities=True):
    """Remove the given status from the authenticating user's favorites.

    Args:
        status_id (int, optional):
            The id of the twitter status to un-favorite.
        status (twitter.Status, optional):
            The twitter.Status object to un-favorite.
        include_entities (bool, optional):
            The entities node will be omitted when set to False.

    Returns:
        A twitter.Status instance representing the newly-unmarked favorite.
    """
    url = '%s/favorites/destroy.json' % self.base_url
    # An explicit id takes precedence over a Status object.
    if status_id:
        target_id = status_id
    elif status:
        target_id = status.id
    else:
        raise TwitterError({'message': "Specify status_id or status"})

    payload = {
        'id': target_id,
        'include_entities': enf_type('include_entities', bool, include_entities),
    }
    resp = self._RequestUrl(url, 'POST', data=payload)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return Status.NewFromJsonDict(parsed)
def GetFavorites(self,
                 user_id=None,
                 screen_name=None,
                 count=None,
                 since_id=None,
                 max_id=None,
                 include_entities=True,
                 return_json=False):
    """Return the favorited tweets of a user (up to 200 most recent).

    If neither user_id nor screen_name is given, the authenticated
    user's favorites are returned.

    Args:
        user_id (int, optional): ID of the user whose favorites to fetch.
        screen_name (str, optional): Screen name of the user whose
            favorites to fetch.
        since_id (int, optional): Only return results newer than this ID.
        max_id (int, optional): Only return results at or below this ID.
        count (int, optional): Number of statuses to retrieve; 200 max.
        include_entities (bool, optional): When False, omit the
            entities node.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        A sequence of Status instances, one for each favorited tweet up to count
    """
    url = '%s/favorites/list.json' % self.base_url
    parameters = {}
    if user_id:
        parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name:
        parameters['screen_name'] = screen_name
    # Coerce every numeric paging parameter that was supplied.
    for key, value in (('since_id', since_id),
                       ('max_id', max_id),
                       ('count', count)):
        if value:
            parameters[key] = enf_type(key, int, value)
    parameters['include_entities'] = enf_type('include_entities', bool, include_entities)

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    if return_json:
        return parsed
    return [Status.NewFromJsonDict(item) for item in parsed]
def GetMentions(self,
                count=None,
                since_id=None,
                max_id=None,
                trim_user=False,
                contributor_details=False,
                include_entities=True,
                return_json=False):
    """Return the 20 most recent mentions (statuses containing
    @screen_name) for the authenticating user.

    Args:
        count: Number of tweets to try to retrieve, up to a maximum
            of 200. [Optional]
        since_id: Only return results newer than this ID. [Optional]
        max_id: Only return results older than this ID. [Optional]
        trim_user: When True, each tweet's user object contains only
            the author's numerical ID. [Optional]
        contributor_details: When True, include the contributor's
            screen_name in addition to the user_id. [Optional]
        include_entities: When False, omit the entities node. [Optional]
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        A sequence of twitter.Status instances, one for each mention of the user.
    """
    url = '%s/statuses/mentions_timeline.json' % self.base_url
    parameters = {}
    # Integer paging parameters are validated with enf_type.
    for key, value in (('count', count),
                       ('since_id', since_id),
                       ('max_id', max_id)):
        if value:
            parameters[key] = enf_type(key, int, value)
    if trim_user:
        parameters['trim_user'] = 1
    if contributor_details:
        parameters['contributor_details'] = 'true'
    if not include_entities:
        parameters['include_entities'] = 'false'

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    if return_json:
        return parsed
    return [Status.NewFromJsonDict(item) for item in parsed]
@staticmethod
def _IDList(list_id, slug, owner_id, owner_screen_name):
    """Build the parameter dict identifying a list.

    A list is identified either by its numeric list_id, or by a slug
    together with one of owner_id / owner_screen_name. Raises
    TwitterError when neither form is fully specified.
    """
    parameters = {}
    if list_id is not None:
        parameters['list_id'] = enf_type('list_id', int, list_id)
        return parameters
    if slug is None:
        raise TwitterError({'message': (
            'Either list_id or slug and one of owner_id and '
            'owner_screen_name must be passed.')})
    parameters['slug'] = slug
    if owner_id is not None:
        parameters['owner_id'] = enf_type('owner_id', int, owner_id)
    elif owner_screen_name is not None:
        parameters['owner_screen_name'] = owner_screen_name
    else:
        raise TwitterError({'message': (
            'If specifying a list by slug, an owner_id or '
            'owner_screen_name must also be given.')})
    return parameters
def CreateList(self, name, mode=None, description=None):
    """Create a new list with the given name for the authenticated user.

    Args:
        name (str):
            New name for the list
        mode (str, optional):
            'public' or 'private'. Defaults to 'public'.
        description (str, optional):
            Description of the list.

    Returns:
        twitter.list.List: A twitter.List instance representing the new list
    """
    url = '%s/lists/create.json' % self.base_url
    post_data = {'name': name}
    # Optional attributes are only sent when explicitly provided.
    post_data.update({key: value
                      for key, value in (('mode', mode),
                                         ('description', description))
                      if value is not None})

    resp = self._RequestUrl(url, 'POST', data=post_data)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return List.NewFromJsonDict(parsed)
def DestroyList(self,
                owner_screen_name=None,
                owner_id=None,
                list_id=None,
                slug=None):
    """Delete the list identified by list_id, or by slug plus one of
    owner_screen_name or owner_id.

    Args:
        owner_screen_name (str, optional): Screen name of the list's
            owner, required when requesting by slug.
        owner_id (int, optional): User ID of the list's owner, required
            when requesting by slug.
        list_id (int, optional): The numerical id of the list.
        slug (str, optional): Slug identifying the list; requires
            owner_id or owner_screen_name as well.

    Returns:
        twitter.list.List: A twitter.List instance representing the
        removed list.
    """
    url = '%s/lists/destroy.json' % self.base_url
    # _IDList validates the id/slug/owner combination.
    parameters = self._IDList(list_id=list_id,
                              slug=slug,
                              owner_id=owner_id,
                              owner_screen_name=owner_screen_name)

    resp = self._RequestUrl(url, 'POST', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return List.NewFromJsonDict(parsed)
def CreateSubscription(self,
                       owner_screen_name=None,
                       owner_id=None,
                       list_id=None,
                       slug=None):
    """Subscribe the authenticated user to the given list.

    Args:
        owner_screen_name (str, optional): Screen name of the list's
            owner, required when requesting by slug.
        owner_id (int, optional): User ID of the list's owner, required
            when requesting by slug.
        list_id (int, optional): The numerical id of the list.
        slug (str, optional): Slug identifying the list; requires
            owner_id or owner_screen_name as well.

    Returns:
        twitter.user.User: A twitter.User instance representing the user subscribed
    """
    url = '%s/lists/subscribers/create.json' % self.base_url
    # _IDList validates the id/slug/owner combination.
    parameters = self._IDList(list_id=list_id,
                              slug=slug,
                              owner_id=owner_id,
                              owner_screen_name=owner_screen_name)

    resp = self._RequestUrl(url, 'POST', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return User.NewFromJsonDict(parsed)
def DestroySubscription(self,
                        owner_screen_name=None,
                        owner_id=None,
                        list_id=None,
                        slug=None):
    """Unsubscribe the authenticated user from the given list.

    Args:
        owner_screen_name (str, optional): Screen name of the list's
            owner, required when requesting by slug.
        owner_id (int, optional): User ID of the list's owner, required
            when requesting by slug.
        list_id (int, optional): The numerical id of the list.
        slug (str, optional): Slug identifying the list; requires
            owner_id or owner_screen_name as well.

    Returns:
        twitter.list.List: A twitter.List instance representing
        the removed list.
    """
    url = '%s/lists/subscribers/destroy.json' % (self.base_url)
    # _IDList validates the id/slug/owner combination.
    parameters = self._IDList(list_id=list_id,
                              slug=slug,
                              owner_id=owner_id,
                              owner_screen_name=owner_screen_name)

    resp = self._RequestUrl(url, 'POST', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return List.NewFromJsonDict(parsed)
def ShowSubscription(self,
                     owner_screen_name=None,
                     owner_id=None,
                     list_id=None,
                     slug=None,
                     user_id=None,
                     screen_name=None,
                     include_entities=False,
                     skip_status=False,
                     return_json=False):
    """Check whether the specified user subscribes to the specified list;
    return the user if they do.

    Args:
        owner_screen_name (str, optional): Screen name of the list's
            owner, required when requesting by slug.
        owner_id (int, optional): User ID of the list's owner, required
            when requesting by slug.
        list_id (int, optional): The numerical ID of the list.
        slug (str, optional): Slug identifying the list; requires
            owner_id or owner_screen_name as well.
        user_id (int, optional): The user_id to check; required if
            screen_name is not given.
        screen_name (str, optional): The screen_name to check; required
            if user_id is not given.
        include_entities (bool, optional): When True, include additional
            metadata. Defaults to False.
        skip_status (bool, optional): When True, omit statuses from the
            user items.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        twitter.user.User: A twitter.User instance representing the user
        requested.
    """
    url = '%s/lists/subscribers/show.json' % (self.base_url)
    # _IDList validates the id/slug/owner combination.
    parameters = self._IDList(list_id=list_id,
                              slug=slug,
                              owner_id=owner_id,
                              owner_screen_name=owner_screen_name)
    if user_id:
        parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name:
        parameters['screen_name'] = screen_name
    if skip_status:
        parameters['skip_status'] = True
    if include_entities:
        parameters['include_entities'] = True

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return parsed if return_json else User.NewFromJsonDict(parsed)
def GetSubscriptions(self,
                     user_id=None,
                     screen_name=None,
                     count=20,
                     cursor=-1,
                     return_json=False):
    """Obtain the lists the specified user is subscribed to.

    Defaults to the authenticated user when neither user_id nor
    screen_name is given. Does not include the user's own lists.
    At most 20 lists per page are returned by default.

    Args:
        user_id (int, optional): ID of the user to query.
        screen_name (str, optional): Screen name of the user to query.
        count (int, optional): Results per page, capped at 1000 by
            Twitter. Defaults to 20.
        cursor (int, optional): Paging cursor; -1 starts from the
            beginning. The response carries next_cursor and
            previous_cursor values.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        twitter.list.List: A sequence of twitter.List instances,
        one for each list
    """
    url = '%s/lists/subscriptions.json' % (self.base_url)
    parameters = {
        'cursor': enf_type('cursor', int, cursor),
        'count': enf_type('count', int, count),
    }
    if user_id is not None:
        parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name is not None:
        parameters['screen_name'] = screen_name

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    if return_json:
        return parsed
    return [List.NewFromJsonDict(item) for item in parsed['lists']]
def GetMemberships(self,
                   user_id=None,
                   screen_name=None,
                   count=20,
                   cursor=-1,
                   filter_to_owned_lists=False,
                   return_json=False):
    """Obtain the lists the specified user is a member of.

    Defaults to the authenticated user when neither user_id nor
    screen_name is given. At most 20 lists per page by default.

    Args:
        user_id (int, optional): ID of the user to query.
        screen_name (str, optional): Screen name of the user to query.
        count (int, optional): Results per page, capped at 1000 by
            Twitter. Defaults to 20.
        cursor (int, optional): Paging cursor; -1 starts from the
            beginning. The response carries next_cursor and
            previous_cursor values.
        filter_to_owned_lists (bool, optional): When True, return only
            lists the authenticating user owns that the target user is
            a member of. Defaults to False.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        list: A list of twitter.List instances, one for each list in which
        the user specified by user_id or screen_name is a member
    """
    url = '%s/lists/memberships.json' % (self.base_url)
    parameters = {}
    if cursor is not None:
        parameters['cursor'] = enf_type('cursor', int, cursor)
    if count is not None:
        parameters['count'] = enf_type('count', int, count)
    if filter_to_owned_lists:
        parameters['filter_to_owned_lists'] = enf_type(
            'filter_to_owned_lists', bool, filter_to_owned_lists)
    if user_id is not None:
        parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name is not None:
        parameters['screen_name'] = screen_name

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    if return_json:
        return parsed
    return [List.NewFromJsonDict(item) for item in parsed['lists']]
def GetListsList(self,
                 screen_name=None,
                 user_id=None,
                 reverse=False,
                 return_json=False):
    """Return every list the user subscribes to, including their own.

    Defaults to the authenticated user when neither user_id nor
    screen_name is given.

    Args:
        screen_name (str, optional): Screen name of the user to query.
        user_id (int, optional): ID of the user to query.
        reverse (bool, optional): When True, subscribed lists come
            before owned lists. At most 100 entries are returned either
            way. Defaults to False.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        list: A sequence of twitter.List instances.
    """
    url = '%s/lists/list.json' % (self.base_url)
    parameters = {}
    if user_id:
        parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name:
        parameters['screen_name'] = screen_name
    if reverse:
        parameters['reverse'] = enf_type('reverse', bool, reverse)

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    if return_json:
        return parsed
    return [List.NewFromJsonDict(item) for item in parsed]
def GetListTimeline(self,
                    list_id=None,
                    slug=None,
                    owner_id=None,
                    owner_screen_name=None,
                    since_id=None,
                    max_id=None,
                    count=None,
                    include_rts=True,
                    include_entities=True,
                    return_json=False):
    """Fetch the sequence of Status messages for a given list.

    Args:
        list_id (int, optional): ID of the list to retrieve.
        slug (str, optional): Slug identifying the list; requires
            owner_id or owner_screen_name when list_id is None.
        owner_id (int, optional): User ID of the list's owner.
        owner_screen_name (str, optional): Screen name of the list's
            owner.
        since_id (int, optional): Only return results newer than this ID.
        max_id (int, optional): Only return results at or below this ID.
        count (int, optional): Number of statuses to retrieve; 200 max.
        include_rts (bool, optional): When True, native retweets appear
            alongside regular tweets.
        include_entities (bool, optional): When False, omit additional
            metadata. Defaults to True.
        return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.User

    Returns:
        list: A list of twitter.status.Status instances, one for each
        message up to count.
    """
    url = '%s/lists/statuses.json' % self.base_url
    # _IDList validates the id/slug/owner combination.
    parameters = self._IDList(list_id=list_id,
                              slug=slug,
                              owner_id=owner_id,
                              owner_screen_name=owner_screen_name)
    for key, value in (('since_id', since_id),
                       ('max_id', max_id),
                       ('count', count)):
        if value:
            parameters[key] = enf_type(key, int, value)
    # These flags are only transmitted when disabling the default.
    if not include_rts:
        parameters['include_rts'] = enf_type('include_rts', bool, include_rts)
    if not include_entities:
        parameters['include_entities'] = enf_type('include_entities', bool, include_entities)

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    if return_json:
        return parsed
    return [Status.NewFromJsonDict(item) for item in parsed]
def GetListMembersPaged(self,
                        list_id=None,
                        slug=None,
                        owner_id=None,
                        owner_screen_name=None,
                        cursor=-1,
                        count=100,
                        skip_status=False,
                        include_entities=True):
    """Fetch one page of members of the list given by list_id or slug.

    Args:
        list_id (int, optional): ID of the list to retrieve.
        slug (str, optional): Slug identifying the list; requires
            owner_id or owner_screen_name when list_id is None.
        owner_id (int, optional): User ID of the list's owner.
        owner_screen_name (str, optional): Screen name of the list's
            owner.
        cursor (int, optional): -1 for the first call; subsequent calls
            use the cursor from the previous response.
        skip_status (bool, optional): When True, omit statuses from the
            user items.
        include_entities (bool, optional): When False, omit additional
            metadata. Defaults to True.

    Returns:
        tuple: (next_cursor, previous_cursor, users) where users is a
        list of twitter.user.User instances for this page.
    """
    url = '%s/lists/members.json' % self.base_url
    # _IDList validates the id/slug/owner combination.
    parameters = self._IDList(list_id=list_id,
                              slug=slug,
                              owner_id=owner_id,
                              owner_screen_name=owner_screen_name)
    if count:
        parameters['count'] = enf_type('count', int, count)
    if cursor:
        parameters['cursor'] = enf_type('cursor', int, cursor)
    parameters['skip_status'] = enf_type('skip_status', bool, skip_status)
    parameters['include_entities'] = enf_type('include_entities', bool, include_entities)

    resp = self._RequestUrl(url, 'GET', data=parameters)
    parsed = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))

    members = [User.NewFromJsonDict(u) for u in parsed.get('users', [])]
    return parsed.get('next_cursor', 0), parsed.get('previous_cursor', 0), members
def GetListMembers(self,
                   list_id=None,
                   slug=None,
                   owner_id=None,
                   owner_screen_name=None,
                   skip_status=False,
                   include_entities=False):
    """Fetch the sequence of twitter.User instances, one for each member
    of the given list_id or slug.

    Args:
      list_id (int, optional):
        Specifies the ID of the list to retrieve.
      slug (str, optional):
        The slug name for the list to retrieve. If you specify None for the
        list_id, then you have to provide either a owner_screen_name or
        owner_id.
      owner_id (int, optional):
        Specifies the ID of the user for whom to return the
        list timeline. Helpful for disambiguating when a valid user ID
        is also a valid screen name.
      owner_screen_name (str, optional):
        Specifies the screen name of the user for whom to return the
        user_timeline. Helpful for disambiguating when a valid screen
        name is also a user ID.
      skip_status (bool, optional):
        If True the statuses will not be returned in the user items.
      include_entities (bool, optional):
        If False, the timeline will not contain additional metadata.
        Defaults to False.

    Returns:
      list: A sequence of twitter.user.User instances, one for each
        member of the twitter.list.List.
    """
    cursor = -1
    result = []
    # Follow Twitter's cursor through every page of members; stop when the
    # API reports the end (next_cursor of 0) or the cursor stops advancing.
    while True:
        next_cursor, previous_cursor, users = self.GetListMembersPaged(
            list_id=list_id,
            slug=slug,
            owner_id=owner_id,
            owner_screen_name=owner_screen_name,
            cursor=cursor,
            skip_status=skip_status,
            include_entities=include_entities)
        result += users
        if next_cursor == 0 or next_cursor == previous_cursor:
            break
        else:
            cursor = next_cursor
    return result
def CreateListsMember(self,
                      list_id=None,
                      slug=None,
                      user_id=None,
                      screen_name=None,
                      owner_screen_name=None,
                      owner_id=None):
    """Add a new member (or list of members) to the specified list.

    Args:
      list_id (int, optional):
        The numerical id of the list.
      slug (str, optional):
        You can identify a list by its slug instead of its numerical id.
        If you decide to do so, note that you'll also have to specify the
        list owner using the owner_id or owner_screen_name parameters.
      user_id (int, optional):
        The user_id or a list of user_id's to add to the list.
        If not given, then screen_name is required.
      screen_name (str, optional):
        The screen_name or a list of screen_name's to add to the list.
        If not given, then user_id is required.
      owner_screen_name (str, optional):
        The screen_name of the user who owns the list being requested by
        a slug.
      owner_id (int, optional):
        The user ID of the user who owns the list being requested by
        a slug.

    Returns:
      twitter.list.List: A twitter.List instance representing the list
        subscribed to.
    """
    is_list = False
    parameters = {}
    parameters.update(self._IDList(list_id=list_id,
                                   slug=slug,
                                   owner_id=owner_id,
                                   owner_screen_name=owner_screen_name))
    if user_id:
        if isinstance(user_id, list) or isinstance(user_id, tuple):
            is_list = True
            # Validate every ID; send them comma-joined for the bulk endpoint.
            uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
            parameters['user_id'] = ','.join(uids)
        else:
            parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name:
        if isinstance(screen_name, list) or isinstance(screen_name, tuple):
            is_list = True
            parameters['screen_name'] = ','.join(screen_name)
        else:
            parameters['screen_name'] = screen_name
    # Bulk additions use a different Twitter endpoint than single ones.
    if is_list:
        url = '%s/lists/members/create_all.json' % self.base_url
    else:
        url = '%s/lists/members/create.json' % self.base_url
    resp = self._RequestUrl(url, 'POST', data=parameters)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return List.NewFromJsonDict(data)
def DestroyListsMember(self,
                       list_id=None,
                       slug=None,
                       owner_screen_name=None,
                       owner_id=None,
                       user_id=None,
                       screen_name=None):
    """Remove a member (or list of members) from the specified list.

    Args:
      list_id (int, optional):
        The numerical id of the list.
      slug (str, optional):
        You can identify a list by its slug instead of its numerical id.
        If you decide to do so, note that you'll also have to specify
        the list owner using the owner_id or owner_screen_name parameters.
      owner_screen_name (str, optional):
        The screen_name of the user who owns the list being requested by a
        slug.
      owner_id (int, optional):
        The user ID of the user who owns the list being requested by a slug.
      user_id (int, optional):
        The user_id or a list of user_id's to remove from the list.
        If not given, then screen_name is required.
      screen_name (str, optional):
        The screen_name or a list of Screen_name's to remove from the list.
        If not given, then user_id is required.

    Returns:
      twitter.list.List: A twitter.List instance representing the
        removed list.
    """
    is_list = False
    parameters = {}
    parameters.update(self._IDList(list_id=list_id,
                                   slug=slug,
                                   owner_id=owner_id,
                                   owner_screen_name=owner_screen_name))
    if user_id:
        if isinstance(user_id, (list, tuple)):
            is_list = True
            uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
            parameters['user_id'] = ','.join(uids)
        else:
            # Consistency fix: validate through enf_type like
            # CreateListsMember does, so a bad user_id raises the library's
            # error type instead of a raw ValueError from int().
            parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name:
        if isinstance(screen_name, (list, tuple)):
            is_list = True
            parameters['screen_name'] = ','.join(screen_name)
        else:
            parameters['screen_name'] = screen_name
    # Bulk removals use the destroy_all endpoint; single removals use destroy.
    if is_list:
        url = '%s/lists/members/destroy_all.json' % self.base_url
    else:
        url = '%s/lists/members/destroy.json' % self.base_url
    resp = self._RequestUrl(url, 'POST', data=parameters)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return List.NewFromJsonDict(data)
def GetListsPaged(self,
                  user_id=None,
                  screen_name=None,
                  cursor=-1,
                  count=20):
    """ Fetch the sequence of lists for a user. If no user_id or
    screen_name is passed, the data returned will be for the
    authenticated user.

    Args:
      user_id (int, optional):
        The ID of the user for whom to return results for.
      screen_name (str, optional):
        The screen name of the user for whom to return results
        for.
      count (int, optional):
        The amount of results to return per page. No more than 1000 results
        will ever be returned in a single page. Defaults to 20.
      cursor (int, optional):
        The "page" value that Twitter will use to start building the list
        sequence from. Use the value of -1 to start at the beginning.
        Twitter will return in the result the values for next_cursor and
        previous_cursor.

    Returns:
      next_cursor (int), previous_cursor (int), list of twitter.List
      instances, one for each list
    """
    # Lists *owned* by the user (the /lists/ownerships endpoint).
    url = '%s/lists/ownerships.json' % self.base_url
    parameters = {}
    # user_id takes precedence over screen_name when both are given.
    if user_id is not None:
        parameters['user_id'] = enf_type('user_id', int, user_id)
    elif screen_name is not None:
        parameters['screen_name'] = screen_name
    if count is not None:
        parameters['count'] = enf_type('count', int, count)
    parameters['cursor'] = enf_type('cursor', int, cursor)
    resp = self._RequestUrl(url, 'GET', data=parameters)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    next_cursor = data.get('next_cursor', 0)
    previous_cursor = data.get('previous_cursor', 0)
    lists = [List.NewFromJsonDict(x) for x in data.get('lists', [])]
    return next_cursor, previous_cursor, lists
def GetLists(self,
             user_id=None,
             screen_name=None):
    """Fetch the sequence of lists for a user. If no user_id or screen_name
    is passed, the data returned will be for the authenticated user.

    Args:
      user_id:
        The ID of the user for whom to return results for. [Optional]
      screen_name:
        The screen name of the user for whom to return results
        for. [Optional]

    Returns:
      A sequence of twitter.List instances, one for each list
    """
    result = []
    cursor = -1
    # Drain every page from GetListsPaged, following Twitter's cursor
    # until it reports the end (0) or stops advancing.
    while True:
        next_cursor, prev_cursor, lists = self.GetListsPaged(
            user_id=user_id,
            screen_name=screen_name,
            cursor=cursor)
        result += lists
        if next_cursor == 0 or next_cursor == prev_cursor:
            break
        else:
            cursor = next_cursor
    return result
def UpdateProfile(self,
                  name=None,
                  profileURL=None,
                  location=None,
                  description=None,
                  profile_link_color=None,
                  include_entities=False,
                  skip_status=False):
    """Update's the authenticated user's profile data.

    Args:
      name:
        Full name associated with the profile.
        Maximum of 20 characters. [Optional]
      profileURL:
        URL associated with the profile.
        Will be prepended with "http://" if not present.
        Maximum of 100 characters. [Optional]
      location:
        The city or country describing where the user of the account is located.
        The contents are not normalized or geocoded in any way.
        Maximum of 30 characters. [Optional]
      description:
        A description of the user owning the account.
        Maximum of 160 characters. [Optional]
      profile_link_color:
        hex value of profile color theme. formated without '#' or '0x'. Ex: FF00FF
        [Optional]
      include_entities:
        The entities node will be omitted when set to False.
        [Optional]
      skip_status:
        When set to either True, t or 1 then statuses will not be included
        in the returned user objects. [Optional]

    Returns:
      A twitter.User instance representing the modified user.
    """
    url = '%s/account/update_profile.json' % (self.base_url)
    data = {}
    # NOTE(review): falsy values (e.g. an empty string) are skipped here,
    # so this method cannot be used to *clear* a profile field — confirm
    # that is the intended behavior.
    if name:
        data['name'] = name
    if profileURL:
        data['url'] = profileURL
    if location:
        data['location'] = location
    if description:
        data['description'] = description
    if profile_link_color:
        data['profile_link_color'] = profile_link_color
    if include_entities:
        data['include_entities'] = include_entities
    if skip_status:
        data['skip_status'] = skip_status
    resp = self._RequestUrl(url, 'POST', data=data)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return User.NewFromJsonDict(data)
def UpdateBackgroundImage(self,
                          image,
                          tile=False,
                          include_entities=False,
                          skip_status=False):
    """Deprecated function. Used to update the background of a User's
    Twitter profile. Removed in approx. July, 2015"""
    warnings.warn((
        "This method has been deprecated by Twitter as of July 2015 and "
        "will be removed in future versions of python-twitter."),
        PythonTwitterDeprecationWarning330)
    url = '%s/account/update_profile_background_image.json' % (self.base_url)
    # Twitter expects the image payload base64-encoded in the POST body.
    with open(image, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read())
    data = {
        'image': encoded_image
    }
    if tile:
        data['tile'] = 1
    if include_entities:
        data['include_entities'] = 1
    if skip_status:
        data['skip_status'] = 1
    resp = self._RequestUrl(url, 'POST', data=data)
    if resp.status_code in [200, 201, 202]:
        return True
    if resp.status_code == 400:
        raise TwitterError({'message': "Image data could not be processed"})
    if resp.status_code == 422:
        raise TwitterError({'message': "The image could not be resized or is too large."})
    # NOTE(review): any other status code falls through and returns None.
def UpdateImage(self,
                image,
                include_entities=False,
                skip_status=False):
    """Update a User's profile image. Change may not be immediately
    reflected due to image processing on Twitter's side.

    Args:
      image (str):
        Location of local image file to use.
      include_entities (bool, optional):
        Include the entities node in the return data.
      skip_status (bool, optional):
        Include the User's last Status in the User entity returned.

    Returns:
      True on success (2xx response); raises TwitterError on a 400 or 422
      response. Any other status code falls through and returns None.
    """
    url = '%s/account/update_profile_image.json' % (self.base_url)
    # Twitter expects the image payload base64-encoded in the POST body.
    with open(image, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read())
    data = {
        'image': encoded_image
    }
    if include_entities:
        data['include_entities'] = 1
    if skip_status:
        data['skip_status'] = 1
    resp = self._RequestUrl(url, 'POST', data=data)
    if resp.status_code in [200, 201, 202]:
        return True
    if resp.status_code == 400:
        raise TwitterError({'message': "Image data could not be processed"})
    if resp.status_code == 422:
        raise TwitterError({'message': "The image could not be resized or is too large."})
def UpdateBanner(self,
                 image,
                 include_entities=False,
                 skip_status=False):
    """Updates the authenticated users profile banner.

    Args:
      image:
        Location of image in file system
      include_entities:
        If True, each tweet will include a node called "entities."
        This node offers a variety of metadata about the tweet in a
        discrete structure, including: user_mentions, urls, and hashtags.
        [Optional]
      skip_status:
        If True, statuses will not be included in the returned user
        objects. [Optional]

    Returns:
      True on success; raises TwitterError otherwise.
    """
    url = '%s/account/update_profile_banner.json' % (self.base_url)
    # Twitter expects the banner payload base64-encoded in the POST body.
    with open(image, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read())
    data = {
        # When updated for API v1.1 use image, not banner
        # https://dev.twitter.com/docs/api/1.1/post/account/update_profile_banner
        # 'image': encoded_image
        'banner': encoded_image
    }
    if include_entities:
        data['include_entities'] = 1
    if skip_status:
        data['skip_status'] = 1
    resp = self._RequestUrl(url, 'POST', data=data)
    if resp.status_code in [200, 201, 202]:
        return True
    if resp.status_code == 400:
        raise TwitterError({'message': "Image data could not be processed"})
    if resp.status_code == 422:
        raise TwitterError({'message': "The image could not be resized or is too large."})
    # Fix: corrected the "Unkown" typo in the fallback error message.
    raise TwitterError({'message': "Unknown banner image upload issue"})
def GetStreamSample(self, delimited=False, stall_warnings=True):
    """Returns a small sample of public statuses.

    Args:
      delimited:
        Specifies a message length. [Optional]
      stall_warnings:
        Set to True to have Twitter deliver stall warnings. [Optional]

    Returns:
      A Twitter stream (a generator yielding one parsed JSON object per
      non-empty line of the streaming response).
    """
    url = '%s/statuses/sample.json' % self.stream_url
    parameters = {
        'delimited': bool(delimited),
        'stall_warnings': bool(stall_warnings)
    }
    resp = self._RequestStream(url, 'GET', data=parameters)
    # Blank lines are keep-alive pings from Twitter; skip them.
    for line in resp.iter_lines():
        if line:
            data = self._ParseAndCheckTwitter(line.decode('utf-8'))
            yield data
def GetStreamFilter(self,
                    follow=None,
                    track=None,
                    locations=None,
                    languages=None,
                    delimited=None,
                    stall_warnings=None,
                    filter_level=None):
    """Returns a filtered view of public statuses.

    Args:
      follow:
        A list of user IDs to track. [Optional]
      track:
        A list of expressions to track. [Optional]
      locations:
        A list of Longitude,Latitude pairs (as strings) specifying
        bounding boxes for the tweets' origin. [Optional]
      delimited:
        Specifies a message length. [Optional]
      stall_warnings:
        Set to True to have Twitter deliver stall warnings. [Optional]
      languages:
        A list of Languages.
        Will only return Tweets that have been detected as being
        written in the specified languages. [Optional]
      filter_level:
        Specifies level of filtering applied to stream.
        Set to None, 'low' or 'medium'. [Optional]

    Returns:
      A twitter stream (generator of parsed JSON objects).

    Raises:
      ValueError: if none of follow, track, or locations is given.
    """
    if all((follow is None, track is None, locations is None)):
        raise ValueError({'message': "No filter parameters specified."})
    url = '%s/statuses/filter.json' % self.stream_url
    data = {}
    # Each list parameter is sent comma-joined, per the streaming API.
    if follow is not None:
        data['follow'] = ','.join(follow)
    if track is not None:
        data['track'] = ','.join(track)
    if locations is not None:
        data['locations'] = ','.join(locations)
    if delimited is not None:
        data['delimited'] = str(delimited)
    if stall_warnings is not None:
        data['stall_warnings'] = str(stall_warnings)
    if languages is not None:
        data['language'] = ','.join(languages)
    if filter_level is not None:
        data['filter_level'] = filter_level
    resp = self._RequestStream(url, 'POST', data=data)
    # Blank lines are keep-alive pings from Twitter; skip them.
    for line in resp.iter_lines():
        if line:
            data = self._ParseAndCheckTwitter(line.decode('utf-8'))
            yield data
def GetUserStream(self,
                  replies='all',
                  withuser='user',
                  track=None,
                  locations=None,
                  delimited=None,
                  stall_warnings=None,
                  stringify_friend_ids=False,
                  filter_level=None,
                  session=None,
                  include_keepalive=False):
    """Returns the data from the user stream.

    Args:
      replies:
        Specifies whether to return additional @replies in the stream.
        Defaults to 'all'.
      withuser:
        Specifies whether to return information for just the authenticating
        user, or include messages from accounts the user follows. [Optional]
      track:
        A list of expressions to track. [Optional]
      locations:
        A list of Latitude,Longitude pairs (as strings) specifying
        bounding boxes for the tweets' origin. [Optional]
      delimited:
        Specifies a message length. [Optional]
      stall_warnings:
        Set to True to have Twitter deliver stall warnings. [Optional]
      stringify_friend_ids:
        Specifies whether to send the friends list preamble as an array of
        integers or an array of strings. [Optional]
      filter_level:
        Specifies level of filtering applied to stream.
        Set to None, low or medium. [Optional]
      session:
        A requests Session to reuse for the stream connection. [Optional]
      include_keepalive:
        If True, yield None for each keep-alive newline received, so the
        caller can detect a stalled connection. [Optional]

    Returns:
      A twitter stream (generator of parsed JSON objects, interleaved with
      None for keep-alives when include_keepalive is True).
    """
    url = 'https://userstream.twitter.com/1.1/user.json'
    data = {}
    if stringify_friend_ids:
        data['stringify_friend_ids'] = 'true'
    if replies is not None:
        data['replies'] = replies
    if withuser is not None:
        data['with'] = withuser
    if track is not None:
        data['track'] = ','.join(track)
    if locations is not None:
        data['locations'] = ','.join(locations)
    if delimited is not None:
        data['delimited'] = str(delimited)
    if stall_warnings is not None:
        data['stall_warnings'] = str(stall_warnings)
    if filter_level is not None:
        data['filter_level'] = filter_level
    resp = self._RequestStream(url, 'POST', data=data, session=session)
    # The Twitter streaming API sends keep-alive newlines every 30s if there has not been other
    # traffic, and specifies that streams should only be reset after three keep-alive ticks.
    #
    # The original implementation of this API didn't expose keep-alive signals to the user,
    # making it difficult to determine whether the connection should be hung up or not.
    #
    # https://dev.twitter.com/streaming/overview/connecting
    for line in resp.iter_lines():
        if line:
            data = self._ParseAndCheckTwitter(line.decode('utf-8'))
            yield data
        elif include_keepalive:
            yield None
def VerifyCredentials(self, include_entities=None, skip_status=None, include_email=None):
    """Returns a twitter.User instance if the authenticating user is valid.

    Args:
      include_entities:
        Specifies whether to return additional @replies in the stream.
      skip_status:
        When set to either true, t or 1 statuses will not be included in the
        returned user object.
      include_email:
        Use of this parameter requires whitelisting.
        When set to true email will be returned in the user objects as a string.
        If the user does not have an email address on their account, or if the
        email address is un-verified, null will be returned. If your app is
        not whitelisted, then the 'email' key will not be present in the json
        response.

    Returns:
      A twitter.User instance representing that user if the
      credentials are valid, None otherwise.
    """
    url = '%s/account/verify_credentials.json' % self.base_url
    # NOTE(review): the defaults are None but every value is passed through
    # enf_type(..., bool, ...) — confirm enf_type tolerates None, otherwise
    # calling this with no arguments may raise.
    data = {
        'include_entities': enf_type('include_entities', bool, include_entities),
        'skip_status': enf_type('skip_status', bool, skip_status),
        'include_email': 'true' if enf_type('include_email', bool, include_email) else 'false',
    }
    resp = self._RequestUrl(url, 'GET', data)
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    return User.NewFromJsonDict(data)
def SetCache(self, cache):
    """Override the default cache. Set to None to prevent caching.

    Args:
      cache:
        An instance that supports the same API as the twitter._FileCache
    """
    # The DEFAULT_CACHE sentinel means "build a fresh file cache for me".
    self._cache = _FileCache() if cache == DEFAULT_CACHE else cache
def SetUrllib(self, urllib):
    """Override the default urllib implementation.

    Args:
      urllib:
        An instance that supports the same API as the urllib2 module
    """
    setattr(self, '_urllib', urllib)
def SetCacheTimeout(self, cache_timeout):
    """Override the default cache timeout.

    Args:
      cache_timeout:
        Time, in seconds, that responses should be reused.
    """
    setattr(self, '_cache_timeout', cache_timeout)
def SetUserAgent(self, user_agent):
    """Override the default user agent.

    Args:
      user_agent:
        A string that should be send to the server as the user-agent.
    """
    self._request_headers.update({'User-Agent': user_agent})
def SetXTwitterHeaders(self, client, url, version):
    """Set the X-Twitter HTTP headers that will be sent to the server.

    Args:
      client:
        The client name as a string. Will be sent to the server as
        the 'X-Twitter-Client' header.
      url:
        The URL of the meta.xml as a string. Will be sent to the server
        as the 'X-Twitter-Client-URL' header.
      version:
        The client version as a string. Will be sent to the server
        as the 'X-Twitter-Client-Version' header.
    """
    self._request_headers.update({
        'X-Twitter-Client': client,
        'X-Twitter-Client-URL': url,
        'X-Twitter-Client-Version': version,
    })
def SetSource(self, source):
    """Suggest the "from source" value to be displayed on the Twitter web site.

    The value of the 'source' parameter must be first recognized by
    the Twitter server.

    New source values are authorized on a case by case basis by the
    Twitter development team.

    Args:
      source:
        The source name as a string. Will be sent to the server as
        the 'source' parameter.
    """
    self._default_params.update(source=source)
def InitializeRateLimit(self):
    """ Make a call to the Twitter API to get the rate limit
    status for the currently authenticated user or application.

    Returns:
      None.
    """
    # Temporarily disable sleep_on_rate_limit so that fetching the rate
    # limit status itself cannot trigger a rate-limit sleep (which would
    # recurse into CheckRateLimit before self.rate_limit is populated).
    _sleep = self.sleep_on_rate_limit
    if self.sleep_on_rate_limit:
        self.sleep_on_rate_limit = False
    url = '%s/application/rate_limit_status.json' % self.base_url
    resp = self._RequestUrl(url, 'GET')  # No-Cache
    data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
    # Restore the caller's setting before caching the parsed limits.
    self.sleep_on_rate_limit = _sleep
    self.rate_limit = RateLimit(**data)
def CheckRateLimit(self, url):
    """ Checks a URL to see the rate limit status for that endpoint.

    Args:
      url (str):
        URL to check against the current rate limits.

    Returns:
      namedtuple: EndpointRateLimit namedtuple, or None when url is falsy.
    """
    # Lazily populate the rate-limit table on first use.
    if not self.rate_limit.__dict__.get('resources', None):
        self.InitializeRateLimit()

    if url:
        limit = self.rate_limit.get_limit(url)
        return limit
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlunparse((scheme, netloc, path, params, query, fragment))
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
    # Advertise both the urllib and python-twitter versions.
    self.SetUserAgent(
        'Python-urllib/%s (python-twitter/%s)' % (urllib_version, __version__))
def _InitializeDefaultParameters(self):
self._default_params = {}
@staticmethod
def _DecompressGzippedResponse(response):
raw_data = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
url_data = gzip.GzipFile(fileobj=io.StringIO(raw_data)).read()
else:
url_data = raw_data
return url_data
@staticmethod
def _EncodeParameters(parameters):
"""Return a string in key=value&key=value form.
Values of None are not included in the output string.
Args:
parameters (dict): dictionary of query parameters to be converted into a
string for encoding and sending to Twitter.
Returns:
A URL-encoded string in "key=value&key=value" form
"""
if parameters is None:
return None
if not isinstance(parameters, dict):
raise TwitterError("`parameters` must be a dict.")
else:
return urlencode(dict((k, v) for k, v in parameters.items() if v is not None))
def _ParseAndCheckTwitter(self, json_data):
"""Try and parse the JSON returned from Twitter and return
an empty dictionary if there is any error.
This is a purely defensive check because during some Twitter
network outages it will return an HTML failwhale page.
"""
try:
data = json.loads(json_data)
except ValueError:
if "<title>Twitter / Over capacity</title>" in json_data:
raise TwitterError({'message': "Capacity Error"})
if "<title>Twitter / Error</title>" in json_data:
raise TwitterError({'message': "Technical Error"})
if "Exceeded connection limit for user" in json_data:
raise TwitterError({'message': "Exceeded connection limit for user"})
if "Error 401 Unauthorized" in json_data:
raise TwitterError({'message': "Unauthorized"})
raise TwitterError({'Unknown error: {0}'.format(json_data)})
self._CheckForTwitterError(data)
return data
@staticmethod
def _CheckForTwitterError(data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data (dict):
A python dict created from the Twitter json response
Raises:
(twitter.TwitterError): TwitterError wrapping the twitter error
message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
if 'errors' in data:
raise TwitterError(data['errors'])
def _RequestChunkedUpload(self, url, headers, data):
    # POST one chunk of a chunked media upload, translating any
    # requests-level failure into the library's TwitterError.
    try:
        return requests.post(
            url,
            headers=headers,
            data=data,
            auth=self.__auth,
            timeout=self._timeout,
            proxies=self.proxies
        )
    except requests.RequestException as e:
        raise TwitterError(str(e))
def _RequestUrl(self, url, verb, data=None, json=None, enforce_auth=True):
    """Request a url.

    Args:
      url:
        The web location we want to retrieve.
      verb:
        Either POST or GET.
      data:
        A dict of (str, unicode) key/value pairs.
      json:
        Optional JSON body for a POST (used when data is empty).
      enforce_auth:
        When True (default), require credentials and honor
        sleep_on_rate_limit before issuing the request.

    Returns:
      A requests Response object (or 0 for an unsupported verb / an empty
      POST).
    """
    if enforce_auth:
        if not self.__auth:
            raise TwitterError("The twitter.Api instance must be authenticated.")

        # Optionally sleep until the endpoint's rate-limit window resets.
        if url and self.sleep_on_rate_limit:
            limit = self.CheckRateLimit(url)

            if limit.remaining == 0:
                try:
                    time.sleep(max(int(limit.reset - time.time()) + 2, 0))
                except ValueError:
                    pass
        if not data:
            data = {}

    if verb == 'POST':
        if data:
            if 'media_ids' in data:
                url = self._BuildUrl(url, extra_params={'media_ids': data['media_ids']})
                resp = requests.post(url, data=data, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
            elif 'media' in data:
                # Raw media uploads go out as multipart/form-data.
                resp = requests.post(url, files=data, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
            else:
                resp = requests.post(url, data=data, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
        elif json:
            resp = requests.post(url, json=json, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
        else:
            resp = 0  # POST request, but without data or json

    elif verb == 'GET':
        data['tweet_mode'] = self.tweet_mode
        url = self._BuildUrl(url, extra_params=data)
        resp = requests.get(url, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)

    else:
        resp = 0  # if not a POST or GET request

    # Record the endpoint's rate-limit state from the response headers.
    # NOTE(review): if resp is the integer 0 sentinel here, the .headers
    # access below would raise — confirm unsupported verbs are never
    # combined with a truthy url and rate_limit.
    if url and self.rate_limit:
        limit = resp.headers.get('x-rate-limit-limit', 0)
        remaining = resp.headers.get('x-rate-limit-remaining', 0)
        reset = resp.headers.get('x-rate-limit-reset', 0)
        self.rate_limit.set_limit(url, limit, remaining, reset)

    return resp
def _RequestStream(self, url, verb, data=None, session=None):
    """Request a stream of data.

    Args:
      url:
        The web location we want to retrieve.
      verb:
        Either POST or GET.
      data:
        A dict of (str, unicode) key/value pairs.
      session:
        Optional requests Session to reuse; a new one is created otherwise.

    Returns:
      A twitter stream (a streaming requests Response), or 0 for an
      unsupported verb.
    """
    session = session or requests.Session()

    if verb == 'POST':
        try:
            return session.post(url, data=data, stream=True,
                                auth=self.__auth,
                                timeout=self._timeout,
                                proxies=self.proxies)
        except requests.RequestException as e:
            raise TwitterError(str(e))
    if verb == 'GET':
        # GET parameters are carried in the query string, not the body.
        url = self._BuildUrl(url, extra_params=data)
        try:
            return session.get(url, stream=True, auth=self.__auth,
                               timeout=self._timeout, proxies=self.proxies)
        except requests.RequestException as e:
            raise TwitterError(str(e))
    return 0  # if not a POST or GET request
| apache-2.0 |
zcbenz/cefode-chromium | chrome/common/extensions/docs/server2/appengine_url_fetcher.py | 5 | 1715 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
from appengine_wrappers import urlfetch
from future import Future
class _AsyncFetchDelegate(object):
def __init__(self, rpc):
self._rpc = rpc
def Get(self):
return self._rpc.get_result()
def _MakeHeaders(username, password):
headers = { 'Cache-Control': 'max-age=0' }
if username is not None and password is not None:
headers['Authorization'] = 'Basic %s' % base64.encodestring(
'%s:%s' % (username, password))
return headers
class AppEngineUrlFetcher(object):
    """A wrapper around the App Engine urlfetch module that allows for easy
    async fetches.
    """

    def __init__(self, base_path):
        self._base_path = base_path

    def Fetch(self, url, username=None, password=None):
        """Fetches a file synchronously.
        """
        headers = _MakeHeaders(username, password)
        if self._base_path is not None:
            return urlfetch.fetch('%s/%s' % (self._base_path, url), headers=headers)
        # Bug fix: the computed headers (including any basic-auth
        # credentials) were previously discarded on this branch and
        # replaced with a hard-coded Cache-Control header.
        return urlfetch.fetch(url, headers=headers)

    def FetchAsync(self, url, username=None, password=None):
        """Fetches a file asynchronously, and returns a Future with the result.
        """
        rpc = urlfetch.create_rpc()
        headers = _MakeHeaders(username, password)
        if self._base_path is not None:
            urlfetch.make_fetch_call(rpc,
                                     '%s/%s' % (self._base_path, url),
                                     headers=headers)
        else:
            urlfetch.make_fetch_call(rpc, url, headers=headers)
        return Future(delegate=_AsyncFetchDelegate(rpc))
| bsd-3-clause |
aaltinisik/OCBAltinkaya | addons/account_followup/report/account_followup_report.py | 382 | 4561 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class account_followup_stat(osv.osv):
    # Read-only model backed by a PostgreSQL view (see init below);
    # _auto = False prevents OpenERP from creating a table for it.
    _name = "account_followup.stat"
    _description = "Follow-up Statistics"
    _rec_name = 'partner_id'
    _auto = False
    _columns = {
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'date_move': fields.date('First move', readonly=True),
        'date_move_last': fields.date('Last move', readonly=True),
        'date_followup': fields.date('Latest followup', readonly=True),
        'followup_id': fields.many2one('account_followup.followup.line',
                                       'Follow Ups', readonly=True, ondelete="cascade"),
        'balance': fields.float('Balance', readonly=True),
        'debit': fields.float('Debit', readonly=True),
        'credit': fields.float('Credit', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'blocked': fields.boolean('Blocked', readonly=True),
        'period_id': fields.many2one('account.period', 'Period', readonly=True),
    }
    _order = 'date_move'

    def search(self, cr, uid, args, offset=0, limit=None, order=None,
               context=None, count=False):
        # Expand the pseudo-value 'current_year' on period_id into the
        # concrete list of periods of the current fiscal year.
        for arg in args:
            if arg[0] == 'period_id' and arg[2] == 'current_year':
                current_year = self.pool.get('account.fiscalyear').find(cr, uid)
                ids = self.pool.get('account.fiscalyear').read(cr, uid, [current_year], ['period_ids'])[0]['period_ids']
                args.append(['period_id', 'in', ids])
                args.remove(arg)
        return super(account_followup_stat, self).search(cr, uid, args=args, offset=offset, limit=limit, order=order,
                                                         context=context, count=count)

    def read_group(self, cr, uid, domain, *args, **kwargs):
        # Same 'current_year' expansion as search(), applied to the
        # grouping domain.
        for arg in domain:
            if arg[0] == 'period_id' and arg[2] == 'current_year':
                current_year = self.pool.get('account.fiscalyear').find(cr, uid)
                ids = self.pool.get('account.fiscalyear').read(cr, uid, [current_year], ['period_ids'])[0]['period_ids']
                domain.append(['period_id', 'in', ids])
                domain.remove(arg)
        return super(account_followup_stat, self).read_group(cr, uid, domain, *args, **kwargs)

    def init(self, cr):
        # (Re)create the SQL view aggregating unreconciled receivable move
        # lines per partner/company/period.
        tools.drop_view_if_exists(cr, 'account_followup_stat')
        cr.execute("""
            create or replace view account_followup_stat as (
                SELECT
                    l.id as id,
                    l.partner_id AS partner_id,
                    min(l.date) AS date_move,
                    max(l.date) AS date_move_last,
                    max(l.followup_date) AS date_followup,
                    max(l.followup_line_id) AS followup_id,
                    sum(l.debit) AS debit,
                    sum(l.credit) AS credit,
                    sum(l.debit - l.credit) AS balance,
                    l.company_id AS company_id,
                    l.blocked as blocked,
                    l.period_id AS period_id
                FROM
                    account_move_line l
                    LEFT JOIN account_account a ON (l.account_id = a.id)
                WHERE
                    a.active AND
                    a.type = 'receivable' AND
                    l.reconcile_id is NULL AND
                    l.partner_id IS NOT NULL
                GROUP BY
                    l.id, l.partner_id, l.company_id, l.blocked, l.period_id
            )""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
blaquee/volatility | volatility/plugins/linux/dentry_cache.py | 57 | 2513 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.plugins.linux.common as linux_common
from volatility.plugins.linux.slab_info import linux_slabinfo
class linux_dentry_cache(linux_common.AbstractLinuxCommand):
    """Gather files from the dentry cache"""

    def __init__(self, config, *args, **kwargs):
        linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
        self._config.add_option('UNALLOCATED', short_option = 'u',
                        default = False,
                        help = 'Show unallocated',
                        action = 'store_true')

    def make_body(self, dentry):
        """Create a pipe-delimited bodyfile line from a dentry structure.

        Column layout (SleuthKit 3.x body format, 11 fields):
        MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime
        """
        path = dentry.get_partial_path() or ""

        i = dentry.d_inode
        if i:
            # MD5 and mode are not recoverable from memory, and the ctime
            # column is unused here; those positions are zero-filled.
            ret = [0, path, i.i_ino, 0, i.i_uid, i.i_gid, i.i_size,
                   i.i_atime, i.i_mtime, 0, i.i_ctime]
        else:
            # Negative dentry (no inode): keep the same 11-column layout,
            # zero-filled.  (Was ``[0] * 8``, which emitted only 10 columns
            # and broke alignment with the branch above.)
            ret = [0, path] + [0] * 9

        return "|".join([str(val) for val in ret])

    def calculate(self):
        linux_common.set_plugin_members(self)

        cache = linux_slabinfo(self._config).get_kmem_cache("dentry", self._config.UNALLOCATED)
        # Support for old kernels, where the cache was named "dentry_cache".
        if cache == []:
            cache = linux_slabinfo(self._config).get_kmem_cache("dentry_cache", self._config.UNALLOCATED, struct_name = "dentry")

        for dentry in cache:
            yield self.make_body(dentry)

    def render_text(self, outfd, data):
        # One bodyfile line per dentry, ready for mactime consumption.
        for bodyline in data:
            outfd.write(bodyline + "\n")
| gpl-2.0 |
jeremiahmarks/sl4a | python/gdata/src/gdata/tlslite/integration/POP3_TLS.py | 271 | 5466 | """TLS Lite + poplib."""
import socket
from poplib import POP3
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# Standard port for POP3 over implicit TLS ("pop3s", RFC 2595 / IANA).
POP3_TLS_PORT = 995
class POP3_TLS(POP3, ClientHelper):
    """This class extends L{poplib.POP3} with TLS support."""

    def __init__(self, host, port = POP3_TLS_PORT,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """Create a new POP3_TLS.

        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - username, sharedKey (shared-key)
         - certChain, privateKey (certificate)

        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        shared-keys, or you can do certificate-based server
        authentication with one of these argument combinations:
         - cryptoID[, protocol] (requires cryptoIDlib)
         - x509Fingerprint
         - x509TrustList[, x509CommonName] (requires cryptlib_py)

        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication.  It is
        not compatible with shared-keys.

        The caller should be prepared to handle TLS-specific
        exceptions.  See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.

        @type host: str
        @param host: Server to connect to.

        @type port: int
        @param port: Port to connect to.

        @type username: str
        @param username: SRP or shared-key username.  Requires the
        'password' or 'sharedKey' argument.

        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.

        @type sharedKey: str
        @param sharedKey: Shared key for mutual authentication.
        Requires the 'username' argument.

        @type certChain: L{tlslite.X509CertChain.X509CertChain} or
        L{cryptoIDlib.CertChain.CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument.  Excludes the SRP or
        shared-key related arguments.

        @type privateKey: L{tlslite.utils.RSAKey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument.  Excludes the SRP or
        shared-key related arguments.

        @type cryptoID: str
        @param cryptoID: cryptoID for server authentication.  Mutually
        exclusive with the 'x509...' arguments.

        @type protocol: str
        @param protocol: cryptoID protocol URI for server
        authentication.  Requires the 'cryptoID' argument.

        @type x509Fingerprint: str
        @param x509Fingerprint: Hex-encoded X.509 fingerprint for
        server authentication.  Mutually exclusive with the 'cryptoID'
        and 'x509TrustList' arguments.

        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates.  The
        other party must present a certificate chain which extends to
        one of these root certificates.  The cryptlib_py module must be
        installed to use this parameter.  Mutually exclusive with the
        'cryptoID' and 'x509Fingerprint' arguments.

        @type x509CommonName: str
        @param x509CommonName: The end-entity certificate's 'CN' field
        must match this value.  For a web server, this is typically a
        server name such as 'www.amazon.com'.  Mutually exclusive with
        the 'cryptoID' and 'x509Fingerprint' arguments.  Requires the
        'x509TrustList' argument.

        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        self.host = host
        self.port = port
        # Fallback error text if getaddrinfo yields no usable address at all.
        msg = "getaddrinfo returns an empty list"
        self.sock = None
        # Try each resolved address until one connects (standard poplib
        # pattern); ``msg`` is rebound to the last socket.error on failure.
        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg

        ### New code below (all else copied from poplib)
        # Validate the authentication-argument combinations and stash them.
        ClientHelper.__init__(self,
                 username, password, sharedKey,
                 certChain, privateKey,
                 cryptoID, protocol,
                 x509Fingerprint,
                 x509TrustList, x509CommonName,
                 settings)

        # Wrap the raw socket and perform the TLS handshake BEFORE reading
        # the server greeting: port 995 is implicit TLS, not STARTTLS.
        self.sock = TLSConnection(self.sock)
        self.sock.closeSocket = True
        ClientHelper._handshake(self, self.sock)
        ###

        self.file = self.sock.makefile('rb')
        self._debugging = 0
        # First line the server sends over the established TLS channel.
        self.welcome = self._getresp()
| apache-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/encodings/iso8859_10.py | 272 | 13589 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-10 codec built on the generated charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* using the module-level encoding table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode *input* using the module-level decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; needs no state since the mapping is per-character."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length-consumed); only the bytes
        # are of interest here.
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; needs no state since the mapping is per-byte."""

    def decode(self, input, final=False):
        # charmap_decode returns (str, length-consumed); only the text
        # is of interest here.
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
class StreamWriter(Codec, codecs.StreamWriter):
    """File-like writer; all behaviour comes from the two base classes."""
class StreamReader(Codec, codecs.StreamReader):
    """File-like reader; all behaviour comes from the two base classes."""
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs machinery registers for iso8859-10."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-10',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
Coelhon/MasterRepo.repository | plugin.video.RabbitMovies/resources/lib/sources/disabled/movienight_mv.py | 30 | 3240 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.resolvers import filepup
class source:
    """Movienight.ws scraper: resolves a movie title to a site URL and
    extracts playable Filepup sources from the movie page.

    All scraping methods are best-effort: any failure returns None / an
    empty list rather than raising, matching the add-on framework contract.
    """

    def __init__(self):
        self.base_link = 'http://movienight.ws'
        self.search_link = '/?s=%s'

    def get_movie(self, imdb, title, year):
        """Return the site-relative URL for (title, year), or None on failure."""
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, 'div', attrs = {'class': 'home_post_cont.+?'})

            title = cleantitle.movie(title)
            # Accept the requested year plus/minus one (release-date ambiguity).
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            # (link, caption) pairs -> keep only exact title matches in a
            # matching year.
            result = [(client.parseDOM(i, 'a', ret='href')[0],
                       client.parseDOM(i, 'img', ret='title')[0]) for i in result]
            result = [(i[0], client.replaceHTMLCodes(i[1])) for i in result]
            result = [(i[0], client.parseDOM(i[1], 'a')) for i in result]
            result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            # Strip scheme+host, keeping just the path component.
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            # Best-effort scraping: any parse failure means "not found".
            return

    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Return a list of playable source dicts for the given movie URL."""
        sources = []   # outside the try so the except path can always return it
        try:
            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)
            result = client.source(url)

            quality = re.compile('Quality *: *(.+)').findall(result)
            quality = 'SD' if len(quality) == 0 else quality[0]
            quality = re.sub('<.+?>', '', quality).strip().upper()
            # Anything other than an explicit SD/HD label is treated as a cam rip.
            if quality not in ('SD', 'HD'): quality = 'CAM'

            url = client.parseDOM(result, 'iframe', ret='src')
            url = [i for i in url if 'filepup' in i][0]
            url = filepup.resolve(url)
            if url is None: raise Exception()

            sources.append({'source': 'Filepup', 'quality': quality, 'provider': 'Movienight', 'url': url})

            return sources
        except:
            return sources

    def resolve(self, url):
        """URLs are already fully resolved by get_sources; pass through."""
        return url
| gpl-2.0 |
837468220/python-for-android | python3-alpha/python3-src/Lib/copy.py | 51 | 10331 | """Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
import builtins
class Error(Exception):
    """Raised for objects that cannot be (deep)copied."""

error = Error # backward compatibility
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    cls = type(x)

    # 1. Type-specific copier registered in this module.
    handler = _copy_dispatch.get(cls)
    if handler:
        return handler(x)

    # 2. A class-provided shallow-copy hook.
    handler = getattr(cls, "__copy__", None)
    if handler:
        return handler(x)

    # 3. Fall back to the pickle reduction protocol, trying the copyreg
    # dispatch table first, then __reduce_ex__, then __reduce__.
    reductor = dispatch_table.get(cls)
    if reductor:
        return _reconstruct(x, reductor(x), 0)

    reductor = getattr(x, "__reduce_ex__", None)
    if reductor:
        return _reconstruct(x, reductor(2), 0)

    reductor = getattr(x, "__reduce__", None)
    if reductor:
        return _reconstruct(x, reductor(), 0)

    raise Error("un(shallow)copyable object of type %s" % cls)
# Registration of per-type shallow copiers.  ``d`` is a short-lived alias
# for _copy_dispatch and is deleted once registration is done.
_copy_dispatch = d = {}

def _copy_immutable(x):
    # Immutable objects are their own shallow copy.
    return x
for t in (type(None), int, float, bool, str, tuple,
          frozenset, type, range,
          types.BuiltinFunctionType, type(Ellipsis),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
# CodeType looked up defensively (getattr) for restricted builds.
t = getattr(types, "CodeType", None)
if t is not None:
    d[t] = _copy_immutable
# complex/unicode may be absent depending on build; register only if present.
for name in ("complex", "unicode"):
    t = getattr(builtins, name, None)
    if t is not None:
        d[t] = _copy_immutable

def _copy_with_constructor(x):
    # Mutable containers copy shallowly via their own constructor.
    return type(x)(x)
for t in (list, dict, set):
    d[t] = _copy_with_constructor

def _copy_with_copy_method(x):
    return x.copy()
if PyStringMap is not None:
    d[PyStringMap] = _copy_with_copy_method

del d
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.

    ``memo`` maps id(original) -> copy and is what makes recursive
    structures terminate.  ``_nil`` is a private unique sentinel so that
    None stored in the memo is distinguishable from "not present".
    """

    if memo is None:
        memo = {}

    d = id(x)
    # Already copied during this pass?  Return the existing copy.
    y = memo.get(d, _nil)
    if y is not _nil:
        return y

    cls = type(x)

    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except TypeError: # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            # Classes (instances of type) are treated as atomic: not copied.
            y = _deepcopy_atomic(x, memo)
        else:
            # Object-provided hook takes precedence over reduction.
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                # Same reduction cascade as copy(): copyreg dispatch table,
                # then __reduce_ex__(2), then __reduce__().
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor:
                        rv = reductor(2)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)

    memo[d] = y
    _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
# Registration of per-type deep copiers.  ``d`` aliases _deepcopy_dispatch
# and stays in scope for the registrations below; it is deleted after
# _reconstruct, further down.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    # Atomic (immutable or identity-preserving) objects are returned as-is.
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
    d[complex] = _deepcopy_atomic
except NameError:
    # complex may be compiled out of some builds.
    pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
    """Deep-copy a list, memoizing BEFORE copying elements so that a list
    containing itself terminates."""
    copied = []
    memo[id(x)] = copied
    for item in x:
        copied.append(deepcopy(item, memo))
    return copied
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
    """Deep-copy a tuple.

    Tuples are immutable, so the copy cannot be memoized before its elements
    exist; elements are copied first, and only then is the memo consulted —
    a self-referencing structure may have already produced the copy while
    the elements were being processed.
    """
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    d = id(x)
    try:
        # Copied already via a recursive path?  Use that one.
        return memo[d]
    except KeyError:
        pass
    # If every element copied to itself, reuse the original tuple instead
    # of allocating an identical new one.
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    memo[d] = y
    return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
    """Deep-copy a dict, memoizing BEFORE copying entries so that a dict
    referencing itself terminates."""
    result = {}
    memo[id(x)] = result
    for key, value in x.items():
        result[deepcopy(key, memo)] = deepcopy(value, memo)
    return result
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo):
    """Deep-copy a bound method: same function, deep-copied receiver."""
    bound_to = deepcopy(x.__self__, memo)
    return type(x)(x.__func__, bound_to)
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
    """Keeps a reference to the object x in the memo.

    Objects are memoized by id(), so temporaries must be pinned for the
    duration of the copy or their ids could be reused.  They are stashed in
    a list stored under the memo's own id — a key that cannot collide with
    any copied object unless someone deepcopies the memo itself.
    """
    memo.setdefault(id(memo), []).append(x)
def _reconstruct(x, info, deep, memo=None):
    """Rebuild an object from a pickle-style reduction tuple.

    ``info`` is the value returned by __reduce_ex__/__reduce__:
    (callable, args[, state[, listiter[, dictiter]]]).  ``deep`` selects
    deep (1) vs shallow (0) treatment of args/state/items.
    """
    # A bare string reduction means "the object is its own copy".
    if isinstance(info, str):
        return x
    assert isinstance(info, tuple)
    if memo is None:
        memo = {}
    n = len(info)
    assert n in (2, 3, 4, 5)
    callable, args = info[:2]
    if n > 2:
        state = info[2]
    else:
        state = {}
    if n > 3:
        listiter = info[3]
    else:
        listiter = None
    if n > 4:
        dictiter = info[4]
    else:
        dictiter = None
    if deep:
        args = deepcopy(args, memo)
    y = callable(*args)
    # Memoize before restoring state so self-references resolve to y.
    memo[id(x)] = y

    if state:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # 2-tuple state means (instance __dict__, slot values).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)

    # Replay sequence / mapping contents for list- and dict-like objects.
    if listiter is not None:
        for item in listiter:
            if deep:
                item = deepcopy(item, memo)
            y.append(item)
    if dictiter is not None:
        for key, value in dictiter:
            if deep:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
            y[key] = value
    return y

# Registration helpers are no longer needed at module level.
del d

del types
class _EmptyClass:
    """Helper for instance creation without calling __init__."""
def _test():
    # Ad-hoc smoke test / demo, kept for `python copy.py`.
    # NOTE(review): several py2 leftovers here — map() returns an iterator
    # on Python 3, so the `l1 == l` comparisons below are always False and
    # print(map(...)) shows a map object, not the contents.
    l = [None, 1, 2, 3.14, 'xyzzy', (1, 2), [3.14, 'abc'],
         {'abc': 'ABC'}, (), [], {}]
    l1 = copy(l)
    print(l1==l)
    l1 = map(copy, l)
    print(l1==l)
    l1 = deepcopy(l)
    print(l1==l)
    class C:
        def __init__(self, arg=None):
            self.a = 1
            self.arg = arg
            # Pick a real, openable file so __init__ exercises I/O state.
            if __name__ == '__main__':
                import sys
                file = sys.argv[0]
            else:
                file = __file__
            self.fp = open(file)
            self.fp.close()
        def __getstate__(self):
            return {'a': self.a, 'arg': self.arg}
        def __setstate__(self, state):
            for key, value in state.items():
                setattr(self, key, value)
        def __deepcopy__(self, memo=None):
            new = self.__class__(deepcopy(self.arg, memo))
            new.a = self.a
            return new
    c = C('argument sketch')
    l.append(c)
    l2 = copy(l)
    print(l == l2)
    print(l)
    print(l2)
    l2 = deepcopy(l)
    print(l == l2)
    print(l)
    print(l2)
    # Append a self-referencing dict to exercise the memoization path.
    l.append({l[1]: l, 'xyz': l[2]})
    l3 = copy(l)
    import reprlib
    print(map(reprlib.repr, l))
    print(map(reprlib.repr, l1))
    print(map(reprlib.repr, l2))
    print(map(reprlib.repr, l3))
    l3 = deepcopy(l)
    print(map(reprlib.repr, l))
    print(map(reprlib.repr, l1))
    print(map(reprlib.repr, l2))
    print(map(reprlib.repr, l3))
    class odict(dict):
        def __init__(self, d = {}):
            self.a = 99
            dict.__init__(self, d)
        def __setitem__(self, k, i):
            dict.__setitem__(self, k, i)
            self.a
    o = odict({"A" : "B"})
    x = deepcopy(o)
    print(o, x)

if __name__ == '__main__':
    _test()
| apache-2.0 |
pplatek/odoo | addons/portal_project/tests/__init__.py | 260 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NewpTone/stacklab-nova | debian/tmp/usr/lib/python2.7/dist-packages/nova/virt/xenapi/vmops.py | 4 | 68206 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import functools
import itertools
import time
from eventlet import greenthread
import netaddr
from nova.compute import api as compute
from nova.compute import power_state
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import firewall
from nova.virt.xenapi import agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.BoolOpt('xenapi_generate_swap',
default=False,
help='Whether to generate swap '
'(False means fetching it from OVA)'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(xenapi_vmops_opts)
flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
def cmp_version(a, b):
    """Numerically compare two dotted version strings, cmp()-style.

    Returns a negative, zero or positive number when ``a`` is lower
    than, equal to or higher than ``b`` (eg 0.0.1.10 > 0.0.1.9).
    """
    parts_a = a.split('.')
    parts_b = b.split('.')
    # Walk the shared prefix; the first numerically differing component
    # decides the comparison.
    for left, right in zip(parts_a, parts_b):
        diff = int(left) - int(right)
        if diff != 0:
            return diff
    # All shared components are equal: the version with more components
    # sorts higher.
    return len(parts_a) - len(parts_b)
def make_step_decorator(context, instance):
    """Factory to create a decorator that records instance progress as a series
    of discrete steps.

    Each time the decorator is invoked we bump the total-step-count, so after::

        @step
        def step1():
            ...

        @step
        def step2():
            ...

    we have a total-step-count of 2.

    Each time the step-function (not the step-decorator!) is invoked, we bump
    the current-step-count by 1, so after::

        step1()

    the current-step-count would be 1 giving a progress of ``1 / 2 *
    100`` or 50%.
    """
    # Shared mutable state between the decorator and every decorated step;
    # decoration time fixes 'total', invocation time advances 'current'.
    step_info = dict(total=0, current=0)
    def bump_progress():
        # Persist the new percentage on the instance row so the API/UI
        # can report build progress.
        step_info['current'] += 1
        progress = round(float(step_info['current']) /
                         step_info['total'] * 100)
        LOG.debug(_("Updating progress to %(progress)d"), locals(),
                  instance=instance)
        db.instance_update(context, instance['uuid'], {'progress': progress})
    def step_decorator(f):
        # NOTE: decorating counts towards the total even if the decorated
        # function is never called (e.g. rescue-only steps).
        step_info['total'] += 1
        @functools.wraps(f)
        def inner(*args, **kwargs):
            rv = f(*args, **kwargs)
            bump_progress()
            return rv
        return inner
    return step_decorator
class VMOps(object):
"""
Management class for VM-related tasks
"""
    def __init__(self, session):
        """Wire up the collaborators used by all VM operations.

        :param session: authenticated XenAPI session wrapper; every xapi
            call made by this class goes through it.
        """
        self.compute_api = compute.API()
        self._session = session
        # Bookkeeping timestamp for the periodic rescue-poll task.
        self.poll_rescue_last_ran = None
        self.firewall_driver = firewall.load_driver(
            default=DEFAULT_FIREWALL_DRIVER,
            xenapi_session=self._session)
        # VIF driver is operator-configurable (bridge vs. other drivers).
        vif_impl = importutils.import_class(FLAGS.xenapi_vif_driver)
        self.vif_driver = vif_impl(xenapi_session=self._session)
        # Fallback root device name used when block_device_info lacks one.
        self.default_root_dev = '/dev/sda'
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
    def confirm_migration(self, migration, instance, network_info):
        """Tear down the pre-resize VM once a migration is confirmed.

        The original VM was renamed with an '-orig' suffix when the
        resize started; destroying it finalizes the migration.
        """
        name_label = self._get_orig_vm_name_label(instance)
        vm_ref = vm_utils.lookup(self._session, name_label)
        return self._destroy(instance, vm_ref, network_info)
    def finish_revert_migration(self, instance):
        """Undo a resize: restore the original VM's name and restart it."""
        # NOTE(sirp): the original vm was suffixed with '-orig'; find it using
        # the old suffix, remove the suffix, then power it back on.
        name_label = self._get_orig_vm_name_label(instance)
        vm_ref = vm_utils.lookup(self._session, name_label)
        # Remove the '-orig' suffix (which was added in case the resized VM
        # ends up on the source host, common during testing)
        name_label = instance['name']
        vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
        self._start(instance, vm_ref)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None):
        """Complete a resize/migration on the destination host.

        Moves the transferred disks into the local SR, optionally grows
        the root disk, recreates the VM record and boots it.  Step 5 is
        the final entry in the RESIZE_TOTAL_STEPS progress sequence that
        was started on the source host.
        """
        root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
        if resize_instance:
            self._resize_instance(instance, root_vdi)
        vm_ref = self._create_vm(context, instance, instance['name'],
                                 {'root': root_vdi},
                                 network_info, image_meta)
        # 5. Start VM
        self._start(instance, vm_ref=vm_ref)
        self._update_instance_progress(context, instance,
                                       step=5,
                                       total_steps=RESIZE_TOTAL_STEPS)
    def _start(self, instance, vm_ref=None):
        """Power on a VM instance"""
        vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
        LOG.debug(_("Starting instance"), instance=instance)
        # VM.start_on pins the boot to this host instead of letting the
        # pool choose one; the two flags are start_paused/force -- both off.
        self._session.call_xenapi('VM.start_on', vm_ref,
                                  self._session.get_xenapi_host(),
                                  False, False)
def _create_disks(self, context, instance, name_label, image_meta,
block_device_info=None):
disk_image_type = vm_utils.determine_disk_image_type(image_meta)
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
instance['image_ref'],
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
vdi['uuid'])
root_vdi = vdis.get('root')
if root_vdi:
self._resize_instance(instance, root_vdi)
return vdis
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None,
              name_label=None, rescue=False):
        """Create and boot a new VM instance.

        The work is broken into @step-decorated functions so the
        instance's progress percentage advances as each step completes.
        Every step registers an undo action with an UndoManager so that
        a failure anywhere rolls back completed work before re-raising.

        :param name_label: overrides the VM name (used for rescue VMs).
        :param rescue: when True, also attach the original VM's root
            disk to the new (rescue) VM before boot.
        """
        if name_label is None:
            name_label = instance['name']
        step = make_step_decorator(context, instance)
        @step
        def vanity_step(undo_mgr):
            # NOTE(sirp): _create_disk will potentially take a *very* long
            # time to complete since it has to fetch the image over the
            # network and images can be several gigs in size. To avoid
            # progress remaining at 0% for too long, which will appear to be
            # an error, we insert a "vanity" step to bump the progress up one
            # notch above 0.
            pass
        @step
        def create_disks_step(undo_mgr):
            vdis = self._create_disks(context, instance, name_label,
                                      image_meta, block_device_info)
            def undo_create_disks():
                vdi_refs = [vdi['ref'] for vdi in vdis.values()]
                vm_utils.safe_destroy_vdis(self._session, vdi_refs)
            undo_mgr.undo_with(undo_create_disks)
            return vdis
        @step
        def create_kernel_ramdisk_step(undo_mgr):
            # Only images that boot with an external kernel/ramdisk set
            # kernel_id/ramdisk_id; otherwise both stay None.
            kernel_file = None
            ramdisk_file = None
            if instance['kernel_id']:
                vdis = vm_utils.create_kernel_image(context, self._session,
                        instance, name_label, instance['kernel_id'],
                        vm_utils.ImageType.KERNEL)
                kernel_file = vdis['kernel'].get('file')
            if instance['ramdisk_id']:
                vdis = vm_utils.create_kernel_image(context, self._session,
                        instance, name_label, instance['ramdisk_id'],
                        vm_utils.ImageType.RAMDISK)
                ramdisk_file = vdis['ramdisk'].get('file')
            def undo_create_kernel_ramdisk():
                if kernel_file or ramdisk_file:
                    LOG.debug(_("Removing kernel/ramdisk files from dom0"),
                              instance=instance)
                    vm_utils.destroy_kernel_ramdisk(
                            self._session, kernel_file, ramdisk_file)
            undo_mgr.undo_with(undo_create_kernel_ramdisk)
            return kernel_file, ramdisk_file
        @step
        def create_vm_step(undo_mgr, vdis, kernel_file, ramdisk_file):
            vm_ref = self._create_vm(context, instance, name_label, vdis,
                                     network_info, image_meta,
                                     kernel_file=kernel_file,
                                     ramdisk_file=ramdisk_file,
                                     rescue=rescue)
            def undo_create_vm():
                self._destroy(instance, vm_ref, network_info)
            undo_mgr.undo_with(undo_create_vm)
            return vm_ref
        if rescue:
            # NOTE(johannes): Attach root disk to rescue VM now, before
            # booting the VM, since we can't hotplug block devices
            # on non-PV guests
            @step
            def attach_root_disk_step(undo_mgr, vm_ref):
                orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
                vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
                vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                    DEVICE_RESCUE, bootable=False)
        @step
        def prepare_security_group_filters_step(undo_mgr):
            try:
                self.firewall_driver.setup_basic_filtering(
                        instance, network_info)
            except NotImplementedError:
                # NOTE(salvatore-orlando): setup_basic_filtering might be
                # empty or not implemented at all, as basic filter could
                # be implemented with VIF rules created by xapi plugin
                pass
            self.firewall_driver.prepare_instance_filter(instance,
                                                         network_info)
        @step
        def boot_instance_step(undo_mgr, vm_ref):
            self._boot_new_instance(instance, vm_ref, injected_files,
                                    admin_password)
        @step
        def apply_security_group_filters_step(undo_mgr):
            self.firewall_driver.apply_instance_filter(instance, network_info)
        @step
        def bdev_set_default_root(undo_mgr):
            if block_device_info:
                LOG.debug(_("Block device information present: %s")
                          % block_device_info, instance=instance)
            if block_device_info and not block_device_info['root_device_name']:
                block_device_info['root_device_name'] = self.default_root_dev
        # All steps must be *defined* before any is invoked: decoration
        # fixes the total-step count used for the progress percentage.
        undo_mgr = utils.UndoManager()
        try:
            bdev_set_default_root(undo_mgr)
            vanity_step(undo_mgr)
            vdis = create_disks_step(undo_mgr)
            kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
            vm_ref = create_vm_step(undo_mgr, vdis, kernel_file, ramdisk_file)
            prepare_security_group_filters_step(undo_mgr)
            if rescue:
                attach_root_disk_step(undo_mgr, vm_ref)
            boot_instance_step(undo_mgr, vm_ref)
            apply_security_group_filters_step(undo_mgr)
        except Exception:
            msg = _("Failed to spawn, rolling back")
            undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
    def _create_vm(self, context, instance, name_label, vdis, network_info,
                   image_meta, kernel_file=None, ramdisk_file=None,
                   rescue=False):
        """Create VM instance.

        Builds the xapi VM record, attaches disks and VIFs, and injects
        network info, hostname and metadata into the param xenstore.
        Does NOT boot the VM.

        :raises: exception.InstanceExists if a VM with this name-label
            already exists; InsufficientFreeMemory if the host cannot
            fit the instance.
        """
        vm_ref = vm_utils.lookup(self._session, name_label)
        if vm_ref is not None:
            raise exception.InstanceExists(name=name_label)
        # Ensure enough free memory is available
        if not vm_utils.ensure_free_mem(self._session, instance):
            raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
        disk_image_type = vm_utils.determine_disk_image_type(image_meta)
        # Decide PV vs. HVM: an explicit vm_mode on the instance wins,
        # otherwise the root disk is inspected to guess.
        mode = vm_mode.get_from_instance(instance)
        if mode == vm_mode.XEN:
            use_pv_kernel = True
        elif mode == vm_mode.HVM:
            use_pv_kernel = False
        else:
            use_pv_kernel = vm_utils.determine_is_pv(self._session,
                vdis['root']['ref'], disk_image_type, instance['os_type'])
            mode = use_pv_kernel and vm_mode.XEN or vm_mode.HVM
        if instance['vm_mode'] != mode:
            # Update database with normalized (or determined) value
            db.instance_update(nova_context.get_admin_context(),
                               instance['uuid'], {'vm_mode': mode})
        vm_ref = vm_utils.create_vm(self._session, instance, name_label,
                                    kernel_file, ramdisk_file, use_pv_kernel)
        # Add disks to VM
        self._attach_disks(instance, vm_ref, name_label, disk_image_type,
                           vdis)
        # Alter the image before VM start for network injection.
        if FLAGS.flat_injected:
            vm_utils.preconfigure_instance(self._session, instance,
                                           vdis['root']['ref'], network_info)
        self._create_vifs(vm_ref, instance, network_info)
        self.inject_network_info(instance, network_info, vm_ref)
        # Rescue VMs get a distinct hostname so the guest is identifiable.
        hostname = instance['hostname']
        if rescue:
            hostname = 'RESCUE-%s' % hostname
        self.inject_hostname(instance, vm_ref, hostname)
        self.inject_instance_metadata(instance, vm_ref)
        return vm_ref
    def _attach_disks(self, instance, vm_ref, name_label, disk_image_type,
                      vdis):
        """Create the VBDs wiring root/CD/swap/ephemeral disks to the VM.

        Device numbering follows the module-level DEVICE_* constants.
        """
        ctx = nova_context.get_admin_context()
        # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
        if disk_image_type == vm_utils.ImageType.DISK_ISO:
            LOG.debug(_("Detected ISO image type, creating blank VM "
                        "for install"), instance=instance)
            cd_vdi = vdis.pop('root')
            root_vdi = vm_utils.fetch_blank_disk(self._session,
                                                 instance['instance_type_id'])
            vdis['root'] = root_vdi
            # The blank disk is the install target; boot from the ISO
            # attached as a CD device instead.
            vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
                                DEVICE_ROOT, bootable=False)
            vm_utils.create_vbd(self._session, vm_ref, cd_vdi['ref'],
                                DEVICE_CD, vbd_type='CD', bootable=True)
        else:
            root_vdi = vdis['root']
            if instance['auto_disk_config']:
                LOG.debug(_("Auto configuring disk, attempting to "
                            "resize partition..."), instance=instance)
                instance_type = db.instance_type_get(ctx,
                        instance['instance_type_id'])
                vm_utils.auto_configure_disk(self._session,
                                             root_vdi['ref'],
                                             instance_type['root_gb'])
            vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
                                DEVICE_ROOT, bootable=True)
        # Attach (optional) swap disk
        swap_vdi = vdis.get('swap')
        instance_type = db.instance_type_get(ctx, instance['instance_type_id'])
        swap_mb = instance_type['swap']
        # Operator flag decides: generate swap locally, or use the swap
        # VDI packaged with the image (if any).
        generate_swap = swap_mb and FLAGS.xenapi_generate_swap
        if generate_swap:
            vm_utils.generate_swap(self._session, instance, vm_ref,
                                   DEVICE_SWAP, name_label, swap_mb)
            if swap_vdi:
                # We won't be using packaged swap VDI, so destroy it
                vm_utils.destroy_vdi(self._session, swap_vdi['ref'])
        elif swap_vdi:
            # Attach packaged swap VDI to VM
            vm_utils.create_vbd(self._session, vm_ref, swap_vdi['ref'],
                                DEVICE_SWAP, bootable=False)
        # Attach (optional) ephemeral disk
        ephemeral_gb = instance_type['ephemeral_gb']
        if ephemeral_gb:
            vm_utils.generate_ephemeral(self._session, instance, vm_ref,
                                        DEVICE_EPHEMERAL, name_label,
                                        ephemeral_gb)
    def _boot_new_instance(self, instance, vm_ref, injected_files,
                           admin_password):
        """Boot a new instance and configure it.

        Starts the VM, waits (bounded by FLAGS.xenapi_running_timeout)
        for it to report RUNNING, optionally upgrades the in-guest
        agent, then uses the agent to inject files, set the admin
        password, reset networking and apply the VCPU weight.
        """
        LOG.debug(_('Starting VM'), instance=instance)
        self._start(instance, vm_ref)
        ctx = nova_context.get_admin_context()
        # Look up the newest guest-agent build for this platform so an
        # out-of-date in-guest agent can be upgraded after boot.
        agent_build = db.agent_build_get_by_triple(ctx, 'xen',
            instance['os_type'], instance['architecture'])
        if agent_build:
            LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
                       '/%(architecture)s is %(version)s') % agent_build)
        else:
            LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
                       '/%(architecture)s') % {
                        'hypervisor': 'xen',
                        'os': instance['os_type'],
                        'architecture': instance['architecture']})
        # Wait for boot to finish
        LOG.debug(_('Waiting for instance state to become running'),
                  instance=instance)
        # NOTE: falls through silently on timeout; the agent calls below
        # are expected to cope with a not-yet-running guest.
        expiration = time.time() + FLAGS.xenapi_running_timeout
        while time.time() < expiration:
            state = self.get_info(instance, vm_ref)['state']
            if state == power_state.RUNNING:
                break
            greenthread.sleep(0.5)
        # Update agent, if necessary
        # This also waits until the agent starts
        version = agent.get_agent_version(self._session, instance, vm_ref)
        if version:
            LOG.info(_('Instance agent version: %s'), version,
                     instance=instance)
        if (version and agent_build and
            cmp_version(version, agent_build['version']) < 0):
            agent.agent_update(self._session, instance, vm_ref, agent_build)
        # if the guest agent is not available, configure the
        # instance, but skip the admin password configuration
        no_agent = version is None
        # Inject files, if necessary
        if injected_files:
            # Inject any files, if specified
            for path, contents in injected_files:
                agent.inject_file(self._session, instance, vm_ref,
                                  path, contents)
        # Set admin password, if necessary
        if admin_password and not no_agent:
            agent.set_admin_password(self._session, instance, vm_ref,
                                     admin_password)
        # Reset network config
        agent.resetnetwork(self._session, instance, vm_ref)
        # Set VCPU weight
        inst_type = db.instance_type_get(ctx, instance['instance_type_id'])
        vcpu_weight = inst_type['vcpu_weight']
        if vcpu_weight is not None:
            LOG.debug(_("Setting VCPU weight"), instance=instance)
            self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
                                      'weight', str(vcpu_weight))
    def _get_vm_opaque_ref(self, instance):
        """Get xapi OpaqueRef from a db record.

        :raises: exception.NotFound when no VM matches the instance name.
        """
        vm_ref = vm_utils.lookup(self._session, instance['name'])
        if vm_ref is None:
            raise exception.NotFound(_('Could not find VM with name %s') %
                                     instance['name'])
        return vm_ref
    def _acquire_bootlock(self, vm):
        """Prevent an instance from booting."""
        # Blocking the 'start' operation in xapi stops anyone (including
        # out-of-band tools) from powering this VM on until released.
        self._session.call_xenapi(
            "VM.set_blocked_operations",
            vm,
            {"start": ""})
    def _release_bootlock(self, vm):
        """Allow an instance to boot."""
        # Undo _acquire_bootlock by unblocking the 'start' operation.
        self._session.call_xenapi(
            "VM.remove_from_blocked_operations",
            vm,
            "start")
    def snapshot(self, context, instance, image_id):
        """Create snapshot from a running VM instance.

        :param context: request context
        :param instance: instance to be snapshotted
        :param image_id: id of image to upload to

        Steps involved in a XenServer snapshot:

        1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
           creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
           Snapshot VHD
        2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
           a 'base-copy' VDI. The base_copy is immutable and may be chained
           with other base_copies. If chained, the base_copies
           coalesce together, so, we must wait for this coalescing to occur to
           get a stable representation of the data on disk.
        3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
           that will bundle the VHDs together and then push the bundle into
           Glance.
        """
        vm_ref = self._get_vm_opaque_ref(instance)
        label = "%s-snapshot" % instance['name']
        # The context manager performs steps 1 and 2 and cleans the
        # snapshot up again once the upload has finished.
        with vm_utils.snapshot_attached_here(
                self._session, instance, vm_ref, label) as vdi_uuids:
            vm_utils.upload_image(
                    context, self._session, instance, vdi_uuids, image_id)
        LOG.debug(_("Finished snapshot and upload for VM"),
                  instance=instance)
    def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
        """Push one VHD to *dest* via the dom0 'migration' xapi plugin.

        :param seq_num: position of this VHD in the chain; the receiver
            uses it to reassemble the VDI chain in order.
        :raises: exception.MigrationError when the transfer fails.
        """
        LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
                  locals(), instance=instance)
        instance_uuid = instance['uuid']
        try:
            self._session.call_plugin_serialized('migration', 'transfer_vhd',
                    instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
                    sr_path=sr_path, seq_num=seq_num)
        except self._session.XenAPI.Failure:
            msg = _("Failed to transfer vhd to new host")
            raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
    def _update_instance_progress(self, context, instance, step, total_steps):
        """Update instance progress percent to reflect current step number
        """
        # FIXME(sirp): for now we're taking a KISS approach to instance
        # progress:
        # Divide the action's workflow into discrete steps and "bump" the
        # instance's progress field as each step is completed.
        #
        # For a first cut this should be fine, however, for large VM images,
        # the _create_disks step begins to dominate the equation. A
        # better approximation would use the percentage of the VM image that
        # has been streamed to the destination host.
        progress = round(float(step) / total_steps * 100)
        # NOTE: 'progress' is read out of locals() by the log format below.
        LOG.debug(_("Updating progress to %(progress)d"), locals(),
                  instance=instance)
        db.instance_update(context, instance['uuid'], {'progress': progress})
    def _migrate_disk_resizing_down(self, context, instance, dest,
                                    instance_type, vm_ref, sr_path):
        """Shrink-path disk migration: copy, resize, then transfer.

        Unlike the resize-up path, the VM must be powered down *before*
        the copy because the filesystem has to be shrunk offline; the
        whole (single) resized VHD is then sent as seq_num 0.
        """
        # 1. NOOP since we're not transmitting the base-copy separately
        self._update_instance_progress(context, instance,
                                       step=1,
                                       total_steps=RESIZE_TOTAL_STEPS)
        # old_gb/new_gb feed the log message below via locals().
        old_gb = instance['root_gb']
        new_gb = instance_type['root_gb']
        LOG.debug(_("Resizing down VDI %(cow_uuid)s from "
                    "%(old_gb)dGB to %(new_gb)dGB"), locals(),
                  instance=instance)
        # 2. Power down the instance before resizing
        vm_utils.shutdown_vm(
                self._session, instance, vm_ref, hard=False)
        self._update_instance_progress(context, instance,
                                       step=2,
                                       total_steps=RESIZE_TOTAL_STEPS)
        # 3. Copy VDI, resize partition and filesystem, forget VDI,
        # truncate VHD
        vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
                self._session, vm_ref)
        new_ref, new_uuid = vm_utils.resize_disk(self._session,
                                                 instance,
                                                 vdi_ref,
                                                 instance_type)
        self._update_instance_progress(context, instance,
                                       step=3,
                                       total_steps=RESIZE_TOTAL_STEPS)
        # 4. Transfer the new VHD
        self._migrate_vhd(instance, new_uuid, dest, sr_path, 0)
        self._update_instance_progress(context, instance,
                                       step=4,
                                       total_steps=RESIZE_TOTAL_STEPS)
        # Clean up VDI now that it's been copied
        vm_utils.destroy_vdi(self._session, new_ref)
    def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
                                  sr_path):
        """Grow-path disk migration: ship base copies live, COW offline.

        The immutable parent VHDs are transferred while the VM is still
        running; only the mutable leaf (COW) requires the VM to be shut
        down first, minimizing downtime.
        """
        # 1. Create Snapshot
        label = "%s-snapshot" % instance['name']
        with vm_utils.snapshot_attached_here(
                self._session, instance, vm_ref, label) as vdi_uuids:
            self._update_instance_progress(context, instance,
                                           step=1,
                                           total_steps=RESIZE_TOTAL_STEPS)
            # 2. Transfer the immutable VHDs (base-copies)
            #
            # The first VHD will be the leaf (aka COW) that is being used by
            # the VM. For this step, we're only interested in the immutable
            # VHDs which are all of the parents of the leaf VHD.
            # islice(..., 1, None) skips index 0 (the leaf).
            for seq_num, vdi_uuid in itertools.islice(
                    enumerate(vdi_uuids), 1, None):
                self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
                self._update_instance_progress(context, instance,
                                               step=2,
                                               total_steps=RESIZE_TOTAL_STEPS)
        # 3. Now power down the instance
        vm_utils.shutdown_vm(
                self._session, instance, vm_ref, hard=False)
        self._update_instance_progress(context, instance,
                                       step=3,
                                       total_steps=RESIZE_TOTAL_STEPS)
        # 4. Transfer the COW VHD
        vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
                self._session, vm_ref)
        cow_uuid = vm_vdi_rec['uuid']
        self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
        self._update_instance_progress(context, instance,
                                       step=4,
                                       total_steps=RESIZE_TOTAL_STEPS)
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type):
        """Copies a VHD from one host machine to another, possibly
        resizing filesystem before hand.

        :param instance: the instance that owns the VHD in question.
        :param dest: the destination host machine.
        :param instance_type: instance_type to resize to
        """
        vm_ref = self._get_vm_opaque_ref(instance)
        sr_path = vm_utils.get_sr_path(self._session)
        # Shrinking is only attempted when the guest's partitions are
        # nova-managed (auto_disk_config); otherwise grow-path is used.
        resize_down = (instance['auto_disk_config'] and
                       instance['root_gb'] > instance_type['root_gb'])
        # 0. Zero out the progress to begin
        self._update_instance_progress(context, instance,
                                       step=0,
                                       total_steps=RESIZE_TOTAL_STEPS)
        if resize_down:
            self._migrate_disk_resizing_down(
                    context, instance, dest, instance_type, vm_ref, sr_path)
        else:
            self._migrate_disk_resizing_up(
                    context, instance, dest, vm_ref, sr_path)
        # NOTE(sirp): in case we're resizing to the same host (for dev
        # purposes), apply a suffix to name-label so the two VM records
        # extant until a confirm_resize don't collide.
        name_label = self._get_orig_vm_name_label(instance)
        vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
        # NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
        # uses a staging-area (/images/instance<uuid>) and sequence-numbered
        # VHDs to figure out how to reconstruct the VDI chain after syncing
        disk_info = {}
        return disk_info
    def _resize_instance(self, instance, root_vdi):
        """Resize an instances root disk.

        Only grows the VDI (shrinking is handled by the migrate path);
        a no-op when root_gb is 0 or the VDI is already large enough.
        """
        new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
        if not new_disk_size:
            return
        # Get current size of VDI
        virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
                                                 root_vdi['ref'])
        virtual_size = int(virtual_size)
        # old_gb/new_gb are consumed via locals() in the log message below.
        old_gb = virtual_size / (1024 * 1024 * 1024)
        new_gb = instance['root_gb']
        if virtual_size < new_disk_size:
            # Resize up. Simple VDI resize will do the trick
            vdi_uuid = root_vdi['uuid']
            LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
                        "%(new_gb)dGB"), locals(), instance=instance)
            # Platform version decides VDI.resize vs VDI.resize_online.
            resize_func_name = self.check_resize_func_name()
            self._session.call_xenapi(resize_func_name, root_vdi['ref'],
                                      str(new_disk_size))
            LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version."""
brand = self._session.product_brand
version = self._session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if bool(version) and bool(brand):
xcp = brand == 'XCP'
r1_2_or_above = (
(
version[0] == 1
and version[1] > 1
)
or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
def reboot(self, instance, reboot_type):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
except self._session.XenAPI.Failure, exc:
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._session.call_xenapi('VM.start', vm_ref, False, False)
return
raise
    def set_admin_password(self, instance, new_pass):
        """Set the root/admin password on the VM instance."""
        # Delegated to the in-guest agent over xenstore.
        vm_ref = self._get_vm_opaque_ref(instance)
        agent.set_admin_password(self._session, instance, vm_ref, new_pass)
    def inject_file(self, instance, path, contents):
        """Write a file to the VM instance."""
        # Delegated to the in-guest agent over xenstore.
        vm_ref = self._get_vm_opaque_ref(instance)
        agent.inject_file(self._session, instance, vm_ref, path, contents)
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
So convert the others to _
Also convert / to _, because that is somewhat like a path
separator.
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
return ''.join([x in allowed_chars and x or '_' for x in key])
    def inject_instance_metadata(self, instance, vm_ref):
        """Inject instance metadata into xenstore."""
        def store_meta(topdir, data_list):
            # Keys are sanitized for xenstore; values are JSON-encoded so
            # the guest agent can decode arbitrary types.
            for item in data_list:
                key = self._sanitize_xenstore_key(item['key'])
                value = item['value'] or ''
                self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
                                            jsonutils.dumps(value))
        # Store user metadata
        store_meta('vm-data/user-metadata', instance['metadata'])
    def change_instance_metadata(self, instance, diff):
        """Apply changes to instance metadata to xenstore.

        ``diff`` presumably maps metadata keys to ('-',) for removals
        and ('+', value) for additions/updates -- TODO confirm against
        the compute-manager caller.  Each change is applied to the
        persistent param xenstore and, best-effort, to the live
        xenstore of the running domain.
        """
        vm_ref = self._get_vm_opaque_ref(instance)
        for key, change in diff.items():
            key = self._sanitize_xenstore_key(key)
            location = 'vm-data/user-metadata/%s' % key
            if change[0] == '-':
                self._remove_from_param_xenstore(vm_ref, location)
                try:
                    self._delete_from_xenstore(instance, location,
                                               vm_ref=vm_ref)
                except KeyError:
                    # catch KeyError for domid if instance isn't running
                    pass
            elif change[0] == '+':
                self._add_to_param_xenstore(vm_ref, location,
                                            jsonutils.dumps(change[1]))
                try:
                    self._write_to_xenstore(instance, location, change[1],
                                            vm_ref=vm_ref)
                except KeyError:
                    # catch KeyError for domid if instance isn't running
                    pass
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
    def _detach_vm_vols(self, instance, vm_ref, block_device_info=None):
        """Detach any external nova/cinder volumes and purge the SRs.

        This differs from a normal detach in that the VM has been
        shutdown, so there is no need for unplugging VBDs. They do
        need to be destroyed, so that the SR can be forgotten.
        """
        vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
        for vbd_ref in vbd_refs:
            # Volume-backed VBDs are tagged with 'osvol' in other_config
            # when attached; locally-backed disks are skipped here.
            other_config = self._session.call_xenapi("VBD.get_other_config",
                                                     vbd_ref)
            if other_config.get('osvol'):
                # this is a nova/cinder volume
                try:
                    sr_ref = volume_utils.find_sr_from_vbd(self._session,
                                                           vbd_ref)
                    vm_utils.destroy_vbd(self._session, vbd_ref)
                    # Forget SR only if not in use
                    volume_utils.purge_sr(self._session, sr_ref)
                except Exception as exc:
                    LOG.exception(exc)
                    raise
    def _destroy_vdis(self, instance, vm_ref, block_device_info=None):
        """Destroys all VDIs associated with a VM."""
        # instance_uuid is consumed via locals() by the log format below.
        instance_uuid = instance['uuid']
        LOG.debug(_("Destroying VDIs for Instance %(instance_uuid)s")
                  % locals())
        vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
        if not vdi_refs:
            return
        for vdi_ref in vdi_refs:
            try:
                vm_utils.destroy_vdi(self._session, vdi_ref)
            except volume_utils.StorageError as exc:
                # Best-effort: log and continue with the remaining VDIs.
                LOG.error(exc)
    def _destroy_kernel_ramdisk(self, instance, vm_ref):
        """Three situations can occur:

            1. We have neither a ramdisk nor a kernel, in which case we are a
               RAW image and can omit this step

            2. We have one or the other, in which case, we should flag as an
               error

            3. We have both, in which case we safely remove both the kernel
               and the ramdisk.
        """
        instance_uuid = instance['uuid']
        if not instance['kernel_id'] and not instance['ramdisk_id']:
            # 1. No kernel or ramdisk
            LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
                        "deletion"), instance=instance)
            return
        if not (instance['kernel_id'] and instance['ramdisk_id']):
            # 2. We only have kernel xor ramdisk
            raise exception.InstanceUnacceptable(instance_id=instance_uuid,
                reason=_("instance has a kernel or ramdisk but not both"))
        # 3. We have both kernel and ramdisk
        (kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
                                                           vm_ref)
        vm_utils.destroy_kernel_ramdisk(self._session, kernel, ramdisk)
        LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
    def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
        """Destroy a rescue instance.

        The rescue VM has the original VM's root disk attached, so that
        VDI is explicitly excluded from destruction.
        """
        # Shutdown Rescue VM
        vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
        state = vm_utils.compile_info(vm_rec)['state']
        if state != power_state.SHUTDOWN:
            self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
        # Destroy Rescue VDIs
        vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
        root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
        # Keep the original instance's root disk alive.
        vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
        vm_utils.safe_destroy_vdis(self._session, vdi_refs)
        # Destroy Rescue VM
        self._session.call_xenapi("VM.destroy", rescue_vm_ref)
    def destroy(self, instance, network_info, block_device_info=None):
        """Destroy VM instance.

        This is the method exposed by xenapi_conn.destroy(). The rest of the
        destroy_* methods are internal.
        """
        LOG.info(_("Destroying VM"), instance=instance)
        # We don't use _get_vm_opaque_ref because the instance may
        # truly not exist because of a failure during build. A valid
        # vm_ref is checked correctly where necessary.
        vm_ref = vm_utils.lookup(self._session, instance['name'])
        # A leftover '-rescue' VM means the instance was mid-rescue;
        # tear that down first (it shares the instance's root disk).
        rescue_vm_ref = vm_utils.lookup(self._session,
                                        "%s-rescue" % instance['name'])
        if rescue_vm_ref:
            self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
        return self._destroy(instance, vm_ref, network_info,
                             block_device_info=block_device_info)
    def _destroy(self, instance, vm_ref, network_info=None,
                 block_device_info=None):
        """Destroys VM instance by performing:

            1. A shutdown
            2. Destroying associated VDIs.
            3. Destroying kernel and ramdisk files (if necessary).
            4. Destroying that actual VM record.
        """
        if vm_ref is None:
            # Destroy is idempotent: a missing VM is not an error.
            LOG.warning(_("VM is not present, skipping destroy..."),
                        instance=instance)
            return
        vm_utils.shutdown_vm(self._session, instance, vm_ref)
        # Destroy VDIs
        # Volume-backed disks are detached first so their SRs can be
        # purged; only then are the locally-backed VDIs destroyed.
        self._detach_vm_vols(instance, vm_ref, block_device_info)
        self._destroy_vdis(instance, vm_ref, block_device_info)
        self._destroy_kernel_ramdisk(instance, vm_ref)
        vm_utils.destroy_vm(self._session, instance, vm_ref)
        self.unplug_vifs(instance, network_info)
        self.firewall_driver.unfilter_instance(
                instance, network_info=network_info)
    def pause(self, instance):
        """Pause VM instance."""
        vm_ref = self._get_vm_opaque_ref(instance)
        self._session.call_xenapi('VM.pause', vm_ref)
    def unpause(self, instance):
        """Unpause VM instance."""
        vm_ref = self._get_vm_opaque_ref(instance)
        self._session.call_xenapi('VM.unpause', vm_ref)
    def suspend(self, instance):
        """Suspend the specified instance."""
        vm_ref = self._get_vm_opaque_ref(instance)
        # Block 'start' so the suspended VM cannot be powered on behind
        # our back; resume() releases the lock again.
        self._acquire_bootlock(vm_ref)
        self._session.call_xenapi('VM.suspend', vm_ref)
    def resume(self, instance):
        """Resume the specified instance."""
        vm_ref = self._get_vm_opaque_ref(instance)
        self._release_bootlock(vm_ref)
        # The two flags are presumably start_paused=False, force=True --
        # TODO confirm against the XenAPI VM.resume signature.
        self._session.call_xenapi('VM.resume', vm_ref, False, True)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance.

            - shutdown the instance VM.
            - set 'bootlock' to prevent the instance from starting in rescue.
            - spawn a rescue VM (the vm name-label will be instance-N-rescue).
        """
        rescue_name_label = '%s-rescue' % instance['name']
        rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
        if rescue_vm_ref:
            raise RuntimeError(_("Instance is already in Rescue Mode: %s")
                               % instance['name'])
        vm_ref = self._get_vm_opaque_ref(instance)
        vm_utils.shutdown_vm(self._session, instance, vm_ref)
        self._acquire_bootlock(vm_ref)
        # spawn(rescue=True) also attaches this VM's root disk to the
        # rescue VM so the guest's data can be repaired.
        self.spawn(context, instance, image_meta, [], rescue_password,
                   network_info, name_label=rescue_name_label, rescue=True)
    def unrescue(self, instance):
        """Unrescue the specified instance.
        - unplug the instance VM's disk from the rescue VM.
        - teardown the rescue VM.
        - release the bootlock to allow the instance VM to start.
        """
        rescue_vm_ref = vm_utils.lookup(self._session,
                                        "%s-rescue" % instance['name'])
        if not rescue_vm_ref:
            raise exception.InstanceNotInRescueMode(
                instance_id=instance['uuid'])
        original_vm_ref = self._get_vm_opaque_ref(instance)
        # Tear down the rescue VM first, then unlock and restart the
        # original VM.
        self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
        self._release_bootlock(original_vm_ref)
        self._start(instance, original_vm_ref)
    def power_off(self, instance):
        """Power off the specified instance."""
        try:
            vm_ref = self._get_vm_opaque_ref(instance)
            # hard=True: forceful shutdown, do not wait for guest cooperation.
            vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
        except exception.NotFound:
            # Power off is best-effort; a missing VM is already "off".
            LOG.warning(_("VM is not present, skipping power off..."),
                        instance=instance)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
    def _cancel_stale_tasks(self, timeout, task):
        """Cancel the given tasks that are older than the given timeout.

        :param timeout: age threshold in seconds
        :param task: xapi task name label, e.g. 'VM.clean_reboot'
        """
        task_refs = self._session.call_xenapi("task.get_by_name_label", task)
        for task_ref in task_refs:
            task_rec = self._session.call_xenapi("task.get_record", task_ref)
            # xapi returns timestamps in this compact ISO-like format.
            task_created = timeutils.parse_strtime(task_rec["created"].value,
                                                   "%Y%m%dT%H:%M:%SZ")
            if timeutils.is_older_than(task_created, timeout):
                self._session.call_xenapi("task.cancel", task_ref)
    def poll_rebooting_instances(self, timeout):
        """Look for expirable rebooting instances.
        - issue a "hard" reboot to any instance that has been stuck in a
          reboot state for >= the given timeout
        """
        # NOTE(jk0): All existing clean_reboot tasks must be cancelled before
        # we can kick off the hard_reboot tasks.
        self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
        ctxt = nova_context.get_admin_context()
        instances = db.instance_get_all_hung_in_rebooting(ctxt, timeout)
        instances_info = dict(instance_count=len(instances),
                              timeout=timeout)
        if instances_info["instance_count"] > 0:
            LOG.info(_("Found %(instance_count)d hung reboots "
                       "older than %(timeout)d seconds") % instances_info)
        for instance in instances:
            LOG.info(_("Automatically hard rebooting"), instance=instance)
            self.compute_api.reboot(ctxt, instance, "HARD")
    def poll_rescued_instances(self, timeout):
        """Look for expirable rescued instances.
        - forcibly exit rescue mode for any instances that have been
          in rescue mode for >= the provided timeout
        """
        last_ran = self.poll_rescue_last_ran
        if not last_ran:
            # We need a base time to start tracking.
            self.poll_rescue_last_ran = timeutils.utcnow()
            return
        if not timeutils.is_older_than(last_ran, timeout):
            # Do not run. Let's bail.
            return
        # Update the time tracker and proceed.
        self.poll_rescue_last_ran = timeutils.utcnow()
        # Rescue VMs are identified purely by their "-rescue" name suffix.
        rescue_vms = []
        for instance in self.list_instances():
            if instance.endswith("-rescue"):
                rescue_vms.append(dict(name=instance,
                                       vm_ref=vm_utils.lookup(self._session,
                                                              instance)))
        for vm in rescue_vms:
            rescue_vm_ref = vm["vm_ref"]
            # Strip the first "-rescue" suffix to recover the original name.
            original_name = vm["name"].split("-rescue", 1)[0]
            original_vm_ref = vm_utils.lookup(self._session, original_name)
            self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
            self._release_bootlock(original_vm_ref)
            self._session.call_xenapi("VM.start", original_vm_ref, False,
                                      False)
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
    def get_all_bw_usage(self, start_time, stop_time=None):
        """Return bandwidth usage info for each interface on each
        running VM"""
        try:
            metrics = vm_utils.compile_metrics(start_time, stop_time)
        except exception.CouldNotFetchMetrics:
            LOG.exception(_("Could not get bandwidth info."))
            return {}
        bw = {}
        for uuid, data in metrics.iteritems():
            vm_ref = self._session.call_xenapi("VM.get_by_uuid", uuid)
            vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
            # Map device index -> MAC so metric keys can be translated.
            vif_map = {}
            for vif in [self._session.call_xenapi("VIF.get_record", vrec)
                        for vrec in vm_rec['VIFs']]:
                vif_map[vif['device']] = vif['MAC']
            name = vm_rec['name_label']
            if 'nova_uuid' not in vm_rec['other_config']:
                # Skip VMs that carry no nova_uuid marker (not nova-managed).
                continue
            vifs_bw = bw.setdefault(name, {})
            for key, val in data.iteritems():
                if key.startswith('vif_'):
                    # Metric keys look like 'vif_<device>_rx' / '..._tx'.
                    vname = key.split('_')[1]
                    vif_bw = vifs_bw.setdefault(vif_map[vname], {})
                    if key.endswith('tx'):
                        vif_bw['bw_out'] = int(val)
                    if key.endswith('rx'):
                        vif_bw['bw_in'] = int(val)
        return bw
    def get_console_output(self, instance):
        """Return snapshot of console."""
        # TODO(armando-migliaccio): implement this to fix pylint!
        # Placeholder: console capture is not implemented for this driver.
        return 'FAKE CONSOLE OUTPUT of instance'
    def get_vnc_console(self, instance):
        """Return connection info for a vnc console."""
        # NOTE(johannes): This can fail if the VM object hasn't been created
        # yet on the dom0. Since that step happens fairly late in the build
        # process, there's a potential for a race condition here. Until the
        # VM object is created, return back a 409 error instead of a 404
        # error.
        try:
            vm_ref = self._get_vm_opaque_ref(instance)
        except exception.NotFound:
            if instance['vm_state'] != vm_states.BUILDING:
                raise
            LOG.info(_('Fetching VM ref while BUILDING failed'),
                     instance=instance)
            raise exception.InstanceNotReady(instance_id=instance['uuid'])
        session_id = self._session.get_session_id()
        path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
        # NOTE: XS5.6sp2+ use http over port 80 for xenapi com
        return {'host': FLAGS.vncserver_proxyclient_address, 'port': 80,
                'internal_access_path': path}
    def _vif_xenstore_data(self, vif):
        """Convert a network-info vif into a dict injectable via xenstore."""
        def get_ip(ip):
            # Return the address string, or None for a missing/empty entry.
            if not ip:
                return None
            return ip['address']
        def fixed_ip_dict(ip, subnet):
            # IPv4 gets a dotted-quad netmask; IPv6 uses the prefix length.
            if ip['version'] == 4:
                netmask = str(subnet.as_netaddr().netmask)
            else:
                netmask = subnet.as_netaddr()._prefixlen
            return {'ip': ip['address'],
                    'enabled': '1',
                    'netmask': netmask,
                    'gateway': get_ip(subnet['gateway'])}
        def convert_route(route):
            return {'route': str(netaddr.IPNetwork(route['cidr']).network),
                    'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
                    'gateway': get_ip(route['gateway'])}
        network = vif['network']
        v4_subnets = [subnet for subnet in network['subnets']
                      if subnet['version'] == 4]
        v6_subnets = [subnet for subnet in network['subnets']
                      if subnet['version'] == 6]
        # NOTE(tr3buchet): routes and DNS come from all subnets
        routes = [convert_route(route) for subnet in network['subnets']
                  for route in subnet['routes']]
        dns = [get_ip(ip) for subnet in network['subnets']
               for ip in subnet['dns']]
        info_dict = {'label': network['label'],
                     'mac': vif['address']}
        if v4_subnets:
            # NOTE(tr3buchet): gateway and broadcast from first subnet;
            # primary IP will be from first subnet;
            # subnets are generally unordered :(
            info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
            info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
            info_dict['ips'] = [fixed_ip_dict(ip, subnet)
                                for subnet in v4_subnets
                                for ip in subnet['ips']]
        if v6_subnets:
            # NOTE(tr3buchet): gateway from first subnet;
            # primary IP will be from first subnet;
            # subnets are generally unordered :(
            info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
            info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
                                 for subnet in v6_subnets
                                 for ip in subnet['ips']]
        if routes:
            info_dict['routes'] = routes
        if dns:
            # De-duplicate DNS servers that appear in multiple subnets.
            info_dict['dns'] = list(set(dns))
        return info_dict
    def inject_network_info(self, instance, network_info, vm_ref=None):
        """
        Generate the network info and make calls to place it into the
        xenstore and the xenstore param list.
        vm_ref can be passed in because it will sometimes be different than
        what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
        """
        vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
        LOG.debug(_("Injecting network info to xenstore"), instance=instance)
        for vif in network_info:
            xs_data = self._vif_xenstore_data(vif)
            # Key each entry by the VIF's MAC address with the colons removed.
            location = ('vm-data/networking/%s' %
                        vif['address'].replace(':', ''))
            self._add_to_param_xenstore(vm_ref,
                                        location,
                                        jsonutils.dumps(xs_data))
            try:
                # Also push to the live xenstore if the domain is running.
                self._write_to_xenstore(instance, location, xs_data,
                                        vm_ref=vm_ref)
            except KeyError:
                # catch KeyError for domid if instance isn't running
                pass
    def _create_vifs(self, vm_ref, instance, network_info):
        """Creates vifs for an instance."""
        LOG.debug(_("Creating vifs"), instance=instance)
        # this function raises if vm_ref is not a vm_opaque_ref
        self._session.call_xenapi("VM.get_record", vm_ref)
        # Device numbers are simply the VIF's position in network_info.
        for device, vif in enumerate(network_info):
            vif_rec = self.vif_driver.plug(instance, vif,
                                           vm_ref=vm_ref, device=device)
            network_ref = vif_rec['network']
            LOG.debug(_('Creating VIF for network %(network_ref)s'),
                      locals(), instance=instance)
            vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
            LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
                      locals(), instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
vm_ref = self._get_vm_opaque_ref(instance)
agent.resetnetwork(self._session, instance, vm_ref)
    def inject_hostname(self, instance, vm_ref, hostname):
        """Inject the hostname of the instance into the xenstore."""
        if instance['os_type'] == "windows":
            # NOTE(jk0): Windows hostnames can only be <= 15 chars.
            hostname = hostname[:15]
        LOG.debug(_("Injecting hostname to xenstore"), instance=instance)
        self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
    def _write_to_xenstore(self, instance, path, value, vm_ref=None):
        """
        Writes the passed value to the xenstore record for the given VM
        at the specified location. A XenAPIPlugin.PluginError will be raised
        if any error is encountered in the write process.
        """
        # Value is JSON-encoded before being handed to the dom0 plugin.
        return self._make_plugin_call('xenstore.py', 'write_record', instance,
                                      vm_ref=vm_ref, path=path,
                                      value=jsonutils.dumps(value))
    def _delete_from_xenstore(self, instance, path, vm_ref=None):
        """
        Deletes the value from the xenstore record for the given VM at
        the specified location. A XenAPIPlugin.PluginError will be
        raised if any error is encountered in the delete process.
        """
        return self._make_plugin_call('xenstore.py', 'delete_record', instance,
                                      vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
args = {'dom_id': vm_rec['domid']}
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
except self._session.XenAPI.Failure, e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
locals(), instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'error', 'message': err_msg}
return None
    def _add_to_param_xenstore(self, vm_ref, key, val):
        """
        Takes a key/value pair and adds it to the xenstore parameter
        record for the given vm instance. If the key exists in xenstore,
        it is overwritten
        """
        # xapi's add_to_xenstore_data fails on duplicates, so remove first.
        self._remove_from_param_xenstore(vm_ref, key)
        self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
    def _remove_from_param_xenstore(self, vm_ref, key):
        """
        Takes a single key and removes it from the xenstore parameter
        record data for the given VM.
        If the key doesn't exist, the request is ignored.
        """
        self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
    def refresh_security_group_rules(self, security_group_id):
        """Recreates security group rules for every instance."""
        self.firewall_driver.refresh_security_group_rules(security_group_id)
    def refresh_security_group_members(self, security_group_id):
        """Recreates security group rules for every instance."""
        self.firewall_driver.refresh_security_group_members(security_group_id)
    def refresh_instance_security_rules(self, instance):
        """Recreates security group rules for the specified instance."""
        self.firewall_driver.refresh_instance_security_rules(instance)
    def refresh_provider_fw_rules(self):
        """Delegate provider firewall rule refresh to the firewall driver."""
        self.firewall_driver.refresh_provider_fw_rules()
    def unfilter_instance(self, instance_ref, network_info):
        """Removes filters for each VIF of the specified instance."""
        self.firewall_driver.unfilter_instance(instance_ref,
                                               network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = db.aggregate_get_by_host(context, FLAGS.host,
key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=FLAGS.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
reason = _('Destination host:%(hostname)s must be in the same '
'aggregate as the source server')
raise exception.MigrationError(reason=reason % locals())
    def _ensure_host_in_aggregate(self, context, hostname):
        # Raises (via the lookup) if hostname is not in the same pool
        # aggregate as this host; the returned uuid itself is discarded.
        self._get_host_uuid_from_aggregate(context, hostname)
    def _get_host_opaque_ref(self, context, hostname):
        # Resolve hostname -> uuid (via the pool aggregate) -> xapi host ref.
        host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
        return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
# Get the network to for migrate.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
raise exception.MigrationError('No suitable network for migrate')
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Receive failed'))
return migrate_data
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Check if it is possible to execute live migration.
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        """
        if block_migration:
            # Block migration: ask xapi for receive tokens and a target SR
            # for the source to use.
            migrate_send_data = self._migrate_receive(ctxt)
            destination_sr_ref = vm_utils.safe_find_sr(self._session)
            dest_check_data = {
                "block_migration": block_migration,
                "migrate_data": {"migrate_send_data": migrate_send_data,
                                 "destination_sr_ref": destination_sr_ref}}
            return dest_check_data
        else:
            # Shared-storage migration: only verify pool membership.
            src = instance_ref['host']
            self._ensure_host_in_aggregate(ctxt, src)
            # TODO(johngarbutt) we currently assume
            # instance is on a SR shared with other destination
            # block migration work will be able to resolve this
            return None
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
""" Check if it is possible to execute live migration
on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if dest_check_data and 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('VM.assert_can_migrate'
'failed'))
    def _generate_vdi_map(self, destination_sr_ref, vm_ref):
        """generate a vdi_map for _call_live_migrate_command """
        sr_ref = vm_utils.safe_find_sr(self._session)
        vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
                                                    vm_ref, sr_ref)
        # Map every local VDI onto the single destination SR.
        return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
    def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
        """unpack xapi specific parameters, and call a live migrate command"""
        destination_sr_ref = migrate_data['destination_sr_ref']
        migrate_send_data = migrate_data['migrate_send_data']
        vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
        # No VIF remapping or extra options are needed; the boolean True is
        # the "live" flag of the xapi migrate call.
        vif_map = {}
        options = {}
        self._session.call_xenapi(command_name, vm_ref,
                                  migrate_send_data, True,
                                  vdi_map, vif_map, options)
    def live_migrate(self, context, instance, destination_hostname,
                     post_method, recover_method, block_migration,
                     migrate_data=None):
        """Live migrate the instance, calling post_method on success and
        recover_method (then re-raising) on any failure.
        """
        try:
            vm_ref = self._get_vm_opaque_ref(instance)
            if block_migration:
                if not migrate_data:
                    raise exception.InvalidParameterValue('Block Migration '
                                'requires migrate data from destination')
                try:
                    self._call_live_migrate_command(
                        "VM.migrate_send", vm_ref, migrate_data)
                except self._session.XenAPI.Failure as exc:
                    LOG.exception(exc)
                    raise exception.MigrationError(_('Migrate Send failed'))
            else:
                # Shared-storage case: plain intra-pool migration.
                host_ref = self._get_host_opaque_ref(context,
                                                     destination_hostname)
                self._session.call_xenapi("VM.pool_migrate", vm_ref,
                                          host_ref, {})
            post_method(context, instance, destination_hostname,
                        block_migration)
        except Exception:
            # Let the caller clean up, then re-raise the original error.
            with excutils.save_and_reraise_exception():
                recover_method(context, instance, destination_hostname,
                               block_migration)
| apache-2.0 |
mokieyue/mopidy | mopidy/commands.py | 4 | 15539 | from __future__ import absolute_import, print_function, unicode_literals
import argparse
import collections
import contextlib
import logging
import os
import signal
import sys
import pykka
from mopidy import config as config_lib, exceptions
from mopidy.audio import Audio
from mopidy.core import Core
from mopidy.internal import deps, process, timer, versioning
from mopidy.internal.gi import GLib
logger = logging.getLogger(__name__)
# Default config search path: every XDG system config dir plus the user
# config dir, each holding mopidy/mopidy.conf, joined with ':' so that
# later (user) files override earlier (system) ones.
_default_config = []
for base in GLib.get_system_config_dirs() + [GLib.get_user_config_dir()]:
    _default_config.append(os.path.join(base, b'mopidy', b'mopidy.conf'))
DEFAULT_CONFIG = b':'.join(_default_config)
def config_files_type(value):
    """Split a colon separated ``--config`` value into a list of paths."""
    separator = b':'
    return value.split(separator)
def config_override_type(value):
    """Parse a ``section/key=value`` override into a stripped 3-tuple."""
    try:
        section, remainder = value.split(b'/', 1)
        key, raw_value = remainder.split(b'=', 1)
    except ValueError:
        raise argparse.ArgumentTypeError(
            '%s must have the format section/key=value' % value)
    return (section.strip(), key.strip(), raw_value.strip())
class _ParserError(Exception):
def __init__(self, message):
self.message = message
class _HelpError(Exception):
    """Signals that help output was requested; caught in Command.parse()."""
    pass
class _ArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that raises _ParserError instead of exiting."""
    def error(self, message):
        raise _ParserError(message)
class _HelpAction(argparse.Action):
    """Replacement help action that raises _HelpError instead of printing
    help and exiting, so the Command tree can render its own help."""
    def __init__(self, option_strings, dest=None, help=None):
        super(_HelpAction, self).__init__(
            option_strings=option_strings,
            dest=dest or argparse.SUPPRESS,
            default=argparse.SUPPRESS,
            nargs=0,
            help=help)
    def __call__(self, parser, namespace, values, option_string=None):
        raise _HelpError()
class Command(object):
    """Command parser and runner for building trees of commands.
    This class provides a wrapper around :class:`argparse.ArgumentParser`
    for handling this type of command line application in a better way than
    argparse's own sub-parser handling.
    """
    help = None
    #: Help text to display in help output.
    def __init__(self):
        # Ordered so sub-commands appear in help in registration order.
        self._children = collections.OrderedDict()
        self._arguments = []
        self._overrides = {}
    def _build(self):
        # Build a fresh parser from the registered arguments; the trailing
        # REMAINDER argument collects any sub-command and its arguments.
        actions = []
        parser = _ArgumentParser(add_help=False)
        parser.register('action', 'help', _HelpAction)
        for args, kwargs in self._arguments:
            actions.append(parser.add_argument(*args, **kwargs))
        parser.add_argument('_args', nargs=argparse.REMAINDER,
                            help=argparse.SUPPRESS)
        return parser, actions
    def add_child(self, name, command):
        """Add a child parser to consider using.
        :param name: name to use for the sub-command that is being added.
        :type name: string
        """
        self._children[name] = command
    def add_argument(self, *args, **kwargs):
        """Add an argument to the parser.
        This method takes all the same arguments as the
        :class:`argparse.ArgumentParser` version of this method.
        """
        self._arguments.append((args, kwargs))
    def set(self, **kwargs):
        """Override a value in the final result of parsing."""
        self._overrides.update(kwargs)
    def exit(self, status_code=0, message=None, usage=None):
        """Optionally print a message and exit."""
        print('\n\n'.join(m for m in (usage, message) if m))
        sys.exit(status_code)
    def format_usage(self, prog=None):
        """Format usage for current parser."""
        actions = self._build()[1]
        prog = prog or os.path.basename(sys.argv[0])
        return self._usage(actions, prog) + '\n'
    def _usage(self, actions, prog):
        # Render just the usage line via argparse's formatter.
        formatter = argparse.HelpFormatter(prog)
        formatter.add_usage(None, actions, [])
        return formatter.format_help().strip()
    def format_help(self, prog=None):
        """Format help for current parser and children."""
        actions = self._build()[1]
        prog = prog or os.path.basename(sys.argv[0])
        formatter = argparse.HelpFormatter(prog)
        formatter.add_usage(None, actions, [])
        if self.help:
            formatter.add_text(self.help)
        if actions:
            formatter.add_text('OPTIONS:')
            formatter.start_section(None)
            formatter.add_arguments(actions)
            formatter.end_section()
        # Recursively collect help for all sub-commands.
        subhelp = []
        for name, child in self._children.items():
            child._subhelp(name, subhelp)
        if subhelp:
            formatter.add_text('COMMANDS:')
            subhelp.insert(0, '')
        return formatter.format_help() + '\n'.join(subhelp)
    def _subhelp(self, name, result):
        # Append this command's help section (if any) to result, then recurse
        # into children with the dotted command path as the prog name.
        actions = self._build()[1]
        if self.help or actions:
            formatter = argparse.HelpFormatter(name)
            formatter.add_usage(None, actions, [], '')
            formatter.start_section(None)
            formatter.add_text(self.help)
            formatter.start_section(None)
            formatter.add_arguments(actions)
            formatter.end_section()
            formatter.end_section()
            result.append(formatter.format_help())
        for childname, child in self._children.items():
            child._subhelp(' '.join((name, childname)), result)
    def parse(self, args, prog=None):
        """Parse command line arguments.
        Will recursively parse commands until a final parser is found or an
        error occurs. In the case of errors we will print a message and exit.
        Otherwise, any overrides are applied and the current parser stored
        in the command attribute of the return value.
        :param args: list of arguments to parse
        :type args: list of strings
        :param prog: name to use for program
        :type prog: string
        :rtype: :class:`argparse.Namespace`
        """
        prog = prog or os.path.basename(sys.argv[0])
        try:
            return self._parse(
                args, argparse.Namespace(), self._overrides.copy(), prog)
        except _HelpError:
            self.exit(0, self.format_help(prog))
    def _parse(self, args, namespace, overrides, prog):
        overrides.update(self._overrides)
        parser, actions = self._build()
        try:
            result = parser.parse_args(args, namespace)
        except _ParserError as e:
            self.exit(1, e.message, self._usage(actions, prog))
        if not result._args:
            # Leaf command reached: apply overrides and record the command.
            for attr, value in overrides.items():
                setattr(result, attr, value)
            delattr(result, '_args')
            result.command = self
            return result
        # Otherwise the first remaining token must name a sub-command.
        child = result._args.pop(0)
        if child not in self._children:
            usage = self._usage(actions, prog)
            self.exit(1, 'unrecognized command: %s' % child, usage)
        return self._children[child]._parse(
            result._args, result, overrides, ' '.join([prog, child]))
    def run(self, *args, **kwargs):
        """Run the command.
        Must be implemented by sub-classes that are not simply an intermediate
        in the command namespace.
        """
        raise NotImplementedError
@contextlib.contextmanager
def _actor_error_handling(name):
    """Log (instead of propagating) known errors raised while starting the
    named backend/frontend/mixer actor, so one bad extension does not stop
    the others from starting."""
    try:
        yield
    except exceptions.BackendError as exc:
        logger.error(
            'Backend (%s) initialization error: %s', name, exc.message)
    except exceptions.FrontendError as exc:
        logger.error(
            'Frontend (%s) initialization error: %s', name, exc.message)
    except exceptions.MixerError as exc:
        logger.error(
            'Mixer (%s) initialization error: %s', name, exc.message)
    except Exception:
        logger.exception('Got un-handled exception from %s', name)
# TODO: move out of this utility class
class RootCommand(Command):
    """Top-level ``mopidy`` command: parses global options and, when run,
    starts the mixer/audio/backends/core/frontends actors and the GLib
    mainloop, tearing everything down again on exit."""
    def __init__(self):
        super(RootCommand, self).__init__()
        self.set(base_verbosity_level=0)
        self.add_argument(
            '-h', '--help',
            action='help', help='Show this message and exit')
        self.add_argument(
            '--version', action='version',
            version='Mopidy %s' % versioning.get_version())
        self.add_argument(
            '-q', '--quiet',
            action='store_const', const=-1, dest='verbosity_level',
            help='less output (warning level)')
        self.add_argument(
            '-v', '--verbose',
            action='count', dest='verbosity_level', default=0,
            help='more output (repeat up to 3 times for even more)')
        self.add_argument(
            '--save-debug-log',
            action='store_true', dest='save_debug_log',
            help='save debug log to "./mopidy.log"')
        self.add_argument(
            '--config',
            action='store', dest='config_files', type=config_files_type,
            default=DEFAULT_CONFIG, metavar='FILES',
            help='config files to use, colon seperated, later files override')
        self.add_argument(
            '-o', '--option',
            action='append', dest='config_overrides',
            type=config_override_type, metavar='OPTIONS',
            help='`section/key=value` values to override config options')
    def run(self, args, config):
        def on_sigterm(loop):
            logger.info('GLib mainloop got SIGTERM. Exiting...')
            loop.quit()
        loop = GLib.MainLoop()
        GLib.unix_signal_add(
            GLib.PRIORITY_DEFAULT, signal.SIGTERM, on_sigterm, loop)
        mixer_class = self.get_mixer_class(config, args.registry['mixer'])
        backend_classes = args.registry['backend']
        frontend_classes = args.registry['frontend']
        exit_status_code = 0
        try:
            # Start-up order: mixer -> audio -> backends -> core -> frontends.
            mixer = None
            if mixer_class is not None:
                mixer = self.start_mixer(config, mixer_class)
            if mixer:
                self.configure_mixer(config, mixer)
            audio = self.start_audio(config, mixer)
            backends = self.start_backends(config, backend_classes, audio)
            core = self.start_core(config, mixer, backends, audio)
            self.start_frontends(config, frontend_classes, core)
            logger.info('Starting GLib mainloop')
            loop.run()
        except (exceptions.BackendError,
                exceptions.FrontendError,
                exceptions.MixerError):
            logger.info('Initialization error. Exiting...')
            exit_status_code = 1
        except KeyboardInterrupt:
            logger.info('Interrupted. Exiting...')
        except Exception:
            logger.exception('Uncaught exception')
        finally:
            # Tear down in reverse order of startup.
            loop.quit()
            self.stop_frontends(frontend_classes)
            self.stop_core()
            self.stop_backends(backend_classes)
            self.stop_audio()
            if mixer_class is not None:
                self.stop_mixer(mixer_class)
            process.stop_remaining_actors()
        return exit_status_code
    def get_mixer_class(self, config, mixer_classes):
        """Return the configured mixer class, None if mixing is disabled,
        or exit the process if the name does not match exactly one class."""
        logger.debug(
            'Available Mopidy mixers: %s',
            ', '.join(m.__name__ for m in mixer_classes) or 'none')
        if config['audio']['mixer'] == 'none':
            logger.debug('Mixer disabled')
            return None
        selected_mixers = [
            m for m in mixer_classes if m.name == config['audio']['mixer']]
        if len(selected_mixers) != 1:
            logger.error(
                'Did not find unique mixer "%s". Alternatives are: %s',
                config['audio']['mixer'],
                ', '.join([m.name for m in mixer_classes]) + ', none' or
                'none')
            process.exit_process()
        return selected_mixers[0]
    def start_mixer(self, config, mixer_class):
        """Start the mixer actor; return its proxy, or None if it died."""
        logger.info('Starting Mopidy mixer: %s', mixer_class.__name__)
        with _actor_error_handling(mixer_class.__name__):
            mixer = mixer_class.start(config=config).proxy()
            try:
                # ping() blocks until the actor has finished starting.
                mixer.ping().get()
                return mixer
            except pykka.ActorDeadError as exc:
                logger.error('Actor died: %s', exc)
        return None
    def configure_mixer(self, config, mixer):
        """Apply the configured initial volume, if one is set."""
        volume = config['audio']['mixer_volume']
        if volume is not None:
            mixer.set_volume(volume)
            logger.info('Mixer volume set to %d', volume)
        else:
            logger.debug('Mixer volume left unchanged')
    def start_audio(self, config, mixer):
        logger.info('Starting Mopidy audio')
        return Audio.start(config=config, mixer=mixer).proxy()
    def start_backends(self, config, backend_classes, audio):
        """Start all backend actors; return proxies of those that survived
        startup."""
        logger.info(
            'Starting Mopidy backends: %s',
            ', '.join(b.__name__ for b in backend_classes) or 'none')
        backends = []
        for backend_class in backend_classes:
            with _actor_error_handling(backend_class.__name__):
                with timer.time_logger(backend_class.__name__):
                    backend = backend_class.start(
                        config=config, audio=audio).proxy()
                    backends.append(backend)
        # Block until all on_starts have finished, letting them run in parallel
        for backend in backends[:]:
            try:
                backend.ping().get()
            except pykka.ActorDeadError as exc:
                backends.remove(backend)
                logger.error('Actor died: %s', exc)
        return backends
    def start_core(self, config, mixer, backends, audio):
        logger.info('Starting Mopidy core')
        return Core.start(
            config=config, mixer=mixer, backends=backends, audio=audio).proxy()
    def start_frontends(self, config, frontend_classes, core):
        logger.info(
            'Starting Mopidy frontends: %s',
            ', '.join(f.__name__ for f in frontend_classes) or 'none')
        for frontend_class in frontend_classes:
            with _actor_error_handling(frontend_class.__name__):
                with timer.time_logger(frontend_class.__name__):
                    frontend_class.start(config=config, core=core)
    def stop_frontends(self, frontend_classes):
        logger.info('Stopping Mopidy frontends')
        for frontend_class in frontend_classes:
            process.stop_actors_by_class(frontend_class)
    def stop_core(self):
        logger.info('Stopping Mopidy core')
        process.stop_actors_by_class(Core)
    def stop_backends(self, backend_classes):
        logger.info('Stopping Mopidy backends')
        for backend_class in backend_classes:
            process.stop_actors_by_class(backend_class)
    def stop_audio(self):
        logger.info('Stopping Mopidy audio')
        process.stop_actors_by_class(Audio)
    def stop_mixer(self, mixer_class):
        logger.info('Stopping Mopidy mixer')
        process.stop_actors_by_class(mixer_class)
class ConfigCommand(Command):
    help = 'Show currently active configuration.'
    def __init__(self):
        super(ConfigCommand, self).__init__()
        # Keep log output quiet so the printed config stays clean.
        self.set(base_verbosity_level=-1)
    def run(self, config, errors, schemas):
        print(config_lib.format(config, schemas, errors))
        return 0
class DepsCommand(Command):
    help = 'Show dependencies and debug information.'
    def __init__(self):
        super(DepsCommand, self).__init__()
        # Keep log output quiet so the printed report stays clean.
        self.set(base_verbosity_level=-1)
    def run(self):
        print(deps.format_dependency_list())
        return 0
| apache-2.0 |
hainm/pythran | pythran/optimizations/dead_code_elimination.py | 4 | 3556 | """ DeadCodeElimination remove useless code. """
from pythran.analyses import PureExpressions, UseDefChain
from pythran.openmp import OMPDirective
from pythran.passmanager import Transformation
import pythran.metadata as metadata
import ast
class DeadCodeElimination(Transformation):
    """
    Remove useless statement like:
        - assignment to unused variables
        - remove alone pure statement
        - remove empty if
    >>> import ast
    >>> from pythran import passmanager, backend
    >>> pm = passmanager.PassManager("test")
    >>> node = ast.parse("def foo(): a = [2, 3]; return 1")
    >>> _, node = pm.apply(DeadCodeElimination, node)
    >>> print pm.dump(backend.Python, node)
    def foo():
        pass
        return 1
    >>> node = ast.parse("def foo(): 'a simple string'; return 1")
    >>> _, node = pm.apply(DeadCodeElimination, node)
    >>> print pm.dump(backend.Python, node)
    def foo():
        pass
        return 1
    >>> node = ast.parse('''
    ... def bar(a):
    ...     return a
    ... def foo(a):
    ...    bar(a)
    ...    return 1''')
    >>> _, node = pm.apply(DeadCodeElimination, node)
    >>> print pm.dump(backend.Python, node)
    def bar(a):
        return a
    def foo(a):
        pass
        return 1
    """
    def __init__(self):
        # Depends on purity analysis and the use-def chains of each name.
        super(DeadCodeElimination, self).__init__(PureExpressions,
                                                  UseDefChain)
    def used_target(self, node):
        # A Name target is "used" if its use-def chain has any read ("U" or
        # "UD") node; non-Name targets are conservatively kept.
        if isinstance(node, ast.Name):
            udc = self.use_def_chain[node.id]
            def is_use(x):
                return udc.node[x]['action'] in ("U", "UD")
            use_count = len(filter(is_use, udc.nodes()))
            return use_count != 0
        return True
    def visit_Assign(self, node):
        # Drop targets that are never read (Python 2 filter returns a list).
        node.targets = filter(self.used_target, node.targets)
        if node.targets:
            return node
        self.update = True
        # No target left: keep the RHS only if it may have side effects.
        if node.value in self.pure_expressions:
            return ast.Pass()
        else:
            return ast.Expr(value=node.value)
    def visit_Expr(self, node):
        # A lone pure expression statement can be dropped (but never a
        # yield, which affects control flow).
        if (node in self.pure_expressions and
                not isinstance(node.value, ast.Yield)):
            self.update = True
            return ast.Pass()
        self.generic_visit(node)
        return node
    def visit_If(self, node):
        self.generic_visit(node)
        have_body = any(not isinstance(x, ast.Pass) for x in node.body)
        have_else = any(not isinstance(x, ast.Pass) for x in node.orelse)
        # If the "body" is empty but "else content" is useful, switch branches
        # and remove else content
        if not have_body and have_else:
            test = ast.UnaryOp(op=ast.Not(), operand=node.test)
            self.update = True
            return ast.If(test=test, body=node.orelse, orelse=list())
        # if neither "if" and "else" are useful, keep test if it is not pure
        elif not have_body:
            self.update = True
            if node.test in self.pure_expressions:
                return ast.Pass()
            else:
                node = ast.Expr(value=node.test)
                self.generic_visit(node)
        return node
    def visit(self, node):
        """ Add OMPDirective from the old node to the new one. """
        old_omp = metadata.get(node, OMPDirective)
        node = super(DeadCodeElimination, self).visit(node)
        # Re-attach OpenMP directives that the rewrite would otherwise lose.
        if not metadata.get(node, OMPDirective):
            for omp_directive in old_omp:
                metadata.add(node, omp_directive)
        return node
| bsd-3-clause |
thundernet8/WRGameVideos-Server | venv/lib/python2.7/site-packages/sqlalchemy/ext/compiler.py | 81 | 15770 | # ext/compiler.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an API for creation of custom ClauseElements and compilers.
Synopsis
========
Usage involves the creation of one or more
:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or
more callables defining its compilation::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ColumnClause
class MyColumn(ColumnClause):
pass
@compiles(MyColumn)
def compile_mycolumn(element, compiler, **kw):
return "[%s]" % element.name
Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`,
the base expression element for named column objects. The ``compiles``
decorator registers itself with the ``MyColumn`` class so that it is invoked
when the object is compiled to a string::
from sqlalchemy import select
s = select([MyColumn('x'), MyColumn('y')])
print str(s)
Produces::
SELECT [x], [y]
Dialect-specific compilation rules
==================================
Compilers can also be made dialect-specific. The appropriate compiler will be
invoked for the dialect in use::
from sqlalchemy.schema import DDLElement
class AlterColumn(DDLElement):
def __init__(self, column, cmd):
self.column = column
self.cmd = cmd
@compiles(AlterColumn)
def visit_alter_column(element, compiler, **kw):
return "ALTER COLUMN %s ..." % element.column.name
@compiles(AlterColumn, 'postgresql')
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name,
element.column.name)
The second ``visit_alter_column`` will be invoked when any ``postgresql``
dialect is used.
Compiling sub-elements of a custom expression construct
=======================================================
The ``compiler`` argument is the
:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object
can be inspected for any information about the in-progress compilation,
including ``compiler.dialect``, ``compiler.statement`` etc. The
:class:`~sqlalchemy.sql.compiler.SQLCompiler` and
:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()``
method which can be used for compilation of embedded attributes::
from sqlalchemy.sql.expression import Executable, ClauseElement
class InsertFromSelect(Executable, ClauseElement):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s (%s)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select)
)
insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5))
print insert
Produces::
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z
FROM mytable WHERE mytable.x > :x_1)"
.. note::
The above ``InsertFromSelect`` construct is only an example, this actual
functionality is already available using the
:meth:`.Insert.from_select` method.
.. note::
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
enabled. See :ref:`enabling_compiled_autocommit` for this step.
Cross Compiling between SQL and DDL compilers
---------------------------------------------
SQL and DDL constructs are each compiled using different base compilers -
``SQLCompiler`` and ``DDLCompiler``. A common need is to access the
compilation rules of SQL expressions from within a DDL expression. The
``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as
below where we generate a CHECK constraint that embeds a SQL expression::
@compiles(MyConstraint)
def compile_my_constraint(constraint, ddlcompiler, **kw):
return "CONSTRAINT %s CHECK (%s)" % (
constraint.name,
ddlcompiler.sql_compiler.process(constraint.expression)
)
.. _enabling_compiled_autocommit:
Enabling Autocommit on a Construct
==================================
Recall from the section :ref:`autocommit` that the :class:`.Engine`, when
asked to execute a construct in the absence of a user-defined transaction,
detects if the given construct represents DML or DDL, that is, a data
modification or data definition statement, which requires (or may require,
in the case of DDL) that the transaction generated by the DBAPI be committed
(recall that DBAPI always has a transaction going on regardless of what
SQLAlchemy does). Checking for this is actually accomplished by checking for
the "autocommit" execution option on the construct. When building a
construct like an INSERT derivation, a new DDL type, or perhaps a stored
procedure that alters data, the "autocommit" option needs to be set in order
for the statement to function with "connectionless" execution
(as described in :ref:`dbengine_implicit`).
Currently a quick way to do this is to subclass :class:`.Executable`, then
add the "autocommit" flag to the ``_execution_options`` dictionary (note this
is a "frozen" dictionary which supplies a generative ``union()`` method)::
from sqlalchemy.sql.expression import Executable, ClauseElement
class MyInsertThing(Executable, ClauseElement):
_execution_options = \\
Executable._execution_options.union({'autocommit': True})
More succinctly, if the construct is truly similar to an INSERT, UPDATE, or
DELETE, :class:`.UpdateBase` can be used, which already is a subclass
of :class:`.Executable`, :class:`.ClauseElement` and includes the
``autocommit`` flag::
from sqlalchemy.sql.expression import UpdateBase
class MyInsertThing(UpdateBase):
def __init__(self, ...):
...
DDL elements that subclass :class:`.DDLElement` already have the
"autocommit" flag turned on.
Changing the default compilation of existing constructs
=======================================================
The compiler extension applies just as well to the existing constructs. When
overriding the compilation of a built in SQL construct, the @compiles
decorator is invoked upon the appropriate class (be sure to use the class,
i.e. ``Insert`` or ``Select``, instead of the creation function such
as ``insert()`` or ``select()``).
Within the new compilation function, to get at the "original" compilation
routine, use the appropriate visit_XXX method - this
because compiler.process() will call upon the overriding routine and cause
an endless loop. Such as, to add "prefix" to all insert statements::
from sqlalchemy.sql.expression import Insert
@compiles(Insert)
def prefix_inserts(insert, compiler, **kw):
return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)
The above compiler will prefix all INSERT statements with "some prefix" when
compiled.
.. _type_compilation_extension:
Changing Compilation of Types
=============================
``compiler`` works for types, too, such as below where we implement the
MS-SQL specific 'max' keyword for ``String``/``VARCHAR``::
@compiles(String, 'mssql')
@compiles(VARCHAR, 'mssql')
def compile_varchar(element, compiler, **kw):
if element.length == 'max':
return "VARCHAR('max')"
else:
return compiler.visit_VARCHAR(element, **kw)
foo = Table('foo', metadata,
Column('data', VARCHAR('max'))
)
Subclassing Guidelines
======================
A big part of using the compiler extension is subclassing SQLAlchemy
expression constructs. To make this easier, the expression and
schema packages feature a set of "bases" intended for common tasks.
A synopsis is as follows:
* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root
expression class. Any SQL expression can be derived from this base, and is
probably the best choice for longer constructs such as specialized INSERT
statements.
* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all
"column-like" elements. Anything that you'd place in the "columns" clause of
a SELECT statement (as well as order by and group by) can derive from this -
the object will automatically have Python "comparison" behavior.
:class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a
``type`` member which is expression's return type. This can be established
at the instance level in the constructor, or at the class level if its
generally constant::
class timestamp(ColumnElement):
type = TIMESTAMP()
* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a
``ColumnElement`` and a "from clause" like object, and represents a SQL
function or stored procedure type of call. Since most databases support
statements along the line of "SELECT FROM <some function>"
``FunctionElement`` adds in the ability to be used in the FROM clause of a
``select()`` construct::
from sqlalchemy.sql.expression import FunctionElement
class coalesce(FunctionElement):
name = 'coalesce'
@compiles(coalesce)
def compile(element, compiler, **kw):
return "coalesce(%s)" % compiler.process(element.clauses)
@compiles(coalesce, 'oracle')
def compile(element, compiler, **kw):
if len(element.clauses) > 2:
raise TypeError("coalesce only supports two arguments on Oracle")
return "nvl(%s)" % compiler.process(element.clauses)
* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the
``execute_at()`` method, allowing the construct to be invoked during CREATE
TABLE and DROP TABLE sequences.
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which
should be used with any expression class that represents a "standalone"
SQL statement that can be passed directly to an ``execute()`` method. It
is already implicit within ``DDLElement`` and ``FunctionElement``.
Further Examples
================
"UTC timestamp" function
-------------------------
A function that works like "CURRENT_TIMESTAMP" except applies the
appropriate conversions so that the time is in UTC time. Timestamps are best
stored in relational databases as UTC, without time zones. UTC so that your
database doesn't think time has gone backwards in the hour when daylight
savings ends, without timezones because timezones are like character
encodings - they're best applied only at the endpoints of an application
(i.e. convert to UTC upon user input, re-apply desired timezone upon display).
For Postgresql and Microsoft SQL Server::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def ms_utcnow(element, compiler, **kw):
return "GETUTCDATE()"
Example usage::
from sqlalchemy import (
Table, Column, Integer, String, DateTime, MetaData
)
metadata = MetaData()
event = Table("event", metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50), nullable=False),
Column("timestamp", DateTime, server_default=utcnow())
)
"GREATEST" function
-------------------
The "GREATEST" function is given any number of arguments and returns the one
that is of the highest value - its equivalent to Python's ``max``
function. A SQL standard version versus a CASE based version which only
accommodates two arguments::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'sqlite')
@compiles(greatest, 'mssql')
@compiles(greatest, 'oracle')
def case_greatest(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return "CASE WHEN %s > %s THEN %s ELSE %s END" % (
compiler.process(arg1),
compiler.process(arg2),
compiler.process(arg1),
compiler.process(arg2),
)
Example usage::
Session.query(Account).\\
filter(
greatest(
Account.checking_balance,
Account.savings_balance) > 10000
)
"false" expression
------------------
Render a "false" constant expression, rendering as "0" on platforms that
don't have a "false" constant::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
class sql_false(expression.ColumnElement):
pass
@compiles(sql_false)
def default_false(element, compiler, **kw):
return "false"
@compiles(sql_false, 'mssql')
@compiles(sql_false, 'mysql')
@compiles(sql_false, 'oracle')
def int_false(element, compiler, **kw):
return "0"
Example usage::
from sqlalchemy import select, union_all
exp = union_all(
select([users.c.name, sql_false().label("enrolled")]),
select([customers.c.name, customers.c.enrolled])
)
"""
from .. import exc
from ..sql import visitors
def compiles(class_, *specs):
    """Register a function as a compiler for a given
    :class:`.ClauseElement` type, optionally restricted to the dialect
    names passed in ``*specs``."""
    def decorate(fn):
        dispatcher = class_.__dict__.get('_compiler_dispatcher', None)
        if not dispatcher:
            default_dispatch = class_.__dict__.get('_compiler_dispatch')
            dispatcher = _dispatcher()
            if default_dispatch:
                # preserve the class' stock compilation as the fallback
                dispatcher.specs['default'] = default_dispatch
            # the lambda keeps _compiler_dispatch a plain function
            # attribute instead of a bound method of the dispatcher
            setattr(class_, '_compiler_dispatch',
                    lambda *arg, **kw: dispatcher(*arg, **kw))
            setattr(class_, '_compiler_dispatcher', dispatcher)
        dialect_names = specs if specs else ('default',)
        for name in dialect_names:
            dispatcher.specs[name] = fn
        return fn
    return decorate
def deregister(class_):
    """Remove all custom compilers associated with a given
    :class:`.ClauseElement` type."""
    if not hasattr(class_, '_compiler_dispatcher'):
        return
    # regenerate the stock _compiler_dispatch implementation,
    # then drop the custom dispatch directive
    visitors._generate_dispatch(class_)
    del class_._compiler_dispatcher
class _dispatcher(object):
def __init__(self):
self.specs = {}
def __call__(self, element, compiler, **kw):
# TODO: yes, this could also switch off of DBAPI in use.
fn = self.specs.get(compiler.dialect.name, None)
if not fn:
try:
fn = self.specs['default']
except KeyError:
raise exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element))
return fn(element, compiler, **kw)
| gpl-2.0 |
sealuzh/ContextBasedAnalytics | model/connection.py | 1 | 1087 | class Connection(object):
def __init__(self, method, status, url, timestamp):
self.method = method
self.status = status
self.url = url
self.timestamp = timestamp
self.label = self.assign_label()
def assign_label(self):
parameter_url_split = self.url.split('?')
if len(parameter_url_split) > 1:
return parameter_url_split[0]
else:
return self.url
@staticmethod
def create_from_log(log_statement, timestamp):
# TODO: actually crawl the method regexp and apply it to get unique urls
# possible log statement "PUT /jurgen-updates-new/887b7ae6-de80-4445-ab4b-019a0d9a2d73 HTTP/1.1\" 201 99
# structure {HTTP_METHOD} {base-url-path}?{url-parameters} HTTP/?.? {HTTP-Status} {??}
http_methods = [u"GET", u"POST", u"PUT", u"DELETE", u"PATCH"]
log_structure = log_statement.split(" ")
method = log_structure[0].lstrip('"')
# preconditions
if not (method in http_methods):
return None
url = log_structure[1]
status = log_structure[3]
return Connection(method, status, url, timestamp)
| apache-2.0 |
tailorian/Sick-Beard | lib/guessit/language.py | 19 | 13850 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, u, s
from guessit.fileutils import load_file_in_same_dir
from guessit.textutils import find_words
from guessit.country import Country
import re
import logging
__all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language',
'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED',
'search_language', 'guess_language' ]
log = logging.getLogger(__name__)
# downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
#
# Description of the fields:
# "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given),
# an alpha-2 code (when given), an English name, and a French name of a language
# are all separated by pipe (|) characters."
# Load the raw ISO-639-2 table shipped next to this module.
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')

# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]

# Each row: [alpha3-bibliographic, alpha3-terminologic, alpha2,
#            english name(s), french name(s)], pipe-separated in the file.
language_matrix = [ l.strip().split('|')
                    for l in _iso639_contents.strip().split('\n') ]

# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
                    ['ass', '', '', 'Assyrian', 'assyrien']]

for lang in language_matrix:
    # remove unused languages that shadow other common ones with a non-official form
    if (lang[2] == 'se' or  # Northern Sami shadows Swedish
        lang[2] == 'br'):   # Breton shadows Brazilian
        lang[2] = ''
    # add missing information
    if lang[0] == 'und':
        lang[2] = 'un'
    if lang[0] == 'srp':
        lang[1] = 'scc'  # from OpenSubtitles

# Frozensets of every known code/name flavor, for fast membership tests.
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
                        for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
                        for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name

# Conversion maps between the different code flavors.
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])

lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])

# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
                           for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
                           for l in language_matrix if l[3]
                           for en_name in l[3].split('; '))

# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
                           for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
                           for l in language_matrix if l[4]
                           for fr_name in l[4].split('; '))

# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
                   'inconnu': ('und', None),
                   'unk': ('und', None),
                   'un': ('und', None),
                   'gr': ('gre', None),
                   'greek': ('gre', None),
                   'esp': ('spa', None),
                   'español': ('spa', None),
                   'se': ('swe', None),
                   'po': ('pt', 'br'),
                   'pb': ('pt', 'br'),
                   'pob': ('pt', 'br'),
                   'br': ('pt', 'br'),
                   'brazilian': ('pt', 'br'),
                   'català': ('cat', None),
                   'cz': ('cze', None),
                   'ua': ('ukr', None),
                   'cn': ('chi', None),
                   'chs': ('chi', None),
                   'jp': ('jpn', None),
                   'scr': ('hrv', None)
                   }
def is_iso_language(language):
    """Return True when *language* matches a known ISO-639 code or name."""
    normalized = language.lower()
    return normalized in lng_all_names
def is_language(language):
    """Return True for ISO languages as well as the known non-ISO aliases."""
    if language in lng_exceptions:
        return True
    return is_iso_language(language)
def lang_set(languages, strict=False):
    """Return a set of guessit.Language created from their given string
    representation.

    if strict is True, then this will raise an exception if any language
    could not be identified.
    """
    result = set()
    for lang_repr in languages:
        result.add(Language(lang_repr, strict=strict))
    return result
class Language(UnicodeMixin):
    """This class represents a human language.

    You can initialize it with pretty much anything, as it knows conversion
    from ISO-639 2-letter and 3-letter codes, English and French names.

    You can also distinguish languages for specific countries, such as
    Portuguese and Brazilian Portuguese.

    There are various properties on the language object that give you the
    representation of the language for a specific usage, such as .alpha3
    to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
    language code.

    >>> Language('fr')
    Language(French)

    >>> s(Language('eng').french_name)
    'anglais'

    >>> s(Language('pt(br)').country.english_name)
    'Brazil'

    >>> s(Language('Español (Latinoamérica)').country.english_name)
    'Latin America'

    >>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
    True

    >>> s(Language('zz', strict=False).english_name)
    'Undetermined'

    >>> s(Language('pt(br)').opensubtitles)
    'pob'
    """

    # matches "language (country)" and "language-country" forms
    _with_country_regexp = re.compile('(.*)\((.*)\)')
    _with_country_regexp2 = re.compile('(.*)-(.*)')

    def __init__(self, language, country=None, strict=False, scheme=None):
        language = u(language.strip().lower())
        with_country = (Language._with_country_regexp.match(language) or
                        Language._with_country_regexp2.match(language))
        if with_country:
            # recurse on the language part, parse the country part separately
            self.lang = Language(with_country.group(1)).lang
            self.country = Country(with_country.group(2))
            return

        self.lang = None
        self.country = Country(country) if country else None

        # first look for scheme specific languages
        if scheme == 'opensubtitles':
            # OpenSubtitles reuses 'br' for Breton and 'se' for Northern Sami,
            # which the generic tables deliberately dropped (see module setup)
            if language == 'br':
                self.lang = 'bre'
                return
            elif language == 'se':
                self.lang = 'sme'
                return
        elif scheme is not None:
            log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)

        # look for ISO language codes
        if len(language) == 2:
            self.lang = lng2_to_lng3.get(language)
        elif len(language) == 3:
            self.lang = (language
                         if language in lng3
                         else lng3term_to_lng3.get(language))
        else:
            # fall back on full English/French names
            self.lang = (lng_en_name_to_lng3.get(language) or
                         lng_fr_name_to_lng3.get(language))

        # general language exceptions (common non-ISO aliases)
        if self.lang is None and language in lng_exceptions:
            lang, country = lng_exceptions[language]
            self.lang = Language(lang).alpha3
            self.country = Country(country) if country else None

        msg = 'The given string "%s" could not be identified as a language' % language
        if self.lang is None and strict:
            raise ValueError(msg)

        if self.lang is None:
            # non-strict mode: degrade to the 'undetermined' language
            log.debug(msg)
            self.lang = 'und'

    @property
    def alpha2(self):
        # ISO-639-1 2-letter code
        return lng3_to_lng2[self.lang]

    @property
    def alpha3(self):
        # ISO-639-2 3-letter bibliographic code (internal representation)
        return self.lang

    @property
    def alpha3term(self):
        # ISO-639-2 3-letter terminologic code
        return lng3_to_lng3term[self.lang]

    @property
    def english_name(self):
        return lng3_to_lng_en_name[self.lang]

    @property
    def french_name(self):
        return lng3_to_lng_fr_name[self.lang]

    @property
    def opensubtitles(self):
        # OpenSubtitles uses a few non-standard codes
        if self.lang == 'por' and self.country and self.country.alpha2 == 'br':
            return 'pob'
        elif self.lang in ['gre', 'srp']:
            return self.alpha3term
        return self.alpha3

    @property
    def tmdb(self):
        # TheMovieDB style code, e.g. 'pt-BR'
        if self.country:
            return '%s-%s' % (self.alpha2, self.country.alpha2.upper())
        return self.alpha2

    def __hash__(self):
        return hash(self.lang)

    def __eq__(self, other):
        if isinstance(other, Language):
            return self.lang == other.lang

        if isinstance(other, base_text_type):
            # allow comparison against plain strings
            try:
                return self == Language(other)
            except ValueError:
                return False

        return False

    def __ne__(self, other):
        return not self == other

    def __nonzero__(self):
        # python2 truthiness: an undetermined language is falsy
        return self.lang != 'und'

    def __unicode__(self):
        if self.country:
            return '%s(%s)' % (self.english_name, self.country.alpha2)
        else:
            return self.english_name

    def __repr__(self):
        if self.country:
            return 'Language(%s, country=%s)' % (self.english_name, self.country)
        else:
            return 'Language(%s)' % self.english_name
# Module-level singletons/constants derived from the ISO tables above.
UNDETERMINED = Language('und')
ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED])
ALL_LANGUAGES_NAMES = lng_all_names
def search_language(string, lang_filter=None):
    """Looks for language patterns, and if found return the language object,
    its group span and an associated confidence.

    you can specify a list of allowed languages using the lang_filter argument,
    as in lang_filter = [ 'fr', 'eng', 'spanish' ]

    >>> search_language('movie [en].avi')
    (Language(English), (7, 9), 0.8)

    >>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])
    (None, None, None)
    """
    # list of common words which could be interpreted as languages, but which
    # are far too common to be able to say they represent a language in the
    # middle of a string (where they most likely carry their commmon meaning)
    lng_common_words = frozenset([
        # english words
        'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
        'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
        'fry', 'cop', 'zen', 'gay', 'fat', 'cherokee', 'got', 'an', 'as',
        'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi',
        # french words
        'bas', 'de', 'le', 'son', 'vo', 'vf', 'ne', 'ca', 'ce', 'et', 'que',
        'mal', 'est', 'vol', 'or', 'mon', 'se',
        # spanish words
        'la', 'el', 'del', 'por', 'mar',
        # other
        'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
        'vi', 'ben', 'da', 'lt'
        ])
    # characters accepted as word separators around a language token
    sep = r'[](){} \._-+'

    if lang_filter:
        lang_filter = lang_set(lang_filter)

    # pad with spaces so the separator check at both ends is always in-bounds
    slow = ' %s ' % string.lower()
    confidence = 1.0 # for all of them

    for lang in set(find_words(slow)) & lng_all_names:

        if lang in lng_common_words:
            continue

        pos = slow.find(lang)
        # NOTE(review): only the first occurrence of the word is examined; a
        # valid standalone occurrence later in the string is missed when the
        # first one fails the separator check -- confirm this is intended.

        if pos != -1:
            end = pos + len(lang)
            # make sure our word is always surrounded by separators
            if slow[pos - 1] not in sep or slow[end] not in sep:
                continue

            language = Language(slow[pos:end])
            if lang_filter and language not in lang_filter:
                continue

            # only allow those languages that have a 2-letter code, those that
            # don't are too esoteric and probably false matches
            if language.lang not in lng3_to_lng2:
                continue

            # confidence depends on lng2, lng3, english name, ...
            if len(lang) == 2:
                confidence = 0.8
            elif len(lang) == 3:
                confidence = 0.9
            else:
                # Note: we could either be really confident that we found a
                # language or assume that full language names are too
                # common words and lower their confidence accordingly
                confidence = 0.3 # going with the low-confidence route here

            # span is reported relative to the original (unpadded) string
            return language, (pos - 1, end - 1), confidence

    return None, None, None
def guess_language(text):
    """Guess the language a body of text is written in.

    Relies on the optional third-party guess-language module; when it is
    not installed an error is logged and Language(Undetermined) is returned.
    """
    try:
        from guess_language import guessLanguage
    except ImportError:
        log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
        log.error('Please install it from PyPI, by doing eg: pip install guess-language')
        return UNDETERMINED
    return Language(guessLanguage(text))
| gpl-3.0 |
bmanojlovic/ansible | lib/ansible/modules/network/panos/panos_admpwd.py | 32 | 5961 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_admpwd
short_description: change admin password of PAN-OS device using SSH with SSH key
description:
- Change the admin password of PAN-OS via SSH using a SSH key for authentication.
- Useful for AWS instances where the first login should be done via SSH.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- paramiko
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
username:
description:
- username for initial authentication
required: false
default: "admin"
key_filename:
description:
- filename of the SSH Key to use for authentication
required: true
newpassword:
description:
- password to configure for admin on the PAN-OS device
required: true
'''
EXAMPLES = '''
# Tries for 10 times to set the admin password of 192.168.1.1 to "badpassword"
# via SSH, authenticating using key /tmp/ssh.key
- name: set admin password
panos_admpwd:
ip_address: "192.168.1.1"
username: "admin"
key_filename: "/tmp/ssh.key"
newpassword: "badpassword"
register: result
until: not result|failed
retries: 10
delay: 30
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "Last login: Fri Sep 16 11:09:20 2016 from 10.35.34.56.....Configuration committed successfully"
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
import time
import sys
# paramiko is an optional dependency; remember whether it is importable so
# main() can fail with a clear message instead of a raw ImportError.
try:
    import paramiko
    HAS_LIB=True
except ImportError:
    HAS_LIB=False
# maximum number of bytes read from the channel per recv() call
_PROMPTBUFF = 4096


def wait_with_timeout(module, shell, prompt, timeout=60):
    """Read from *shell* until the accumulated output ends with *prompt*.

    :param module: AnsibleModule, used to report a timeout failure
    :param shell: paramiko channel-like object exposing recv_ready()/recv()
    :param prompt: single character expected at the end of the output
    :param timeout: seconds to wait before failing the module
    :return: everything read from the channel, including the prompt
    """
    start = time.time()
    result = ""
    while True:
        if shell.recv_ready():
            result += shell.recv(_PROMPTBUFF)
            endresult = result.strip()
            if endresult and endresult[-1] == prompt:
                break
        else:
            # the original loop busy-waited at 100% CPU while the device
            # was thinking; sleep briefly between polls instead
            time.sleep(0.2)
        if time.time() - start > timeout:
            module.fail_json(msg="Timeout waiting for prompt")
    return result
def set_panwfw_password(module, ip_address, key_filename, newpassword, username):
    """Connect to the PAN-OS device over SSH with a key and change the
    password of *username* through the interactive CLI.

    Returns a (changed, stdout) tuple.  In Ansible check mode only the
    connection is tested and the password is left untouched.
    """
    stdout = ""
    ssh = paramiko.SSHClient()
    # add policy to accept all host keys, I haven't found
    # a way to retrieve the instance SSH key fingerprint from AWS
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip_address, username=username, key_filename=key_filename)
    shell = ssh.invoke_shell()

    # wait for the shell to start
    buff = wait_with_timeout(module, shell, ">")
    stdout += buff

    # step into config mode
    shell.send('configure\n')
    # wait for the config prompt
    buff = wait_with_timeout(module, shell, "#")
    stdout += buff

    if module.check_mode:
        # exit and close connection
        shell.send('exit\n')
        ssh.close()
        return False, 'Connection test successful. Password left intact.'

    # set admin password
    shell.send('set mgt-config users ' + username + ' password\n')
    # wait for the password prompt
    buff = wait_with_timeout(module, shell, ":")
    stdout += buff

    # enter password for the first time
    shell.send(newpassword+'\n')
    # wait for the password prompt
    buff = wait_with_timeout(module, shell, ":")
    stdout += buff

    # enter password for the second time
    shell.send(newpassword+'\n')
    # wait for the config mode prompt
    buff = wait_with_timeout(module, shell, "#")
    stdout += buff

    # commit !
    shell.send('commit\n')
    # wait for the prompt; the commit can take a while, hence the longer timeout
    buff = wait_with_timeout(module, shell, "#", 120)
    stdout += buff

    if 'success' not in buff:
        module.fail_json(msg="Error setting " + username + " password: " + stdout)

    # exit
    shell.send('exit\n')
    ssh.close()
    return True, stdout
def main():
    """Ansible module entry point: validate parameters and set the password."""
    argument_spec = dict(
        ip_address=dict(required=True),
        username=dict(default='admin'),
        key_filename=dict(required=True),
        newpassword=dict(no_log=True, required=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_LIB:
        module.fail_json(msg='paramiko is required for this module')
    ip_address = module.params["ip_address"]
    if not ip_address:
        module.fail_json(msg="ip_address should be specified")
    key_filename = module.params["key_filename"]
    if not key_filename:
        module.fail_json(msg="key_filename should be specified")
    newpassword = module.params["newpassword"]
    if not newpassword:
        module.fail_json(msg="newpassword is required")
    username = module.params['username']
    try:
        changed, stdout = set_panwfw_password(module, ip_address, key_filename, newpassword, username)
        module.exit_json(changed=changed, stdout=stdout)
    except Exception:
        x = sys.exc_info()[1]
        # fail_json expects a serializable string message, not the raw
        # exception object (which is not JSON-serializable)
        module.fail_json(msg=str(x))

if __name__ == '__main__':
    main()
| gpl-3.0 |
jymannob/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ringtv.py | 19 | 1952 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RingTVIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
    _TEST = {
        "url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
        "file": "857645.mp4",
        "md5": "d25945f5df41cdca2d2587165ac28720",
        "info_dict": {
            "title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
            "description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
        }
    }

    def _real_extract(self, url):
        """Resolve a Ring TV page to its Springboard-hosted mp4 info dict."""
        match = re.match(self._VALID_URL, url)
        video_id = match.group('id').split('-')[0]
        webpage = self._download_webpage(url, video_id)

        # news articles embed the player in an iframe carrying the real
        # Springboard video id; videos/video pages already use it in the URL
        if match.group('type') == 'news':
            video_id = self._search_regex(
                r'''(?x)<iframe[^>]+src="http://cms\.springboardplatform\.com/
                embed_iframe/[0-9]+/video/([0-9]+)/''',
                webpage, 'real video ID')

        description = self._html_search_regex(
            r'addthis:description="([^"]+)"',
            webpage, 'description', fatal=False)

        storage = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com"
        return {
            'id': video_id,
            'url': "%s/conversion/%s.mp4" % (storage, video_id),
            'title': self._og_search_title(webpage),
            'thumbnail': "%s/snapshots/%s.jpg" % (storage, video_id),
            'description': description,
        }
| gpl-3.0 |
medialab/reanalyse | outside/views.py | 1 | 34992 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib, os
from django.core import serializers
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import User
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponse, HttpRequest, HttpResponseRedirect
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext as _
from mimetypes import guess_extension, guess_type
from reanalyseapp.models import Enquete, Tag, Texte, AccessRequest
from glue.models import Pin, Page
from glue.forms import AddPageForm, AddPinForm, EditPinForm
from outside.models import Enquiry, Subscriber, Confirmation_code
from outside.sites import OUTSIDE_SITES_AVAILABLE
from outside.forms import LoginForm, AddEnquiryForm, SubscriberForm, SignupForm, AccessRequestForm, ChangePasswordForm, ReinitializePasswordForm
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import messages
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.cache import cache
#TEI PART
from reanalyseapp.utils import *
from reanalyseapp.models import *
from reanalyseapp.imexport import *
from reanalyseapp.forms import *
from reanalyseapp.visualization import *
from reanalyseapp.search import *
# Search with haystack
from haystack.views import *
from haystack.forms import *
from haystack.query import *
import zipfile, zlib
# settings.py
LOGIN_URL = '/%s/login/' % settings.ROOT_DIRECTORY_NAME
#
# Outside
# =======
#
def index( request ):
    """Render the home page, creating the 'index' Page (FR + EN) on first visit."""
    data = shared_context( request, tags=[ "index" ] )
    try:
        data['page'] = Page.objects.get( slug="index", language=data['language'])
    except Page.DoesNotExist:
        # bootstrap: create both language variants, then serve the right one
        p_en = Page( title="Home Page", language='EN', slug="index")
        p_en.save()
        p_fr = Page( title="Home Page", language='FR', slug="index")
        p_fr.save()
        data['page'] = p_fr if data['language'] == 'FR' else p_en
    # pins attached to the index page, newest first
    data['pins'] = Pin.objects.filter(language=data['language'], page__slug="index" ).order_by("-id")
    # news = stand-alone pins (no page/enquiry/parent)
    data['news'] = _get_news( data )
    return render_to_response( "%s/index.html" % data['template'], RequestContext(request, data ) )
def news( request ):
    """Render the blog/news listing for the current language."""
    ctx = shared_context( request, tags=[ "news" ] )
    # news items are stand-alone pins (no page, no enquiry, no parent)
    ctx['pins'] = _get_news( ctx )
    return render_to_response( "%s/blog.html" % ctx['template'], RequestContext( request, ctx ) )
def _get_news( data ):
    """Return stand-alone pins (no page, no enquiry, no parent) as news, newest first."""
    orphan_pins = Pin.objects.filter(
        language=data['language'],
        page__isnull=True,
        enquiry__isnull=True,
        parent__isnull=True,
    )
    return orphan_pins.order_by("-id")
def contacts( request ):
    """Render the contact page; authenticated users get a pre-filled subscriber form."""
    data = shared_context( request, tags=[ "contacts" ] )
    # load all pins without page (aka news)
    data['pins'] = Pin.objects.filter(language=data['language'], page__isnull=True, parent__isnull=True ).order_by("-id")
    if request.user.is_authenticated():
        subscriber = Subscriber.objects.get(user=request.user.id)
        # fill the form with the current user's info
        data['subscriber_form'] = SubscriberForm( auto_id="id_subscriber_%s", initial={'email': subscriber.user.email,
                                                                                       'username': request.user.username,
                                                                                       'first_name': request.user.first_name,
                                                                                       'last_name': request.user.last_name,
                                                                                       'affiliation': subscriber.affiliation,
                                                                                       'status': subscriber.status,
                                                                                       'action':'ADD'
                                                                                       } )
    else:
        # anonymous visitors get a blank form
        data['subscriber_form'] = SubscriberForm(auto_id="id_subscriber_%s", initial={'action':'ADD'})
    return render_to_response("%s/contacts.html" % data['template'], RequestContext(request, data ) )
def page( request, page_slug ):
    """Render a generic CMS page by slug in the current language."""
    data = shared_context( request, tags=[ page_slug ] )
    data['page'] = get_object_or_404(Page, slug=page_slug, language=data['language'] )
    data['pins'] = Pin.objects.filter( page__slug=page_slug, language=data['language'], parent=None)
    # NOTE(review): template dir is hard-coded to 'enquete' instead of
    # data['template'] -- looks deliberate but worth confirming
    return render_to_response("%s/page.html" % 'enquete', RequestContext(request, data ) )
def enquete( request, enquete_id ):
    """Render the landing page of a single study (Enquete)."""
    data = shared_context( request, tags=[ "enquetes", "focus-on-enquete", 'visualizations' ] )
    data['enquete'] = get_object_or_404( Enquete, id=enquete_id )
    # slugs of visualization types explicitly disabled for this study
    data['disabled'] = [ t.slug for t in data['enquete'].tags.filter( type=Tag.DISABLE_VISUALIZATION ) ]
    try:
        data['enquiry'] = Enquiry.objects.get( enquete=enquete_id, language=data['language'] )
    except Enquiry.DoesNotExist,e:
        # no "study on the study" in this language; the template copes
        pass
    # data['enquiry'] = None
    return render_to_response('enquete/enquete.html', RequestContext(request, data ) )
    """
    if( settings.REANALYSEURL == 'http://bequali.fr' ) :
        messages.add_message(request, messages.ERROR, 'Cette enquête n\'est pas encore consultable')
        viewurl = reverse('outside.views.enquetes')
        return redirect(viewurl)
    else :
        return render_to_response('enquete/enquete.html', RequestContext(request, data ) )
    """
def enquete_metadata( request, enquete_id ):
    """Render the metadata sheet of a study."""
    data = shared_context( request, tags=[ "enquetes","metadata" ] )
    enquete = get_object_or_404( Enquete, id=enquete_id )
    # attach the flattened metadata items directly on the model instance
    enquete.meta = enquete.meta_items()
    data['enquete'] = enquete
    try:
        data['enquiry'] = Enquiry.objects.get( enquete=enquete_id, language=data['language'] )
    except Enquiry.DoesNotExist,e:
        # optional: no enquiry in this language
        pass
    return render_to_response('enquete/metadata.html', RequestContext(request, data ) )
@login_required( login_url=LOGIN_URL )
#@permission_required('reanalyseapp.can_browse')
def enquete_download( request, enquete_id ):
    """Serve the pre-built zip archive of a study's files.

    Users without the global can_browse permission must have an activated
    AccessRequest for this study; otherwise they are bounced back to the
    study page with a flash message.
    """
    # Check if the user has access to the files
    if( not request.user.has_perm('reanalyseapp.can_browse') ):
        try:
            AccessRequest.objects.get(user=request.user.id, enquete=enquete_id, is_activated=True)
        except AccessRequest.DoesNotExist:
            request.flash['notice'] = _("You don't have access to this document, please ask for access <a class='blue-link' href='%s'>here</a> to ask for permission.") % ( reverse('outside.views.access_request', kwargs={'enquete_id':enquete_id}) )
            viewurl = reverse('outside.views.enquete', kwargs={'enquete_id':enquete_id})
            return redirect(viewurl)
        else:
            pass
    else:
        pass
    enquete = get_object_or_404( Enquete, id=enquete_id )
    # the archive is built offline and kept under .../downloads/
    zippath = settings.REANALYSESAMPLE_STUDIES_FILES+'/downloads/enquete-'+str(enquete.id)+'.zip'
    """zf = zipfile.ZipFile( zippath, mode='w' )
    #return HttpResponse(settings.REANALYSESAMPLE_STUDIES_FILES+'/downloads/enquete-'+str(enquete.id)+'.zip')
    zf = zipdir(settings.REANALYSESAMPLE_STUDIES_FILES+'/downloads/enquete-'+str(enquete.id), zf)"""
    """
    zippath = os.path.join( "/tmp/", "enquete_%s.zip" % enquete.id )
    zf = zipfile.ZipFile( zippath, mode='w' )
    for t in Texte.objects.filter( enquete=enquete ):
        if('é'.decode('utf-8') in t.locationpath):
            t.locationpath= t.locationpath.replace('é'.decode('utf-8'), 'e')
        if os.path.isfile(t.locationpath.decode('utf-8')):
            if( t.locationpath.find('_ol.') or t.locationpath.find('_dl.') ):
                zf.write( t.locationpath, compress_type=zipfile.ZIP_DEFLATED,
                          arcname= t.locationpath.split('/', 7)[7])
    """
    response = HttpResponse( open( zippath , 'r' ) , content_type="application/gzip" )
    response['Content-Description'] = "File Transfer";
    response['Content-Disposition'] = "attachment; filename=enquete-%s.zip" % ( enquete.id )
    return response
def zipdir(path, zip):
    """Recursively add every file under *path* to the open ZipFile *zip*.

    Archive names are the on-disk paths with their first six '/'-separated
    components stripped. Returns the same ZipFile for chaining.
    """
    for directory, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            full_path = os.path.join(directory, filename)
            zip.write(full_path,
                      compress_type=zipfile.ZIP_DEFLATED,
                      arcname=full_path.split('/', 6)[6])
    return zip
@login_required( login_url=LOGIN_URL )
#@permission_required('reanalyseapp.can_browse')
def document( request, document_id ):
    """Render a single study document (Texte).

    For TEI transcriptions the view also assembles the related
    visualization, sentence-range bounds, paraverbal legend, treetagger
    codes, speaker colors and speaker lists. Access is checked against
    AccessRequest unless the user has the global can_browse permission.
    """
    data = shared_context( request, tags=[ "enquetes","metadata" ] )
    data['document'] = document = get_object_or_404( Texte, id=document_id )
    locationpath = str(document.locationpath)
    # human-readable id derived from the file name
    data['document'].spec_id = locationpath.split('/')[-1].replace('_', ' _ ')
    data['enquete'] = enquete = document.enquete
    data['mimetype'] = guess_type( document.locationpath )[0]
    # strip the first 5 path components so only the public part is exposed
    data['document'].locationpath = data['document'].locationpath.split('/', 5)[5]
    ###### ANY DOCUMENT
    e = Enquete.objects.get(id=document.enquete.id)
    texte = Texte.objects.get(id=document_id)
    ctx = {'enquete':texte.enquete,'texte':texte,'bodyid':'e','pageid':'documents'}
    ######################################### TEI
    if texte.doctype=='TEI':
        ###### RELATED VIZ
        # we can take all related viz if we want
        #ctx.update({'visualizations':getRelatedViz(textes=[texte])})
        # now testing with only the textstreamtimeline
        try:
            streamtimelineviz = Visualization.objects.get(textes=texte,viztype='TexteStreamTimeline')
        except:
            # NOTE(review): bare excepts -- fall back to the first match,
            # then to None, whatever the failure
            try:
                streamtimelineviz = Visualization.objects.filter(textes=texte,viztype='TexteStreamTimeline')[0]
            except:
                streamtimelineviz = None
        ctx.update({'visualization':streamtimelineviz})
        maxTextPart = texte.sentence_set.aggregate(Max('i')).values()[0]
        if request.GET.get('highlight'):
            ctx.update({'highlight':request.GET.get('highlight')})
        # 'around' centers a 5-part window; otherwise use explicit from/to
        if request.GET.get('around'):
            around = int(request.GET.get('around'))
            minPart = max(0,around-2)
            maxPart = min(maxTextPart,around+2)
        else:
            minPart = request.GET.get('from',0)
            maxPart = request.GET.get('to',maxTextPart)
        ctx.update({'minpart':minPart,'maxpart':maxPart,'totalmaxparts':maxTextPart})
        ### CODES_PARAVERBAL DICT FOR LEGEND (see globalvars)
        newPARVBCODES={}
        newPARVBCODES['Transcription'] = ['comment']
        newPARVBCODES['Verbatim'] = []
        newPARVBCODES={}
        newPARVBCODES['Transcription'] = []
        newPARVBCODES['Verbatim'] = []
        #return HttpResponse(f, 'text')
        # keep only the paraverbal codes actually present in the file
        # NOTE(review): the grep command line is built from code and path by
        # string interpolation -- shell injection risk if either is untrusted
        for code,label,css in PARVBCODES['Verbatim'] :
            import commands
            a = commands.getoutput('grep -l %s %s' % (code, texte.locationpath) )
            if(a != ""):
                newPARVBCODES['Verbatim'] += [[code, label, css]]
        for code,label,css in PARVBCODES['Transcription'] :
            a = commands.getoutput('grep -l %s %s' % (code, texte.locationpath) )
            if(a != ""):
                newPARVBCODES['Transcription'] += [[code, label, css]]
        ctx.update({'paraverbal':newPARVBCODES})
        #ctx.update({'paraverbal':PARVBCODES})
        ### CODES_TREETAGGER DICT FOR display
        ctx.update({'codes_treetagger':CODES_TREETAGGER})
        ### COLORS FOR SPEAKERS
        speakersColors = getRandomSpeakersColorsDict(e,texte)
        ctx.update({'speakersColors':speakersColors})
        ### SPEAKERS
        inv = texte.speaker_set.filter(ddi_type="INV")
        spk = texte.speaker_set.filter(ddi_type="SPK")
        pro = texte.speaker_set.filter(ddi_type="PRO")
        ctx.update({'speakers':{'inv':inv,'spk':spk,'pro':pro}})
    #return HttpResponse(document.locationpath, 'text')
    if( not request.user.has_perm('reanalyseapp.can_browse') ):
        # Check if the user has access to the files
        try:
            req = AccessRequest.objects.get(user=request.user.id, enquete=document.enquete.id, is_activated=True)
        except AccessRequest.DoesNotExist:
            viewurl = reverse('outside.views.enquete', kwargs={'enquete_id':document.enquete.id})
            request.flash['notice'] = _("You don't have access to this document, please ask for access <a class='blue-link' href='%s'>here</a> to ask for permission.") % ( reverse('outside.views.access_request', kwargs={'enquete_id':document.enquete.id}) )
            return redirect(viewurl)
        else:
            pass
    else:
        pass
    return render_to_response('enquete/document.html',ctx, RequestContext(request, data ) )
    """
    from lxml import etree
    if(document.doctype == "TEI"):
        try:
            xml_input = etree.parse(document.locationpath)
            xslt_root = etree.parse("/var/opt/reanalyse/static/xsl/tei.xsl")
            transform = etree.XSLT(xslt_root)
            #return HttpResponse( str(transform(xml_input)) , mimetype='texte' )
        except Exception, e:
            return HttpResponse( str(e) , mimetype='application/xml' )
    else:
        return render_to_response('enquete/document.html', {'xslt_render':transform(xml_input)}, RequestContext(request, data ) )
    return render_to_response('enquete/document.html', RequestContext(request, data ) )"""
@login_required( login_url=LOGIN_URL )
#@permission_required('reanalyseapp.can_browse')
def document_download( request, document_id ):
    """Stream a document as an attachment, after the per-study access check."""
    data = shared_context( request )
    document = get_object_or_404( Texte, id=document_id )
    if( not request.user.has_perm('reanalyseapp.can_browse') ):
        # Check if the user has access to the files
        try:
            AccessRequest.objects.get(user=request.user.id, enquete=document.enquete.id, is_activated=True)
        except AccessRequest.DoesNotExist:
            request.flash['notice'] = _("You don't have access to this document, please ask for access <a class='blue-link' href='%s'>here</a> to ask for permission.") % ( reverse('outside.views.access_request', kwargs={'enquete_id':document.enquete.id}) )
            viewurl = reverse('outside.views.enquete', kwargs={'enquete_id':document.enquete.id})
            return redirect(viewurl)
        else:
            pass
    else:
        pass
    mimetype = guess_type( document.locationpath )[0]
    try:
        extension = guess_extension( mimetype )
        content_type = mimetype
    except AttributeError, e:
        # unknown mimetype (None): fall back to the file's own extension
        filetitle, extension = os.path.splitext( document.locationpath )
        content_type = "application/octet-stream"
    response = HttpResponse( open( document.locationpath , 'r' ) , content_type=content_type )
    response['Content-Description'] = "File Transfer";
    response['Content-Disposition'] = "attachment; filename=%s-%s-%s%s" % ( document.enquete.id, document.id, document.name, extension )
    return response
@login_required( login_url=LOGIN_URL )
#@permission_required('reanalyseapp.can_browse')
def document_embed( request, document_id ):
    """Stream a document inline (no attachment header), for in-page embedding."""
    data = shared_context( request )
    document = get_object_or_404( Texte, id=document_id )
    mimetype = guess_type( document.locationpath )[0]
    if( not request.user.has_perm('reanalyseapp.can_browse') ):
        # Check if the user has access to the files
        try:
            AccessRequest.objects.get(user=request.user.id, enquete=document.enquete.id, is_activated=True)
        except AccessRequest.DoesNotExist:
            request.flash['notice'] = _("You don't have access to this document, please ask for access <a class='blue-link' href='%s'>here</a> to ask for permission.") % ( reverse('outside.views.access_request', kwargs={'enquete_id':document.enquete.id}) )
            viewurl = reverse('outside.views.enquete', kwargs={'enquete_id':document.enquete.id})
            return redirect(viewurl)
        else:
            pass
    else:
        pass
    try:
        extension = guess_extension( mimetype )
        content_type = mimetype
    except AttributeError, e:
        # unknown mimetype (None): serve as a generic binary stream
        filetitle, extension = os.path.splitext( document.locationpath )
        content_type = "application/octet-stream"
    return HttpResponse( open( document.locationpath , 'r' ) , mimetype=content_type )
from django.utils import simplejson
def enquiry( request, enquete_id ):
    """Render the 'study on the study' (Enquiry) attached to a study."""
    data = shared_context( request, tags=[ "enquetes","enquiry" ] )
    try:
        data['enquiry'] = Enquiry.objects.get( enquete__id=enquete_id, language=data['language'])
    except Enquiry.DoesNotExist:
        request.flash['notice'] = _("There is no research on this research")
        return redirect(reverse('outside.views.enquetes'))
    else:
        data['enquete'] = data['enquiry'].enquete
        # ordered sections, also serialized to JSON for the client-side widget
        data['sections'] = data['enquiry'].pins.order_by(*["sort","-id"])
        data['j_sections'] = serializers.serialize("json", data['enquiry'].pins.order_by(*["sort","-id"]))
    return render_to_response('enquete/enquiry.html', RequestContext(request, data ) )
def enquiries( request ):
    """List all enquiries, creating the 'enquiries' Page (FR + EN) on first visit."""
    data = shared_context( request, tags=[ "enquiries" ] )
    try:
        data['page'] = Page.objects.get( slug="enquiries", language=data['language'])
    except Page.DoesNotExist:
        # bootstrap both language variants of the listing page
        p_en = Page( title="studies on studies", language='EN', slug="enquiries")
        p_en.save()
        p_fr = Page( title="enquêtes sur les enquêtes", language='FR', slug="enquiries")
        p_fr.save()
        data['page'] = p_fr if data['language'] == 'FR' else p_en
    data['enquiries'] = Enquiry.objects.filter( language=data['language'] )
    return render_to_response('enquete/enquiries.html', RequestContext(request, data ) )
def enquetes( request ):
    """List every study together with the 'enquetes' CMS page and its pins."""
    ctx = shared_context( request, tags=[ "enquetes" ] )
    ctx['enquetes'] = Enquete.objects.all()
    ctx['page'] = get_object_or_404( Page, slug="enquetes", language=ctx['language'] )
    ctx['pins'] = Pin.objects.filter( page__slug="enquetes", language=ctx['language'], parent=None )
    return render_to_response( "enquete/enquetes.html", RequestContext( request, ctx ) )
def download_view( request, pin_slug ):
    """Serve the local file attached to a pin as an attachment."""
    data = shared_context( request )
    pin = get_object_or_404(Pin, slug=pin_slug, language=data['language'] )
    data['root'] = settings.MEDIA_ROOT
    try:
        extension = guess_extension( pin.mimetype )
        content_type = pin.mimetype
    except AttributeError, e:
        # pin has no usable mimetype: fall back to the stored file's extension
        filetitle, extension = os.path.splitext( pin.local.url )
        content_type = "application/octet-stream"
    response = HttpResponse( open( os.path.join( settings.MEDIA_ROOT, urllib.unquote( pin.local.url ) ), 'r' ) , content_type=content_type )
    response['Content-Description'] = "File Transfer";
    response['Content-Disposition'] = "attachment; filename=%s%s" % ( pin_slug, extension )
    return response
def legal( request ):
    """Render the legal-notice page, creating it (FR + EN) on first visit."""
    data = shared_context( request, tags=[ "legal" ] )
    try:
        data['page'] = Page.objects.get( slug="legal-notice", language=data['language'])
    except Page.DoesNotExist:
        # bootstrap both language variants
        p_en = Page( title="Legal", language='EN', slug="legal-notice")
        p_en.save()
        p_fr = Page( title="Mentions légales", language='FR', slug="legal-notice")
        p_fr.save()
        data['page'] = p_fr if data['language'] == 'FR' else p_en
    # pins attached to the legal-notice page, newest first
    data['pins'] = Pin.objects.filter(language=data['language'], page__slug="legal-notice" ).order_by("-id")
    # get news
    # data['news'] = Pin.objects.filter(language=data['language'], page__isnull=True, status=Pin.published ).order_by("-id")
    return render_to_response( "%s/legal.html" % data['template'], RequestContext(request, data ) )
def login_view( request ):
    """Show the login form, bouncing already-authenticated users home."""
    if request.user.is_authenticated():
        return redirect( reverse('outside_index') )
    ctx = shared_context( request, tags=[ "index" ] )
    ctx['login_form'] = LoginForm( request.POST, auto_id="id_login_%s" )
    ctx['signup_url'] = reverse('outside_signup_generic')
    return render_to_response('outside/login.html', RequestContext( request, ctx ) )
def access_request(request, enquete_id=None):
    """Show the form through which a user asks for access to one study.

    Anonymous users are sent to login; users with a pending or granted
    AccessRequest (or the global can_browse permission) get a flash
    message instead of the form.
    """
    data = shared_context( request, tags=[ "enquetes", "access_request" ] )
    if enquete_id is not None:
        data['enquete'] = get_object_or_404( Enquete, id=enquete_id )
    data['enquetes'] = Enquete.objects.all()
    # If connected ...
    if not request.user.is_authenticated():
        return redirect(LOGIN_URL+'?next='+reverse('outside.views.access_request', kwargs={'enquete_id':enquete_id}))
    else:
        # Verify if he has already requested the enquete
        try:
            access = AccessRequest.objects.get(user=request.user.id, enquete=enquete_id, )
        except AccessRequest.DoesNotExist:
            if( request.user.has_perm('reanalyseapp.can_browse') ):
                request.flash['notice'] = _("You don't need to ask for an acces because you are an admin, but you can test")
            try:
                subscriber = Subscriber.objects.get(user=request.user.id)
            except Subscriber.DoesNotExist:
                # no subscriber profile yet: fall through without a form
                pass
                #redirect to creation profileS
            else:
                # fill the form with the subscriber's current info
                data['access_request_form'] = AccessRequestForm( auto_id="id_access_request_%s", initial={'email': subscriber.user.email,
                                                                                                          'username': request.user.username,
                                                                                                          'first_name': request.user.first_name,
                                                                                                          'last_name': request.user.last_name,
                                                                                                          'affiliation': subscriber.affiliation,
                                                                                                          'status': subscriber.status,
                                                                                                          'enquete': enquete_id
                                                                                                          } )
                data['access_request_form']['enquete'].editable = False
        else:
            viewurl = reverse('outside.views.enquete', kwargs={'enquete_id':enquete_id})
            if(access.is_activated == True):
                error_str = _('You already have access to this research.')
            else:
                error_str = _('You already asked for this research, you will be notified when your access is granted.')
            request.flash['notice'] = error_str
            return redirect(viewurl)
    return render_to_response("enquete/access_form.html", RequestContext(request, data ) )
def signup( request, enquete_id=None ):
    """Render the signup form, optionally in the context of one study."""
    if enquete_id is None:
        ctx = shared_context( request, tags=[ "signup" ] )
    else:
        ctx = shared_context( request, tags=[ "enquetes", "signup" ] )
        ctx['enquete'] = get_object_or_404( Enquete, id=enquete_id )
    ctx['signup_form'] = SignupForm( auto_id="id_signup_%s" )
    return render_to_response( "%s/signup.html" % ctx['template'], RequestContext( request, ctx ) )
def confirm( request, token, user_id, action ):
    """Consume a one-shot confirmation code and perform its action.

    'signup': notify the admins by email and activate the user account.
    'reinitPass': generate a random password, set it on the account and
    email it to the subscriber. The code is deactivated in both cases.
    """
    import string
    import random
    def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
        # random temporary password (NOTE(review): 'random' is not a CSPRNG;
        # 'secrets' would be preferable for password material)
        return ''.join(random.choice(chars) for x in range(size))
    subscriber = get_object_or_404( Subscriber, user__id=user_id )
    confirmation_code = get_object_or_404( Confirmation_code, code=token, action=action, activated = True )
    data = shared_context( request, tags=[ "confirm" ] )
    if(action == 'signup'):
        subject, from_email, to = _("Signup request"), _("Bequali Team")+"<equipe@bequali.fr>", settings.EMAIL_ADMINS
        path = '%s%s' % (settings.REANALYSEURL, reverse('admin:auth_user_change', args=[subscriber.user.id]) )
        html_content = render_to_string('email/signup.html',
                                        {'action':'admin_notification',
                                         'prenom': subscriber.first_name,
                                         'nom': subscriber.last_name,
                                         'email': subscriber.user.email,
                                         'affiliation': subscriber.affiliation,
                                         'status': unicode(dict(Subscriber.STATUS_CHOICES)[subscriber.status], 'utf-8'),
                                         'description': subscriber.description,
                                         'admin_url' : path})
        text_content = html_content.replace('<br/>', '\n')
        msg = EmailMultiAlternatives(subject, text_content, from_email, to)
        msg.attach_alternative(html_content, 'text/html')
        msg.content_subtype = 'html'
        msg.send()
        # burn the code and activate the account
        confirmation_code.activated = False
        confirmation_code.save()
        user = subscriber.user
        user.is_active = True
        user.save()
    elif (action == "reinitPass") :
        subject, from_email, to = _("beQuali password reinitialization"), _("Bequali Team")+"<equipe@bequali.fr>", subscriber.user.email
        login_view = reverse('outside.views.login_view')
        change_password_view = reverse('outside.views.change_password')
        login_url = '%s%s' % (settings.REANALYSEURL, login_view )
        change_password_url = '%s%s' % (settings.REANALYSEURL, change_password_view )
        rand_pass = id_generator()
        user = subscriber.user
        user.set_password(rand_pass)
        user.save()
        html_content = render_to_string('email/reinitialize_password.html',
                                        {'action':'reinitialize_notification',
                                         'username':subscriber.user.username,
                                         'prenom': subscriber.first_name,
                                         'password':rand_pass,
                                         'nom': subscriber.last_name,
                                         'login_url' : login_url,
                                         'change_password_url': change_password_url,})
        text_content = html_content.replace('<br/>', '\n')
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, 'text/html')
        msg.content_subtype = 'html'
        msg.send()
        # burn the code
        confirmation_code.activated = False
        confirmation_code.save()
    data['action'] = action
    data['email'] = subscriber.user.email
    return render_to_response("%s/confirm.html" % data['template'], RequestContext(request, data ) )
def logout_view( request ):
    """Log the current user out and return to the landing page."""
    logout( request )
    return redirect( 'outside_index' )
def studies( request ):
    """Render the (currently static) studies overview page."""
    ctx = shared_context( request, tags=[ "studies" ] )
    return render_to_response( 'outside/studies.html', RequestContext( request, ctx ) )
def shared_context( request, tags=None, previous_context=None ):
    """Build the context dict shared by every outside view.

    Fix: the previous signature used mutable default arguments
    (tags=[], previous_context={}); the default dict was mutated on every
    call, so keys set during one request leaked into the next.
    Behavior for callers that pass explicit arguments is unchanged.
    """
    # startup
    d = previous_context if previous_context is not None else {}
    d['tags'] = tags if tags is not None else []
    d['site'] = settings.OUTSIDE_SITE_NAME
    d['sites_available'] = OUTSIDE_SITES_AVAILABLE
    d['stylesheet'] = settings.OUTSIDE_THEME
    d['template'] = settings.OUTSIDE_TEMPLATE_DIR
    d['REANALYSEURL'] = settings.REANALYSEURL
    d['next'] = request.path
    # if it is not auth, pull loginform; otherwise expose edit-mode tools
    if request.user.is_authenticated():
        load_edit_mode( request, d )
    else:
        d['login_form'] = LoginForm()
    # load language and share it inside context
    load_language( request, d )
    # menu entries: every activated page except the legal notice
    d['pages'] = [ p for p in Page.objects.exclude(slug="legal-notice").filter( language=d['language'], activated=True ).order_by(*['sort','id']) ]
    return d
def load_edit_mode( request, d ):
    """Toggle and expose the CMS edit mode for CONTENT EDITOR users.

    Mutates context *d* in place; edit-mode state persists in the session.
    """
    d['has_edit_mode'] = request.user.groups.filter(name="CONTENT EDITOR").count() != 0
    # check permission
    if not d['has_edit_mode']:
        d['edit_mode'] = False;
        return
    # check enable/disable action carried by this request
    if request.REQUEST.get('enable-edit-mode', None) is not None:
        request.session['edit_mode'] = True
    elif request.REQUEST.get('disable-edit-mode', None) is not None:
        request.session['edit_mode'] = False
    d['edit_mode'] = request.session['edit_mode'] if request.session.get('edit_mode', False ) else False
    # expose the editing forms only while edit mode is on
    if d['edit_mode']:
        d['add_page_form'] = AddPageForm( auto_id="id_add_page_%s" )
        d['add_pin_form'] = AddPinForm( auto_id="id_add_pin_%s" )
        d['edit_pin_form'] = EditPinForm( auto_id="id_edit_pin_%s" )
        d['add_enquiry_form'] = AddEnquiryForm( auto_id="id_add_enquiry_%s" )
        #d['pageaddform'] = PageAddForm(auto_id="id_page_%s")
#
# Load language features into view.
# d is context dictionary
#
def load_language( request, d ):
    """Set the active translation language and record it in context *d*.

    Currently hard-wired to French: the function returns after activating
    'FR'; everything below the first return is the dormant multi-language
    negotiation code, kept for when it is re-enabled.
    """
    from django.utils.translation import activate
    language = request.GET.get('lang', None)
    # default: FR, hack
    d['language'] = language = 'FR'
    activate(language)
    return language
    # --- unreachable from here on (see docstring) ---
    if language is None:
        # load from somewhere
        language = request.LANGUAGE_CODE
    elif language in ['en','fr'] :
        # @todo: a better language match in settings.LANGUAGES
        if hasattr(request, 'session'):
            request.session['django_language'] = language
        else:
            #response.set_cookie(settings.LANGUAGE_COOKIE_NAME, language)
            activate(language)
    else:
        d['warnings'] = _('language not found')
        language = 'fr'
    d['available_languages'] = settings.LANGUAGES
    d['language'] = language.upper()
    return language
@login_required( login_url=LOGIN_URL )
def change_password(request):
    """Show the change-password form with the username locked to the current user."""
    data = shared_context( request, tags=[ "form", "change_password" ] )
    data['change_password_form'] = ChangePasswordForm( auto_id="id_change_password_%s", initial={'username': request.user.username} )
    data['change_password_form']['username'].editable = False;
    return render_to_response("hub/change_password.html", RequestContext(request, data ) )
@login_required( login_url=LOGIN_URL )
def edit_profile(request):
    """Show the profile form pre-filled with the current subscriber's data."""
    data = shared_context( request, tags=[ "form", "edit_profile" ] )
    subscriber = Subscriber.objects.get(user=request.user.id)
    # Fill form with the subscriber's info.
    # Fixes: removed an unused SubscriberForm instance, and the duplicate
    # 'email' key in the initial dict (subscriber.user.email was silently
    # overridden by subscriber.email, which is the value kept here).
    data['edit_profile_form'] = SubscriberForm( auto_id="id_edit_profile_%s", initial={
        'first_name': subscriber.first_name,
        'last_name': subscriber.last_name,
        'email': subscriber.email,
        'affiliation': subscriber.affiliation,
        'status': subscriber.status,
        'description': subscriber.description,
        'action': 'EDIT',
    } )
    return render_to_response("hub/edit_profile.html", RequestContext(request, data ) )
@login_required( login_url=LOGIN_URL )
def create_profile(request):
    """Show the profile-creation form; redirect users who already have one."""
    #return HttpResponse( request.user.id, mimetype='texte' )
    try:
        subscriber = Subscriber.objects.get(user=request.user.id)
    except Subscriber.DoesNotExist:
        # no profile yet: show the blank form
        data = shared_context( request, tags=[ "form", "create_profile" ] )
        form = SubscriberForm( auto_id="id_subscriber_%s")
        # Fill form with user infos
        data['create_profile_form'] = SubscriberForm( auto_id="id_subscriber_%s", initial={'action':'ADD'})
    else:
        request.flash['notice'] = _("You already have a profile")
        return redirect( request.REQUEST.get('next', 'outside_index') )
    return render_to_response("hub/create_profile.html", RequestContext(request, data ) )
def reinitialize_password(request):
    """Render the 'forgotten password' request form."""
    ctx = shared_context( request, tags=[ "form", "reinitialize_passwd" ] )
    ctx['reinitialize_password_form'] = ReinitializePasswordForm( auto_id="id_reinitialize_password_%s" )
    return render_to_response( "hub/reinitialize_passwd.html", RequestContext( request, ctx ) )
def download_page( request, enquete_id ):
    """Render the download landing page of a study, after the access check."""
    data = shared_context( request, tags=[ "download" ] )
    if( not request.user.has_perm('reanalyseapp.can_browse') ):
        # users without global browse rights need an activated AccessRequest
        try:
            AccessRequest.objects.get(user=request.user.id, enquete=enquete_id, is_activated=True)
        except AccessRequest.DoesNotExist:
            request.flash['notice'] = _("You don't have access to this document, please ask for access <a class='blue-link' href='%s'>here</a> to ask for permission.") % ( reverse('outside.views.access_request', kwargs={'enquete_id':enquete_id}) )
            viewurl = reverse('outside.views.enquete', kwargs={'enquete_id':enquete_id})
            return redirect(viewurl)
        else:
            pass
    if enquete_id is not None:
        data['enquete'] = get_object_or_404( Enquete, id=enquete_id )
        data['dl_link'] = reverse('outside_enquete_download', args=[enquete_id])
    return render_to_response("hub/download.html", RequestContext(request, data ) )
def evGetJson(request,eid,vid):
    """Serve the stored JSON payload of one visualization."""
    viz = Visualization.objects.get( enquete__id=eid, id=vid )
    return HttpResponse( viz.json, mimetype="application/json" )
###########################################################################
@login_required
def evSaveHtml(request,eid,vid):
    """Persist client-rendered HTML for a visualization.

    NOTE(review): request.POST is a QueryDict and is stored as-is in
    contenthtml -- presumably the client posts the markup; confirm the
    expected payload shape.
    """
    v = Visualization.objects.get(enquete__id=eid,id=vid)
    thehtml = request.POST
    v.contenthtml = thehtml
    v.save()
    return HttpResponse("done", mimetype="application/json")
from django.views.decorators.cache import cache_page
def dGetHtmlContent(request,eid,did):
    """Return the rendered HTML fragment for a slice of a document.

    Query parameters: 'from'/'to' bound the sentence range, 'highlight'
    is an optional term forwarded to the template.
    """
    texte = Texte.objects.get(id=did)
    sStart = request.GET.get('from',0)
    sEnd = request.GET.get('to',0)
    # cache per document AND per requested range: a single per-document
    # key would serve the first-requested slice for every later range
    key = "timeparts_%s_%s_%s_%s" % ( eid, did, sStart, sEnd )
    timeparts = cache.get(key)
    if timeparts is None:
        timeparts = getTextContent(texte,sStart,sEnd)
        cache.set(key, timeparts, 1000)
    # build the context BEFORE optionally adding the highlight term
    # (the previous code updated 'ctx' before it existed -> NameError
    # whenever 'highlight' was passed)
    ctx = {'timeparts':timeparts}
    if request.GET.get('highlight'):
        ctx.update({'highlight':request.GET.get('highlight')})
    return render_to_response('bq_render_d.html', ctx, context_instance=RequestContext(request))
###################################################################################################################################
@login_required
def enquete_admin(request):
    # Admin dashboard view: lists all studies plus the study folders present
    # on the server that are available for upload. Large chunks of older
    # admin features below are disabled with bare triple-quoted strings
    # (no-op expression statements) and are kept verbatim.
    ### unique foldername if some upload is done
    sessionFolderName = "up_"+str(time())
    ctx = {'bodyid':'admin','foldname':sessionFolderName}
    """
    ### todo: move that somewhere else to do it just when website/database is reset
    try:
        init_users()
    except:
        donothing=1
    ### check if solr launched, relaunch it if needed
    if checkSolrProcess():
        ctx.update({'solrstatus':'was off. but refreshing this page has relaunched it. wait 5,7s and refresh again to be sure'})
    else:
        ctx.update({'solrstatus':'is running !'})
    ctx.update({'staffemail':settings.STAFF_EMAIL})
    ### log file
    #logger.info("Looking at ADMIN page")
    wantedCount = int(request.GET.get('log','50'))
    log_django = getTailOfFile(settings.REANALYSELOGDJANGO,wantedCount)
    #log_solr = getTailOfFile(settings.REANALYSELOGSOLR,wantedCount)
    ctx.update({'log_django':log_django})
    ### solr path
    ctx.update({'BASE_URL':settings.BASE_URL,'solr_url':settings.SOLR_URL})
    """
    ### all enquetes
    ctx.update({'enquetes':Enquete.objects.all()})
    """
    ### default page is 'users'
    ctx.update({'page':request.GET.get('page','users')})
    ### static pages : (they are also loaded one at at time on the home page) load them all now
    for name in ['project','method','access']:
        for lan in ['en','fr']:
            nothing = getStaticHtmlContent(name,lan)
    ### users
    users={}
    users['header']=['username','name','email','status','group','full study access','joined','last login']
    users['rows']=[]
    for u in User.objects.order_by('id'):
        uTab=[]
        uTab.append('<a href="'+settings.BASE_URL+'admin/auth/user/'+str(u.id)+'">'+u.username+'</a>')
        uTab.append(u.last_name +" "+ u.first_name)
        uTab.append(u.email)
        # STATUS (activated?)
        sstr="need to be activated..."
        if u.is_active:
            sstr="activated"
        uTab.append(sstr)
        # GROUPS
        gstr=""
        if u.is_staff:
            gstr="STAFF "
        for g in u.groups.all():
            gstr+=g.name+" "
        uTab.append(gstr)
        # PERMISSIONS
        pstr=""
        for e in Enquete.objects.order_by('id'):
            if u.has_perm('reanalyseapp.can_explore_'+str(e.id)):
                pstr+="["+str(e.id)+"] "+e.name+"<br/>"
        uTab.append(pstr)
        # DATES JOINED LASTLOGIN
        uTab.append(u.date_joined.strftime("%a %x"))
        uTab.append(u.last_login.strftime("%a %d at %Hh%M"))
        users['rows'].append(uTab)
    ctx.update({'users':users})
    """
    ### upload of available studies
    serverAvailableStudies = []
    for foldername in os.listdir(settings.REANALYSESAMPLE_STUDIES_FILES):
        #logger.info("Listing existing study folder: "+foldername)
        serverAvailableStudies.append({'foldername':foldername})
    ctx.update({'serverAvailableStudies':serverAvailableStudies})
    data = shared_context( request, tags=[ "enquete_admin" ] )
    ctx.update({'data':data})
    return render_to_response('enquete/enquete_admin.html', ctx , context_instance=RequestContext(request, data))
| lgpl-3.0 |
tealover/nova | nova/tests/unit/fake_server_actions.py | 97 | 4562 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova import db
# Canonical identifiers shared by all the fake fixtures below.
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
FAKE_REQUEST_ID1 = 'req-3293a3f1-b44c-4609-b8d2-d81b105636b8'
FAKE_REQUEST_ID2 = 'req-25517360-b757-47d3-be45-0e8d2a01b36a'
FAKE_ACTION_ID1 = 123
FAKE_ACTION_ID2 = 456

# instance uuid -> request id -> fake instance-action DB row.
FAKE_ACTIONS = {
    FAKE_UUID: {
        FAKE_REQUEST_ID1: {'id': FAKE_ACTION_ID1,
                           'action': 'reboot',
                           'instance_uuid': FAKE_UUID,
                           'request_id': FAKE_REQUEST_ID1,
                           'project_id': '147',
                           'user_id': '789',
                           'start_time': datetime.datetime(
                               2012, 12, 5, 0, 0, 0, 0),
                           'finish_time': None,
                           'message': '',
                           'created_at': None,
                           'updated_at': None,
                           'deleted_at': None,
                           'deleted': False,
                           },
        FAKE_REQUEST_ID2: {'id': FAKE_ACTION_ID2,
                           'action': 'resize',
                           'instance_uuid': FAKE_UUID,
                           'request_id': FAKE_REQUEST_ID2,
                           'user_id': '789',
                           'project_id': '842',
                           'start_time': datetime.datetime(
                               2012, 12, 5, 1, 0, 0, 0),
                           'finish_time': None,
                           'message': '',
                           'created_at': None,
                           'updated_at': None,
                           'deleted_at': None,
                           'deleted': False,
                           }
    }
}

# action id -> list of fake instance-action-event DB rows.
FAKE_EVENTS = {
    FAKE_ACTION_ID1: [{'id': 1,
                       'action_id': FAKE_ACTION_ID1,
                       'event': 'schedule',
                       'start_time': datetime.datetime(
                           2012, 12, 5, 1, 0, 2, 0),
                       'finish_time': datetime.datetime(
                           2012, 12, 5, 1, 2, 0, 0),
                       'result': 'Success',
                       'traceback': '',
                       'created_at': None,
                       'updated_at': None,
                       'deleted_at': None,
                       'deleted': False,
                       },
                      {'id': 2,
                       'action_id': FAKE_ACTION_ID1,
                       'event': 'compute_create',
                       'start_time': datetime.datetime(
                           2012, 12, 5, 1, 3, 0, 0),
                       'finish_time': datetime.datetime(
                           2012, 12, 5, 1, 4, 0, 0),
                       'result': 'Success',
                       'traceback': '',
                       'created_at': None,
                       'updated_at': None,
                       'deleted_at': None,
                       'deleted': False,
                       }
                      ],
    FAKE_ACTION_ID2: [{'id': 3,
                       'action_id': FAKE_ACTION_ID2,
                       'event': 'schedule',
                       'start_time': datetime.datetime(
                           2012, 12, 5, 3, 0, 0, 0),
                       'finish_time': datetime.datetime(
                           2012, 12, 5, 3, 2, 0, 0),
                       'result': 'Error',
                       'traceback': '',
                       'created_at': None,
                       'updated_at': None,
                       'deleted_at': None,
                       'deleted': False,
                       }
                      ]
}
def fake_action_event_start(*args):
    """Stub for db.action_event_start; returns a canned event dict."""
    events = FAKE_EVENTS[FAKE_ACTION_ID1]
    return events[0]
def fake_action_event_finish(*args):
    """Stub for db.action_event_finish; returns the same canned event dict."""
    events = FAKE_EVENTS[FAKE_ACTION_ID1]
    return events[0]
def stub_out_action_events(stubs):
    """Replace the db action-event hooks with the fakes defined above."""
    for name, fake in (('action_event_start', fake_action_event_start),
                       ('action_event_finish', fake_action_event_finish)):
        stubs.Set(db, name, fake)
| apache-2.0 |
johankaito/fufuka | graph-tool/src/graph_tool/topology/__init__.py | 2 | 70368 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2015 Tiago de Paula Peixoto <tiago@skewed.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
``graph_tool.topology`` - Assessing graph topology
--------------------------------------------------
Summary
+++++++
.. autosummary::
:nosignatures:
shortest_distance
shortest_path
pseudo_diameter
similarity
isomorphism
subgraph_isomorphism
mark_subgraph
max_cardinality_matching
max_independent_vertex_set
min_spanning_tree
random_spanning_tree
dominator_tree
topological_sort
transitive_closure
tsp_tour
sequential_vertex_coloring
label_components
label_biconnected_components
label_largest_component
label_out_component
kcore_decomposition
is_bipartite
is_DAG
is_planar
make_maximal_planar
edge_reciprocity
Contents
++++++++
"""
from __future__ import division, absolute_import, print_function
from .. dl_import import dl_import
dl_import("from . import libgraph_tool_topology")
from .. import _prop, Vector_int32_t, _check_prop_writable, \
_check_prop_scalar, _check_prop_vector, Graph, PropertyMap, GraphView,\
libcore, _get_rng, _degree, perfect_prop_hash
from .. stats import label_self_loops
import random, sys, numpy, collections
__all__ = ["isomorphism", "subgraph_isomorphism", "mark_subgraph",
"max_cardinality_matching", "max_independent_vertex_set",
"min_spanning_tree", "random_spanning_tree", "dominator_tree",
"topological_sort", "transitive_closure", "tsp_tour",
"sequential_vertex_coloring", "label_components",
"label_largest_component", "label_biconnected_components",
"label_out_component", "kcore_decomposition", "shortest_distance",
"shortest_path", "pseudo_diameter", "is_bipartite", "is_DAG",
"is_planar", "make_maximal_planar", "similarity", "edge_reciprocity"]
def similarity(g1, g2, label1=None, label2=None, norm=True):
    r"""Return the adjacency similarity between the two graphs.

    The similarity is the number of edges whose source and target carry the
    same labels in both graphs (labels default to the vertex indexes). With
    ``norm=True`` the count is divided by the larger of the two edge counts.
    Runs in :math:`O(E_1 + V_1 + E_2 + V_2)` time.
    """
    lab1 = g1.vertex_index if label1 is None else label1
    lab2 = g2.vertex_index if label2 is None else label2
    # Both label maps must share a value type; coerce one into the other's.
    if lab1.value_type() != lab2.value_type():
        try:
            lab2 = lab2.copy(lab1.value_type())
        except ValueError:
            lab1 = lab1.copy(lab2.value_type())
    # Constant (index-based) maps on both sides allow a faster kernel.
    if lab1.is_writable() or lab2.is_writable():
        count = libgraph_tool_topology.\
            similarity(g1._Graph__graph, g2._Graph__graph,
                       _prop("v", g1, lab1), _prop("v", g2, lab2))
    else:
        count = libgraph_tool_topology.\
            similarity_fast(g1._Graph__graph, g2._Graph__graph,
                            _prop("v", g1, lab1), _prop("v", g2, lab2))
    # Undirected edges were visited in both directions by the kernel.
    if not (g1.is_directed() and g2.is_directed()):
        count /= 2
    if norm:
        count /= float(max(g1.num_edges(), g2.num_edges()))
    return count
def isomorphism(g1, g2, vertex_inv1=None, vertex_inv2=None, isomap=False):
    r"""Check whether two graphs are isomorphic.

    If vertex invariants are supplied, only vertices with equal invariants
    are candidates for matching. When ``isomap`` is ``True``, the vertex
    mapping from ``g1`` onto ``g2`` is returned together with the boolean
    result.
    """
    imap = g1.new_vertex_property("int32_t")

    def build_invariant(g, inv):
        # Fold the total degree into a user-supplied invariant so that only
        # vertices with equal (invariant, degree) pairs can be matched; with
        # no invariant, the degree alone is used.
        if inv is None:
            return g.degree_property_map("total").copy("int64_t")
        inv = inv.copy("int64_t")
        deg = g.degree_property_map("total")
        inv.fa += (inv.fa.max() + 1) * deg.a
        return inv

    vertex_inv1 = build_invariant(g1, vertex_inv1)
    vertex_inv2 = build_invariant(g2, vertex_inv2)
    inv_max = max(vertex_inv1.fa.max(), vertex_inv2.fa.max()) + 1

    # Self-loops do not affect isomorphism; filter them out of both graphs.
    loops1 = label_self_loops(g1, mark_only=True)
    if loops1.fa.max() > 0:
        g1 = GraphView(g1, efilt=1 - loops1.fa)
    loops2 = label_self_loops(g2, mark_only=True)
    if loops2.fa.max() > 0:
        g2 = GraphView(g2, efilt=1 - loops2.fa)

    iso = libgraph_tool_topology.\
        check_isomorphism(g1._Graph__graph, g2._Graph__graph,
                          _prop("v", g1, vertex_inv1),
                          _prop("v", g2, vertex_inv2),
                          inv_max,
                          _prop("v", g1, imap))
    return (iso, imap) if isomap else iso
def subgraph_isomorphism(sub, g, max_n=0, vertex_label=None, edge_label=None,
                         induced=False, subgraph=True):
    r"""Find subgraph isomorphisms of ``sub`` in ``g``.

    Returns a list of vertex property maps of ``sub``, each mapping its
    vertices to the indexes of the corresponding vertices in ``g``. At most
    ``max_n`` matches are returned (all of them when ``max_n == 0``). If
    ``vertex_label`` / ``edge_label`` pairs of property maps are supplied,
    labels must agree in addition to the topology. With ``induced=True``
    only node-induced subgraphs are found; with ``subgraph=False`` all
    non-subgraph isomorphisms between ``sub`` and ``g`` are found instead.
    The implementation is based on the VF2 algorithm.
    """
    if sub.num_vertices() == 0:
        raise ValueError("Cannot search for an empty subgraph.")

    def normalize(labels, err_msg):
        # A label spec is a (sub, g) pair of maps with identical value
        # types; anything other than int32_t is perfect-hashed to int32_t.
        if labels is None:
            return (None, None)
        if labels[0].value_type() != labels[1].value_type():
            raise ValueError(err_msg)
        if labels[0].value_type() != "int32_t":
            labels = perfect_prop_hash(labels, htype="int32_t")
        return labels

    vertex_label = normalize(vertex_label,
                             "Both vertex label property maps must be of the same type!")
    edge_label = normalize(edge_label,
                           "Both edge label property maps must be of the same type!")

    vmaps = []
    libgraph_tool_topology.\
        subgraph_isomorphism(sub._Graph__graph, g._Graph__graph,
                             _prop("v", sub, vertex_label[0]),
                             _prop("v", g, vertex_label[1]),
                             _prop("e", sub, edge_label[0]),
                             _prop("e", g, edge_label[1]),
                             vmaps, max_n, induced, not subgraph)
    # Wrap the raw C++ maps into PropertyMap objects owned by ``sub``.
    return [PropertyMap(vm, sub, "v") for vm in vmaps]
def mark_subgraph(g, sub, vmap, vmask=None, emask=None):
    r"""Mark the embedding of ``sub`` inside ``g`` described by ``vmap``.

    ``vmap`` maps vertices of ``sub`` to vertex indexes of ``g``. Returns a
    pair of boolean property maps of ``g`` (vertex mask, edge mask) which
    are ``True`` exactly on the vertices/edges realizing ``sub``.
    """
    vmask = g.new_vertex_property("bool") if vmask is None else vmask
    emask = g.new_edge_property("bool") if emask is None else emask
    vmask.a = False
    emask.a = False
    for sv in sub.vertices():
        gv = g.vertex(vmap[sv])
        vmask[gv] = True
        # An out-edge of gv belongs to the embedding when it points at the
        # image of one of sv's out-neighbours.
        targets = set(g.vertex(vmap[n]) for n in sv.out_neighbours())
        for ge in gv.out_edges():
            if ge.target() in targets:
                emask[ge] = True
    return vmask, emask
def min_spanning_tree(g, weights=None, root=None, tree_map=None):
    """Return the minimum spanning tree of a given graph.

    If edge ``weights`` are provided, their sum is minimized. Supplying a
    ``root`` vertex selects Prim's algorithm (:math:`O(E\\log V)`);
    otherwise Kruskal's algorithm is used (:math:`O(E\\log E)`). The result
    is a boolean edge property map marking tree edges with 1, written into
    ``tree_map`` when one is given.
    """
    if tree_map is None:
        tree_map = g.new_edge_property("bool")
    if tree_map.value_type() != "bool":
        raise ValueError("edge property 'tree_map' must be of value type bool.")
    # Spanning trees are computed on the underlying undirected graph.
    undirected = GraphView(g, directed=False)
    if root is None:
        libgraph_tool_topology.\
            get_kruskal_spanning_tree(undirected._Graph__graph,
                                      _prop("e", g, weights),
                                      _prop("e", g, tree_map))
    else:
        libgraph_tool_topology.\
            get_prim_spanning_tree(undirected._Graph__graph, int(root),
                                   _prop("e", g, weights),
                                   _prop("e", g, tree_map))
    return tree_map
def random_spanning_tree(g, weights=None, root=None, tree_map=None):
    """Return a random spanning tree of ``g`` (directed or undirected).

    With edge ``weights``, a tree is selected with probability proportional
    to the product of its edge weights (Wilson's algorithm). ``root``
    defaults to a randomly chosen vertex; every vertex must be able to reach
    it. The result is a boolean edge property map marking tree edges.
    """
    if tree_map is None:
        tree_map = g.new_edge_property("bool")
    if tree_map.value_type() != "bool":
        raise ValueError("edge property 'tree_map' must be of value type bool.")
    if root is None:
        root = g.vertex(numpy.random.randint(0, g.num_vertices()),
                        use_index=False)

    # Wilson's algorithm needs every vertex to reach the root: the root's
    # in-component (out-component of the reversed graph) must span g.
    in_comp = label_out_component(GraphView(g, reversed=True), root)
    if GraphView(g, vfilt=in_comp).num_vertices() != g.num_vertices():
        raise ValueError("There must be a path from all vertices to the root vertex: %d" % int(root))

    libgraph_tool_topology.\
        random_spanning_tree(g._Graph__graph, int(root),
                             _prop("e", g, weights),
                             _prop("e", g, tree_map), _get_rng())
    return tree_map
def dominator_tree(g, root, dom_map=None):
    """Return a vertex map holding the dominator of every vertex.

    A vertex u dominates v when every directed path from ``root`` to v must
    pass through u. The graph must be directed. The result (optionally
    written into ``dom_map``) is an int32_t vertex property map storing each
    vertex's dominator index. Runs in :math:`O((V+E)\\log (V+E))` time.
    """
    if dom_map is None:
        dom_map = g.new_vertex_property("int32_t")
    if dom_map.value_type() != "int32_t":
        raise ValueError("vertex property 'dom_map' must be of value type int32_t.")
    if not g.is_directed():
        raise ValueError("dominator tree requires a directed graph.")
    libgraph_tool_topology.\
        dominator_tree(g._Graph__graph, int(root), _prop("v", g, dom_map))
    return dom_map
def topological_sort(g):
    """
    Return the topological sort of the given graph. It is returned as an array
    of vertex indexes, in the sort order.

    Raises
    ------
    ValueError
        If the graph is not a directed acyclic graph (DAG).

    Notes
    -----
    The topological sort algorithm creates a linear ordering of the vertices
    such that if edge (u,v) appears in the graph, then u comes before v in the
    ordering. The graph must be a directed acyclic graph (DAG).

    The time complexity is :math:`O(V + E)`.

    References
    ----------
    .. [topological-boost] http://www.boost.org/libs/graph/doc/topological_sort.html
    .. [topological-wiki] http://en.wikipedia.org/wiki/Topological_sorting
    """
    topological_order = Vector_int32_t()
    is_DAG = libgraph_tool_topology.\
        topological_sort(g._Graph__graph, topological_order)
    if not is_DAG:
        # Fixed the misspelled error message ("acylic" -> "acyclic") and
        # removed a stray trailing semicolon.
        raise ValueError("Graph is not a directed acyclic graph (DAG).")
    # The kernel emits the order reversed; flip it into sort order.
    return topological_order.a[::-1].copy()
def transitive_closure(g):
    """Return the transitive closure graph of ``g``.

    The closure G* = (V, E*) contains an edge (u, v) whenever ``g`` has a
    directed path of at least one edge from u to v. The input graph must be
    directed; worst-case time complexity is :math:`O(VE)`.
    """
    if not g.is_directed():
        raise ValueError("graph must be directed for transitive closure.")
    closure = Graph()
    libgraph_tool_topology.transitive_closure(g._Graph__graph,
                                              closure._Graph__graph)
    return closure
def label_components(g, vprop=None, directed=None, attractors=False):
    """Label the component to which each vertex belongs.

    For directed graphs the strongly connected components are found.
    Returns the component label map (labels 0..N-1) together with a
    histogram of label counts; with ``attractors=True`` on a directed graph
    a boolean array is additionally returned, flagging which strongly
    connected components are attractors. ``directed`` overrides the graph's
    own directedness. Runs in :math:`O(V + E)` time.
    """
    if vprop is None:
        vprop = g.new_vertex_property("int32_t")
    _check_prop_writable(vprop, name="vprop")
    _check_prop_scalar(vprop, name="vprop")
    if directed is not None:
        g = GraphView(g, directed=directed)

    hist = libgraph_tool_topology.\
        label_components(g._Graph__graph, _prop("v", g, vprop))

    # Attractor detection only makes sense for a directed view.
    if not (attractors and g.is_directed() and directed != False):
        return vprop, hist
    is_attractor = numpy.ones(len(hist), dtype="bool")
    libgraph_tool_topology.\
        label_attractors(g._Graph__graph, _prop("v", g, vprop),
                         is_attractor)
    return vprop, hist, is_attractor
def label_largest_component(g, directed=None):
    """
    Label the largest component in the graph. If the graph is directed, then the
    largest strongly connected component is labelled.

    A property map with a boolean label is returned.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    directed : bool (optional, default: ``None``)
        Treat graph as directed or not, independently of its actual
        directionality.

    Returns
    -------
    comp : :class:`~graph_tool.PropertyMap`
        Boolean vertex property map which labels the largest component.

    Notes
    -----
    The algorithm runs in :math:`O(V + E)` time.
    """
    label = g.new_vertex_property("bool")
    c, h = label_components(g, directed=directed)
    # Mark the vertices whose component label is the most frequent one.
    # (Removed a dead "vfilt, inv = g.get_vertex_filter()" statement whose
    # results were never used.)
    label.fa = c.fa == h.argmax()
    return label
def label_out_component(g, root):
    """Label the out-component of ``root``.

    For undirected graphs this is simply the component containing ``root``.
    Returns a boolean vertex property map which is ``True`` for every vertex
    reachable from ``root``; the in-component can be obtained by labelling
    on a reversed :class:`~graph_tool.GraphView`. Runs in :math:`O(V + E)`
    time.
    """
    reachable = g.new_vertex_property("bool")
    libgraph_tool_topology.\
        label_out_component(g._Graph__graph, int(root),
                            _prop("v", g, reachable))
    return reachable
def label_biconnected_components(g, eprop=None, vprop=None):
    """
    Label the edges of biconnected components, and the vertices which are
    articulation points.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    eprop : :class:`~graph_tool.PropertyMap` (optional, default: None)
        Edge property to label the biconnected components. If none is
        supplied, one is created.
    vprop : :class:`~graph_tool.PropertyMap` (optional, default: None)
        Vertex property to mark the articulation points. If none is supplied,
        one is created.

    Returns
    -------
    bicomp : :class:`~graph_tool.PropertyMap`
        Edge property map with the biconnected component labels.
    articulation : :class:`~graph_tool.PropertyMap`
        Boolean vertex property map with value 1 for each articulation point,
        and zero otherwise.
    hist : histogram of the biconnected component labels.

    Notes
    -----
    The biconnected components of a graph are the maximal subsets of vertices
    such that the removal of any single vertex from a component will not
    disconnect that component. Vertices belonging to more than one biconnected
    component are "articulation points" (cut vertices); each edge belongs to
    exactly one biconnected component.

    The algorithm runs in :math:`O(V + E)` time.
    """
    if vprop is None:
        vprop = g.new_vertex_property("bool")
    if eprop is None:
        eprop = g.new_edge_property("int32_t")
    # both maps must be writable scalars, whether user-supplied or fresh
    for pmap, pname in ((vprop, "vprop"), (eprop, "eprop")):
        _check_prop_writable(pmap, name=pname)
        _check_prop_scalar(pmap, name=pname)
    # biconnectivity is an undirected notion; ignore edge directions
    u = GraphView(g, directed=False)
    hist = libgraph_tool_topology.\
        label_biconnected_components(u._Graph__graph, _prop("e", u, eprop),
                                     _prop("v", u, vprop))
    return eprop, vprop, hist
def kcore_decomposition(g, deg="out", vprop=None):
    """
    Perform a k-core decomposition of the given graph.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    deg : string
        Degree used for the decomposition: "in", "out" or "total".
    vprop : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
        Vertex property to store the decomposition. If ``None`` is supplied,
        one is created.

    Returns
    -------
    kval : :class:`~graph_tool.PropertyMap`
        Vertex property map with the k-core decomposition, i.e. a given
        vertex v belongs to the ``kval[v]``-core.

    Notes
    -----
    The k-core is a maximal set of vertices such that its induced subgraph
    only contains vertices with degree larger than or equal to k. This
    algorithm is described in [batagelk-algorithm]_ and runs in
    :math:`O(V + E)` time.

    References
    ----------
    .. [k-core] http://en.wikipedia.org/wiki/Degeneracy_%28graph_theory%29
    .. [batagelk-algorithm] Vladimir Batagelj, Matjaž Zaveršnik, "Fast
       algorithms for determining (generalized) core groups in social
       networks", Advances in Data Analysis and Classification
       Volume 5, Issue 2, pp 129-145 (2011), :DOI:`10.1007/s11634-010-0079-y`,
       :arxiv:`cs/0310049`
    """
    if vprop is None:
        vprop = g.new_vertex_property("int32_t")
    _check_prop_writable(vprop, name="vprop")
    _check_prop_scalar(vprop, name="vprop")
    if deg not in ["in", "out", "total"]:
        raise ValueError("invalid degree: " + str(deg))
    # select the view matching the requested degree semantics
    u = g
    if g.is_directed():
        if deg == "out":
            u = GraphView(g, reversed=True)
        elif deg == "total":
            u = GraphView(g, directed=False)
    libgraph_tool_topology.\
        kcore_decomposition(u._Graph__graph, _prop("v", u, vprop),
                            _degree(u, deg))
    return vprop
def shortest_distance(g, source=None, target=None, weights=None, max_dist=None,
                      directed=None, dense=False, dist_map=None,
                      pred_map=False):
    r"""Calculate the distance from a source to a target vertex, or to of all
    vertices from a given source, or the all pairs shortest paths, if the
    source is not specified.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    source : :class:`~graph_tool.Vertex` (optional, default: None)
        Source vertex of the search. If unspecified, the all pairs shortest
        distances are computed.
    target : :class:`~graph_tool.Vertex` or iterable of such objects (optional, default: None)
        Target vertex (or vertices) of the search. If unspecified, the
        distance to all vertices from the source will be computed.
    weights : :class:`~graph_tool.PropertyMap` (optional, default: None)
        The edge weights. If provided, the shortest path will minimize the
        edge weights.
    max_dist : scalar value (optional, default: None)
        If specified, this limits the maximum distance of the vertices
        searched. This parameter has no effect if source is None.
    directed : bool (optional, default: None)
        Treat graph as directed or not, independently of its actual
        directionality.
    dense : bool (optional, default: False)
        If true, and source is None, the Floyd-Warshall algorithm is used,
        otherwise the Johnson algorithm is used. If source is not None, this
        option has no effect.
    dist_map : :class:`~graph_tool.PropertyMap` (optional, default: None)
        Vertex property to store the distances. If none is supplied, one
        is created.
    pred_map : bool (optional, default: False)
        If true, a vertex property map with the predecessors is also
        returned. Ignored if source=None.

    Returns
    -------
    dist_map : :class:`~graph_tool.PropertyMap`
        Vertex property map with the distances from source. If source is
        'None', it will have a vector value type, with the distances to every
        vertex.

    Notes
    -----
    If a source is given, the distances are calculated with a breadth-first
    search (BFS) or Dijkstra's algorithm [dijkstra]_, if weights are given.
    If source is not given, the distances are calculated with Johnson's
    algorithm [johnson-apsp]_. If dense=True, the Floyd-Warshall algorithm
    [floyd-warshall-apsp]_ is used instead.

    If source is specified, the algorithm runs in :math:`O(V + E)` time, or
    :math:`O(V \log V)` if weights are given. If source is not specified, it
    runs in :math:`O(VE\log V)` time, or :math:`O(V^3)` if dense == True.

    References
    ----------
    .. [bfs] Edward Moore, "The shortest path through a maze", International
       Symposium on the Theory of Switching (1959), Harvard University Press.
    .. [bfs-boost] http://www.boost.org/libs/graph/doc/breadth_first_search.html
    .. [dijkstra] E. Dijkstra, "A note on two problems in connexion with
       graphs." Numerische Mathematik, 1:269-271, 1959.
    .. [dijkstra-boost] http://www.boost.org/libs/graph/doc/dijkstra_shortest_paths.html
    .. [johnson-apsp] http://www.boost.org/libs/graph/doc/johnson_all_pairs_shortest.html
    .. [floyd-warshall-apsp] http://www.boost.org/libs/graph/doc/floyd_warshall_shortest.html
    """
    # 'collections.Iterable' was deprecated in Python 3.3 and removed in
    # 3.10; the ABC lives in collections.abc.
    from collections.abc import Iterable

    # normalize 'target' to an int64 array of vertex indexes
    if isinstance(target, Iterable):
        target = numpy.asarray(target, dtype="int64")
    elif target is None:
        target = numpy.array([], dtype="int64")
    else:
        target = numpy.asarray([int(target)], dtype="int64")

    if weights is None:
        dist_type = 'int32_t'
    else:
        dist_type = weights.value_type()

    if dist_map is None:
        if source is not None:
            dist_map = g.new_vertex_property(dist_type)
        else:
            # all-pairs: one distance vector per vertex
            dist_map = g.new_vertex_property("vector<%s>" % dist_type)

    _check_prop_writable(dist_map, name="dist_map")
    if source is not None:
        _check_prop_scalar(dist_map, name="dist_map")
    else:
        _check_prop_vector(dist_map, name="dist_map")

    if max_dist is None:
        max_dist = 0  # 0 means "no limit" for the C++ side

    if directed is not None:
        u = GraphView(g, directed=directed)
    else:
        u = g

    if source is not None:
        pmap = g.copy_property(u.vertex_index, value_type="int64_t")
        # use the (possibly direction-overridden) view 'u' here, so that the
        # 'directed' option also takes effect in the single-source case; the
        # previous version passed 'g' and silently ignored it.
        libgraph_tool_topology.get_dists(u._Graph__graph,
                                         int(source),
                                         target,
                                         _prop("v", g, dist_map),
                                         _prop("e", g, weights),
                                         _prop("v", g, pmap),
                                         float(max_dist))
    else:
        libgraph_tool_topology.get_all_dists(u._Graph__graph,
                                             _prop("v", g, dist_map),
                                             _prop("e", g, weights), dense)

    if source is not None and len(target) > 0:
        if len(target) == 1:
            dist_map = dist_map.a[target[0]]
        else:
            dist_map = numpy.array(dist_map.a[target])

    if source is not None and pred_map:
        return dist_map, pmap
    else:
        return dist_map
def shortest_path(g, source, target, weights=None, pred_map=None):
    r"""
    Return the shortest path from `source` to `target`.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    source : :class:`~graph_tool.Vertex`
        Source vertex of the search.
    target : :class:`~graph_tool.Vertex`
        Target vertex of the search.
    weights : :class:`~graph_tool.PropertyMap` (optional, default: None)
        The edge weights.
    pred_map : :class:`~graph_tool.PropertyMap` (optional, default: None)
        Vertex property map with the predecessors in the search tree. If this
        is provided, the shortest paths are not computed, and are obtained
        directly from this map.

    Returns
    -------
    vertex_list : list of :class:`~graph_tool.Vertex`
        List of vertices from `source` to `target` in the shortest path.
    edge_list : list of :class:`~graph_tool.Edge`
        List of edges from `source` to `target` in the shortest path.

    Notes
    -----
    The paths are computed with a breadth-first search (BFS) or Dijkstra's
    algorithm [dijkstra]_, if weights are given.

    The algorithm runs in :math:`O(V + E)` time, or :math:`O(V \log V)` if
    weights are given. (The docstring is a raw string so that ``\log`` is not
    treated as an invalid escape sequence.)

    References
    ----------
    .. [bfs] Edward Moore, "The shortest path through a maze", International
       Symposium on the Theory of Switching (1959), Harvard University
       Press
    .. [bfs-boost] http://www.boost.org/libs/graph/doc/breadth_first_search.html
    .. [dijkstra] E. Dijkstra, "A note on two problems in connexion with
       graphs." Numerische Mathematik, 1:269-271, 1959.
    .. [dijkstra-boost] http://www.boost.org/libs/graph/doc/dijkstra_shortest_paths.html
    """
    if pred_map is None:
        pred_map = shortest_distance(g, source, target,
                                     weights=weights,
                                     pred_map=True)[1]

    if pred_map[target] == int(target):  # a vertex is its own predecessor
        return [], []                    # only when unreachable: no path

    vlist = [target]
    elist = []

    if weights is not None:
        # sentinel larger than any real weight, for the parallel-edge scan
        max_w = weights.a.max() + 1
    else:
        max_w = None

    # walk the predecessor chain backwards from target to source
    v = target
    while v != source:
        p = g.vertex(pred_map[v])
        min_w = max_w
        pe = None
        s = None
        for e in v.in_edges() if g.is_directed() else v.out_edges():
            s = e.source() if g.is_directed() else e.target()
            if s == p:
                if weights is not None:
                    # among parallel edges from p, pick the lightest one
                    if weights[e] < min_w:
                        min_w = weights[e]
                        pe = e
                else:
                    pe = e
                    break
        elist.insert(0, pe)
        vlist.insert(0, p)
        v = p
    return vlist, elist
def pseudo_diameter(g, source=None, weights=None):
    r"""
    Compute the pseudo-diameter of the graph.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    source : :class:`~graph_tool.Vertex` (optional, default: `None`)
        Source vertex of the search. If not supplied, the first vertex
        in the graph will be chosen.
    weights : :class:`~graph_tool.PropertyMap` (optional, default: `None`)
        The edge weights.

    Returns
    -------
    pseudo_diameter : int
        The pseudo-diameter of the graph.
    end_points : pair of :class:`~graph_tool.Vertex`
        The two vertices which correspond to the pseudo-diameter found.

    Notes
    -----
    The pseudo-diameter is an approximate graph diameter. It is obtained by
    starting from a vertex `source`, and finds a vertex `target` that is
    farthest away from `source`. This process is repeated by treating
    `target` as the new starting vertex, and ends when the graph distance no
    longer increases.

    The paths are computed with a breadth-first search (BFS) or Dijkstra's
    algorithm [dijkstra]_, if weights are given.

    The algorithm runs in :math:`O(V + E)` time, or :math:`O(V \log V)` if
    weights are given. (The docstring is a raw string so that ``\log`` is not
    treated as an invalid escape sequence.)

    References
    ----------
    .. [pseudo-diameter] http://en.wikipedia.org/wiki/Distance_%28graph_theory%29
    """
    if source is None:
        source = g.vertex(0, use_index=False)
    dist, target = 0, source
    # alternate between endpoints until the distance stops growing
    while True:
        new_source = target
        new_target, new_dist = libgraph_tool_topology.get_diam(g._Graph__graph,
                                                               int(new_source),
                                                               _prop("e", g, weights))
        if new_dist > dist:
            target = new_target
            source = new_source
            dist = new_dist
        else:
            break
    return dist, (g.vertex(source), g.vertex(target))
def is_bipartite(g, partition=False):
    """
    Test if the graph is bipartite.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    partition : bool (optional, default: ``False``)
        If ``True``, return the two partitions in case the graph is
        bipartite.

    Returns
    -------
    is_bipartite : bool
        Whether or not the graph is bipartite.
    partition : :class:`~graph_tool.PropertyMap` (only if `partition=True`)
        A vertex property map with the graph partitioning (or `None`) if the
        graph is not bipartite.

    Notes
    -----
    An undirected graph is bipartite if one can partition its set of vertices
    into two sets, such that all edges go from one set to the other. This
    algorithm runs in :math:`O(V + E)` time.

    References
    ----------
    .. [boost-bipartite] http://www.boost.org/libs/graph/doc/is_bipartite.html
    """
    # only allocate the partition map if the caller asked for it
    part = g.new_vertex_property("bool") if partition else None
    u = GraphView(g, directed=False)
    is_bi = libgraph_tool_topology.is_bipartite(u._Graph__graph,
                                                _prop("v", u, part))
    if part is not None and not is_bi:
        # the partial labelling left by a failed test is meaningless
        part.a = 0
    return (is_bi, part) if partition else is_bi
def is_planar(g, embedding=False, kuratowski=False):
    """
    Test if the graph is planar.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    embedding : bool (optional, default: False)
        If true, return a mapping from vertices to the clockwise order of
        out-edges in the planar embedding.
    kuratowski : bool (optional, default: False)
        If true, the minimal set of edges that form the obstructing
        Kuratowski subgraph will be returned as a property map, if the graph
        is not planar.

    Returns
    -------
    is_planar : bool
        Whether or not the graph is planar.
    embedding : :class:`~graph_tool.PropertyMap` (only if `embedding=True`)
        A vertex property map with the out-edges indexes in clockwise order
        in the planar embedding.
    kuratowski : :class:`~graph_tool.PropertyMap` (only if `kuratowski=True`)
        An edge property map with the minimal set of edges that form the
        obstructing Kuratowski subgraph (if the value of kuratowski[e] is 1,
        the edge belongs to the set).

    Notes
    -----
    A graph is planar if it can be drawn in two-dimensional space without any
    of its edges crossing. This algorithm performs the Boyer-Myrvold
    planarity testing [boyer-myrvold]_. See [boost-planarity]_ for more
    details. This algorithm runs in :math:`O(V)` time.

    References
    ----------
    .. [boyer-myrvold] John M. Boyer and Wendy J. Myrvold, "On the Cutting Edge:
       Simplified O(n) Planarity by Edge Addition" Journal of Graph Algorithms
       and Applications, 8(2): 241-273, 2004. http://www.emis.ams.org/journals/JGAA/accepted/2004/BoyerMyrvold2004.8.3.pdf
    .. [boost-planarity] http://www.boost.org/libs/graph/doc/boyer_myrvold.html
    """
    u = GraphView(g, directed=False)
    # optional output maps are only allocated on request
    embed = g.new_vertex_property("vector<int>") if embedding else None
    kur = g.new_edge_property("bool") if kuratowski else None
    planar = libgraph_tool_topology.is_planar(u._Graph__graph,
                                              _prop("v", g, embed),
                                              _prop("e", g, kur))
    results = [planar]
    results.extend(m for m in (embed, kur) if m is not None)
    # bare bool if nothing else was requested, tuple otherwise
    return results[0] if len(results) == 1 else tuple(results)
def make_maximal_planar(g, unfilter=False):
    """
    Add edges to the graph to make it maximally planar.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used. It must be a biconnected planar graph with at
        least 3 vertices.

    Notes
    -----
    A graph is maximal planar if no additional edges can be added to it
    without creating a non-planar graph. By Euler's formula, a maximal planar
    graph with V > 2 vertices always has 3V - 6 edges and 2V - 4 faces.
    This algorithm runs in :math:`O(V + E)` time.

    References
    ----------
    .. [boost-planarity] http://www.boost.org/libs/graph/doc/make_maximal_planar.html
    """
    # planarity is an undirected notion; operate on an undirected view
    u = GraphView(g, directed=False)
    libgraph_tool_topology.maximal_planar(u._Graph__graph)
def is_DAG(g):
    """
    Return `True` if the graph is a directed acyclic graph (DAG).

    Notes
    -----
    A topological sort is attempted; it succeeds exactly when the graph has
    no directed cycles. The time complexity is :math:`O(V + E)`.

    References
    ----------
    .. [DAG-wiki] http://en.wikipedia.org/wiki/Directed_acyclic_graph
    """
    # the sort order itself is discarded; only success/failure matters
    order = Vector_int32_t()
    return libgraph_tool_topology.topological_sort(g._Graph__graph, order)
def max_cardinality_matching(g, heuristic=False, weight=None, minimize=True,
                             match=None):
    r"""Find a maximum cardinality matching in the graph.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    heuristic : bool (optional, default: `False`)
        If true, a random heuristic will be used, which runs in linear time.
    weight : :class:`~graph_tool.PropertyMap` (optional, default: `None`)
        If provided, the matching will minimize the edge weights (or
        maximize if ``minimize == False``). This option has no effect if
        ``heuristic == False``.
    minimize : bool (optional, default: `True`)
        If `True`, the matching will minimize the weights, otherwise they
        will be maximized. This option has no effect if
        ``heuristic == False``.
    match : :class:`~graph_tool.PropertyMap` (optional, default: `None`)
        Edge property map where the matching will be specified.

    Returns
    -------
    match : :class:`~graph_tool.PropertyMap`
        Boolean edge property map where the matching is specified. If
        ``heuristic == False``, a success flag is also returned.

    Notes
    -----
    A *matching* is a subset of the edges of a graph such that no two edges
    share a common vertex. A *maximum cardinality matching* has maximum size
    over all matchings in the graph.

    If ``heuristic == True`` the algorithm does not necessarily return the
    maximum matching, instead the focus is to run in linear time; otherwise
    it runs in time :math:`O(EV\times\alpha(E,V))`, where
    :math:`\alpha(m,n)` is a slow growing function that is at most 4 for any
    feasible input. For a more detailed description, see
    [boost-max-matching]_.

    References
    ----------
    .. [boost-max-matching] http://www.boost.org/libs/graph/doc/maximum_matching.html
    .. [matching-heuristic] B. Hendrickson and R. Leland. "A Multilevel Algorithm
       for Partitioning Graphs." In S. Karin, editor, Proc. Supercomputing ’95,
       San Diego. ACM Press, New York, 1995, :doi:`10.1145/224170.224228`
    """
    if match is None:
        match = g.new_edge_property("bool")
    _check_prop_scalar(match, "match")
    _check_prop_writable(match, "match")
    if weight is not None:
        _check_prop_scalar(weight, "weight")

    u = GraphView(g, directed=False)
    if heuristic:
        # linear-time randomized matching; weight-aware but approximate
        libgraph_tool_topology.\
            random_matching(u._Graph__graph, _prop("e", u, weight),
                            _prop("e", u, match), minimize, _get_rng())
        return match
    # exact algorithm: also reports whether verification succeeded
    check = libgraph_tool_flow.\
        max_cardinality_matching(u._Graph__graph, _prop("e", u, match))
    return match, check
def max_independent_vertex_set(g, high_deg=False, mivs=None):
    r"""Find a maximal independent vertex set in the graph.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    high_deg : bool (optional, default: `False`)
        If `True`, vertices with high degree will be included first in the
        set, otherwise they will be included last.
    mivs : :class:`~graph_tool.PropertyMap` (optional, default: `None`)
        Vertex property map where the vertex set will be specified.

    Returns
    -------
    mivs : :class:`~graph_tool.PropertyMap`
        Boolean vertex property map where the set is specified.

    Notes
    -----
    A maximal independent vertex set is an independent set such that adding
    any other vertex to the set forces the set to contain an edge between two
    vertices of the set. This implements the algorithm described in
    [mivs-luby]_, which runs in time :math:`O(V + E)`.

    References
    ----------
    .. [mivs-wikipedia] http://en.wikipedia.org/wiki/Independent_set_%28graph_theory%29
    .. [mivs-luby] Luby, M., "A simple parallel algorithm for the maximal independent set problem",
       Proc. 17th Symposium on Theory of Computing, Association for Computing Machinery, pp. 1-10, (1985)
       :doi:`10.1145/22145.22146`.
    """
    if mivs is None:
        mivs = g.new_vertex_property("bool")
    _check_prop_scalar(mivs, "mivs")
    _check_prop_writable(mivs, "mivs")

    u = GraphView(g, directed=False)
    libgraph_tool_topology.\
        maximal_vertex_set(u._Graph__graph, _prop("v", u, mivs), high_deg,
                           _get_rng())
    # re-own the map in the original graph, not the undirected view
    return g.own_property(mivs)
def edge_reciprocity(g):
    r"""Calculate the edge reciprocity of the graph.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.

    Returns
    -------
    reciprocity : float
        The reciprocity value.

    Notes
    -----
    The edge [reciprocity]_ is defined as :math:`E^\leftrightarrow/E`, where
    :math:`E^\leftrightarrow` and :math:`E` are the number of bidirectional
    and all edges in the graph, respectively. The algorithm runs with
    complexity :math:`O(E + V)`.

    References
    ----------
    .. [reciprocity] S. Wasserman and K. Faust, "Social Network Analysis".
       (Cambridge University Press, Cambridge, 1994)
    .. [lopez-reciprocity-2007] Gorka Zamora-López, Vinko Zlatić, Changsong Zhou, Hrvoje Štefančić, and Jürgen Kurths
       "Reciprocity of networks with degree correlations and arbitrary degree sequences", Phys. Rev. E 77, 016106 (2008)
       :doi:`10.1103/PhysRevE.77.016106`, :arxiv:`0706.3372`
    """
    return libgraph_tool_topology.reciprocity(g._Graph__graph)
def tsp_tour(g, src, weight=None):
    r"""Return a traveling salesman tour of the graph, which is guaranteed to
    be twice as long as the optimal tour in the worst case.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    src : :class:`~graph_tool.Vertex`
        The source (and target) of the tour.
    weight : :class:`~graph_tool.PropertyMap` (optional, default: None)
        Edge weights.

    Returns
    -------
    tour : :class:`numpy.ndarray`
        List of vertex indexes corresponding to the tour.

    Notes
    -----
    The algorithm runs with :math:`O(E\log V)` complexity. (The docstring is
    a raw string so that ``\log`` is not treated as an invalid escape
    sequence.)

    References
    ----------
    .. [tsp-bgl] http://www.boost.org/libs/graph/doc/metric_tsp_approx.html
    .. [tsp] http://en.wikipedia.org/wiki/Travelling_salesman_problem
    """
    tour = libgraph_tool_topology.\
        get_tsp(g._Graph__graph, int(src), _prop("e", g, weight))
    # copy the data out of the C++ vector so the result owns its memory
    return tour.a.copy()
def sequential_vertex_coloring(g, order=None, color=None):
    """Returns a vertex coloring of the graph.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be used.
    order : :class:`~graph_tool.PropertyMap` (optional, default: None)
        Order with which the vertices will be colored.
    color : :class:`~graph_tool.PropertyMap` (optional, default: None)
        Integer-valued vertex property map to store the colors.

    Returns
    -------
    color : :class:`~graph_tool.PropertyMap`
        Integer-valued vertex property map with the vertex colors.

    Notes
    -----
    The time complexity is :math:`O(V(d+k))`, where :math:`V` is the number
    of vertices, :math:`d` is the maximum degree of the vertices in the
    graph, and :math:`k` is the number of colors used.

    References
    ----------
    .. [sgc-bgl] http://www.boost.org/libs/graph/doc/sequential_vertex_coloring.html
    .. [graph-coloring] http://en.wikipedia.org/wiki/Graph_coloring
    """
    # default order is simply the vertex index
    if order is None:
        order = g.vertex_index
    if color is None:
        color = g.new_vertex_property("int")
    libgraph_tool_topology.\
        sequential_coloring(g._Graph__graph,
                            _prop("v", g, order),
                            _prop("v", g, color))
    return color
from .. flow import libgraph_tool_flow
| apache-2.0 |
brakhane/panda3d | direct/src/leveleditor/SceneGraphUIBase.py | 6 | 13334 | """
Defines Scene Graph tree UI Base
"""
import wx
from pandac.PandaModules import *
from .ActionMgr import *
from . import ObjectGlobals as OG
class SceneGraphUIDropTarget(wx.TextDropTarget):
    # Drop target attached to the scene-graph panel: when dragged text (an
    # object id serialized by onBeginDrag) is dropped here, it asks the
    # editor's scene graph UI to reparent that object at the drop position.
    def __init__(self, editor):
        # debug trace left in on purpose (matches the module's other prints)
        print("in SceneGraphUIDropTarget::init...")
        wx.TextDropTarget.__init__(self)
        self.editor = editor  # level editor instance that owns the UI

    def OnDropText(self, x, y, text):
        # wx callback: 'text' is the dragged payload, (x, y) the drop point
        print("in SceneGraphUIDropTarget::OnDropText...")
        self.editor.ui.sceneGraphUI.changeHierarchy(text, x, y)
class SceneGraphUIBase(wx.Panel):
    """Base panel showing the level editor's scene graph as a wx tree.

    Each tree item's PyData holds the corresponding object's UID
    ("render" for the root), which maps tree items back to editor
    objects via ``editor.objectMgr``.  Subclasses must implement
    ``populateExtraMenu()``.
    """

    def __init__(self, parent, editor):
        wx.Panel.__init__(self, parent)
        self.editor = editor
        self.tree = wx.TreeCtrl(self, id=-1, pos=wx.DefaultPosition,
                  size=wx.DefaultSize, style=wx.TR_MULTIPLE|wx.TR_DEFAULT_STYLE,
                  validator=wx.DefaultValidator, name="treeCtrl")
        self.root = self.tree.AddRoot('render')
        self.tree.SetItemPyData(self.root, "render")
        # When True, non-object Panda NodePath children are mirrored
        # into the tree as well (see showPandaObjectChildren).
        self.shouldShowPandaObjChildren = False

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.tree, 1, wx.EXPAND, 0)
        self.SetSizer(sizer)
        self.Layout()
        parentSizer = wx.BoxSizer(wx.VERTICAL)
        parentSizer.Add(self, 1, wx.EXPAND, 0)
        parent.SetSizer(parentSizer)
        parent.Layout()

        parent.SetDropTarget(SceneGraphUIDropTarget(self.editor))

        self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.onSelected)
        self.tree.Bind(wx.EVT_TREE_BEGIN_DRAG, self.onBeginDrag)

        self.currItem = None
        self.currObj = None
        self.menu = wx.Menu()
        self.populateMenu()
        self.Bind(wx.EVT_CONTEXT_MENU, self.onShowPopup)

    def reset(self):
        """Delete every child of the root, leaving an empty tree."""
        itemList = list()
        item, cookie = self.tree.GetFirstChild(self.root)
        while item:
            itemList.append(item)
            item, cookie = self.tree.GetNextChild(self.root, cookie)
        # Collect first, delete afterwards, so iteration stays valid.
        for item in itemList:
            self.tree.Delete(item)

    def traversePandaObjects(self, parent, objNodePath):
        """Mirror the Panda children of objNodePath under tree item `parent`.

        Children tagged 'OBJRoot' are skipped -- those are editor objects
        already shown elsewhere in the tree.
        """
        itemId = self.tree.GetItemPyData(parent)
        i = 0
        for child in objNodePath.getChildren():
            if child.hasTag('OBJRoot'):
                # since they are already shown in scene graph tree
                continue
            namestr = "%s.%s"%(child.node().getType(), child.node().getName())
            newItem = self.tree.PrependItem(parent, namestr)
            # Synthetic dotted id ("<uid>.<index>") marks these as
            # Panda-only nodes, distinct from real object UIDs.
            newItemId = "%s.%s"%(itemId, i)
            self.tree.SetItemPyData(newItem, newItemId)
            # recursing...
            self.traversePandaObjects(newItem, child)
            i = i + 1

    def addPandaObjectChildren(self, parent):
        """Recursively add Panda NodePath children beneath `parent`."""
        # first, find Panda Object's NodePath of the item
        itemId = self.tree.GetItemPyData(parent)
        if itemId == "render":
            return
        obj = self.editor.objectMgr.findObjectById(itemId)
        if obj is None:
            return
        objNodePath = obj[OG.OBJ_NP]
        self.traversePandaObjects(parent, objNodePath)

        item, cookie = self.tree.GetFirstChild(parent)
        while item:
            # recursing...
            self.addPandaObjectChildren(item)
            item, cookie = self.tree.GetNextChild(parent, cookie)

    def removePandaObjectChildren(self, parent):
        """Recursively remove Panda-only child items beneath `parent`."""
        # first, find Panda Object's NodePath of the item
        itemId = self.tree.GetItemPyData(parent)
        if itemId == "render":
            return
        obj = self.editor.objectMgr.findObjectById(itemId)
        if obj is None:
            # Synthetic dotted ids resolve to no object: this is a
            # Panda-only item -- delete it (children go with it).
            self.tree.Delete(parent)
            return
        item, cookie = self.tree.GetFirstChild(parent)
        while item:
            # Advance the cursor before recursing; the recursion may
            # delete `item`, which would invalidate GetNextChild.
            itemToRemove = item
            item, cookie = self.tree.GetNextChild(parent, cookie)
            self.removePandaObjectChildren(itemToRemove)

    def add(self, item, parentNP = None):
        """Add the editor object wrapping NodePath `item` to the tree.

        The item is appended under its parent object's tree item, or
        under the root when the parent is not an editor object.
        """
        if item is None:
            return
        obj = self.editor.objectMgr.findObjectByNodePath(NodePath(item))
        if obj is None:
            return

        if parentNP is None:
            parentNP = obj[OG.OBJ_NP].getParent()
        parentObj = self.editor.objectMgr.findObjectByNodePath(parentNP)
        if parentObj is None:
            parent = self.root
        else:
            parent = self.traverse(self.root, parentObj[OG.OBJ_UID])

        name = NodePath(item).getName()
        if not name:
            # TreeCtrl items need non-empty labels.
            name = ' '
        namestr = "%s_%s_%s"%(obj[OG.OBJ_DEF].name, name, obj[OG.OBJ_UID])
        newItem = self.tree.AppendItem(parent, namestr)
        self.tree.SetItemPyData(newItem, obj[OG.OBJ_UID])

        # adding children of PandaObj
        if self.shouldShowPandaObjChildren:
            self.addPandaObjectChildren(newItem)
        self.tree.Expand(self.root)

    def traverse(self, parent, itemId):
        """Depth-first search for the item whose PyData equals `itemId`.

        Returns the wx tree item, or None when not found (or when
        `parent` itself carries the id -- prevents traversing into self).
        """
        # prevent from traversing into self
        if itemId == self.tree.GetItemPyData(parent):
            return None

        # main loop - searching for an item with an itemId
        item, cookie = self.tree.GetFirstChild(parent)
        while item:
            # if the item was found - return it
            if itemId == self.tree.GetItemPyData(item):
                return item

            # the item was not found - checking if it has children
            if self.tree.ItemHasChildren(item):
                # item has children - delving into it
                child = self.traverse(item, itemId)
                if child is not None:
                    return child

            # continue iteration to the next child
            item, cookie = self.tree.GetNextChild(parent, cookie)
        return None

    def reParentTree(self, parent, newParent):
        """Copy the subtree under `parent` to `newParent` (items + PyData)."""
        # main loop - iterating over item's children
        item, cookie = self.tree.GetFirstChild(parent)
        while item:
            data = self.tree.GetItemText(item)
            itemId = self.tree.GetItemPyData(item)
            newItem = self.tree.AppendItem(newParent, data)
            self.tree.SetItemPyData(newItem, itemId)

            # if an item had children, we need to re-parent them as well
            if self.tree.ItemHasChildren(item):
                # recursing...
                self.reParentTree(item, newItem)

            # continue iteration to the next child
            item, cookie = self.tree.GetNextChild(parent, cookie)

    def reParentData(self, parent, child):
        """Re-parent the scene NodePath, preserving its world transform."""
        child.wrtReparentTo(parent)

    def reParent(self, oldParent, newParent, child):
        """Move tree item `oldParent` (labelled `child`) under `newParent`,
        updating both the tree and the underlying scene graph."""
        if newParent is None:
            newParent = self.root
        itemId = self.tree.GetItemPyData(oldParent)
        newItem = self.tree.AppendItem(newParent, child)
        self.tree.SetItemPyData(newItem, itemId)
        self.reParentTree(oldParent, newItem)

        obj = self.editor.objectMgr.findObjectById(itemId)
        itemId = self.tree.GetItemPyData(newParent)
        if itemId != "render":
            newParentObj = self.editor.objectMgr.findObjectById(itemId)
            self.reParentData(newParentObj[OG.OBJ_NP], obj[OG.OBJ_NP])
        else:
            self.reParentData(render, obj[OG.OBJ_NP])

        self.tree.Delete(oldParent)
        if self.shouldShowPandaObjChildren:
            # NOTE(review): oldParent was just deleted above; refreshing
            # it looks dubious but is kept for behavioral parity -- the
            # effective refresh is the one on newParent.
            self.removePandaObjectChildren(oldParent)
            self.addPandaObjectChildren(oldParent)
            self.removePandaObjectChildren(newParent)
            # BUGFIX: was `newpParent` (undefined name -> NameError).
            self.addPandaObjectChildren(newParent)

    def isChildOrGrandChild(self, parent, child):
        """Return a truthy value when `child`'s id occurs in `parent`'s subtree."""
        childId = self.tree.GetItemPyData(child)
        return self.traverse(parent, childId)

    def changeHierarchy(self, data, x, y):
        """Handle a drag'n'drop of the item labelled `data` onto point (x, y)."""
        itemText = data.split('_')
        itemId = itemText[-1] # uid is the last token
        item = self.traverse(self.tree.GetRootItem(), itemId)
        if item is None:
            return

        dragToItem, flags = self.tree.HitTest(wx.Point(x, y))
        if dragToItem.IsOk():
            # prevent dragging into itself
            if dragToItem == item:
                return
            # ...or into one of its own descendants
            if self.isChildOrGrandChild(item, dragToItem):
                return

            # undo function setup...
            action = ActionChangeHierarchy(self.editor, self.tree.GetItemPyData(self.tree.GetItemParent(item)), self.tree.GetItemPyData(item), self.tree.GetItemPyData(dragToItem), data)
            self.editor.actionMgr.push(action)
            action()

    def parent(self, oldParentId, newParentId, childName):
        """Re-parent by object UIDs (used by undo/redo actions)."""
        oldParent = self.traverse(self.tree.GetRootItem(), oldParentId)
        newParent = self.traverse(self.tree.GetRootItem(), newParentId)
        self.reParent(oldParent, newParent, childName)

    def showPandaObjectChildren(self):
        """Toggle display of raw Panda NodePath children in the tree."""
        itemList = list()
        self.shouldShowPandaObjChildren = not self.shouldShowPandaObjChildren

        item, cookie = self.tree.GetFirstChild(self.root)
        while item:
            itemList.append(item)
            item, cookie = self.tree.GetNextChild(self.root, cookie)

        for item in itemList:
            if self.shouldShowPandaObjChildren:
                self.addPandaObjectChildren(item)
            else:
                self.removePandaObjectChildren(item)

    def delete(self, itemId):
        """Remove the tree item for object `itemId`, if present."""
        item = self.traverse(self.root, itemId)
        if item:
            self.tree.Delete(item)

    def select(self, itemId):
        """Select (and scroll to) the tree item for object `itemId`."""
        item = self.traverse(self.root, itemId)
        if item:
            if not self.tree.IsSelected(item):
                self.tree.SelectItem(item)
                self.tree.EnsureVisible(item)

    def changeLabel(self, itemId, newName):
        """Rename object `itemId` and refresh its tree label."""
        item = self.traverse(self.root, itemId)
        if item:
            obj = self.editor.objectMgr.findObjectById(itemId)
            if obj is None:
                return
            obj[OG.OBJ_NP].setName(newName)
            namestr = "%s_%s_%s"%(obj[OG.OBJ_DEF].name, newName, obj[OG.OBJ_UID])
            self.tree.SetItemText(item, namestr)

    def deSelect(self, itemId):
        """Unselect the tree item for object `itemId`, if present."""
        item = self.traverse(self.root, itemId)
        if item is not None:
            self.tree.UnselectItem(item)

    def onSelected(self, event):
        """Tree selection handler: mirror the selection into DIRECT."""
        item = event.GetItem()
        if item:
            itemId = self.tree.GetItemPyData(item)
            if itemId:
                obj = self.editor.objectMgr.findObjectById(itemId)
                if obj:
                    selections = self.tree.GetSelections()
                    if len(selections) > 1:
                        base.direct.select(obj[OG.OBJ_NP], fMultiSelect = 1, fLEPane = 0)
                    else:
                        base.direct.select(obj[OG.OBJ_NP], fMultiSelect = 0, fLEPane = 0)

    def onBeginDrag(self, event):
        """Start a text drag'n'drop carrying the item's label."""
        item = event.GetItem()

        if item != self.tree.GetRootItem(): # prevent dragging root item
            text = self.tree.GetItemText(item)
            print("Starting SceneGraphUI drag'n'drop with %s..." % repr(text))

            tdo = wx.TextDataObject(text)
            tds = wx.DropSource(self.tree)
            tds.SetData(tdo)
            tds.DoDragDrop(True)

    def onShowPopup(self, event):
        """Show the context menu for the item under the mouse."""
        pos = event.GetPosition()
        pos = self.ScreenToClient(pos)
        item, flags = self.tree.HitTest(pos)
        if not item.IsOk():
            return
        self.currItem = item
        itemId = self.tree.GetItemPyData(item)
        if not itemId:
            return
        self.currObj = self.editor.objectMgr.findObjectById(itemId)
        if self.currObj:
            self.PopupMenu(self.menu, pos)

    def populateMenu(self):
        """Build the context menu and bind its handlers."""
        menuitem = self.menu.Append(-1, 'Expand All')
        self.Bind(wx.EVT_MENU, self.onExpandAllChildren, menuitem)
        menuitem = self.menu.Append(-1, 'Collapse All')
        self.Bind(wx.EVT_MENU, self.onCollapseAllChildren, menuitem)
        menuitem = self.menu.Append(-1, 'Delete')
        self.Bind(wx.EVT_MENU, self.onDelete, menuitem)
        menuitem = self.menu.Append(-1, 'Rename')
        self.Bind(wx.EVT_MENU, self.onRename, menuitem)
        self.populateExtraMenu()

    def populateExtraMenu(self):
        # You should implement this in subclass
        raise NotImplementedError('populateExtraMenu() must be implemented in subclass')

    def onCollapseAllChildren(self, evt=None):
        if self.currItem:
            self.tree.CollapseAllChildren(self.currItem)

    def onExpandAllChildren(self, evt=None):
        if self.currItem:
            self.tree.ExpandAllChildren(self.currItem)

    def onDelete(self, evt=None):
        """Delete the context-menu object through an undoable action."""
        if self.currObj is None:
            return
        uid = self.currObj[OG.OBJ_UID]
        action = ActionDeleteObjById(self.editor, uid)
        self.editor.actionMgr.push(action)
        action()
        self.delete(uid)

    def onRename(self, evt=None):
        """Prompt for a new name for the context-menu object and apply it."""
        if self.currObj is None:
            return

        self.editor.ui.bindKeyEvents(False)
        dialog = wx.TextEntryDialog(None, '', 'Input new name', defaultValue=self.currObj[OG.OBJ_NP].getName())
        if dialog.ShowModal() == wx.ID_OK:
            newName = dialog.GetValue()
            self.currObj[OG.OBJ_NP].setName(newName)
            self.changeLabel(self.currObj[OG.OBJ_UID], newName)
        # BUGFIX: destroy the dialog and restore key bindings even when
        # the user cancels; previously both only happened on OK, leaking
        # the dialog and leaving editor key events unbound.
        dialog.Destroy()
        self.editor.ui.bindKeyEvents(True)
| bsd-3-clause |
PythonScientists/Shape | env/lib/python3.5/site-packages/alembic/testing/env.py | 16 | 7836 | #!coding: utf-8
import os
import shutil
import textwrap
from ..util.compat import u
from ..script import Script, ScriptDirectory
from .. import util
from . import engines
from . import provision
def _get_staging_directory():
    """Return the scratch directory name for this test run.

    Under a parallel-test "follower" identity each follower gets its
    own scratch directory so concurrent runs do not collide.
    """
    ident = provision.FOLLOWER_IDENT
    return "scratch_%s" % ident if ident else "scratch"
def staging_env(create=True, template="generic", sourceless=False):
    """Create (or just wrap) the staging alembic scripts directory.

    :param create: when True, wipe any existing staging scripts
        directory and run ``alembic init`` there with *template*.
    :param template: name of the alembic init template to use.
    :param sourceless: when True, strip ``env.py`` down to its compiled
        form only, to exercise sourceless operation.
    :return: a :class:`ScriptDirectory` for the staging scripts dir.
    """
    from alembic import command, script
    cfg = _testing_config()
    if create:
        path = os.path.join(_get_staging_directory(), 'scripts')
        if os.path.exists(path):
            shutil.rmtree(path)
        command.init(cfg, path, template=template)
        if sourceless:
            try:
                # do an import so that a .pyc/.pyo is generated.
                util.load_python_file(path, 'env.py')
            except AttributeError:
                # we don't have the migration context set up yet
                # so running the .env py throws this exception.
                # theoretically we could be using py_compiler here to
                # generate .pyc/.pyo without importing but not really
                # worth it.
                pass
            make_sourceless(os.path.join(path, "env.py"))
    sc = script.ScriptDirectory.from_config(cfg)
    return sc
def clear_staging_env():
    """Remove the whole staging directory tree, ignoring errors."""
    shutil.rmtree(_get_staging_directory(), True)
def script_file_fixture(txt):
    """Overwrite the staging ``script.py.mako`` template with *txt*."""
    path = os.path.join(
        _get_staging_directory(), 'scripts', "script.py.mako")
    with open(path, 'w') as handle:
        handle.write(txt)
def env_file_fixture(txt):
    """Install *txt* as the body of the staging ``env.py``.

    The snippet is appended after boilerplate exposing the alembic
    ``config`` object; any stale compiled file for the previous env.py
    is removed first so the new source takes effect on import.
    """
    dir_ = os.path.join(_get_staging_directory(), 'scripts')
    txt = """
from alembic import context
config = context.config
""" + txt
    path = os.path.join(dir_, "env.py")
    pyc_path = util.pyc_file_from_path(path)
    if os.access(pyc_path, os.F_OK):
        os.unlink(pyc_path)
    with open(path, 'w') as f:
        f.write(txt)
def _sqlite_file_db(tempname="foo.db"):
    """Return a testing engine bound to a SQLite file in the staging area."""
    scripts_dir = os.path.join(_get_staging_directory(), 'scripts')
    db_url = "sqlite:///%s/%s" % (scripts_dir, tempname)
    return engines.testing_engine(url=db_url)
def _sqlite_testing_config(sourceless=False):
    """Write a testing alembic.ini using a file-based SQLite URL.

    :param sourceless: value for the ``sourceless`` ini option.
    :return: the :class:`Config` for the written file.
    """
    dir_ = os.path.join(_get_staging_directory(), 'scripts')
    url = "sqlite:///%s/foo.db" % dir_
    return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false"))
def _multi_dir_testing_config(sourceless=False, extra_version_location=''):
    """Write a testing alembic.ini with multiple ``version_locations``.

    :param sourceless: value for the ``sourceless`` ini option.
    :param extra_version_location: additional path appended to the
        three standard model directories.
    :return: the :class:`Config` for the written file.
    """
    dir_ = os.path.join(_get_staging_directory(), 'scripts')
    url = "sqlite:///%s/foo.db" % dir_
    return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false",
       extra_version_location))
def _no_sql_testing_config(dialect="postgresql", directives=""):
    """use a postgresql url with no host so that
    connections guaranteed to fail"""
    dir_ = os.path.join(_get_staging_directory(), 'scripts')
    # "%s://" yields e.g. "postgresql://" -- a URL with no host, so any
    # attempt to actually connect fails immediately; `directives` is
    # spliced verbatim into the [alembic] section.
    return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s://
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, dialect, directives))
def _write_config_file(text):
    """Write *text* into the staging config file and return its Config."""
    config = _testing_config()
    with open(config.config_file_name, 'w') as handle:
        handle.write(text)
    return config
def _testing_config():
    """Return an alembic :class:`Config` for ``test_alembic.ini`` in the
    staging directory, creating that directory on demand.
    """
    from alembic.config import Config
    dir_ = _get_staging_directory()
    try:
        # EAFP: avoids the check-then-create race the previous
        # os.access() + os.mkdir() combination had, and calls
        # _get_staging_directory() once instead of three times.
        os.mkdir(dir_)
    except OSError:
        # Directory already exists; a genuine permission problem will
        # surface when the config file itself is opened.
        pass
    return Config(os.path.join(dir_, 'test_alembic.ini'))
def write_script(
        scriptdir, rev_id, content, encoding='ascii', sourceless=False):
    """Rewrite the revision script for *rev_id* in place with *content*.

    The revision map entry is refreshed from the rewritten file; the
    script's down_revision may not change.  Any stale compiled file is
    removed, and the script may optionally be made sourceless.
    """
    old = scriptdir.revision_map.get_revision(rev_id)
    path = old.path
    content = textwrap.dedent(content)
    if encoding:
        content = content.encode(encoding)
    with open(path, 'wb') as fp:
        fp.write(content)
    pyc_path = util.pyc_file_from_path(path)
    if os.access(pyc_path, os.F_OK):
        os.unlink(pyc_path)
    script = Script._from_path(scriptdir, path)
    # Re-read the map entry for the rewritten revision id and make sure
    # the rewrite didn't alter the graph topology.
    old = scriptdir.revision_map.get_revision(script.revision)
    if old.down_revision != script.down_revision:
        raise Exception("Can't change down_revision "
                        "on a refresh operation.")
    scriptdir.revision_map.add_revision(script, _replace=True)
    if sourceless:
        make_sourceless(path)
def make_sourceless(path):
    """Delete the source file at *path*, leaving only its compiled form."""
    # note that if -O is set, you'd see pyo files here,
    # the pyc util function looks at sys.flags.optimize to handle this
    pyc_path = util.pyc_file_from_path(path)
    assert os.access(pyc_path, os.F_OK)
    # look for a non-pep3147 path here.
    # if not present, need to copy from __pycache__
    simple_pyc_path = util.simple_pyc_file_from_path(path)
    if not os.access(simple_pyc_path, os.F_OK):
        shutil.copyfile(pyc_path, simple_pyc_path)
    os.unlink(path)
def three_rev_fixture(cfg):
    """Generate a linear three-revision history (a -> b -> c).

    Each revision's upgrade/downgrade executes a marker SQL statement;
    revision b additionally exercises a non-ascii, utf-8 encoded
    docstring.

    :return: the three revision identifiers in order.
    """
    a = util.rev_id()
    b = util.rev_id()
    c = util.rev_id()
    script = ScriptDirectory.from_config(cfg)
    script.generate_revision(a, "revision a", refresh=True)
    write_script(script, a, """\
"Rev A"
revision = '%s'
down_revision = None
from alembic import op
def upgrade():
    op.execute("CREATE STEP 1")
def downgrade():
    op.execute("DROP STEP 1")
""" % a)
    script.generate_revision(b, "revision b", refresh=True)
    write_script(script, b, u("""# coding: utf-8
"Rev B, méil"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
    op.execute("CREATE STEP 2")
def downgrade():
    op.execute("DROP STEP 2")
""") % (b, a), encoding="utf-8")
    script.generate_revision(c, "revision c", refresh=True)
    write_script(script, c, """\
"Rev C"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
    op.execute("CREATE STEP 3")
def downgrade():
    op.execute("DROP STEP 3")
""" % (c, b))
    return a, b, c
def _multidb_testing_config(engines):
    """alembic.ini fixture to work exactly with the 'multidb' template

    :param engines: mapping of database name -> engine; the keys become
        the ``databases`` list and each entry gets its own section with
        a ``sqlalchemy.url``.
    """
    # NOTE(review): the parameter intentionally shadows the module-level
    # `engines` import inside this function.
    dir_ = os.path.join(_get_staging_directory(), 'scripts')
    databases = ", ".join(
        engines.keys()
    )
    engines = "\n\n".join(
        "[%s]\n"
        "sqlalchemy.url = %s" % (key, value.url)
        for key, value in engines.items()
    )
    return _write_config_file("""
[alembic]
script_location = %s
sourceless = false
databases = %s
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, databases, engines)
    )
| apache-2.0 |
BassantMorsi/finderApp | lib/python2.7/site-packages/django/contrib/auth/views.py | 16 | 23150 | import functools
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
UserModel = get_user_model()
def deprecate_current_app(func):
    """
    Handle deprecation of the current_app parameter of the views.

    Wraps a view so that a legacy ``current_app=`` keyword argument is
    popped, a deprecation warning is emitted, and the value is moved
    onto ``request.current_app`` instead.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        if 'current_app' in kwargs:
            warnings.warn(
                "Passing `current_app` as a keyword argument is deprecated. "
                "Instead the caller of `{0}` should set "
                "`request.current_app`.".format(func.__name__),
                RemovedInDjango20Warning
            )
            current_app = kwargs.pop('current_app')
            # The wrapped views take `request` as a keyword here; only
            # forward current_app when both are actually present.
            request = kwargs.get('request', None)
            if request and current_app is not None:
                request.current_app = current_app
        return func(*args, **kwargs)
    return inner
class SuccessURLAllowedHostsMixin(object):
    """Provides the set of hosts considered safe for success redirects."""
    # Extra hosts (beyond the current request's own host) that redirect
    # targets may point at; override on subclasses as needed.
    success_url_allowed_hosts = set()

    def get_success_url_allowed_hosts(self):
        """Return the request's host plus any configured extra hosts."""
        return {self.request.get_host()} | set(self.success_url_allowed_hosts)
class LoginView(SuccessURLAllowedHostsMixin, FormView):
    """
    Displays the login form and handles the login action.
    """
    form_class = AuthenticationForm
    # Optional per-view override of form_class (takes precedence when set).
    authentication_form = None
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = 'registration/login.html'
    # When True, already-authenticated users are redirected instead of
    # being shown the form again.
    redirect_authenticated_user = False
    extra_context = None
    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        if self.redirect_authenticated_user and self.request.user.is_authenticated:
            redirect_to = self.get_success_url()
            if redirect_to == self.request.path:
                # Redirecting back to this very view would loop forever.
                raise ValueError(
                    "Redirection loop for authenticated user detected. Check that "
                    "your LOGIN_REDIRECT_URL doesn't point to a login page."
                )
            return HttpResponseRedirect(redirect_to)
        return super(LoginView, self).dispatch(request, *args, **kwargs)
    def get_success_url(self):
        """Ensure the user-originating redirection URL is safe."""
        # POST value wins over GET; both carry the "next" redirect field.
        redirect_to = self.request.POST.get(
            self.redirect_field_name,
            self.request.GET.get(self.redirect_field_name, '')
        )
        url_is_safe = is_safe_url(
            url=redirect_to,
            allowed_hosts=self.get_success_url_allowed_hosts(),
            require_https=self.request.is_secure(),
        )
        if not url_is_safe:
            # Fall back to the configured default rather than following
            # an unvetted redirect (open-redirect protection).
            return resolve_url(settings.LOGIN_REDIRECT_URL)
        return redirect_to
    def get_form_class(self):
        return self.authentication_form or self.form_class
    def get_form_kwargs(self):
        kwargs = super(LoginView, self).get_form_kwargs()
        # AuthenticationForm needs the request for backend checks.
        kwargs['request'] = self.request
        return kwargs
    def form_valid(self, form):
        """Security check complete. Log the user in."""
        auth_login(self.request, form.get_user())
        return HttpResponseRedirect(self.get_success_url())
    def get_context_data(self, **kwargs):
        """Add the redirect field, site and site_name to the context."""
        context = super(LoginView, self).get_context_data(**kwargs)
        current_site = get_current_site(self.request)
        context.update({
            self.redirect_field_name: self.get_success_url(),
            'site': current_site,
            'site_name': current_site.name,
        })
        if self.extra_context is not None:
            context.update(self.extra_context)
        return context
@deprecate_current_app
def login(request, *args, **kwargs):
    """Deprecated function-based login view; delegates to LoginView."""
    warnings.warn(
        'The login() view is superseded by the class-based LoginView().',
        RemovedInDjango21Warning, stacklevel=2
    )
    return LoginView.as_view(**kwargs)(request, *args, **kwargs)
class LogoutView(SuccessURLAllowedHostsMixin, TemplateView):
    """
    Logs out the user and displays 'You are logged out' message.
    """
    # Optional post-logout redirect target (URL, named pattern or model).
    next_page = None
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = 'registration/logged_out.html'
    extra_context = None
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        auth_logout(request)
        next_page = self.get_next_page()
        if next_page:
            # Redirect to this page until the session has been cleared.
            return HttpResponseRedirect(next_page)
        return super(LogoutView, self).dispatch(request, *args, **kwargs)
    def get_next_page(self):
        """Resolve the post-logout redirect, validating its host."""
        if self.next_page is not None:
            next_page = resolve_url(self.next_page)
        elif settings.LOGOUT_REDIRECT_URL:
            next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
        else:
            next_page = self.next_page
        if (self.redirect_field_name in self.request.POST or
                self.redirect_field_name in self.request.GET):
            # An explicit redirect field overrides the configured target.
            next_page = self.request.POST.get(
                self.redirect_field_name,
                self.request.GET.get(self.redirect_field_name)
            )
            url_is_safe = is_safe_url(
                url=next_page,
                allowed_hosts=self.get_success_url_allowed_hosts(),
                require_https=self.request.is_secure(),
            )
            # Security check -- Ensure the user-originating redirection URL is
            # safe.
            if not url_is_safe:
                next_page = self.request.path
        return next_page
    def get_context_data(self, **kwargs):
        """Add site, site_name and title to the template context."""
        context = super(LogoutView, self).get_context_data(**kwargs)
        current_site = get_current_site(self.request)
        context.update({
            'site': current_site,
            'site_name': current_site.name,
            'title': _('Logged out'),
        })
        if self.extra_context is not None:
            context.update(self.extra_context)
        return context
@deprecate_current_app
def logout(request, *args, **kwargs):
    """Deprecated function-based logout view; delegates to LogoutView."""
    warnings.warn(
        'The logout() view is superseded by the class-based LogoutView().',
        RemovedInDjango21Warning, stacklevel=2
    )
    return LogoutView.as_view(**kwargs)(request, *args, **kwargs)
# Sentinel distinguishing "argument not passed" from an explicit None.
_sentinel = object()
@deprecate_current_app
def logout_then_login(request, login_url=None, extra_context=_sentinel):
    """
    Logs out the user if they are logged in. Then redirects to the log-in page.
    """
    if extra_context is not _sentinel:
        # The parameter is accepted but ignored; warn callers still using it.
        warnings.warn(
            "The unused `extra_context` parameter to `logout_then_login` "
            "is deprecated.", RemovedInDjango21Warning
        )
    if not login_url:
        login_url = settings.LOGIN_URL
    login_url = resolve_url(login_url)
    return LogoutView.as_view(next_page=login_url)(request)
def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page.

    The 'next' URL is added to the login URL's querystring under
    ``redirect_field_name`` (skipped when that name is falsy).
    """
    # NOTE(review): the first parameter shadows the builtin next(); the
    # name is part of the public signature and must stay.
    resolved_url = resolve_url(login_url or settings.LOGIN_URL)
    login_url_parts = list(urlparse(resolved_url))
    if redirect_field_name:
        # Index 4 of the 6-tuple is the query component.
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[redirect_field_name] = next
        login_url_parts[4] = querystring.urlencode(safe='/')
    return HttpResponseRedirect(urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@deprecate_current_app
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
warnings.warn("The password_reset() view is superseded by the "
"class-based PasswordResetView().",
RemovedInDjango21Warning, stacklevel=2)
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': _('Password reset'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_done(request,
template_name='registration/password_reset_done.html',
extra_context=None):
warnings.warn("The password_reset_done() view is superseded by the "
"class-based PasswordResetDoneView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'title': _('Password reset sent'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
@deprecate_current_app
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
warnings.warn("The password_reset_confirm() view is superseded by the "
"class-based PasswordResetConfirmView().",
RemovedInDjango21Warning, stacklevel=2)
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = _('Enter new password')
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(user)
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
extra_context=None):
warnings.warn("The password_reset_complete() view is superseded by the "
"class-based PasswordResetCompleteView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Password reset complete'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Class-based password reset views
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordContextMixin(object):
    """Adds the view's title (and optional extras) to the template context."""
    extra_context = None

    def get_context_data(self, **kwargs):
        """Inject ``title`` and any ``extra_context`` entries."""
        data = super(PasswordContextMixin, self).get_context_data(**kwargs)
        data['title'] = self.title
        extra = self.extra_context
        if extra is not None:
            data.update(extra)
        return data
class PasswordResetView(PasswordContextMixin, FormView):
    """Ask for the user's email address and send the reset email."""
    email_template_name = 'registration/password_reset_email.html'
    extra_email_context = None
    form_class = PasswordResetForm
    from_email = None
    # Optional HTML alternative for the reset email body.
    html_email_template_name = None
    subject_template_name = 'registration/password_reset_subject.txt'
    success_url = reverse_lazy('password_reset_done')
    template_name = 'registration/password_reset_form.html'
    title = _('Password reset')
    token_generator = default_token_generator
    @method_decorator(csrf_protect)
    def dispatch(self, *args, **kwargs):
        return super(PasswordResetView, self).dispatch(*args, **kwargs)
    def form_valid(self, form):
        """Send the reset email via the form, then redirect to success_url."""
        opts = {
            'use_https': self.request.is_secure(),
            'token_generator': self.token_generator,
            'from_email': self.from_email,
            'email_template_name': self.email_template_name,
            'subject_template_name': self.subject_template_name,
            'request': self.request,
            'html_email_template_name': self.html_email_template_name,
            'extra_email_context': self.extra_email_context,
        }
        form.save(**opts)
        return super(PasswordResetView, self).form_valid(form)
# Placeholder token shown in the URL after the real token has been
# validated and stashed in the session (see PasswordResetConfirmView).
INTERNAL_RESET_URL_TOKEN = 'set-password'
# Session key under which the validated reset token is stored.
INTERNAL_RESET_SESSION_TOKEN = '_password_reset_token'
class PasswordResetDoneView(PasswordContextMixin, TemplateView):
    """Show a confirmation that the password reset email was sent."""
    template_name = 'registration/password_reset_done.html'
    title = _('Password reset sent')
class PasswordResetConfirmView(PasswordContextMixin, FormView):
    """Check the hash in a password reset link and let the user set a
    new password."""
    form_class = SetPasswordForm
    # When True, log the user in right after a successful reset.
    post_reset_login = False
    post_reset_login_backend = None
    success_url = reverse_lazy('password_reset_complete')
    template_name = 'registration/password_reset_confirm.html'
    title = _('Enter new password')
    token_generator = default_token_generator
    @method_decorator(sensitive_post_parameters())
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        assert 'uidb64' in kwargs and 'token' in kwargs
        self.validlink = False
        self.user = self.get_user(kwargs['uidb64'])
        if self.user is not None:
            token = kwargs['token']
            if token == INTERNAL_RESET_URL_TOKEN:
                # Second pass: the real token was stashed in the session
                # on the first request (see the else branch below).
                session_token = self.request.session.get(INTERNAL_RESET_SESSION_TOKEN)
                if self.token_generator.check_token(self.user, session_token):
                    # If the token is valid, display the password reset form.
                    self.validlink = True
                    return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)
            else:
                if self.token_generator.check_token(self.user, token):
                    # Store the token in the session and redirect to the
                    # password reset form at a URL without the token. That
                    # avoids the possibility of leaking the token in the
                    # HTTP Referer header.
                    self.request.session[INTERNAL_RESET_SESSION_TOKEN] = token
                    redirect_url = self.request.path.replace(token, INTERNAL_RESET_URL_TOKEN)
                    return HttpResponseRedirect(redirect_url)
        # Display the "Password reset unsuccessful" page.
        return self.render_to_response(self.get_context_data())
    def get_user(self, uidb64):
        """Return the user for the base64-encoded pk, or None when invalid."""
        try:
            # urlsafe_base64_decode() decodes to bytestring on Python 3
            uid = force_text(urlsafe_base64_decode(uidb64))
            user = UserModel._default_manager.get(pk=uid)
        except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
            user = None
        return user
    def get_form_kwargs(self):
        kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
        # SetPasswordForm needs the user whose password is being set.
        kwargs['user'] = self.user
        return kwargs
    def form_valid(self, form):
        """Save the new password, clear the session token, optionally log in."""
        user = form.save()
        del self.request.session[INTERNAL_RESET_SESSION_TOKEN]
        if self.post_reset_login:
            auth_login(self.request, user, self.post_reset_login_backend)
        return super(PasswordResetConfirmView, self).form_valid(form)
    def get_context_data(self, **kwargs):
        context = super(PasswordResetConfirmView, self).get_context_data(**kwargs)
        if self.validlink:
            context['validlink'] = True
        else:
            # Invalid/expired link: suppress the form and adjust the title.
            context.update({
                'form': None,
                'title': _('Password reset unsuccessful'),
                'validlink': False,
            })
        return context
class PasswordResetCompleteView(PasswordContextMixin, TemplateView):
    """Render the final 'password reset complete' page with a login link."""
    template_name = 'registration/password_reset_complete.html'
    title = _('Password reset complete')
    def get_context_data(self, **kwargs):
        context = super(PasswordResetCompleteView, self).get_context_data(**kwargs)
        # Let the template link straight back to the configured login page.
        context['login_url'] = resolve_url(settings.LOGIN_URL)
        return context
@sensitive_post_parameters()
@csrf_protect
@login_required
@deprecate_current_app
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    extra_context=None):
    """Deprecated function-based password-change view.

    Superseded by the class-based PasswordChangeView; kept only for
    backwards compatibility until Django 2.1.
    """
    warnings.warn("The password_change() view is superseded by the "
                  "class-based PasswordChangeView().",
                  RemovedInDjango21Warning, stacklevel=2)
    if post_change_redirect is None:
        post_change_redirect = reverse('password_change_done')
    else:
        post_change_redirect = resolve_url(post_change_redirect)
    if request.method == "POST":
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            # Updating the password logs out all other sessions for the user
            # except the current one.
            update_session_auth_hash(request, form.user)
            return HttpResponseRedirect(post_change_redirect)
    else:
        form = password_change_form(user=request.user)
    context = {
        'form': form,
        'title': _('Password change'),
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@login_required
@deprecate_current_app
def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         extra_context=None):
    """Deprecated function-based 'password changed' confirmation view.

    Superseded by the class-based PasswordChangeDoneView; kept only for
    backwards compatibility until Django 2.1.
    """
    warnings.warn("The password_change_done() view is superseded by the "
                  "class-based PasswordChangeDoneView().",
                  RemovedInDjango21Warning, stacklevel=2)
    context = {
        'title': _('Password change successful'),
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
class PasswordChangeView(PasswordContextMixin, FormView):
    """Let a logged-in user change their password via PasswordChangeForm."""
    form_class = PasswordChangeForm
    success_url = reverse_lazy('password_change_done')
    template_name = 'registration/password_change_form.html'
    title = _('Password change')
    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(PasswordChangeView, self).dispatch(*args, **kwargs)
    def get_form_kwargs(self):
        """Pass the current user through to PasswordChangeForm."""
        kwargs = super(PasswordChangeView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def form_valid(self, form):
        """Save the new password without logging the current session out."""
        form.save()
        # Updating the password logs out all other sessions for the user
        # except the current one.
        update_session_auth_hash(self.request, form.user)
        return super(PasswordChangeView, self).form_valid(form)
class PasswordChangeDoneView(PasswordContextMixin, TemplateView):
    """Render the 'password changed successfully' page."""
    template_name = 'registration/password_change_done.html'
    title = _('Password change successful')
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(PasswordChangeDoneView, self).dispatch(*args, **kwargs)
| mit |
edfungus/Crouton-Python-Example | env/lib/python2.7/site-packages/setuptools/command/bdist_egg.py | 306 | 17184 | """setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import marshal
import textwrap
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.compat import basestring
from setuptools.extension import Library
from setuptools import Command
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_path, get_python_version
    def _get_purelib():
        """Return the platform-independent (purelib) site-packages dir."""
        return get_path("purelib")
except ImportError:
    # Older Pythons lack sysconfig; fall back to the distutils equivalents.
    from distutils.sysconfig import get_python_lib, get_python_version
    def _get_purelib():
        """Return the platform-independent (purelib) site-packages dir."""
        return get_python_lib(False)
def strip_module(filename):
    """Return *filename* minus its extension and any trailing 'module'."""
    root = os.path.splitext(filename)[0] if '.' in filename else filename
    return root[:-6] if root.endswith('module') else root
def write_stub(resource, pyfile):
    """Create *pyfile*, a stub loader that imports the extension *resource*."""
    _stub_template = textwrap.dedent("""
    def __bootstrap__():
        global __bootstrap__, __loader__, __file__
        import sys, pkg_resources, imp
        __file__ = pkg_resources.resource_filename(__name__, %r)
        __loader__ = None; del __bootstrap__, __loader__
        imp.load_dynamic(__name__,__file__)
    __bootstrap__()
    """).lstrip()
    contents = _stub_template % resource
    with open(pyfile, 'w') as stub_file:
        stub_file.write(contents)
class bdist_egg(Command):
    """setuptools/distutils command that builds a ``.egg`` distribution."""
    description = "create an \"egg\" distribution"
    user_options = [
        ('bdist-dir=', 'b',
         "temporary directory for creating the distribution"),
        ('plat-name=', 'p', "platform name to embed in generated filenames "
                            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
         "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
         "keep the pseudo-installation tree around after " +
         "creating the distribution archive"),
        ('dist-dir=', 'd',
         "directory to put final built distributions in"),
        ('skip-build', None,
         "skip rebuilding everything (for testing/debugging)"),
    ]
    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]
    def initialize_options(self):
        """Set the default (unset) value for every supported option."""
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None
    def finalize_options(self):
        """Fill in unset options and compute the output egg path."""
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')
        if self.plat_name is None:
            self.plat_name = get_build_platform()
        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
        if self.egg_output is None:
            # Compute filename of the output egg
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
    def do_install_data(self):
        """Run install_data, remapping absolute site-packages data paths.

        Data files whose destination is inside site-packages are rewritten
        to be relative so they land inside the egg instead.
        """
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir
        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files, []
        for item in old:
            if isinstance(item, tuple) and len(item) == 2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized == site_packages or normalized.startswith(
                        site_packages + os.sep
                    ):
                        item = realpath[len(site_packages) + 1:], item[1]
                        # XXX else: raise ???
            self.distribution.data_files.append(item)
        try:
            log.info("installing package data to %s" % self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # Restore the original data_files no matter what happened.
            self.distribution.data_files = old
    def get_outputs(self):
        """Return the path of the egg that will be (or was) produced."""
        return [self.egg_output]
    def call_command(self, cmdname, **kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname, self.bdist_dir)
        kw.setdefault('skip_build', self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd
    def run(self):
        """Build the egg: install code and data, write metadata, zip it."""
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s" % self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root
        instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root
        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        for (p, ext_name) in enumerate(ext_outputs):
            filename, ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
                                  '.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s" % ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep, '/')
        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()
        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root, 'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s" % script_dir)
            self.call_command('install_scripts', install_dir=script_dir,
                              no_ep=1)
        self.copy_metadata_to(egg_info)
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s" % native_libs)
            if not self.dry_run:
                os.unlink(native_libs)
        write_safety_flag(
            os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
        )
        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )
        if self.exclude_source_files:
            self.zap_pyfiles()
        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                     dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_egg', get_python_version(), self.egg_output))
    def zap_pyfiles(self):
        """Delete every .py source file from the temporary build tree."""
        log.info("Removing .py files from temporary directory")
        for base, dirs, files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base, name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)
    def zip_safe(self):
        """Return the project's zip_safe flag, scanning the build if unset."""
        safe = getattr(self.distribution, 'zip_safe', None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)
    def gen_header(self):
        """Write an 'eggsecutable' shell header to the output file if one
        is configured; return the mode make_zipfile should open it with
        ('a' to append the zip after the header, 'w' otherwise).
        """
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation', {}).get('eggsecutable')
        if ep is None:
            return 'w'  # not an eggsecutable, do it the usual way.
        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )
        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        return 'a'
    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info, '')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)
    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []
        paths = {self.bdist_dir: ''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base] + filename)
            for filename in dirs:
                paths[os.path.join(base, filename)] = (paths[base] +
                                                       filename + '/')
        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext, Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
                        ext_outputs.append(filename)
        return all_outputs, ext_outputs
# Filename extensions that mark compiled extension modules / shared libraries.
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the EGG-INFO metadata dir."""
    walker = os.walk(egg_dir)
    top, subdirs, filenames = next(walker)
    try:
        subdirs.remove('EGG-INFO')  # prune metadata from the walk
    except ValueError:
        pass
    yield top, subdirs, filenames
    for entry in walker:
        yield entry
def analyze_egg(egg_dir, stubs):
    """Determine whether the unpacked egg at *egg_dir* is zip-safe."""
    # An explicit flag file left in EGG-INFO takes precedence over scanning.
    for flag, marker in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', marker)):
            return flag
    if not can_scan():
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith(('.py', '.pyw')):
                continue
            if name.endswith(('.pyc', '.pyo')):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Write or remove zip-safety flag file(s) in *egg_dir*.

    If *safe* is True, a 'zip-safe' marker is created and any
    'not-zip-safe' marker removed (and vice versa for False); if *safe*
    is None, both markers are removed.
    """
    for flag, fn in safety_flags.items():
        fn = os.path.join(egg_dir, fn)
        if os.path.exists(fn):
            # Remove a marker that no longer matches the verdict.
            if safe is None or bool(safe) != flag:
                os.unlink(fn)
        elif safe is not None and bool(safe) == flag:
            # Use a context manager so the file is closed even on error.
            with open(fn, 'wt') as f:
                f.write('\n')
# Maps the zip-safety verdict to the EGG-INFO marker filename that records it.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Unmarshals the code object from the compiled file and looks for
    references to __file__/__path__ and to inspect functions that need
    real source files on disk. Returns False if anything suspect is
    found, True otherwise.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    if sys.version_info < (3, 3):
        skip = 8  # skip magic & date
    else:
        skip = 12  # skip magic & date & file size
    # Context manager guarantees the file is closed even if marshal fails.
    with open(filename, 'rb') as f:
        f.read(skip)
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUG FIX: the original list was missing a comma after 'getfile',
        # so it silently concatenated to 'getfilegetsourcelines' and
        # neither name was ever matched.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects."""
    for symbol in code.co_names:
        yield symbol
    for constant in code.co_consts:
        if isinstance(constant, basestring):
            yield constant
        elif isinstance(constant, CodeType):
            # Recurse into nested functions/classes.
            for symbol in iter_symbols(constant):
                yield symbol
def can_scan():
    """Return True when compiled bytecode can be analyzed on this platform."""
    if sys.platform.startswith('java') or sys.platform == 'cli':
        # Jython/IronPython don't produce CPython-marshalled bytecode.
        log.warn("Unable to analyze compiled code on this platform.")
        log.warn("Please ask the author to include a 'zip_safe'"
                 " setting (either True or False) in the package's setup.py")
        return None
    # CPython, PyPy, etc.
    return True
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
# (consulted by bdist_egg.call_command when redirecting sub-commands)
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
                 mode='w'):
    """Create a zip file from all the files under 'base_dir'.

    Uses the "zipfile" module; paths inside the archive are stored
    relative to base_dir. Returns the name of the output zip file.
    """
    import zipfile
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def add_files(archive, dirname, names):
        # Add each regular file under dirname to the archive (no-op writes
        # when dry_run is set; logging still happens for visibility).
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                arcname = path[len(base_dir) + 1:]
                if not dry_run:
                    archive.write(path, arcname)
                log.debug("adding '%s'" % arcname)

    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    if dry_run:
        for dirname, dirs, files in os.walk(base_dir):
            add_files(None, dirname, files)
    else:
        archive = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in os.walk(base_dir):
            add_files(archive, dirname, files)
        archive.close()
    return zip_filename
| mit |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/protobuf/python/mox.py | 603 | 38237 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
  """Base exception for this module.
  Subclassing AssertionError makes mox failures surface as ordinary test
  failures under unittest-style runners.
  """
  pass
class ExpectedMethodCallsError(Error):
  """Raised by Verify() when expected methods were never called."""

  def __init__(self, expected_methods):
    """Record the uncalled expected methods.

    Args:
      expected_methods: non-empty sequence of MockMethod objects that were
        expected but never invoked.

    Raises:
      ValueError: if expected_methods is empty.
    """
    if not expected_methods:
      raise ValueError("There must be at least one expected method")
    Error.__init__(self)
    self._expected_methods = expected_methods

  def __str__(self):
    lines = []
    for index, method in enumerate(self._expected_methods):
      lines.append("%3d. %s" % (index, method))
    calls = "\n".join(lines)
    return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
  """Raised when an unexpected method is called.

  This happens when a method is called with incorrect parameters or out
  of the specified order.
  """

  def __init__(self, unexpected_method, expected):
    """Record the offending call and what was expected instead.

    Args:
      unexpected_method: MockMethod that was called but did not match the
        head of the expected-method queue.
      expected: MockMethod or UnorderedGroup the call should have matched.
    """
    Error.__init__(self)
    self._unexpected_method = unexpected_method
    self._expected = expected

  def __str__(self):
    return "Unexpected method call: %s. Expecting: %s" % (
        self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
  """Raised if an unknown method is requested of the mock object."""

  def __init__(self, unknown_method_name):
    """Record the name of the unknown method.

    Args:
      unknown_method_name: str name of a call that is not part of the
        mocked class's public interface.
    """
    Error.__init__(self)
    self._unknown_method_name = unknown_method_name

  def __str__(self):
    return ("Method called is not a member of the object: %s"
            % self._unknown_method_name)
class Mox(object):
  """Mox: a factory for creating mock objects."""
  # A list of types that should be stubbed out with MockObjects (as
  # opposed to MockAnythings).
  # NOTE: these are Python 2-only names from the `types` module; this
  # module targets Python 2.
  _USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
                      types.ObjectType, types.TypeType]
  def __init__(self):
    """Initialize a new Mox."""
    self._mock_objects = []
    self.stubs = stubout.StubOutForTesting()
  def CreateMock(self, class_to_mock):
    """Create a new mock object.
    Args:
      # class_to_mock: the class to be mocked
      class_to_mock: class
    Returns:
      MockObject that can be used as the class_to_mock would be.
    """
    new_mock = MockObject(class_to_mock)
    self._mock_objects.append(new_mock)
    return new_mock
  def CreateMockAnything(self):
    """Create a mock that will accept any method calls.
    This does not enforce an interface.
    """
    new_mock = MockAnything()
    self._mock_objects.append(new_mock)
    return new_mock
  def ReplayAll(self):
    """Set all mock objects to replay mode."""
    for mock_obj in self._mock_objects:
      mock_obj._Replay()
  def VerifyAll(self):
    """Call verify on all mock objects created."""
    for mock_obj in self._mock_objects:
      mock_obj._Verify()
  def ResetAll(self):
    """Call reset on all mock objects. This does not unset stubs."""
    for mock_obj in self._mock_objects:
      mock_obj._Reset()
  def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
    """Replace a method, attribute, etc. with a Mock.
    This will replace a class or module with a MockObject, and everything else
    (method, function, etc) with a MockAnything. This can be overridden to
    always use a MockAnything by setting use_mock_anything to True.
    Args:
      obj: A Python object (class, module, instance, callable).
      attr_name: str. The name of the attribute to replace with a mock.
      use_mock_anything: bool. True if a MockAnything should be used regardless
        of the type of attribute.
    """
    attr_to_replace = getattr(obj, attr_name)
    if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
      stub = self.CreateMock(attr_to_replace)
    else:
      stub = self.CreateMockAnything()
    self.stubs.Set(obj, attr_name, stub)
  def UnsetStubs(self):
    """Restore stubs to their original state."""
    self.stubs.UnsetAll()
def Replay(*args):
  """Put every mock passed in into replay mode.

  Args:
    *args: any number of mocks to switch to replay mode.
  """
  for mock_obj in args:
    mock_obj._Replay()
def Verify(*args):
  """Verify every mock passed in.

  Args:
    *args: any number of mocks to be verified.
  """
  for mock_obj in args:
    mock_obj._Verify()
def Reset(*args):
  """Reset every mock passed in back to record mode.

  Args:
    *args: any number of mocks to be reset.
  """
  for mock_obj in args:
    mock_obj._Reset()
class MockAnything:
  """A mock that can be used to mock anything.
  This is helpful for mocking classes that do not provide a public interface.
  """
  # NOTE(review): old-style class under Python 2; MockObject mixes it with
  # `object` to get new-style behavior.
  def __init__(self):
    """ """
    self._Reset()
  def __getattr__(self, method_name):
    """Intercept method calls on this object.
    A new MockMethod is returned that is aware of the MockAnything's
    state (record or replay). The call will be recorded or replayed
    by the MockMethod's __call__.
    Args:
      # method name: the name of the method being called.
      method_name: str
    Returns:
      A new MockMethod aware of MockAnything's state (record or replay).
    """
    return self._CreateMockMethod(method_name)
  def _CreateMockMethod(self, method_name):
    """Create a new mock method call and return it.
    Args:
      # method name: the name of the method being called.
      method_name: str
    Returns:
      A new MockMethod aware of MockAnything's state (record or replay).
    """
    return MockMethod(method_name, self._expected_calls_queue,
                      self._replay_mode)
  # Python 2 truth protocol (would be __bool__ on Python 3).
  def __nonzero__(self):
    """Return 1 for nonzero so the mock can be used as a conditional."""
    return 1
  def __eq__(self, rhs):
    """Provide custom logic to compare objects."""
    return (isinstance(rhs, MockAnything) and
            self._replay_mode == rhs._replay_mode and
            self._expected_calls_queue == rhs._expected_calls_queue)
  def __ne__(self, rhs):
    """Provide custom logic to compare objects."""
    return not self == rhs
  def _Replay(self):
    """Start replaying expected method calls."""
    self._replay_mode = True
  def _Verify(self):
    """Verify that all of the expected calls have been made.
    Raises:
      ExpectedMethodCallsError: if there are still more method calls in the
        expected queue.
    """
    # If the list of expected calls is not empty, raise an exception
    if self._expected_calls_queue:
      # The last MultipleTimesGroup is not popped from the queue.
      if (len(self._expected_calls_queue) == 1 and
          isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
          self._expected_calls_queue[0].IsSatisfied()):
        pass
      else:
        raise ExpectedMethodCallsError(self._expected_calls_queue)
  def _Reset(self):
    """Reset the state of this mock to record mode with an empty queue."""
    # Maintain a list of method calls we are expecting
    self._expected_calls_queue = deque()
    # Make sure we are in setup mode, not replay mode
    self._replay_mode = False
class MockObject(MockAnything, object):
  """A mock object that simulates the public/protected interface of a class."""
  def __init__(self, class_to_mock):
    """Initialize a mock object.
    This determines the methods and properties of the class and stores them.
    Args:
      # class_to_mock: class to be mocked
      class_to_mock: class
    """
    # This is used to hack around the mixin/inheritance of MockAnything, which
    # is not a proper object (it can be anything. :-)
    MockAnything.__dict__['__init__'](self)
    # Get a list of all the public and special methods we should mock.
    self._known_methods = set()
    self._known_vars = set()
    self._class_to_mock = class_to_mock
    for method in dir(class_to_mock):
      if callable(getattr(class_to_mock, method)):
        self._known_methods.add(method)
      else:
        self._known_vars.add(method)
  def __getattr__(self, name):
    """Intercept attribute request on this object.
    If the attribute is a public class variable, it will be returned and not
    recorded as a call.
    If the attribute is not a variable, it is handled like a method
    call. The method name is checked against the set of mockable
    methods, and a new MockMethod is returned that is aware of the
    MockObject's state (record or replay). The call will be recorded
    or replayed by the MockMethod's __call__.
    Args:
      # name: the name of the attribute being requested.
      name: str
    Returns:
      Either a class variable or a new MockMethod that is aware of the state
      of the mock (record or replay).
    Raises:
      UnknownMethodCallError if the MockObject does not mock the requested
          method.
    """
    if name in self._known_vars:
      return getattr(self._class_to_mock, name)
    if name in self._known_methods:
      return self._CreateMockMethod(name)
    raise UnknownMethodCallError(name)
  def __eq__(self, rhs):
    """Provide custom logic to compare objects."""
    return (isinstance(rhs, MockObject) and
            self._class_to_mock == rhs._class_to_mock and
            self._replay_mode == rhs._replay_mode and
            self._expected_calls_queue == rhs._expected_calls_queue)
  def __setitem__(self, key, value):
    """Provide custom logic for mocking classes that support item assignment.
    Args:
      key: Key to set the value for.
      value: Value to set.
    Returns:
      Expected return value in replay mode. A MockMethod object for the
      __setitem__ method that has already been called if not in replay mode.
    Raises:
      TypeError if the underlying class does not support item assignment.
      UnexpectedMethodCallError if the object does not expect the call to
          __setitem__.
    """
    setitem = self._class_to_mock.__dict__.get('__setitem__', None)
    # Verify the class supports item assignment.
    if setitem is None:
      raise TypeError('object does not support item assignment')
    # If we are in replay mode then simply call the mock __setitem__ method.
    if self._replay_mode:
      return MockMethod('__setitem__', self._expected_calls_queue,
                        self._replay_mode)(key, value)
    # Otherwise, create a mock method __setitem__.
    return self._CreateMockMethod('__setitem__')(key, value)
  def __getitem__(self, key):
    """Provide custom logic for mocking classes that are subscriptable.
    Args:
      key: Key to return the value for.
    Returns:
      Expected return value in replay mode. A MockMethod object for the
      __getitem__ method that has already been called if not in replay mode.
    Raises:
      TypeError if the underlying class is not subscriptable.
      UnexpectedMethodCallError if the object does not expect the call to
          __setitem__.
    """
    getitem = self._class_to_mock.__dict__.get('__getitem__', None)
    # Verify the class supports item assignment.
    if getitem is None:
      raise TypeError('unsubscriptable object')
    # If we are in replay mode then simply call the mock __getitem__ method.
    if self._replay_mode:
      return MockMethod('__getitem__', self._expected_calls_queue,
                        self._replay_mode)(key)
    # Otherwise, create a mock method __getitem__.
    return self._CreateMockMethod('__getitem__')(key)
  def __call__(self, *params, **named_params):
    """Provide custom logic for mocking classes that are callable."""
    # Verify the class we are mocking is callable
    # NOTE(review): the local name 'callable' shadows the builtin here.
    callable = self._class_to_mock.__dict__.get('__call__', None)
    if callable is None:
      raise TypeError('Not callable')
    # Because the call is happening directly on this object instead of a method,
    # the call on the mock method is made right here
    mock_method = self._CreateMockMethod('__call__')
    return mock_method(*params, **named_params)
  @property
  def __class__(self):
    """Return the class that is being mocked."""
    # Reporting the mocked class as our type lets type checks (e.g.
    # isinstance) treat the mock as an instance of the mocked class.
    return self._class_to_mock
class MockMethod(object):
    """Callable mock method.

    A MockMethod should act exactly like the method it mocks, accepting parameters
    and returning a value, or throwing an exception (as specified). When this
    method is called, it can optionally verify whether the called method (name and
    signature) matches the expected method.
    """

    def __init__(self, method_name, call_queue, replay_mode):
        """Construct a new mock method.

        Args:
          # method_name: the name of the method
          # call_queue: deque of calls, verify this call against the head, or add
          #     this call to the queue.
          # replay_mode: False if we are recording, True if we are verifying calls
          #     against the call queue.
          method_name: str
          call_queue: list or deque
          replay_mode: bool
        """
        self._name = method_name
        self._call_queue = call_queue
        # Normalize to a deque so the popleft()/appendleft() calls used by
        # _PopNextMethod and the method groups are O(1).
        if not isinstance(call_queue, deque):
            self._call_queue = deque(self._call_queue)
        self._replay_mode = replay_mode
        # Call signature captured by __call__, and scripted behaviour set
        # later via AndReturn / AndRaise / WithSideEffects.
        self._params = None
        self._named_params = None
        self._return_value = None
        self._exception = None
        self._side_effects = None

    def __call__(self, *params, **named_params):
        """Log parameters and return the specified return value.

        If the Mock(Anything/Object) associated with this call is in record mode,
        this MockMethod will be pushed onto the expected call queue. If the mock
        is in replay mode, this will pop a MockMethod off the top of the queue and
        verify this call is equal to the expected call.

        Raises:
          UnexpectedMethodCall if this call is supposed to match an expected method
            call and it does not.
        """
        self._params = params
        self._named_params = named_params
        if not self._replay_mode:
            # Recording: remember this call and return self so the test can
            # chain AndReturn/AndRaise/WithSideEffects on it.
            self._call_queue.append(self)
            return self
        # Replaying: find the matching expectation and act out its script.
        expected_method = self._VerifyMethodCall()
        if expected_method._side_effects:
            expected_method._side_effects(*params, **named_params)
        if expected_method._exception:
            raise expected_method._exception
        return expected_method._return_value

    def __getattr__(self, name):
        """Raise an AttributeError with a helpful message."""
        # Note this also makes hasattr() False for every unknown attribute.
        raise AttributeError('MockMethod has no attribute "%s". '
                             'Did you remember to put your mocks in replay mode?' % name)

    def _PopNextMethod(self):
        """Pop the next method from our call queue."""
        try:
            return self._call_queue.popleft()
        except IndexError:
            # The queue is exhausted: this call was never expected at all.
            raise UnexpectedMethodCallError(self, None)

    def _VerifyMethodCall(self):
        """Verify the called method is expected.

        This can be an ordered method, or part of an unordered set.

        Returns:
          The expected mock method.

        Raises:
          UnexpectedMethodCall if the method called was not expected.
        """
        expected = self._PopNextMethod()
        # Loop here, because we might have a MethodGroup followed by another
        # group.
        while isinstance(expected, MethodGroup):
            expected, method = expected.MethodCalled(self)
            if method is not None:
                return method
        # This is a mock method, so just check equality.
        if expected != self:
            raise UnexpectedMethodCallError(self, expected)
        return expected

    def __str__(self):
        # Render as "name(positional, kw=value) -> return_value" for error
        # messages; keyword args are sorted for a deterministic string.
        params = ', '.join(
            [repr(p) for p in self._params or []] +
            ['%s=%r' % x for x in sorted((self._named_params or {}).items())])
        desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
        return desc

    def __eq__(self, rhs):
        """Test whether this MockMethod is equivalent to another MockMethod.

        Args:
          # rhs: the right hand side of the test
          rhs: MockMethod
        """
        # Comparators stored in self._params make this an "accepts" check,
        # not strict structural equality.
        return (isinstance(rhs, MockMethod) and
                self._name == rhs._name and
                self._params == rhs._params and
                self._named_params == rhs._named_params)

    def __ne__(self, rhs):
        """Test whether this MockMethod is not equivalent to another MockMethod.

        Args:
          # rhs: the right hand side of the test
          rhs: MockMethod
        """
        return not self == rhs

    def GetPossibleGroup(self):
        """Returns a possible group from the end of the call queue or None if no
        other methods are on the stack.
        """
        # Remove this method from the tail of the queue so we can add it to a group.
        this_method = self._call_queue.pop()
        assert this_method == self
        # Determine if the tail of the queue is a group, or just a regular ordered
        # mock method.
        group = None
        try:
            group = self._call_queue[-1]
        except IndexError:
            pass
        return group

    def _CheckAndCreateNewGroup(self, group_name, group_class):
        """Checks if the last method (a possible group) is an instance of our
        group_class. Adds the current method to this group or creates a new one.

        Args:
          group_name: the name of the group.
          group_class: the class used to create instance of this new group
        """
        group = self.GetPossibleGroup()
        # If this is a group, and it is the correct group, add the method.
        if isinstance(group, group_class) and group.group_name() == group_name:
            group.AddMethod(self)
            return self
        # Create a new group and add the method.
        new_group = group_class(group_name)
        new_group.AddMethod(self)
        self._call_queue.append(new_group)
        return self

    def InAnyOrder(self, group_name="default"):
        """Move this method into a group of unordered calls.

        A group of unordered calls must be defined together, and must be executed
        in full before the next expected method can be called.  There can be
        multiple groups that are expected serially, if they are given
        different group names.  The same group name can be reused if there is a
        standard method call, or a group with a different name, spliced between
        usages.

        Args:
          group_name: the name of the unordered group.

        Returns:
          self
        """
        return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)

    def MultipleTimes(self, group_name="default"):
        """Move this method into group of calls which may be called multiple times.

        A group of repeating calls must be defined together, and must be executed in
        full before the next expected method can be called.

        Args:
          group_name: the name of the unordered group.

        Returns:
          self
        """
        return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)

    def AndReturn(self, return_value):
        """Set the value to return when this method is called.

        Args:
          # return_value can be anything.
        """
        self._return_value = return_value
        # NOTE: returns the value itself rather than self, so no further
        # chaining is possible after AndReturn.
        return return_value

    def AndRaise(self, exception):
        """Set the exception to raise when this method is called.

        Args:
          # exception: the exception to raise when this method is called.
          exception: Exception
        """
        self._exception = exception

    def WithSideEffects(self, side_effects):
        """Set the side effects that are simulated when this method is called.

        Args:
          side_effects: A callable which modifies the parameters or other relevant
            state which a given test case depends on.

        Returns:
          Self for chaining with AndReturn and AndRaise.
        """
        self._side_effects = side_effects
        return self
class Comparator:
    """Base class for all Mox comparators.

    A Comparator can be used as a parameter to a mocked method when the exact
    value is not known.  For example, the code you are testing might build up a
    long SQL string that is passed to your mock DAO.  You're only interested that
    the IN clause contains the proper primary keys, so you can set your mock
    up as follows:

      mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)

    Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.

    A Comparator may replace one or more parameters, for example:
      # return at most 10 rows
      mock_dao.RunQuery(StrContains('SELECT'), 10)

      or

      # Return some non-deterministic number of rows
      mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
    """

    def equals(self, rhs):
        """Special equals method that all comparators must implement.

        Args:
          rhs: any python object
        """
        # Use the call form of raise: the old "raise E, msg" statement is
        # Python 2-only syntax and a SyntaxError on Python 3.
        raise NotImplementedError('method must be implemented by a subclass.')

    def __eq__(self, rhs):
        # Equality delegates to the subclass-defined equals().
        return self.equals(rhs)

    def __ne__(self, rhs):
        return not self.equals(rhs)
class IsA(Comparator):
    """Comparator verifying that a parameter is of a given type or class.

    Example:
      mock_dao.Connect(IsA(DbConnectInfo))
    """

    def __init__(self, class_name):
        """Remember the basic python type or class to match against."""
        self._class_name = class_name

    def equals(self, rhs):
        """Return True when rhs is an instance of the stored type/class.

        Args:
          rhs: object

        Returns:
          bool
        """
        try:
            result = isinstance(rhs, self._class_name)
        except TypeError:
            # isinstance() rejects some pseudo-types (e.g. cStringIO.StringIO),
            # so fall back to comparing the raw types directly.
            result = type(rhs) == type(self._class_name)
        return result

    def __repr__(self):
        return str(self._class_name)
class IsAlmost(Comparator):
    """Comparator for near-equality, mainly useful for floating point values.

    Example:
      mock_dao.SetTimeout((IsAlmost(3.9)))
    """

    def __init__(self, float_value, places=7):
        """Remember the reference value and the rounding precision.

        Args:
          float_value: The value for making the comparison.
          places: The number of decimal places to round to.
        """
        self._float_value = float_value
        self._places = places

    def equals(self, rhs):
        """Return True when rhs rounds to the reference value.

        Args:
          rhs: the value to compare to float_value

        Returns:
          bool
        """
        try:
            difference = rhs - self._float_value
            return round(difference, self._places) == 0
        except TypeError:
            # Either float_value or rhs is not a number.
            return False

    def __repr__(self):
        return str(self._float_value)
class StrContains(Comparator):
    """Comparator checking that a substring occurs in a string parameter.

    Useful e.g. when mocking a database that receives SQL as a string:

      mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
    """

    def __init__(self, search_string):
        """Remember the substring to search for.

        Args:
          search_string: str, the string being searched for
        """
        self._search_string = search_string

    def equals(self, rhs):
        """Return True when the search string occurs within rhs.

        Args:
          rhs: object

        Returns:
          bool
        """
        try:
            return rhs.find(self._search_string) != -1
        except Exception:
            # rhs has no find() or it blew up: treat as "not contained".
            return False

    def __repr__(self):
        return "<str containing '%s'>" % self._search_string
class Regex(Comparator):
    """Comparator matching a string parameter against a regular expression."""

    def __init__(self, pattern, flags=0):
        """Compile the pattern once up front.

        Args:
          pattern: str, the regular expression to search for
          flags: int, flags passed to re.compile as the second argument
        """
        self.regex = re.compile(pattern, flags=flags)

    def equals(self, rhs):
        """Return True when the pattern is found anywhere in rhs.

        Returns:
          bool
        """
        return self.regex.search(rhs) is not None

    def __repr__(self):
        rep = "<regular expression '%s'" % self.regex.pattern
        if self.regex.flags:
            rep += ', flags=%d' % self.regex.flags
        return rep + '>'
class In(Comparator):
    """Comparator checking that an item (or key) is in a list (or dict).

    Example:
      mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
    """

    def __init__(self, key):
        """Remember the item to look for.

        Args:
          key: anything that could be in a list or a key in a dict
        """
        self._key = key

    def equals(self, rhs):
        """Return True when the remembered item is contained in rhs.

        Args:
          rhs: dict

        Returns:
          bool
        """
        return self._key in rhs

    def __repr__(self):
        return "<sequence or map containing '%s'>" % self._key
class ContainsKeyValue(Comparator):
    """Comparator checking that a key/value pair is in a dict parameter.

    Example:
      mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
    """

    def __init__(self, key, value):
        """Remember the expected mapping entry.

        Args:
          key: a key in a dict
          value: the corresponding value
        """
        self._key = key
        self._value = value

    def equals(self, rhs):
        """Return True when rhs maps the key to the expected value.

        Returns:
          bool
        """
        try:
            return rhs[self._key] == self._value
        except Exception:
            # Missing key, unsubscriptable rhs, or a failing comparison all
            # count as "does not contain the entry".
            return False

    def __repr__(self):
        return "<map containing the entry '%s: %s'>" % (self._key, self._value)
class SameElementsAs(Comparator):
    """Checks whether iterables contain the same elements (ignoring order).

    Example:
      mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
    """

    def __init__(self, expected_seq):
        """Initialize.

        Args:
          expected_seq: a sequence
        """
        self._expected_seq = expected_seq

    def equals(self, actual_seq):
        """Check to see whether actual_seq has same elements as expected_seq.

        Args:
          actual_seq: sequence

        Returns:
          bool
        """
        from collections import Counter
        try:
            # Counter keeps multiplicities, so duplicated elements must match
            # in count as well.  The previous element->None dict collapsed
            # duplicates and wrongly equated e.g. [1, 1, 2] with [1, 2].
            expected = Counter(self._expected_seq)
            actual = Counter(actual_seq)
        except TypeError:
            # Fall back to slower list-compare if any of the objects are
            # unhashable.
            expected = sorted(self._expected_seq)
            actual = sorted(actual_seq)
        return expected == actual

    def __repr__(self):
        return "<sequence with same elements as '%s'>" % self._expected_seq
class And(Comparator):
    """Evaluates one or more Comparators on RHS; matches when ALL match."""

    def __init__(self, *args):
        """Remember the sub-comparators.

        Args:
          *args: One or more Comparator
        """
        self._comparators = args

    def equals(self, rhs):
        """Return True when every sub-comparator accepts rhs.

        Args:
          rhs: can be anything

        Returns:
          bool
        """
        # all() short-circuits on the first failure, like the original loop.
        return all(comparator.equals(rhs) for comparator in self._comparators)

    def __repr__(self):
        return '<AND %s>' % str(self._comparators)
class Or(Comparator):
    """Evaluates one or more Comparators on RHS; matches when ANY matches."""

    def __init__(self, *args):
        """Remember the sub-comparators.

        Args:
          *args: One or more Mox comparators
        """
        self._comparators = args

    def equals(self, rhs):
        """Return True when at least one sub-comparator accepts rhs.

        Args:
          rhs: can be anything

        Returns:
          bool
        """
        # any() short-circuits on the first match, like the original loop.
        return any(comparator.equals(rhs) for comparator in self._comparators)

    def __repr__(self):
        return '<OR %s>' % str(self._comparators)
class Func(Comparator):
    """Delegates parameter validation to an arbitrary callable.

    Use this when you need advanced logic to decide whether a parameter is
    acceptable.  The callable receives the parameter and should return True
    or False.

    Example:
      def myParamValidator(param):
        # Advanced logic here
        return True

      mock_dao.DoSomething(Func(myParamValidator), true)
    """

    def __init__(self, func):
        """Remember the validator.

        Args:
          func: callable that takes one parameter and returns a bool
        """
        self._func = func

    def equals(self, rhs):
        """Return the validator's verdict for rhs.

        Args:
          rhs: any python object

        Returns:
          the result of func(rhs)
        """
        return self._func(rhs)

    def __repr__(self):
        return str(self._func)
class IgnoreArg(Comparator):
    """Matches (and ignores) any single argument.

    Example:
      # Check if CastMagic is called with 3 as first arg and 'disappear' as third.
      mymock.CastMagic(3, IgnoreArg(), 'disappear')
    """

    def equals(self, unused_rhs):
        """Accept anything.

        Args:
          unused_rhs: any python object

        Returns:
          always returns True
        """
        return True

    def __repr__(self):
        return '<IgnoreArg>'
class MethodGroup(object):
    """Base class containing common behaviour for MethodGroups."""

    def __init__(self, group_name):
        """Remember this group's name."""
        self._group_name = group_name

    def group_name(self):
        """Return the name this group was created with."""
        return self._group_name

    def __str__(self):
        return '<{0} "{1}">'.format(self.__class__.__name__, self._group_name)

    def AddMethod(self, mock_method):
        # Subclasses decide how methods are stored.
        raise NotImplementedError

    def MethodCalled(self, mock_method):
        # Subclasses decide how a replayed call is matched.
        raise NotImplementedError

    def IsSatisfied(self):
        # Subclasses decide when the group's expectations are fulfilled.
        raise NotImplementedError
class UnorderedGroup(MethodGroup):
    """UnorderedGroup holds a set of method calls that may occur in any order.

    This construct is helpful for non-deterministic events, such as iterating
    over the keys of a dict.
    """

    def __init__(self, group_name):
        super(UnorderedGroup, self).__init__(group_name)
        # Expected methods still awaiting a matching call.
        self._methods = []

    def AddMethod(self, mock_method):
        """Add a method to this group.

        Args:
          mock_method: A mock method to be added to this group.
        """
        self._methods.append(mock_method)

    def MethodCalled(self, mock_method):
        """Remove a method call from the group.

        If the method is not in the set, an UnexpectedMethodCallError will be
        raised.

        Args:
          mock_method: a mock method that should be equal to a method in the group.

        Returns:
          The mock method from the group

        Raises:
          UnexpectedMethodCallError if the mock_method was not in the group.
        """
        # Check to see if this method exists, and if so, remove it from the set
        # and return it.
        for method in self._methods:
            if method == mock_method:
                # Remove the called mock_method instead of the method in the group.
                # The called method will match any comparators when equality is checked
                # during removal.  The method in the group could pass a comparator to
                # another comparator during the equality check.
                self._methods.remove(mock_method)
                # If this group is not empty, put it back at the head of the queue.
                if not self.IsSatisfied():
                    mock_method._call_queue.appendleft(self)
                return self, method
        raise UnexpectedMethodCallError(mock_method, self)

    def IsSatisfied(self):
        """Return True if there are not any methods in this group."""
        return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
    """MultipleTimesGroup holds methods that may be called any number of times.

    Note: Each method must be called at least once.

    This is helpful, if you don't know or care how many times a method is called.
    """

    def __init__(self, group_name):
        super(MultipleTimesGroup, self).__init__(group_name)
        # All expected methods, and the subset actually seen so far.
        self._methods = set()
        self._methods_called = set()

    def AddMethod(self, mock_method):
        """Add a method to this group.

        Args:
          mock_method: A mock method to be added to this group.
        """
        self._methods.add(mock_method)

    def MethodCalled(self, mock_method):
        """Remove a method call from the group.

        If the method is not in the set, an UnexpectedMethodCallError will be
        raised.

        Args:
          mock_method: a mock method that should be equal to a method in the group.

        Returns:
          The mock method from the group

        Raises:
          UnexpectedMethodCallError if the mock_method was not in the group.
        """
        # Check to see if this method exists, and if so add it to the set of
        # called methods.
        for method in self._methods:
            if method == mock_method:
                self._methods_called.add(mock_method)
                # Always put this group back on top of the queue, because we don't know
                # when we are done.
                mock_method._call_queue.appendleft(self)
                return self, method

        # The call did not match any method in the group.  If every expected
        # method has already been seen, fall through to the next expectation;
        # otherwise the call is unexpected.
        if self.IsSatisfied():
            next_method = mock_method._PopNextMethod();
            return next_method, None
        else:
            raise UnexpectedMethodCallError(mock_method, self)

    def IsSatisfied(self):
        """Return True if all methods in this group are called at least once."""
        # NOTE(psycho): We can't use the simple set difference here because we want
        # to match different parameters which are considered the same e.g. IsA(str)
        # and some string. This solution is O(n^2) but n should be small.
        tmp = self._methods.copy()
        for called in self._methods_called:
            for expected in tmp:
                if called == expected:
                    tmp.remove(expected)
                    if not tmp:
                        return True
                    break
        return False
class MoxMetaTestBase(type):
    """Metaclass to add mox cleanup and verification to every test.

    As the mox unit testing class is being constructed (MoxTestBase or a
    subclass), this metaclass will modify all test functions to call the
    CleanUpMox method of the test class after they finish. This means that
    unstubbing and verifying will happen for every test with no additional code,
    and any failures will result in test failures as opposed to errors.
    """

    def __init__(cls, name, bases, d):
        type.__init__(cls, name, bases, d)

        # also get all the attributes from the base classes to account
        # for a case when test class is not the immediate child of MoxTestBase
        for base in bases:
            for attr_name in dir(base):
                d[attr_name] = getattr(base, attr_name)

        # Wrap every test* method so that mox cleanup runs after it.
        for func_name, func in d.items():
            if func_name.startswith('test') and callable(func):
                setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))

    @staticmethod
    def CleanUpTest(cls, func):
        """Adds Mox cleanup code to any MoxTestBase method.

        Always unsets stubs after a test. Will verify all mocks for tests that
        otherwise pass.

        Args:
          cls: MoxTestBase or subclass; the class whose test method we are altering.
          func: method; the method of the MoxTestBase test class we wish to alter.

        Returns:
          The modified method.
        """
        def new_method(self, *args, **kwargs):
            mox_obj = getattr(self, 'mox', None)
            cleanup_mox = False
            if mox_obj and isinstance(mox_obj, Mox):
                cleanup_mox = True
            try:
                func(self, *args, **kwargs)
            finally:
                # Unstub even when the test body raised, so failures in one
                # test cannot leak stubs into the next.
                if cleanup_mox:
                    mox_obj.UnsetStubs()
            # Only verify when the test body completed without raising.
            if cleanup_mox:
                mox_obj.VerifyAll()
        # Preserve the wrapped function's identity for test discovery/reports.
        new_method.__name__ = func.__name__
        new_method.__doc__ = func.__doc__
        new_method.__module__ = func.__module__
        return new_method
class MoxTestBase(unittest.TestCase):
    """Convenience test class to make stubbing easier.

    Sets up a "mox" attribute which is an instance of Mox - any mox tests will
    want this. Also automatically unsets any stubs and verifies that all mock
    methods have been called at the end of each test, eliminating boilerplate
    code.
    """
    # Python 2 metaclass hook.  NOTE(review): on Python 3 this attribute is
    # ignored; the metaclass would have to be given as `metaclass=` instead.
    __metaclass__ = MoxMetaTestBase

    def setUp(self):
        # Fresh Mox per test; MoxMetaTestBase wraps each test* method to call
        # UnsetStubs()/VerifyAll() on this instance afterwards.
        self.mox = Mox()
| bsd-3-clause |
hideshis/scripts_for_research | FOSE2016/file_class_method_link/old/java_class_method_linker.py | 2 | 4103 | # -*- coding: utf-8 -*-
import os
import subprocess
import sys
import csv
import re
def class_info_getter(f, directory):
    """Describe a .class file: its name, directory and full path.

    Args:
      f: file name of the compiled class (e.g. 'Foo.class').
      directory: directory that contains the file.

    Returns:
      dict with 'name', 'path' and 'full path' keys.
    """
    return {
        'name': f,
        'path': directory,
        'full path': directory + '/' + f,
    }
def java_dict_getter(class_dict, info):
    """Map a compiled class back to its Java source file.

    Args:
      class_dict: dict from class_info_getter() describing the .class file.
      info: first line of `javap` output, e.g. 'Compiled from "Foo.java"'.

    Returns:
      dict with 'name', 'path' and 'full path' keys for the .java source.

    Raises:
      IOError: if the derived source file does not exist on disk.
    """
    java_dict = {}
    # The source file name is the quoted token in: Compiled from "Foo.java"
    java_dict['name'] = info.split('"')[1]
    # Maven layout: classes under target/classes mirror src/main/java.
    java_dict['path'] = class_dict['path'].replace('/target/classes/',
                                                   '/src/main/java/')
    java_dict['full path'] = java_dict['path'] + '/' + java_dict['name']
    if not os.path.exists(java_dict['full path']):
        # Previously this printed a garbage message and called sys.exit()
        # with no argument, which exits with status 0 and hides the failure.
        # Raise a real error instead so callers (and CI) can see it.
        raise IOError('Java source not found: %s' % java_dict['full path'])
    return java_dict
def javap_exe(class_dict):
    """Run `javap -private -l` on the class file and return the output lines.

    NOTE(review): written for Python 2 -- on Python 3 check_output() returns
    bytes, so the str replace/split below would need a decode first.
    """
    cmd = 'javap -private -l ' + class_dict['full path']
    result = subprocess.check_output(cmd, shell=True)
    # Normalize Windows line endings before splitting into lines.
    result = result.replace('\r', '')
    return result.split('\n')
def class_detail_info_getter(class_dict):
    """Run plain `javap` on the class file and return its output lines.

    The trailing [:-1] drops the empty string that split('\n') produces
    after the final newline.
    """
    cmd = 'javap ' + class_dict['full path']
    result = subprocess.check_output(cmd, shell=True)
    result = result.replace('\r', '')
    return result.split('\n')[:-1]
def method_list_getter(target):
    """Parse `javap -l` output lines into a list of method dicts.

    Each dict has 'name' (method name plus parenthesized argument part) and
    'start'/'end' line numbers derived from the LineNumberTable, or the
    string 'none' when no line table follows the declaration.

    NOTE(review): Python 2 only (print statements, dict.has_key).  The inner
    loop ranges to len(target) inclusive, so target[y] can raise IndexError
    on the last iteration -- confirm javap output always ends with a
    non-table line before relying on this.
    """
    # extract line numbers which declare method.
    #pattern = re.compile('^ \w.+\);$')
    #method_index_list = [n for n,l in enumerate(target) if pattern.match(l)]
    # A method declaration line contains "(...)" and ends with ';', and is
    # immediately followed by a LineNumberTable section.
    method_candidate_index_list = [n for n,l in enumerate(target) if (('(' in l) and (')' in l) and l.endswith(';'))]
    method_index_list = [n for n in method_candidate_index_list if ('LineNumberTable:' in target[n+1])]
    method_list = []
    for x in method_index_list:
        method_dict = {}
        # Split "ret name(args);" into the "(args)" part and the name token.
        arg_part = target[x][target[x].index('('):target[x].index(')')+1]
        others = target[x][:target[x].index('(')]
        method_name = others.split(' ')[-1]
        print target[x]
        print arg_part
        print method_name
        print '\n'
        method_dict['name'] = method_name + arg_part
        #print method_dict['name']
        # flag tracks whether the expected 'LineNumberTable:' header has been
        # consumed yet for this method.
        flag = 0
        for y in range(x+1, len(target) + 1):
            if (flag == 0) and (not target[y].endswith('LineNumberTable:')):
                # No line table: record sentinel values and stop.
                method_dict['start'] = 'none'
                method_dict['end'] = 'none'
                break
            elif flag == 0:
                flag = 1
                continue
            if target[y].startswith(' line '):
                # First table entry fixes 'start'; every entry extends 'end'.
                if not method_dict.has_key('start'):
                    method_dict['start'] = int(target[y].split(' ')[-2][:-1]) - 1
                    method_dict['end'] = int(target[y].split(' ')[-2][:-1]) + 1
                else:
                    method_dict['end'] = int(target[y].split(' ')[-2][:-1]) + 1
            else:
                """
                if not method_dict.has_key('end'):
                    method_dict['end'] = method_dict['start'] + 1
                """
                break
        method_list.append(method_dict)
    return method_list
# Walk every compiled .class file under initial_path, map it to its Java
# source via javap, and write one CSV row per method with its source line
# range.  NOTE(review): Python 2 only (print statements); the output path
# and initial_path are hard-coded for the original author's machine.
fp = open('java_class_method_link.csv', 'w')
csvWriter = csv.writer(fp, lineterminator='\n')
initial_path = '/Users/hideshi-s/Desktop/ehime/httpclient/httpclient/target/classes'
#initial_path = '/Users/hideshi-s/Desktop/ehime/javaTest'
dir_counter = 0
for (directory, _, files) in os.walk(initial_path):
    num_file = len(files)
    file_counter = 0
    for f in files:
        if f.endswith('.class'):
            # class_dict = {'name':f, 'path':directory, 'full path':directory+'/'+f}
            class_dict = class_info_getter(f, directory)
            # execute 'javap -l -private class_dict['full path']'
            javap_result_list = javap_exe(class_dict)
            # First javap line is: Compiled from "Foo.java"
            java_dict = java_dict_getter(class_dict, javap_result_list[0])
            method_list = method_list_getter(javap_result_list)
            for method_dict in method_list:
                link_info = [java_dict['full path'], class_dict['full path'], method_dict['name'], method_dict['start'], method_dict['end']]
                csvWriter.writerow(link_info)
        # Progress output: directory index, file index, files in directory.
        file_counter += 1
        print dir_counter, file_counter, num_file
    dir_counter += 1
fp.close()
| mit |
chrisdamba/mining | manage.py | 4 | 3181 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from gevent import monkey
monkey.patch_all()
import sys
import click
from bottle import static_file, Bottle, run, view
from bottle import TEMPLATE_PATH as T
from bottle.ext.websocket import GeventWebSocketServer
from bottle.ext.auth.decorator import login
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
from beaker.middleware import SessionMiddleware
from mining.controllers.api import api_app
from mining.controllers.data import data_app
from mining.utils import conf
from mining.auth import auth
from mining.settings import TEMPLATE_PATH, STATIC_PATH
from mining.celeryc import celery_app
from mining.bin.scheduler import scheduler_app
from mining.bin.demo.build_admin import build
# Python 2-only hack: re-expose sys.setdefaultencoding (hidden at startup)
# and force UTF-8 as the process-wide default encoding.
# NOTE(review): widely discouraged and unavailable on Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')

# Make the project's template directory the first place bottle looks.
T.insert(0, TEMPLATE_PATH)

# Beaker session configuration: file-backed sessions stored under /tmp.
session_opts = {
    'session.type': 'file',
    'session.data_dir': '/tmp/openmining.data',
    'session.lock_dir': '/tmp/openmining.lock',
    'session.cookie_expires': 50000,
    'session.auto': True
}

# Wrap the bottle app in session middleware, mount the API and data
# sub-applications, and install the authentication plugin.
app = SessionMiddleware(Bottle(), session_opts)
app.wrap_app.mount('/api', api_app)
app.wrap_app.mount('/data', data_app)
app.wrap_app.install(auth)
@app.wrap_app.route('/assets/<path:path>', name='assets')
def static(path):
    """Serve static assets (css/js/images) from STATIC_PATH."""
    yield static_file(path, root=STATIC_PATH)


@app.wrap_app.route('/')
@login()
@view('index.html')
def index():
    """Render the main dashboard page (login required)."""
    return {'get_url': app.wrap_app.get_url,
            'protocol': conf('openmining')['protocol'],
            'lang': conf('openmining')['lang']}


@app.wrap_app.route('/login')
@view('login.html')
def login():
    """Render the login page.

    NOTE(review): this function shadows the `login` decorator imported from
    bottle.ext.auth.decorator (already applied to index() above) -- confirm
    nothing later expects the decorator under this name.
    """
    return {'get_url': app.wrap_app.get_url,
            'lang': conf('openmining')['lang']}
@click.group()
def cmds():
    """Root click command group for the management CLI."""
    pass


@cmds.command()
@click.option('--port', type=int, help=u'Set application server port!')
@click.option('--ip', type=str, help=u'Set application server ip!')
@click.option('--debug', default=False,
              help=u'Set application server debug!')
def runserver(port, ip, debug):
    """Start the web server (gevent WebSocket server)."""
    if debug is None:
        # NOTE(review): serve_forever() blocks, so the echo/run calls below
        # are unreachable on this branch -- confirm this is intended.
        server = WSGIServer((ip, port), app, handler_class=WebSocketHandler)
        server.serve_forever()
    click.echo(u'OpenMining start server at: {}:{}'.format(ip, port))
    run(app=app, host=ip, port=port, debug=debug,
        reloader=True, server=GeventWebSocketServer)


@cmds.command()
@click.option('--concurrency', type=int, default=4,
              help="""Number of child processes processing the queue. The
              default is the number of CPUs available on your system.""")
def celery(concurrency):
    """Start the celery worker pool for background tasks."""
    click.echo(u'OpenMining start tasks')
    args = ["celery", "worker", "--concurrency={}".format(concurrency)]
    celery_app.start(args)


@cmds.command()
def scheduler():
    """Start the periodic-task scheduler."""
    click.echo(u'OpenMining start scheduler')
    scheduler_app()


@cmds.command()
@click.option('--level', type=int, default=0,
              help="What level of data volume?")
def build_demo(level):
    """Load the demo dataset at the requested volume level."""
    click.echo(u'OpenMining load demo system')
    build(level)


if __name__ == "__main__":
    # Feed the [openmining] config section to runserver as click defaults.
    default_map = {"runserver": conf('openmining')}
    default_map["runserver"]["debug"] = False
    cmds(default_map=default_map)
| mit |
gaddman/ansible | lib/ansible/modules/network/panos/panos_admin.py | 27 | 5636 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_admin
short_description: Add or modify PAN-OS user accounts password.
description:
- PanOS module that allows changes to the user account passwords by doing
API calls to the Firewall using pan-api as the protocol.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
admin_username:
description:
- username for admin user
default: "admin"
admin_password:
description:
- password for admin user
required: true
role:
description:
- role for admin user
commit:
description:
- commit if changed
type: bool
default: 'yes'
extends_documentation_fragment: panos
'''
EXAMPLES = '''
# Set the password of user admin to "badpassword"
# Doesn't commit the candidate config
- name: set admin password
panos_admin:
ip_address: "192.168.1.1"
password: "admin"
admin_username: admin
admin_password: "badpassword"
commit: False
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "okey dokey"
'''
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
_ADMIN_XPATH = "/config/mgt-config/users/entry[@name='%s']"
def admin_exists(xapi, admin_username):
    """Fetch the admin user's config <entry> element, or None if absent."""
    xapi.get(_ADMIN_XPATH % admin_username)
    e = xapi.element_root.find('.//entry')
    return e
def admin_set(xapi, module, admin_username, admin_password, role):
    """Create or update an admin account in the candidate configuration.

    Returns:
      True if the candidate configuration was changed, False otherwise.

    NOTE(review): if admin_password is None and the user does not exist yet,
    `phash` below is referenced without ever being assigned (NameError) --
    confirm callers always supply a password when creating a new user.
    """
    if admin_password is not None:
        # Have the firewall hash the password so we never store it in clear
        # text in the configuration.
        xapi.op(cmd='request password-hash password "%s"' % admin_password,
                cmd_xml=True)
        r = xapi.element_root
        phash = r.find('.//phash').text
    if role is not None:
        # Only superuser/superreader get the bare "yes" role flag; any other
        # role is written with an empty element body.
        rbval = "yes"
        if role != "superuser" and role != 'superreader':
            rbval = ""

    ea = admin_exists(xapi, admin_username)
    if ea is not None:
        # user exists
        changed = False
        if role is not None:
            rb = ea.find('.//role-based')
            if rb is not None:
                if rb[0].tag != role:
                    # Role changed: delete the old role element and set the
                    # new one under permissions/role-based.
                    changed = True
                    xpath = _ADMIN_XPATH % admin_username
                    xpath += '/permissions/role-based/%s' % rb[0].tag
                    xapi.delete(xpath=xpath)
                    xpath = _ADMIN_XPATH % admin_username
                    xpath += '/permissions/role-based'
                    xapi.set(xpath=xpath,
                             element='<%s>%s</%s>' % (role, rbval, role))
        if admin_password is not None:
            xapi.edit(xpath=_ADMIN_XPATH % admin_username + '/phash',
                      element='<phash>%s</phash>' % phash)
            changed = True
        return changed

    # User does not exist: create the full entry in one set call.
    # setup the non encrypted part of the monitor
    exml = []
    exml.append('<phash>%s</phash>' % phash)
    exml.append('<permissions><role-based><%s>%s</%s>'
                '</role-based></permissions>' % (role, rbval, role))
    exml = ''.join(exml)
    # module.fail_json(msg=exml)
    xapi.set(xpath=_ADMIN_XPATH % admin_username, element=exml)
    return True
def main():
    """Ansible module entry point: validate parameters and apply changes."""
    argument_spec = dict(
        ip_address=dict(),
        password=dict(no_log=True),
        username=dict(default='admin'),
        admin_username=dict(default='admin'),
        admin_password=dict(no_log=True),
        role=dict(),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python required for this module')

    # Connection parameters for the firewall's management API.
    ip_address = module.params["ip_address"]
    if not ip_address:
        module.fail_json(msg="ip_address should be specified")
    password = module.params["password"]
    if not password:
        module.fail_json(msg="password is required")
    username = module.params['username']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    admin_username = module.params['admin_username']
    if admin_username is None:
        module.fail_json(msg="admin_username is required")
    admin_password = module.params['admin_password']
    role = module.params['role']
    commit = module.params['commit']

    changed = admin_set(xapi, module, admin_username, admin_password, role)
    # Only commit the candidate config when something changed and the user
    # asked for a commit; sync=True waits for the commit job to finish.
    if changed and commit:
        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)

    module.exit_json(changed=changed, msg="okey dokey")


if __name__ == '__main__':
    main()
| gpl-3.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/pip/_vendor/requests/structures.py | 615 | 3012 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
try:
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2 fallback
    from collections import Mapping, MutableMapping

from .compat import OrderedDict
class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    # NOTE: subclassing collections.abc.MutableMapping (not the old
    # collections.MutableMapping alias, removed in Python 3.10).

    def __init__(self, data=None, **kwargs):
        self._store = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the original-cased keys.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """Dictionary lookup object.

    Item access reads from the instance ``__dict__`` (i.e. attributes),
    not from the underlying ``dict`` storage, and missing keys fall
    through to ``None`` instead of raising ``KeyError``.
    """

    def __init__(self, name=None):
        super(LookupDict, self).__init__()
        self.name = name

    def __repr__(self):
        return "<lookup '%s'>" % self.name

    def __getitem__(self, key):
        # Allow fall-through: unknown keys resolve to None.
        return self.get(key)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| bsd-2-clause |
cstavr/synnefo | snf-deploy/snfdeploy/context.py | 2 | 4910 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import datetime
import ConfigParser
from snfdeploy import constants
from snfdeploy import config
from snfdeploy import status
context = sys.modules[__name__]
class Context(object):
    """Tracks the "current" node/role/cluster/setup of a deployment run.

    The module itself (``context``) doubles as shared mutable state:
    instances default unspecified fields from it, and ``update()``
    writes changes back to it.
    """

    def __repr__(self):
        ret = "[%s]" % datetime.datetime.now().strftime("%H:%M:%S")
        ret += " [%s %s]" % (self.node_info.ip, self.node_info.name)
        ret += " [%s %s %s %s]" % \
            (self.node, self.role, self.cluster, self.setup)
        return ret

    def __init__(self, node=None, role=None, cluster=None, setup=None):
        # Any field left unspecified falls back to the module-level value.
        if not node:
            node = context.node
        if not role:
            role = context.role
        if not setup:
            setup = context.setup
        if not cluster:
            cluster = context.cluster
        self.node = node
        self.role = role
        self.cluster = cluster
        self.setup = setup
        self.update_info()

    def update(self, node=None, role=None, cluster=None, setup=None):
        # Propagate every explicitly given field to the shared module state.
        if node:
            context.node = self.node = node
        if role:
            context.role = self.role = role
        if cluster:
            context.cluster = self.cluster = cluster
        if setup:
            context.setup = self.setup = setup
        self.update_info()

    def update_info(self):
        # Resolve the well-known service roles for the current setup.
        self.ns = self._get(constants.NS)
        self.nfs = self._get(constants.NFS)
        self.mq = self._get(constants.MQ)
        self.db = self._get(constants.DB)
        self.astakos = self._get(constants.ASTAKOS)
        self.cyclades = self._get(constants.CYCLADES)
        self.pithos = self._get(constants.PITHOS)
        self.stats = self._get(constants.STATS)
        self.cms = self._get(constants.CMS)
        self.router = self._get(constants.ROUTER)
        self.client = self._get(constants.CLIENT)

    def _get(self, role):
        """Return node info for `role`, or the dummy node when unset."""
        try:
            return config.get_single_node_role_info(self.setup, role)
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            return config.get_node_info(constants.DUMMY_NODE)

    @property
    def node_info(self):
        return config.get_info(node=self.node)

    @property
    def cluster_info(self):
        return config.get_info(cluster=self.cluster)

    @property
    def clusters(self):
        return config.get(self.setup, constants.CLUSTERS)

    @property
    def masters(self):
        return config.get(self.cluster, constants.MASTER)

    @property
    def master(self):
        info = config.get_single_node_role_info(self.cluster, constants.MASTER)
        info.alias = None
        return info

    @property
    def vmcs(self):
        return config.get(self.cluster, constants.VMC)

    @property
    def cluster_nodes(self):
        # Masters may also be VMCs; de-duplicate the union.
        return list(set(self.masters + self.vmcs))

    @property
    def all_nodes(self):
        return config.find_all_nodes(self.setup)

    @property
    def all_ips(self):
        # IP address of every node participating in this setup.
        return [config.get_node_info(n).ip for n in self.all_nodes]

    def get(self, role):
        # Setup-level definitions win; fall back to the cluster section.
        # Was a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt); narrowed to Exception.  It likely could be
        # narrowed further to the ConfigParser errors used in _get().
        try:
            return config.get(self.setup, role)
        except Exception:
            return config.get(self.cluster, role)
def backup():
    # Snapshot the module-level context fields so restore() can roll back.
    context.node_backup = context.node
    context.role_backup = context.role
    context.cluster_backup = context.cluster
    context.setup_backup = context.setup
def restore():
    # Roll the module-level context back to the last backup() snapshot.
    context.node = context.node_backup
    context.role = context.role_backup
    context.cluster = context.cluster_backup
    context.setup = context.setup_backup
def get_passwd(target):
    # With passgen enabled, passwords come from the status registry;
    # otherwise they are read directly off the config object.
    if not config.passgen:
        return getattr(config, target)
    return status.get_passwd(context.setup, target)
def update_passwords():
    # Refresh every known password/secret attribute on the config object
    # from the status registry when password generation is enabled.
    if config.passgen:
        for p in constants.ALL_PASSWRD_AND_SECRETS:
            passwd = status.get_passwd(context.setup, p)
            setattr(config, p, passwd)
    else:
        print "Using passwords found in configuration files"
def init(args):
    # Seed the module-level context from the parsed CLI arguments, then
    # make sure passwords are in place before any component runs.
    context.node = args.node
    context.role = args.role
    context.cluster = args.cluster
    context.setup = args.setup
    context.method = args.method
    context.component = args.component
    context.target_nodes = args.target_nodes
    context.cmd = args.cmd
    update_passwords()
| gpl-3.0 |
faux123/pantech_vega_racer_2_kernel | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from src to dst converted from nsec to msec."""
    delta_ns = dst - src
    return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Print one tx record: device, length, queue timestamp, and the two
    # latencies queue->xmit (Qdisc column) and xmit->free (netdevice).
    # Honors the dev= option filter.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
    # Render one merged NET_RX hunk as an ASCII timeline: each hard IRQ
    # entry (with any netif_rx it raised), then the softirq entry and every
    # receive event, all as millisecond offsets from the first IRQ entry.
    # Honors the dev= option filter.
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # napi_poll closes the hunk when it is the last event.
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' is set by skb_copy_datagram_iovec matching; 'handle'
            # by kfree_skb/consume_skb matching (mutually exclusive).
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    # perf-script hook, called once before events are delivered.  Parses
    # script options from sys.argv (tx, rx, dev=<name>, debug) into the
    # module-level flags; defaults to showing both tx and rx.
    global show_tx
    global show_rx
    global dev
    global debug

    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    # perf-script hook, called once after all events were recorded.  Sorts
    # the recorded tracepoints chronologically, dispatches each to its
    # handler to rebuild per-packet rx/tx histories, then prints them.
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                        b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print "   dev    len      Qdisc        " \
            "       netdevice             free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # perf callback: record softirq entry, but only for the NET_RX vector.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # perf callback: record NET_RX softirq exit.  The "irq__softirq_entry"
    # symbol table is used to decode vec here too, as in the other
    # softirq handlers.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    # perf callback: record NET_RX softirq raise (decoded via the
    # irq__softirq_entry symbol table, as in the other softirq handlers).
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
            irq, irq_name):
    # perf callback: record hard-IRQ handler entry.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            irq, irq_name)
    all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    # perf callback: record hard-IRQ handler exit.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    # perf callback: record a napi poll event.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            napi, dev_name)
    all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
            skblen, dev_name):
    # perf callback: record an skb delivered via netif_receive_skb.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
            skblen, dev_name):
    # perf callback: record an skb queued via netif_rx.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
            skbaddr, skblen, dev_name):
    # perf callback: record an skb entering dev_queue_xmit.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
            skbaddr, skblen, rc, dev_name):
    # perf callback: record the xmit attempt (rc is the driver's return).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
            skbaddr, protocol, location):
    # perf callback: record an skb drop/free with its call-site location.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, protocol, location)
    all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    # perf callback: record a normal (non-drop) skb free.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr)
    all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
            skbaddr, skblen):
    # perf callback: record an skb being copied out to a process.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    # Push a new hard-IRQ record onto this CPU's stack in irq_dic.
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
    # Pop the matching IRQ record; re-queue it only if it accumulated an
    # 'event_list' (i.e. it raised NET_RX) so the softirq pass can use it.
    # NOTE(review): assumes the CPU's stack is non-empty when an exit is
    # seen; an empty list would raise IndexError on pop() — confirm.
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    # Tag the IRQ record on top of this CPU's stack with a NET_RX
    # 'sirq_raise' event, creating its event_list on first use.
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    # Start collecting receive events for the NET_RX softirq on this CPU.
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    # Close out the NET_RX softirq: merge this CPU's pending IRQ records
    # and collected receive events into one hunk on receive_hunk_list.
    # Hunks missing either half are discarded.
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    # Append a napi_poll event to the in-flight NET_RX record, if any.
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    # Attach a netif_rx event to the IRQ record on top of this CPU's
    # stack, creating its event_list on first use.
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    # Record the receive both in the softirq's event list and at the head
    # of rx_skb_list (bounded by buffer_budget) for later matching with
    # copy/free events; count overflows when the budget is exceeded.
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    # A packet entered dev_queue_xmit; remember it at the head of the
    # bounded tx_queue_list, counting overflows past buffer_budget.
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    # On a successful xmit (rc == 0), move the matching packet from
    # tx_queue_list to tx_xmit_list and stamp its xmit time.
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    # An skb was freed via kfree_skb.  Try, in order: the queue list (it
    # never got transmitted — just forget it), the xmit list (free after
    # xmit completes a tx record), then the rx list (annotate the receive
    # with the drop location for display).
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    # Normal free after transmit: complete the matching tx record with its
    # free time and move it to tx_free_list for display.
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    # The skb was copied out to a process: annotate the matching receive
    # record with the consumer (comm/pid) and retire it from the pending
    # list.
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
sjholden/traveller-universe | travelleruniverse/world/world.py | 1 | 1259 | '''
Created on May 30, 2017
@author: Sam Holden <sholden@holden.id.au>
'''
class World(object):
    """Value object holding a world's profile attributes.

    All fields are supplied at construction time and exposed through
    read-only properties.
    """

    def __init__(self, port, size, atmo, hydro, pop, govt, law, tech,
                 bases=None, extra=None):
        """Store the profile values.

        ``bases`` and ``extra`` default to a fresh empty list/dict when
        omitted or falsy; otherwise the passed-in objects are kept as-is.
        """
        self._port = port
        self._size = size
        self._atmo = atmo
        self._hydro = hydro
        self._pop = pop
        self._govt = govt
        self._law = law
        self._tech = tech
        self._bases = bases if bases else []
        self._extra = extra if extra else {}

    @property
    def port(self):
        return self._port

    @property
    def size(self):
        return self._size

    @property
    def atmo(self):
        return self._atmo

    @property
    def hydro(self):
        return self._hydro

    @property
    def pop(self):
        return self._pop

    @property
    def govt(self):
        return self._govt

    @property
    def law(self):
        return self._law

    @property
    def tech(self):
        return self._tech

    @property
    def bases(self):
        return self._bases

    @property
    def extra(self):
        return self._extra
| gpl-3.0 |
cgsheeh/moz_intern_presentation | node_modules/node-gyp/gyp/gyptest.py | 1752 | 8019 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import subprocess
import sys
class CommandRunner(object):
  """
  Executor class for commands, including "commands" implemented by
  Python functions.

  A command may be a string (run through the shell-ish path below), a
  list of argv strings, or a tuple of (python_function, *args).
  """
  verbose = True
  active = True

  def __init__(self, dictionary=None):
    # A fresh dict per call avoids the shared-mutable-default pitfall
    # of the previous `dictionary={}` signature.
    self.subst_dictionary(dictionary if dictionary is not None else {})

  def subst_dictionary(self, dictionary):
    self._subst_dictionary = dictionary

  def subst(self, string, dictionary=None):
    """
    Substitutes (via the format operator) the values in the specified
    dictionary into the specified command.

    The command can be an (action, string) tuple. In all cases, we
    perform substitution on strings and don't worry if something isn't
    a string. (It's probably a Python function to be executed.)
    """
    if dictionary is None:
      dictionary = self._subst_dictionary
    if dictionary:
      try:
        string = string % dictionary
      except TypeError:
        # Non-string "commands" (tuples, etc.) pass through untouched.
        pass
    return string

  def display(self, command, stdout=None, stderr=None):
    """Echo the command about to run, unless verbose is off."""
    if not self.verbose:
      return
    if type(command) == type(()):
      # BUG FIX: this branch used to be followed by an independent
      # if/else that overwrote `s` (and crashed on s.endswith for
      # tuples); it must be an elif chain.
      func = command[0]
      args = command[1:]
      s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
      # TODO:  quote arguments containing spaces
      # TODO:  handle meta characters?
      s = ' '.join(command)
    else:
      s = self.subst(command)
    if not s.endswith('\n'):
      s += '\n'
    sys.stdout.write(s)
    sys.stdout.flush()

  def execute(self, command, stdout=None, stderr=None):
    """
    Executes a single command.

    Returns the command's result: the function's return value for a
    tuple command, the subprocess return code otherwise (0 when
    inactive).
    """
    if not self.active:
      return 0
    if type(command) == type(''):
      # BUG FIX: shlex was referenced here without ever being imported
      # at module level, so string commands raised NameError.
      import shlex
      command = self.subst(command)
      cmdargs = shlex.split(command)
      if cmdargs[0] == 'cd':
        # Translate "cd <dir>" into an in-process chdir.
        command = (os.chdir,) + tuple(cmdargs[1:])
    if type(command) == type(()):
      func = command[0]
      args = command[1:]
      return func(*args)
    else:
      if stdout is sys.stdout:
        # Same as passing sys.stdout, except python2.4 doesn't fail on it.
        subout = None
      else:
        # Open pipe for anything else so Popen works on python2.4.
        subout = subprocess.PIPE
      if stderr is sys.stderr:
        # Same as passing sys.stderr, except python2.4 doesn't fail on it.
        suberr = None
      elif stderr is None:
        # Merge with stdout if stderr isn't specified.
        suberr = subprocess.STDOUT
      else:
        # Open pipe for anything else so Popen works on python2.4.
        suberr = subprocess.PIPE
      p = subprocess.Popen(command,
                           shell=(sys.platform == 'win32'),
                           stdout=subout,
                           stderr=suberr)
      p.wait()
      # NOTE(review): p.stdout.read() yields bytes on Python 3; callers
      # passing text streams would need decoding — confirm when porting.
      if stdout is None:
        self.stdout = p.stdout.read()
      elif stdout is not sys.stdout:
        stdout.write(p.stdout.read())
      if stderr not in (None, sys.stderr):
        stderr.write(p.stderr.read())
      return p.returncode

  def run(self, command, display=None, stdout=None, stderr=None):
    """
    Runs a single command, displaying it first.
    """
    if display is None:
      display = command
    self.display(display)
    return self.execute(command, stdout, stderr)
class Unbuffered(object):
  """Wrap a file object so every write() is immediately flushed."""

  def __init__(self, fp):
    self.fp = fp

  def write(self, arg):
    stream = self.fp
    stream.write(arg)
    stream.flush()

  def __getattr__(self, attr):
    # Everything except write() is delegated to the wrapped stream.
    return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
  """Return True if *f* is named like a gyp test script (gyptest*.py)."""
  has_prefix = f.startswith('gyptest')
  return has_prefix and f.endswith('.py')
def find_all_gyptest_files(directory):
  """Return the sorted paths of every gyptest*.py file under *directory*,
  pruning any .svn subdirectories from the walk."""
  result = []
  for root, dirs, files in os.walk(directory):
    if '.svn' in dirs:
      # os.walk honors in-place edits of dirs, so this prunes descent.
      dirs.remove('.svn')
    result.extend(os.path.join(root, f) for f in files if is_test_name(f))
  result.sort()
  return result
def main(argv=None):
  """Collect gyptest-*.py scripts, run each once per requested generator
  format, and return 1 if any test failed, else 0."""
  if argv is None:
    argv = sys.argv

  usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
  parser = optparse.OptionParser(usage=usage)
  parser.add_option("-a", "--all", action="store_true",
            help="run all tests")
  parser.add_option("-C", "--chdir", action="store", default=None,
            help="chdir to the specified directory")
  parser.add_option("-f", "--format", action="store", default='',
            help="run tests with the specified formats")
  parser.add_option("-G", '--gyp_option', action="append", default=[],
            help="Add -G options to the gyp command line")
  parser.add_option("-l", "--list", action="store_true",
            help="list available tests and exit")
  parser.add_option("-n", "--no-exec", action="store_true",
            help="no execute, just print the command line")
  parser.add_option("--passed", action="store_true",
            help="report passed tests")
  parser.add_option("--path", action="append", default=[],
            help="additional $PATH directory")
  parser.add_option("-q", "--quiet", action="store_true",
            help="quiet, don't print test command lines")
  opts, args = parser.parse_args(argv[1:])

  if opts.chdir:
    os.chdir(opts.chdir)

  if opts.path:
    # Prepend any extra --path directories to $PATH for the child tests.
    extra_path = [os.path.abspath(p) for p in opts.path]
    extra_path = os.pathsep.join(extra_path)
    os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']

  if not args:
    if not opts.all:
      sys.stderr.write('Specify -a to get all tests.\n')
      return 1
    args = ['test']

  # Expand directory arguments into their gyptest*.py contents.
  tests = []
  for arg in args:
    if os.path.isdir(arg):
      tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
    else:
      if not is_test_name(os.path.basename(arg)):
        print >>sys.stderr, arg, 'is not a valid gyp test name.'
        sys.exit(1)
      tests.append(arg)

  if opts.list:
    for test in tests:
      print test
    sys.exit(0)

  CommandRunner.verbose = not opts.quiet
  CommandRunner.active = not opts.no_exec
  cr = CommandRunner()

  os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
  if not opts.quiet:
    sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])

  passed = []
  failed = []
  no_result = []

  if opts.format:
    format_list = opts.format.split(',')
  else:
    # Default formats per host platform.
    # TODO:  not duplicate this mapping from pylib/gyp/__init__.py
    format_list = {
      'aix5':     ['make'],
      'freebsd7': ['make'],
      'freebsd8': ['make'],
      'openbsd5': ['make'],
      'cygwin':   ['msvs'],
      'win32':    ['msvs', 'ninja'],
      'linux2':   ['make', 'ninja'],
      'linux3':   ['make', 'ninja'],
      'darwin':   ['make', 'ninja', 'xcode', 'xcode-ninja'],
    }[sys.platform]

  for format in format_list:
    os.environ['TESTGYP_FORMAT'] = format
    if not opts.quiet:
      sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)

    gyp_options = []
    for option in opts.gyp_option:
      gyp_options += ['-G', option]
    if gyp_options and not opts.quiet:
      sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)

    for test in tests:
      status = cr.run([sys.executable, test] + gyp_options,
                      stdout=sys.stdout,
                      stderr=sys.stderr)
      # Exit status 2 signals "no result"; any other non-zero is failure.
      if status == 2:
        no_result.append(test)
      elif status:
        failed.append(test)
      else:
        passed.append(test)

  if not opts.quiet:
    def report(description, tests):
      if tests:
        if len(tests) == 1:
          sys.stdout.write("\n%s the following test:\n" % description)
        else:
          fmt = "\n%s the following %d tests:\n"
          sys.stdout.write(fmt % (description, len(tests)))
        sys.stdout.write("\t" + "\n\t".join(tests) + "\n")

    if opts.passed:
      report("Passed", passed)
    report("Failed", failed)
    report("No result from", no_result)

  if failed:
    return 1
  else:
    return 0
if __name__ == "__main__":
sys.exit(main())
| mit |
SickGear/SickGear | lib/dateutil/rrule.py | 2 | 68293 | # -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
including support for caching of results.
"""
import calendar
import datetime
import heapq
import itertools
import re
import sys
from functools import wraps
# For warning about deprecation of until and count
from warnings import warn
from six import advance_iterator, integer_types
from six.moves import _thread, range
from ._common import weekday as weekdaybase
try:
from math import gcd
except ImportError:
from fractions import gcd
__all__ = ["rrule", "rruleset", "rrulestr",
           "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
           "HOURLY", "MINUTELY", "SECONDLY",
           "MO", "TU", "WE", "TH", "FR", "SA", "SU"]

# Every mask is 7 days longer to handle cross-year weekly periods.
# M366MASK[i] is the month (1-12) of year-day i in a leap year,
# MDAY366MASK[i] the day-of-month, NMDAY366MASK[i] the negative
# (from-the-end) day-of-month.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
# Cumulative day counts at the start of each month (leap / non-leap year).
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
# Remove the leap-day entry from each 365-day variant, then freeze them.
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)

FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']

(YEARLY,
 MONTHLY,
 WEEKLY,
 DAILY,
 HOURLY,
 MINUTELY,
 SECONDLY) = list(range(7))

# Imported on demand.
easter = None
parser = None
class weekday(weekdaybase):
    """
    This version of weekday does not allow n = 0.
    """
    def __init__(self, wkday, n=None):
        # A "zeroth" occurrence of a weekday is meaningless; reject it
        # rather than silently producing a rule that never matches.
        if n == 0:
            raise ValueError("Can't create weekday with n==0")

        super(weekday, self).__init__(wkday, n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
def _invalidates_cache(f):
"""
Decorator for rruleset methods which may invalidate the
cached length.
"""
@wraps(f)
def inner_func(self, *args, **kwargs):
rv = f(self, *args, **kwargs)
self._invalidate_cache()
return rv
return inner_func
class rrulebase(object):
    """Common base for recurrence iterables (``rrule`` / ``rruleset``).

    Provides optional caching of generated occurrences plus the shared
    query API (``count``/``before``/``after``/``xafter``/``between``).
    Subclasses implement ``_iter()``, a generator yielding occurrences in
    ascending order, and set ``self._len`` once that generator is
    exhausted.
    """
    def __init__(self, cache=False):
        # cache: when true, occurrences are memoized in self._cache so
        # repeated iteration does not recompute the recurrence.
        if cache:
            self._cache = []
            self._cache_lock = _thread.allocate_lock()
            self._invalidate_cache()
        else:
            self._cache = None
            self._cache_complete = False
            self._len = None
    def __iter__(self):
        # Fully cached: iterate the stored list.  No cache: iterate the
        # raw generator.  Partially cached: iterate while filling.
        if self._cache_complete:
            return iter(self._cache)
        elif self._cache is None:
            return self._iter()
        else:
            return self._iter_cached()
    def _invalidate_cache(self):
        # Drop any cached results and restart the underlying generator.
        if self._cache is not None:
            self._cache = []
            self._cache_complete = False
            self._cache_gen = self._iter()
            # _iter_cached() can finish with the lock still held (see the
            # break paths there); free it so refilling can acquire it.
            if self._cache_lock.locked():
                self._cache_lock.release()
        self._len = None
    def _iter_cached(self):
        # Yield occurrences from the shared cache, pulling new items from
        # the shared generator in batches of 10 whenever this consumer
        # catches up with the end of the cache.
        i = 0
        gen = self._cache_gen
        cache = self._cache
        acquire = self._cache_lock.acquire
        release = self._cache_lock.release
        while gen:
            if i == len(cache):
                acquire()
                if self._cache_complete:
                    break
                try:
                    for j in range(10):
                        cache.append(advance_iterator(gen))
                except StopIteration:
                    self._cache_gen = gen = None
                    self._cache_complete = True
                    break
                release()
            yield cache[i]
            i += 1
        # NOTE(review): both `break` paths above exit with the lock still
        # held; it is only released later by _invalidate_cache() -- confirm
        # this is the intended behavior for concurrent consumers.
        # Once complete, self._len was set by the exhausted _iter().
        while i < self._len:
            yield cache[i]
            i += 1
    def __getitem__(self, item):
        if self._cache_complete:
            return self._cache[item]
        elif isinstance(item, slice):
            if item.step and item.step < 0:
                # Negative step: need the full list to slice backwards.
                return list(iter(self))[item]
            else:
                return list(itertools.islice(self,
                                             item.start or 0,
                                             item.stop or sys.maxsize,
                                             item.step or 1))
        elif item >= 0:
            # Non-negative index: advance the iterator just far enough.
            gen = iter(self)
            try:
                for i in range(item+1):
                    res = advance_iterator(gen)
            except StopIteration:
                raise IndexError
            return res
        else:
            # Negative index: materialize the whole recurrence.
            return list(iter(self))[item]
    def __contains__(self, item):
        # Relies on ascending order: stop as soon as we pass `item`.
        if self._cache_complete:
            return item in self._cache
        else:
            for i in self:
                if i == item:
                    return True
                elif i > item:
                    return False
        return False
    # __len__() introduces a large performance penalty.
    def count(self):
        """ Returns the number of recurrences in this set. It will have to
        go through the whole recurrence, if this hasn't been done before. """
        if self._len is None:
            # Exhausting the iterator sets self._len as a side effect.
            for x in self:
                pass
        return self._len
    def before(self, dt, inc=False):
        """ Returns the last recurrence before the given datetime instance. The
        inc keyword defines what happens if dt is an occurrence. With
        inc=True, if dt itself is an occurrence, it will be returned. """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        last = None
        if inc:
            for i in gen:
                if i > dt:
                    break
                last = i
        else:
            for i in gen:
                if i >= dt:
                    break
                last = i
        return last
    def after(self, dt, inc=False):
        """ Returns the first recurrence after the given datetime instance. The
        inc keyword defines what happens if dt is an occurrence. With
        inc=True, if dt itself is an occurrence, it will be returned.  """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        if inc:
            for i in gen:
                if i >= dt:
                    return i
        else:
            for i in gen:
                if i > dt:
                    return i
        return None
    def xafter(self, dt, count=None, inc=False):
        """
        Generator which yields up to `count` recurrences after the given
        datetime instance, equivalent to `after`.
        :param dt:
            The datetime at which to start generating recurrences.
        :param count:
            The maximum number of recurrences to generate. If `None` (default),
            dates are generated until the recurrence rule is exhausted.
        :param inc:
            If `dt` is an instance of the rule and `inc` is `True`, it is
            included in the output.
        :yields: Yields a sequence of `datetime` objects.
        """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        # Select the comparison function
        if inc:
            comp = lambda dc, dtc: dc >= dtc
        else:
            comp = lambda dc, dtc: dc > dtc
        # Generate dates
        n = 0
        for d in gen:
            if comp(d, dt):
                if count is not None:
                    n += 1
                    if n > count:
                        break
                yield d
    def between(self, after, before, inc=False, count=1):
        """ Returns all the occurrences of the rrule between after and before.
        The inc keyword defines what happens if after and/or before are
        themselves occurrences. With inc=True, they will be included in the
        list, if they are found in the recurrence set. """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        started = False
        l = []
        if inc:
            for i in gen:
                if i > before:
                    break
                elif not started:
                    if i >= after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        else:
            for i in gen:
                if i >= before:
                    break
                elif not started:
                    if i > after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        return l
class rrule(rrulebase):
    """
    That's the base of the rrule operation. It accepts all the keywords
    defined in the RFC as its constructor parameters (except byday,
    which was renamed to byweekday) and more. The constructor prototype is::
            rrule(freq)
    Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
    or SECONDLY.
    .. note::
        Per RFC section 3.3.10, recurrence instances falling on invalid dates
        and times are ignored rather than coerced:
            Recurrence rules may generate recurrence instances with an invalid
            date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
            on a day where the local time is moved forward by an hour at 1:00
            AM). Such recurrence instances MUST be ignored and MUST NOT be
            counted as part of the recurrence set.
        This can lead to possibly surprising behavior when, for example, the
        start date occurs at the end of the month:
        >>> from dateutil.rrule import rrule, MONTHLY
        >>> from datetime import datetime
        >>> start_date = datetime(2014, 12, 31)
        >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
        ... # doctest: +NORMALIZE_WHITESPACE
        [datetime.datetime(2014, 12, 31, 0, 0),
         datetime.datetime(2015, 1, 31, 0, 0),
         datetime.datetime(2015, 3, 31, 0, 0),
         datetime.datetime(2015, 5, 31, 0, 0)]
    Additionally, it supports the following keyword arguments:
    :param dtstart:
        The recurrence start. Besides being the base for the recurrence,
        missing parameters in the final recurrence instances will also be
        extracted from this date. If not given, datetime.now() will be used
        instead.
    :param interval:
        The interval between each freq iteration. For example, when using
        YEARLY, an interval of 2 means once every two years, but with HOURLY,
        it means once every two hours. The default interval is 1.
    :param wkst:
        The week start day. Must be one of the MO, TU, WE constants, or an
        integer, specifying the first day of the week. This will affect
        recurrences based on weekly periods. The default week start is got
        from calendar.firstweekday(), and may be modified by
        calendar.setfirstweekday().
    :param count:
        If given, this determines how many occurrences will be generated.
        .. note::
            As of version 2.5.0, the use of the keyword ``until`` in conjunction
            with ``count`` is deprecated, to make sure ``dateutil`` is fully
            compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/
            html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count``
            **must not** occur in the same call to ``rrule``.
    :param until:
        If given, this must be a datetime instance specifying the upper-bound
        limit of the recurrence. The last recurrence in the rule is the greatest
        datetime that is less than or equal to the value specified in the
        ``until`` parameter.
        .. note::
            As of version 2.5.0, the use of the keyword ``until`` in conjunction
            with ``count`` is deprecated, to make sure ``dateutil`` is fully
            compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/
            html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count``
            **must not** occur in the same call to ``rrule``.
    :param bysetpos:
        If given, it must be either an integer, or a sequence of integers,
        positive or negative. Each given integer will specify an occurrence
        number, corresponding to the nth occurrence of the rule inside the
        frequency period. For example, a bysetpos of -1 if combined with a
        MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
        result in the last work day of every month.
    :param bymonth:
        If given, it must be either an integer, or a sequence of integers,
        meaning the months to apply the recurrence to.
    :param bymonthday:
        If given, it must be either an integer, or a sequence of integers,
        meaning the month days to apply the recurrence to.
    :param byyearday:
        If given, it must be either an integer, or a sequence of integers,
        meaning the year days to apply the recurrence to.
    :param byeaster:
        If given, it must be either an integer, or a sequence of integers,
        positive or negative. Each integer will define an offset from the
        Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
        Sunday itself. This is an extension to the RFC specification.
    :param byweekno:
        If given, it must be either an integer, or a sequence of integers,
        meaning the week numbers to apply the recurrence to. Week numbers
        have the meaning described in ISO8601, that is, the first week of
        the year is that containing at least four days of the new year.
    :param byweekday:
        If given, it must be either an integer (0 == MO), a sequence of
        integers, one of the weekday constants (MO, TU, etc), or a sequence
        of these constants. When given, these variables will define the
        weekdays where the recurrence will be applied. It's also possible to
        use an argument n for the weekday instances, which will mean the nth
        occurrence of this weekday in the period. For example, with MONTHLY,
        or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
        first friday of the month where the recurrence happens. Notice that in
        the RFC documentation, this is specified as BYDAY, but was renamed to
        avoid the ambiguity of that keyword.
    :param byhour:
        If given, it must be either an integer, or a sequence of integers,
        meaning the hours to apply the recurrence to.
    :param byminute:
        If given, it must be either an integer, or a sequence of integers,
        meaning the minutes to apply the recurrence to.
    :param bysecond:
        If given, it must be either an integer, or a sequence of integers,
        meaning the seconds to apply the recurrence to.
    :param cache:
        If given, it must be a boolean value specifying to enable or disable
        caching of results. If you will use the same rrule instance multiple
        times, enabling caching will improve the performance considerably.
    """
    def __init__(self, freq, dtstart=None,
                 interval=1, wkst=None, count=None, until=None, bysetpos=None,
                 bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
                 byweekno=None, byweekday=None,
                 byhour=None, byminute=None, bysecond=None,
                 cache=False):
        super(rrule, self).__init__(cache)
        # `easter` is imported lazily below, only when byeaster is used.
        global easter
        # Normalize dtstart: default to "now" (second resolution), promote
        # a date to a datetime, and always drop microseconds.
        if not dtstart:
            if until and until.tzinfo:
                dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0)
            else:
                dtstart = datetime.datetime.now().replace(microsecond=0)
        elif not isinstance(dtstart, datetime.datetime):
            dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
        else:
            dtstart = dtstart.replace(microsecond=0)
        self._dtstart = dtstart
        self._tzinfo = dtstart.tzinfo
        self._freq = freq
        self._interval = interval
        self._count = count
        # Cache the original byxxx rules, if they are provided, as the _byxxx
        # attributes do not necessarily map to the inputs, and this can be
        # a problem in generating the strings. Only store things if they've
        # been supplied (the string retrieval will just use .get())
        self._original_rule = {}
        if until and not isinstance(until, datetime.datetime):
            until = datetime.datetime.fromordinal(until.toordinal())
        self._until = until
        if self._dtstart and self._until:
            if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
                # According to RFC5545 Section 3.3.10:
                # https://tools.ietf.org/html/rfc5545#section-3.3.10
                #
                # > If the "DTSTART" property is specified as a date with UTC
                # > time or a date with local time and time zone reference,
                # > then the UNTIL rule part MUST be specified as a date with
                # > UTC time.
                raise ValueError(
                    'RRULE UNTIL values must be specified in UTC when DTSTART '
                    'is timezone-aware'
                )
        if count is not None and until:
            warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
                 " and has been deprecated in dateutil. Future versions will "
                 "raise an error.", DeprecationWarning)
        if wkst is None:
            self._wkst = calendar.firstweekday()
        elif isinstance(wkst, integer_types):
            self._wkst = wkst
        else:
            self._wkst = wkst.weekday
        if bysetpos is None:
            self._bysetpos = None
        elif isinstance(bysetpos, integer_types):
            if bysetpos == 0 or not (-366 <= bysetpos <= 366):
                raise ValueError("bysetpos must be between 1 and 366, "
                                 "or between -366 and -1")
            self._bysetpos = (bysetpos,)
        else:
            self._bysetpos = tuple(bysetpos)
            for pos in self._bysetpos:
                if pos == 0 or not (-366 <= pos <= 366):
                    raise ValueError("bysetpos must be between 1 and 366, "
                                     "or between -366 and -1")
        if self._bysetpos:
            self._original_rule['bysetpos'] = self._bysetpos
        # When no byxxx day selector was given, derive defaults from
        # dtstart according to the frequency (RFC 5545 behavior).  The
        # corresponding _original_rule entries are set to None so that
        # __str__ does not emit these implied parts.
        if (byweekno is None and byyearday is None and bymonthday is None and
                byweekday is None and byeaster is None):
            if freq == YEARLY:
                if bymonth is None:
                    bymonth = dtstart.month
                    self._original_rule['bymonth'] = None
                bymonthday = dtstart.day
                self._original_rule['bymonthday'] = None
            elif freq == MONTHLY:
                bymonthday = dtstart.day
                self._original_rule['bymonthday'] = None
            elif freq == WEEKLY:
                byweekday = dtstart.weekday()
                self._original_rule['byweekday'] = None
        # bymonth
        if bymonth is None:
            self._bymonth = None
        else:
            if isinstance(bymonth, integer_types):
                bymonth = (bymonth,)
            self._bymonth = tuple(sorted(set(bymonth)))
            if 'bymonth' not in self._original_rule:
                self._original_rule['bymonth'] = self._bymonth
        # byyearday
        if byyearday is None:
            self._byyearday = None
        else:
            if isinstance(byyearday, integer_types):
                byyearday = (byyearday,)
            self._byyearday = tuple(sorted(set(byyearday)))
            self._original_rule['byyearday'] = self._byyearday
        # byeaster
        if byeaster is not None:
            if not easter:
                from dateutil import easter
            if isinstance(byeaster, integer_types):
                self._byeaster = (byeaster,)
            else:
                self._byeaster = tuple(sorted(byeaster))
            self._original_rule['byeaster'] = self._byeaster
        else:
            self._byeaster = None
        # bymonthday
        if bymonthday is None:
            self._bymonthday = ()
            self._bynmonthday = ()
        else:
            if isinstance(bymonthday, integer_types):
                bymonthday = (bymonthday,)
            bymonthday = set(bymonthday)  # Ensure it's unique
            self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
            self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
            # Storing positive numbers first, then negative numbers
            if 'bymonthday' not in self._original_rule:
                self._original_rule['bymonthday'] = tuple(
                    itertools.chain(self._bymonthday, self._bynmonthday))
        # byweekno
        if byweekno is None:
            self._byweekno = None
        else:
            if isinstance(byweekno, integer_types):
                byweekno = (byweekno,)
            self._byweekno = tuple(sorted(set(byweekno)))
            self._original_rule['byweekno'] = self._byweekno
        # byweekday / bynweekday
        if byweekday is None:
            self._byweekday = None
            self._bynweekday = None
        else:
            # If it's one of the valid non-sequence types, convert to a
            # single-element sequence before the iterator that builds the
            # byweekday set.
            if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
                byweekday = (byweekday,)
            # Split plain weekdays (_byweekday) from nth-weekday-of-period
            # entries (_bynweekday); the nth form is ignored above MONTHLY.
            self._byweekday = set()
            self._bynweekday = set()
            for wday in byweekday:
                if isinstance(wday, integer_types):
                    self._byweekday.add(wday)
                elif not wday.n or freq > MONTHLY:
                    self._byweekday.add(wday.weekday)
                else:
                    self._bynweekday.add((wday.weekday, wday.n))
            if not self._byweekday:
                self._byweekday = None
            elif not self._bynweekday:
                self._bynweekday = None
            if self._byweekday is not None:
                self._byweekday = tuple(sorted(self._byweekday))
                orig_byweekday = [weekday(x) for x in self._byweekday]
            else:
                orig_byweekday = ()
            if self._bynweekday is not None:
                self._bynweekday = tuple(sorted(self._bynweekday))
                orig_bynweekday = [weekday(*x) for x in self._bynweekday]
            else:
                orig_bynweekday = ()
            if 'byweekday' not in self._original_rule:
                self._original_rule['byweekday'] = tuple(itertools.chain(
                    orig_byweekday, orig_bynweekday))
        # byhour
        if byhour is None:
            if freq < HOURLY:
                self._byhour = {dtstart.hour}
            else:
                self._byhour = None
        else:
            if isinstance(byhour, integer_types):
                byhour = (byhour,)
            if freq == HOURLY:
                # Restrict to hours actually reachable from dtstart given
                # the interval (see __construct_byset).
                self._byhour = self.__construct_byset(start=dtstart.hour,
                                                      byxxx=byhour,
                                                      base=24)
            else:
                self._byhour = set(byhour)
            self._byhour = tuple(sorted(self._byhour))
            self._original_rule['byhour'] = self._byhour
        # byminute
        if byminute is None:
            if freq < MINUTELY:
                self._byminute = {dtstart.minute}
            else:
                self._byminute = None
        else:
            if isinstance(byminute, integer_types):
                byminute = (byminute,)
            if freq == MINUTELY:
                self._byminute = self.__construct_byset(start=dtstart.minute,
                                                        byxxx=byminute,
                                                        base=60)
            else:
                self._byminute = set(byminute)
            self._byminute = tuple(sorted(self._byminute))
            self._original_rule['byminute'] = self._byminute
        # bysecond
        if bysecond is None:
            if freq < SECONDLY:
                self._bysecond = ((dtstart.second,))
            else:
                self._bysecond = None
        else:
            if isinstance(bysecond, integer_types):
                bysecond = (bysecond,)
            self._bysecond = set(bysecond)
            if freq == SECONDLY:
                self._bysecond = self.__construct_byset(start=dtstart.second,
                                                        byxxx=bysecond,
                                                        base=60)
            else:
                self._bysecond = set(bysecond)
            self._bysecond = tuple(sorted(self._bysecond))
            self._original_rule['bysecond'] = self._bysecond
        # Precompute the fixed time-of-day set for daily-or-slower
        # frequencies; sub-daily frequencies build it per period in _iter().
        if self._freq >= HOURLY:
            self._timeset = None
        else:
            self._timeset = []
            for hour in self._byhour:
                for minute in self._byminute:
                    for second in self._bysecond:
                        self._timeset.append(
                            datetime.time(hour, minute, second,
                                          tzinfo=self._tzinfo))
            self._timeset.sort()
            self._timeset = tuple(self._timeset)
    def __str__(self):
        """
        Output a string that would generate this RRULE if passed to rrulestr.
        This is mostly compatible with RFC5545, except for the
        dateutil-specific extension BYEASTER.
        """
        output = []
        # NOTE(review): h, m, s are assigned but never used below.
        h, m, s = [None] * 3
        if self._dtstart:
            output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
            h, m, s = self._dtstart.timetuple()[3:6]
        parts = ['FREQ=' + FREQNAMES[self._freq]]
        if self._interval != 1:
            parts.append('INTERVAL=' + str(self._interval))
        if self._wkst:
            parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
        if self._count is not None:
            parts.append('COUNT=' + str(self._count))
        if self._until:
            parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
        if self._original_rule.get('byweekday') is not None:
            # The str() method on weekday objects doesn't generate
            # RFC5545-compliant strings, so we should modify that.
            original_rule = dict(self._original_rule)
            wday_strings = []
            for wday in original_rule['byweekday']:
                if wday.n:
                    wday_strings.append('{n:+d}{wday}'.format(
                        n=wday.n,
                        wday=repr(wday)[0:2]))
                else:
                    wday_strings.append(repr(wday))
            original_rule['byweekday'] = wday_strings
        else:
            original_rule = self._original_rule
        partfmt = '{name}={vals}'
        for name, key in [('BYSETPOS', 'bysetpos'),
                          ('BYMONTH', 'bymonth'),
                          ('BYMONTHDAY', 'bymonthday'),
                          ('BYYEARDAY', 'byyearday'),
                          ('BYWEEKNO', 'byweekno'),
                          ('BYDAY', 'byweekday'),
                          ('BYHOUR', 'byhour'),
                          ('BYMINUTE', 'byminute'),
                          ('BYSECOND', 'bysecond'),
                          ('BYEASTER', 'byeaster')]:
            value = original_rule.get(key)
            if value:
                parts.append(partfmt.format(name=name, vals=(','.join(str(v)
                                                             for v in value))))
        output.append('RRULE:' + ';'.join(parts))
        return '\n'.join(output)
    def replace(self, **kwargs):
        """Return new rrule with same attributes except for those attributes given new
        values by whichever keyword arguments are specified."""
        new_kwargs = {"interval": self._interval,
                      "count": self._count,
                      "dtstart": self._dtstart,
                      "freq": self._freq,
                      "until": self._until,
                      "wkst": self._wkst,
                      "cache": False if self._cache is None else True }
        new_kwargs.update(self._original_rule)
        new_kwargs.update(kwargs)
        return rrule(**new_kwargs)
    def _iter(self):
        """Generator producing the recurrence's datetimes in ascending
        order.  Sets ``self._len`` when exhausted so ``count()`` can
        reuse the result."""
        # NOTE: `weekday` here is dtstart's weekday *number* from
        # timetuple(), shadowing the module-level `weekday` class inside
        # this method.
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()
        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond
        ii = _iterinfo(self)
        ii.rebuild(year, month)
        getdayset = {YEARLY: ii.ydayset,
                     MONTHLY: ii.mdayset,
                     WEEKLY: ii.wdayset,
                     DAILY: ii.ddayset,
                     HOURLY: ii.ddayset,
                     MINUTELY: ii.ddayset,
                     SECONDLY: ii.ddayset}[freq]
        if freq < HOURLY:
            timeset = self._timeset
        else:
            gettimeset = {HOURLY: ii.htimeset,
                          MINUTELY: ii.mtimeset,
                          SECONDLY: ii.stimeset}[freq]
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                    (freq >= MINUTELY and
                     self._byminute and minute not in self._byminute) or
                    (freq >= SECONDLY and
                     self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)
        # total counts emitted occurrences; recorded in self._len on exit.
        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)
            # Do the "hard" work ;-)
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                        (byweekno and not ii.wnomask[i]) or
                        (byweekday and ii.wdaymask[i] not in byweekday) or
                        (ii.nwdaymask and not ii.nwdaymask[i]) or
                        (byeaster and not ii.eastermask[i]) or
                        ((bymonthday or bynmonthday) and
                         ii.mdaymask[i] not in bymonthday and
                         ii.nmdaymask[i] not in bynmonthday) or
                        (byyearday and
                         ((i < ii.yearlen and i+1 not in byyearday and
                           -ii.yearlen+i not in byyearday) or
                          (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
                           -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
                    dayset[i] = None
                    filtered = True
            # Output results
            if bysetpos and timeset:
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos-1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                             if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        if count is not None:
                            count -= 1
                            if count < 0:
                                self._len = total
                                return
                        total += 1
                        yield res
            else:
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal + i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                if count is not None:
                                    count -= 1
                                    if count < 0:
                                        self._len = total
                                        return
                                total += 1
                                yield res
            # Handle frequency and interval
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                if wkst > weekday:
                    day += -(weekday+1+(6-wkst))+self._interval*7
                else:
                    day += -(weekday-wkst)+self._interval*7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23-hour)//interval)*interval
                if byhour:
                    ndays, hour = self.__mod_distance(value=hour,
                                                      byxxx=self._byhour,
                                                      base=24)
                else:
                    ndays, hour = divmod(hour+interval, 24)
                if ndays:
                    day += ndays
                    fixday = True
                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439-(hour*60+minute))//interval)*interval
                valid = False
                rep_rate = (24*60)
                for j in range(rep_rate // gcd(interval, rep_rate)):
                    if byminute:
                        nhours, minute = \
                            self.__mod_distance(value=minute,
                                                byxxx=self._byminute,
                                                base=60)
                    else:
                        nhours, minute = divmod(minute+interval, 60)
                    div, hour = divmod(hour+nhours, 24)
                    if div:
                        day += div
                        fixday = True
                        filtered = False
                    if not byhour or hour in byhour:
                        valid = True
                        break
                if not valid:
                    raise ValueError('Invalid combination of interval and ' +
                                     'byhour resulting in empty rule.')
                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399 - (hour * 3600 + minute * 60 + second))
                                // interval) * interval)
                rep_rate = (24 * 3600)
                valid = False
                for j in range(0, rep_rate // gcd(interval, rep_rate)):
                    if bysecond:
                        nminutes, second = \
                            self.__mod_distance(value=second,
                                                byxxx=self._bysecond,
                                                base=60)
                    else:
                        nminutes, second = divmod(second+interval, 60)
                    div, minute = divmod(minute+nminutes, 60)
                    if div:
                        hour += div
                        div, hour = divmod(hour, 24)
                        if div:
                            day += div
                            fixday = True
                    if ((not byhour or hour in byhour) and
                            (not byminute or minute in byminute) and
                            (not bysecond or second in bysecond)):
                        valid = True
                        break
                if not valid:
                    raise ValueError('Invalid combination of interval, ' +
                                     'byhour and byminute resulting in empty' +
                                     ' rule.')
                timeset = gettimeset(hour, minute, second)
            # Normalize a day counter that overflowed the current month.
            if fixday and day > 28:
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
    def __construct_byset(self, start, byxxx, base):
        """
        If a `BYXXX` sequence is passed to the constructor at the same level as
        `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
        specifications which cannot be reached given some starting conditions.
        This occurs whenever the interval is not coprime with the base of a
        given unit and the difference between the starting position and the
        ending position is not coprime with the greatest common denominator
        between the interval and the base. For example, with a FREQ of hourly
        starting at 17:00 and an interval of 4, the only valid values for
        BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
        coprime.
        :param start:
            Specifies the starting position.
        :param byxxx:
            An iterable containing the list of allowed values.
        :param base:
            The largest allowable value for the specified frequency (e.g.
            24 hours, 60 minutes).
        This does not preserve the type of the iterable, returning a set, since
        the values should be unique and the order is irrelevant, this will
        speed up later lookups.
        In the event of an empty set, raises a :exception:`ValueError`, as this
        results in an empty rrule.
        """
        cset = set()
        # Support a single byxxx value.
        if isinstance(byxxx, integer_types):
            byxxx = (byxxx, )
        for num in byxxx:
            i_gcd = gcd(self._interval, base)
            # Use divmod rather than % because we need to wrap negative nums.
            if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
                cset.add(num)
        if len(cset) == 0:
            raise ValueError("Invalid rrule byxxx generates an empty set.")
        return cset
    def __mod_distance(self, value, byxxx, base):
        """
        Calculates the next value in a sequence where the `FREQ` parameter is
        specified along with a `BYXXX` parameter at the same "level"
        (e.g. `HOURLY` specified with `BYHOUR`).
        :param value:
            The old value of the component.
        :param byxxx:
            The `BYXXX` set, which should have been generated by
            `rrule._construct_byset`, or something else which checks that a
            valid rule is present.
        :param base:
            The largest allowable value for the specified frequency (e.g.
            24 hours, 60 minutes).
        If a valid value is not found after `base` iterations (the maximum
        number before the sequence would start to repeat), this raises a
        :exception:`ValueError`, as no valid values were found.
        This returns a tuple of `divmod(n*interval, base)`, where `n` is the
        smallest number of `interval` repetitions until the next specified
        value in `byxxx` is found.
        """
        accumulator = 0
        for ii in range(1, base + 1):
            # Using divmod() over % to account for negative intervals
            div, value = divmod(value + self._interval, base)
            accumulator += div
            if value in byxxx:
                return (accumulator, value)
        # NOTE(review): despite the docstring, no ValueError is raised here;
        # the method falls through returning None if no byxxx value is
        # reachable.  __construct_byset validates reachability up front, so
        # this path should be unreachable -- confirm.
class _iterinfo(object):
    """Precomputed per-year (and per-month) day/week masks consumed by
    rrule._iter() to filter candidate days.

    ``rebuild(year, month)`` refreshes the masks lazily: the expensive
    year-level masks are recomputed only when ``year`` changes, and the
    nth-weekday mask only when the month or year changes.
    """
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]
    def __init__(self, rrule):
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule
    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365 + calendar.isleap(year)
            self.nextyearlen = 365 + calendar.isleap(year + 1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()
            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE
            if not rr._byweekno:
                self.wnomask = None
            else:
                self.wnomask = [0]*(self.yearlen+7)
                # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1, 1, 1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst) % 7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen +
                                            (lyearweekday-rr._wkst) % 7) % 7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1
        if (rr._bynweekday and (month != self.lastmonth or
                                year != self.lastyear)):
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday) % 7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday) % 7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1
        if rr._byeaster:
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1
        self.lastyear = year
        self.lastmonth = month
    def ydayset(self, year, month, day):
        # All days of the year (YEARLY frequency).
        return list(range(self.yearlen)), 0, self.yearlen
    def mdayset(self, year, month, day):
        # Days of the given month only; other slots stay None.
        dset = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            dset[i] = i
        return dset, start, end
    def wdayset(self, year, month, day):
        # We need to handle cross-year weeks here.
        dset = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            dset[i] = i
            i += 1
            # if (not (0 <= i < self.yearlen) or
            #        self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return dset, start, i
    def ddayset(self, year, month, day):
        # Single-day set (used for DAILY and all sub-daily frequencies).
        dset = [None] * self.yearlen
        i = datetime.date(year, month, day).toordinal() - self.yearordinal
        dset[i] = i
        return dset, i, i + 1
    def htimeset(self, hour, minute, second):
        # All times within the given hour allowed by byminute/bysecond.
        tset = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                tset.append(datetime.time(hour, minute, second,
                                          tzinfo=rr._tzinfo))
        tset.sort()
        return tset
    def mtimeset(self, hour, minute, second):
        # All times within the given minute allowed by bysecond.
        tset = []
        rr = self.rrule
        for second in rr._bysecond:
            tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        tset.sort()
        return tset
    def stimeset(self, hour, minute, second):
        # Exactly one time (SECONDLY frequency).
        return (datetime.time(hour, minute, second,
                              tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    """ The rruleset type allows more complex recurrence setups, mixing
    multiple rules, dates, exclusion rules, and exclusion dates. The type
    constructor takes the following keyword arguments:
    :param cache: If True, caching of results will be enabled, improving
    performance of multiple queries considerably. """
    class _genitem(object):
        # Heap entry pairing a generator with its most recently produced
        # datetime (self.dt).  Entries compare by that datetime, so heapq
        # can merge several sorted generators in ascending order.
        def __init__(self, genlist, gen):
            try:
                self.dt = advance_iterator(gen)
                genlist.append(self)
            except StopIteration:
                # An already-exhausted generator never joins the heap list.
                pass
            self.genlist = genlist
            self.gen = gen
        def __next__(self):
            # Advance to the generator's next datetime; on exhaustion,
            # remove this entry from its list and restore the heap order.
            try:
                self.dt = advance_iterator(self.gen)
            except StopIteration:
                if self.genlist[0] is self:
                    heapq.heappop(self.genlist)
                else:
                    self.genlist.remove(self)
                    heapq.heapify(self.genlist)
        next = __next__
        def __lt__(self, other):
            return self.dt < other.dt
        def __gt__(self, other):
            return self.dt > other.dt
        def __eq__(self, other):
            return self.dt == other.dt
        def __ne__(self, other):
            return self.dt != other.dt
    def __init__(self, cache=False):
        super(rruleset, self).__init__(cache)
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []
    @_invalidates_cache
    def rrule(self, rrule):
        """ Include the given :py:class:`rrule` instance in the recurrence set
        generation. """
        self._rrule.append(rrule)
    @_invalidates_cache
    def rdate(self, rdate):
        """ Include the given :py:class:`datetime` instance in the recurrence
        set generation. """
        self._rdate.append(rdate)
    @_invalidates_cache
    def exrule(self, exrule):
        """ Include the given rrule instance in the recurrence set exclusion
        list. Dates which are part of the given recurrence rules will not
        be generated, even if some inclusive rrule or rdate matches them.
        """
        self._exrule.append(exrule)
    @_invalidates_cache
    def exdate(self, exdate):
        """ Include the given datetime instance in the recurrence set
        exclusion list. Dates included that way will not be generated,
        even if some inclusive rrule or rdate matches them. """
        self._exdate.append(exdate)
    def _iter(self):
        # Heap-merge all inclusion sources (rdates + rrules) in ascending
        # order, skipping any datetime also produced by an exclusion
        # source (exdates + exrules) and de-duplicating via `lastdt`.
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate))
        for gen in [iter(x) for x in self._rrule]:
            self._genitem(rlist, gen)
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate))
        for gen in [iter(x) for x in self._exrule]:
            self._genitem(exlist, gen)
        lastdt = None
        total = 0
        heapq.heapify(rlist)
        heapq.heapify(exlist)
        while rlist:
            ritem = rlist[0]
            if not lastdt or lastdt != ritem.dt:
                # Advance exclusion entries strictly before the candidate.
                while exlist and exlist[0] < ritem:
                    exitem = exlist[0]
                    advance_iterator(exitem)
                    if exlist and exlist[0] is exitem:
                        heapq.heapreplace(exlist, exitem)
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            advance_iterator(ritem)
            if rlist and rlist[0] is ritem:
                heapq.heapreplace(rlist, ritem)
        self._len = total
class _rrulestr(object):
    """ Parses a string representation of a recurrence rule or set of
    recurrence rules.

    :param s:
        Required, a string defining one or more recurrence rules.

    :param dtstart:
        If given, used as the default recurrence start if not specified in the
        rule string.

    :param cache:
        If set ``True`` caching of results will be enabled, improving
        performance of multiple queries considerably.

    :param unfold:
        If set ``True`` indicates that a rule string is split over more
        than one line and should be joined before processing.

    :param forceset:
        If set ``True`` forces a :class:`dateutil.rrule.rruleset` to
        be returned.

    :param compatible:
        If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a naive
        :class:`datetime.datetime` object is returned.

    :param tzids:
        If given, a callable or mapping used to retrieve a
        :class:`datetime.tzinfo` from a string representation.
        Defaults to :func:`dateutil.tz.gettz`.

    :param tzinfos:
        Additional time zone names / aliases which may be present in a string
        representation.  See :func:`dateutil.parser.parse` for more
        information.

    :return:
        Returns a :class:`dateutil.rrule.rruleset` or
        :class:`dateutil.rrule.rrule`
    """

    # Maps RFC property values to the module-level frequency constants.
    _freq_map = {"YEARLY": YEARLY,
                 "MONTHLY": MONTHLY,
                 "WEEKLY": WEEKLY,
                 "DAILY": DAILY,
                 "HOURLY": HOURLY,
                 "MINUTELY": MINUTELY,
                 "SECONDLY": SECONDLY}

    # Maps two-letter RFC weekday codes to integer weekday numbers (MO=0).
    _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
                    "FR": 4, "SA": 5, "SU": 6}

    # Generic handlers: each _handle_<NAME> turns one NAME=value pair into
    # the corresponding lowercase keyword argument for the rrule constructor.
    def _handle_int(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = int(value)

    def _handle_int_list(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = [int(x) for x in value.split(',')]

    _handle_INTERVAL = _handle_int
    _handle_COUNT = _handle_int
    _handle_BYSETPOS = _handle_int_list
    _handle_BYMONTH = _handle_int_list
    _handle_BYMONTHDAY = _handle_int_list
    _handle_BYYEARDAY = _handle_int_list
    _handle_BYEASTER = _handle_int_list
    _handle_BYWEEKNO = _handle_int_list
    _handle_BYHOUR = _handle_int_list
    _handle_BYMINUTE = _handle_int_list
    _handle_BYSECOND = _handle_int_list

    def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
        rrkwargs["freq"] = self._freq_map[value]

    def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
        # ``parser`` is imported lazily into a module-level global to avoid
        # a circular import between dateutil.rrule and dateutil.parser.
        global parser
        if not parser:
            from dateutil import parser
        try:
            rrkwargs["until"] = parser.parse(value,
                                             ignoretz=kwargs.get("ignoretz"),
                                             tzinfos=kwargs.get("tzinfos"))
        except ValueError:
            raise ValueError("invalid until date")

    def _handle_WKST(self, rrkwargs, name, value, **kwargs):
        rrkwargs["wkst"] = self._weekday_map[value]

    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
        """
        Two ways to specify this: +1MO or MO(+1)
        """
        l = []
        for wday in value.split(','):
            if '(' in wday:
                # If it's of the form TH(+1), etc.
                splt = wday.split('(')
                w = splt[0]
                n = int(splt[1][:-1])
            elif len(wday):
                # If it's of the form +1MO
                # Split the leading signed ordinal (if any) from the
                # two-letter weekday code.
                for i in range(len(wday)):
                    if wday[i] not in '+-0123456789':
                        break
                n = wday[:i] or None
                w = wday[i:]
                if n:
                    n = int(n)
            else:
                raise ValueError("Invalid (empty) BYDAY specification.")
            l.append(weekdays[self._weekday_map[w]](n))
        rrkwargs["byweekday"] = l

    # BYDAY is the RFC 5545 spelling of BYWEEKDAY.
    _handle_BYDAY = _handle_BYWEEKDAY

    def _parse_rfc_rrule(self, line,
                         dtstart=None,
                         cache=False,
                         ignoretz=False,
                         tzinfos=None):
        """Parse a single ``RRULE:`` (or bare) rule line into an rrule."""
        if line.find(':') != -1:
            name, value = line.split(':')
            if name != "RRULE":
                raise ValueError("unknown parameter name")
        else:
            value = line
        rrkwargs = {}
        # Dispatch each NAME=value pair to the matching _handle_<NAME>.
        for pair in value.split(';'):
            name, value = pair.split('=')
            name = name.upper()
            value = value.upper()
            try:
                getattr(self, "_handle_"+name)(rrkwargs, name, value,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos)
            except AttributeError:
                raise ValueError("unknown parameter '%s'" % name)
            except (KeyError, ValueError):
                raise ValueError("invalid '%s': %s" % (name, value))
        return rrule(dtstart=dtstart, cache=cache, **rrkwargs)

    def _parse_date_value(self, date_value, parms, rule_tzids,
                          ignoretz, tzids, tzinfos):
        """Parse a DTSTART/RDATE/EXDATE value, honoring TZID/VALUE parms.

        Returns a list of parsed :class:`datetime.datetime` values.
        """
        global parser
        if not parser:
            from dateutil import parser
        datevals = []
        value_found = False
        TZID = None

        for parm in parms:
            if parm.startswith("TZID="):
                # ``rule_tzids`` maps the upper-cased TZID back to its
                # original spelling as found in the rule string.
                try:
                    tzkey = rule_tzids[parm.split('TZID=')[-1]]
                except KeyError:
                    continue
                if tzids is None:
                    from . import tz
                    tzlookup = tz.gettz
                elif callable(tzids):
                    tzlookup = tzids
                else:
                    tzlookup = getattr(tzids, 'get', None)
                    if tzlookup is None:
                        msg = ('tzids must be a callable, mapping, or None, '
                               'not %s' % tzids)
                        raise ValueError(msg)

                TZID = tzlookup(tzkey)
                continue

            # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found
            # only once.
            if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}:
                raise ValueError("unsupported parm: " + parm)
            else:
                if value_found:
                    msg = ("Duplicate value parameter found in: " + parm)
                    raise ValueError(msg)
                value_found = True

        for datestr in date_value.split(','):
            date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)
            if TZID is not None:
                if date.tzinfo is None:
                    date = date.replace(tzinfo=TZID)
                else:
                    raise ValueError('DTSTART/EXDATE specifies multiple timezone')
            datevals.append(date)

        return datevals

    def _parse_rfc(self, s,
                   dtstart=None,
                   cache=False,
                   unfold=False,
                   forceset=False,
                   compatible=False,
                   ignoretz=False,
                   tzids=None,
                   tzinfos=None):
        """Parse a full (possibly multi-line) rule string.

        Returns a single :class:`rrule` when the input is one plain RRULE
        and ``forceset`` is false; otherwise builds an :class:`rruleset`.
        """
        global parser
        if compatible:
            forceset = True
            unfold = True

        # Capture original-case TZID names before upper-casing the string,
        # since time zone identifiers are case-sensitive.
        TZID_NAMES = dict(map(
            lambda x: (x.upper(), x),
            re.findall('TZID=(?P<name>[^:]+):', s)
        ))
        s = s.upper()
        if not s.strip():
            raise ValueError("empty string")
        if unfold:
            # RFC line folding: a continuation line starts with a space and
            # is joined onto the previous line.
            lines = s.splitlines()
            i = 0
            while i < len(lines):
                line = lines[i].rstrip()
                if not line:
                    del lines[i]
                elif i > 0 and line[0] == " ":
                    lines[i-1] += line[1:]
                    del lines[i]
                else:
                    i += 1
        else:
            lines = s.split()
        if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
                                                  s.startswith('RRULE:'))):
            # Fast path: a single plain rule, no set machinery needed.
            return self._parse_rfc_rrule(lines[0], cache=cache,
                                         dtstart=dtstart, ignoretz=ignoretz,
                                         tzinfos=tzinfos)
        else:
            rrulevals = []
            rdatevals = []
            exrulevals = []
            exdatevals = []
            for line in lines:
                if not line:
                    continue
                if line.find(':') == -1:
                    # A bare line defaults to an RRULE property value.
                    name = "RRULE"
                    value = line
                else:
                    name, value = line.split(':', 1)
                parms = name.split(';')
                if not parms:
                    raise ValueError("empty property name")
                name = parms[0]
                parms = parms[1:]
                if name == "RRULE":
                    for parm in parms:
                        raise ValueError("unsupported RRULE parm: "+parm)
                    rrulevals.append(value)
                elif name == "RDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError("unsupported RDATE parm: "+parm)
                    rdatevals.append(value)
                elif name == "EXRULE":
                    for parm in parms:
                        raise ValueError("unsupported EXRULE parm: "+parm)
                    exrulevals.append(value)
                elif name == "EXDATE":
                    exdatevals.extend(
                        self._parse_date_value(value, parms,
                                               TZID_NAMES, ignoretz,
                                               tzids, tzinfos)
                    )
                elif name == "DTSTART":
                    dtvals = self._parse_date_value(value, parms, TZID_NAMES,
                                                    ignoretz, tzids, tzinfos)
                    if len(dtvals) != 1:
                        raise ValueError("Multiple DTSTART values specified:" +
                                         value)
                    dtstart = dtvals[0]
                else:
                    raise ValueError("unsupported property: "+name)
            if (forceset or len(rrulevals) > 1 or rdatevals
                    or exrulevals or exdatevals):
                # Lazy import of the parser, only needed for RDATE/EXDATE.
                if not parser and (rdatevals or exdatevals):
                    from dateutil import parser
                rset = rruleset(cache=cache)
                for value in rrulevals:
                    rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                     ignoretz=ignoretz,
                                                     tzinfos=tzinfos))
                for value in rdatevals:
                    for datestr in value.split(','):
                        rset.rdate(parser.parse(datestr,
                                                ignoretz=ignoretz,
                                                tzinfos=tzinfos))
                for value in exrulevals:
                    rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                      ignoretz=ignoretz,
                                                      tzinfos=tzinfos))
                for value in exdatevals:
                    rset.exdate(value)
                # Compatibility mode includes DTSTART itself as an RDATE.
                if compatible and dtstart:
                    rset.rdate(dtstart)
                return rset
            else:
                return self._parse_rfc_rrule(rrulevals[0],
                                             dtstart=dtstart,
                                             cache=cache,
                                             ignoretz=ignoretz,
                                             tzinfos=tzinfos)

    def __call__(self, s, **kwargs):
        return self._parse_rfc(s, **kwargs)
# Public module-level parser instance; use as ``rrulestr("RRULE:...")``.
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
| gpl-3.0 |
simonspa/django-datacollect | datacollect/survey/migrations/0074_auto_20161128_1022.py | 1 | 2106 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-28 09:22
from __future__ import unicode_literals
from django.db import migrations, models
import select_multiple_field.models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.9): updates the choice sets
    # of three fields on the ``record`` model.  Do not edit the generated
    # field definitions by hand; regenerate with ``makemigrations`` instead.

    dependencies = [
        ('survey', '0073_auto_20161123_1720'),
    ]

    operations = [
        migrations.AlterField(
            model_name='record',
            name='gender',
            field=models.IntegerField(choices=[(0, 'Male'), (1, 'Female'), (2, 'Trans/inter*'), (3, 'Gender unclear')]),
        ),
        migrations.AlterField(
            model_name='record',
            name='govreply_action',
            field=models.CharField(blank=True, choices=[('protect', 'Protection measures granted'), ('release', 'Individual released early'), ('notrial', 'Individual released without trial'), ('improve', 'Improved prison conditions'), ('investigate', 'Investigation opened'), ('prosecuted', 'Perpetrator suspended/prosecuted'), ('issued', 'Travel documents issued'), ('other', 'Other')], max_length=11, verbose_name='Action taken according to reply'),
        ),
        migrations.AlterField(
            model_name='record',
            name='relevant_activities',
            field=select_multiple_field.models.SelectMultipleField(choices=[('?', 'N/A'), ('CSA', 'Civil society activist'), ('TUA', 'Trade union activist'), ('RA', 'Religious association'), ('PM', 'Politician/Party member'), ('CL', 'Community leader'), ('L', 'Lawyer/Judge/Attorney'), ('J', 'Journalist/Editor'), ('CA', 'Cyberactivist'), ('A', 'Artist/Writer'), ('S', 'Student'), ('T', 'Teacher/Professor/Scientist'), ('MP', 'Medical professional'), ('HW', 'Humanitarian worker'), ('V', 'Victim/witness of HR violations'), ('OP', 'Ombudsperson/Procuraduria/NHRI'), ('UN', 'UN official'), ('GAS', 'Government/Army/Security forces'), ('I', 'Investigation against officials'), ('PC', 'Participation in conference/meeting'), ('PP', 'Participation in public protest/rally')], default='?', help_text='Select maximum 3 items with <i>Ctrl+Click</i>', max_choices=3, max_length=15),
        ),
    ]
| gpl-3.0 |
eenchev/idea-note-taking-app | env/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py | 33 | 2655 | # postgresql/pypostgresql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pypostgresql
:name: py-postgresql
:dbapi: pypostgresql
:connectstring: postgresql+pypostgresql://user:password@host:port/dbname\
[?key=value&key=value...]
:url: http://python.projects.pgfoundry.org/
"""
from ... import util
from ... import types as sqltypes
from .base import PGDialect, PGExecutionContext
from ... import processors
class PGNumeric(sqltypes.Numeric):
    """Numeric type adapter for the py-postgresql DBAPI.

    Binds numeric parameters as strings; converts results to float only
    when the type was configured with ``asdecimal=False``.
    """

    def bind_processor(self, dialect):
        # Bind parameters are sent to the driver as strings.
        return processors.to_str

    def result_processor(self, dialect, coltype):
        # Decimal results need no conversion; otherwise coerce to float.
        return None if self.asdecimal else processors.to_float
class PGExecutionContext_pypostgresql(PGExecutionContext):
    # No py-postgresql-specific execution behavior; the subclass exists so
    # the dialect below can designate its own execution context class.
    pass
class PGDialect_pypostgresql(PGDialect):
    """PostgreSQL dialect backed by the py-postgresql DBAPI driver."""

    driver = 'pypostgresql'

    supports_unicode_statements = True
    supports_unicode_binds = True
    description_encoding = None
    default_paramstyle = 'pyformat'

    # requires trunk version to support sane rowcounts
    # TODO: use dbapi version information to set this flag appropriately
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False

    execution_ctx_cls = PGExecutionContext_pypostgresql
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: PGNumeric,

            # prevents PGNumeric from being used
            sqltypes.Float: sqltypes.Float,
        }
    )

    @classmethod
    def dbapi(cls):
        # Imported lazily so the module loads even without the driver.
        from postgresql.driver import dbapi20
        return dbapi20

    # DBAPI exception class names translated into SQLAlchemy's hierarchy.
    _DBAPI_ERROR_NAMES = [
        "Error",
        "InterfaceError", "DatabaseError", "DataError",
        "OperationalError", "IntegrityError", "InternalError",
        "ProgrammingError", "NotSupportedError"
    ]

    @util.memoized_property
    def dbapi_exception_translation_map(self):
        # Maps the driver's exception class __name__ to the canonical DBAPI
        # name; empty when no DBAPI module is attached.
        if self.dbapi is None:
            return {}

        return dict(
            (getattr(self.dbapi, name).__name__, name)
            for name in self._DBAPI_ERROR_NAMES
        )

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        else:
            # Default PostgreSQL port.
            opts['port'] = 5432
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e, connection, cursor):
        return "connection is closed" in str(e)
# Entry point consumed by SQLAlchemy's dialect loader.
dialect = PGDialect_pypostgresql
| mit |
linvictor88/vse-lbaas-driver | quantum/plugins/brocade/QuantumPlugin.py | 1 | 19982 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Shiv Haris (sharis@brocade.com)
# Varma Bhupatiraju (vbhupati@#brocade.com)
#
# (Some parts adapted from LinuxBridge Plugin)
# TODO(shiv) need support for security groups
"""Implentation of Brocade Quantum Plugin."""
from oslo.config import cfg
from quantum.agent import securitygroups_rpc as sg_rpc
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.api.rpc.agentnotifiers import l3_rpc_agent_api
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.common import utils
from quantum.db import agents_db
from quantum.db import agentschedulers_db
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import extraroute_db
from quantum.db import l3_rpc_base
from quantum.db import securitygroups_rpc_base as sg_db_rpc
from quantum.extensions import portbindings
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import context
from quantum.openstack.common import importutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.openstack.common.rpc import proxy
from quantum.plugins.brocade.db import models as brocade_db
from quantum.plugins.brocade import vlanbm as vbm
from quantum import scheduler
LOG = logging.getLogger(__name__)

# Version reported by BrocadePluginV2.get_plugin_version().
PLUGIN_VERSION = 0.88
# Prefix used to present agent-owned devices as network-owned ports.
AGENT_OWNER_PREFIX = "network:"
# Dotted path of the NETCONF (NOS) driver loaded in brocade_init().
NOS_DRIVER = 'quantum.plugins.brocade.nos.nosdriver.NOSdriver'

# oslo.config options for the [SWITCH] section: management address and
# credentials of the Brocade VDX switch.
SWITCH_OPTS = [cfg.StrOpt('address', default=''),
               cfg.StrOpt('username', default=''),
               cfg.StrOpt('password', default='', secret=True),
               cfg.StrOpt('ostype', default='NOS')
               ]

# [PHYSICAL_INTERFACE] section: host NIC bridged to the fabric.
PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0')
                           ]

cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH")
cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE")
cfg.CONF.register_opts(scheduler.AGENTS_SCHEDULER_OPTS)
class BridgeRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                         l3_rpc_base.L3RpcCallbackMixin,
                         sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
    """Agent callback."""

    RPC_API_VERSION = '1.1'
    # Device names start with "tap"
    # history
    #   1.1 Support Security Group RPC
    # Length of the "tap" prefix stripped from device names to recover
    # the port identifier.
    TAP_PREFIX_LEN = 3

    def create_rpc_dispatcher(self):
        """Get the rpc dispatcher for this manager.

        If a manager would like to set an rpc API version, or support more than
        one class as the target of rpc messages, override this method.
        """
        return q_rpc.PluginRpcDispatcher([self,
                                          agents_db.AgentExtRpcCallback()])

    @classmethod
    def get_port_from_device(cls, device):
        """Get port from the brocade specific db."""
        # TODO(shh) context is not being passed as
        # an argument to this function;
        #
        # need to be fixed in:
        # file: quantum/db/securtygroups_rpc_base.py
        # function: securitygroup_rules_for_devices()
        # which needs to pass context to us

        # Doing what other plugins are doing
        session = db.get_session()
        port = brocade_db.get_port_from_device(
            session, device[cls.TAP_PREFIX_LEN:])

        # TODO(shiv): need to extend the db model to include device owners
        # make it appears that the device owner is of type network
        if port:
            port['device'] = device
            port['device_owner'] = AGENT_OWNER_PREFIX
            port['binding:vif_type'] = 'bridge'
        return port

    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = brocade_db.get_port(rpc_context, device[self.TAP_PREFIX_LEN:])
        if port:
            entry = {'device': device,
                     'vlan_id': port.vlan_id,
                     'network_id': port.network_id,
                     'port_id': port.port_id,
                     'physical_network': port.physical_interface,
                     'admin_state_up': port.admin_state_up
                     }
        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry

    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent."""
        device = kwargs.get('device')
        port = self.get_port_from_device(device)
        if port:
            entry = {'device': device,
                     'exists': True}
            # Set port status to DOWN
            port_id = port['port_id']
            brocade_db.update_port_state(rpc_context, port_id, False)
        else:
            entry = {'device': device,
                     'exists': False}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
class AgentNotifierApi(proxy.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    """Agent side of the linux bridge rpc API.

    API version history:
        1.0 - Initial version.
    """

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.topic = topic
        # Pre-computed fanout topics for the two notifications this proxy
        # emits to all listening agents.
        self.topic_network_delete = topics.get_topic_name(topic,
                                                          topics.NETWORK,
                                                          topics.DELETE)
        self.topic_port_update = topics.get_topic_name(topic,
                                                       topics.PORT,
                                                       topics.UPDATE)

    def network_delete(self, context, network_id):
        # Broadcast network deletion to every agent.
        self.fanout_cast(context,
                         self.make_msg('network_delete',
                                       network_id=network_id),
                         topic=self.topic_network_delete)

    def port_update(self, context, port, physical_network, vlan_id):
        # Broadcast a port change, including its physical binding info.
        self.fanout_cast(context,
                         self.make_msg('port_update',
                                       port=port,
                                       physical_network=physical_network,
                                       vlan_id=vlan_id),
                         topic=self.topic_port_update)
class BrocadePluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                      extraroute_db.ExtraRoute_db_mixin,
                      sg_db_rpc.SecurityGroupServerRpcMixin,
                      agentschedulers_db.AgentSchedulerDbMixin):
    """BrocadePluginV2 is a Quantum plugin.

    Provides L2 Virtual Network functionality using VDX. Upper
    layer driver class that interfaces to NETCONF layer below.
    """

    def __init__(self):
        """Initialize Brocade Plugin.

        Specify switch address and db configuration.
        """
        self.supported_extension_aliases = ["binding", "security-group",
                                            "router", "extraroute",
                                            "agent", "agent_scheduler"]

        self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE.
                                   physical_interface)
        db.configure_db()
        self.ctxt = context.get_admin_context()
        self.ctxt.session = db.get_session()
        # VLAN allocation bitmap shared across networks.
        self._vlan_bitmap = vbm.VlanBitmap(self.ctxt)
        self._setup_rpc()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver)
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)
        self.brocade_init()

    def brocade_init(self):
        """Brocade specific initialization."""
        self._switch = {'address': cfg.CONF.SWITCH.address,
                        'username': cfg.CONF.SWITCH.username,
                        'password': cfg.CONF.SWITCH.password
                        }
        self._driver = importutils.import_object(NOS_DRIVER)

    def _setup_rpc(self):
        # RPC support
        self.topic = topics.PLUGIN
        self.rpc_context = context.RequestContext('quantum', 'quantum',
                                                  is_admin=False)
        self.conn = rpc.create_connection(new=True)
        self.callbacks = BridgeRpcCallbacks()
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotify

    def create_network(self, context, network):
        """Create network.

        This call to create network translates to creation of port-profile on
        the physical switch.
        """
        with context.session.begin(subtransactions=True):
            net = super(BrocadePluginV2, self).create_network(context, network)
            net_uuid = net['id']
            vlan_id = self._vlan_bitmap.get_next_vlan(None)
            switch = self._switch
            try:
                self._driver.create_network(switch['address'],
                                            switch['username'],
                                            switch['password'],
                                            vlan_id)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                LOG.debug(_("Returning the allocated vlan (%d) to the pool"),
                          vlan_id)
                # Return the VLAN to the pool before failing the request.
                self._vlan_bitmap.release_vlan(int(vlan_id))
                raise Exception("Brocade plugin raised exception, check logs")

            brocade_db.create_network(context, net_uuid, vlan_id)
            self._process_l3_create(context, network['network'], net['id'])
            self._extend_network_dict_l3(context, net)

        LOG.info(_("Allocated vlan (%d) from the pool"), vlan_id)
        return net

    def delete_network(self, context, net_id):
        """Delete network.

        This call to delete the network translates to removing the
        port-profile on the physical switch.
        """
        with context.session.begin(subtransactions=True):
            result = super(BrocadePluginV2, self).delete_network(context,
                                                                 net_id)
            # we must delete all ports in db first (foreign key constraint)
            # there is no need to delete port in the driver (its a no-op)
            # (actually: note there is no such call to the driver)
            bports = brocade_db.get_ports(context, net_id)
            for bport in bports:
                brocade_db.delete_port(context, bport['port_id'])

            # find the vlan for this network
            net = brocade_db.get_network(context, net_id)
            vlan_id = net['vlan']

            # Tell hw to do remove PP
            # NOTE(review): create_network passes vlan_id to the driver but
            # this call passes net_id -- confirm against the NOS driver's
            # expected argument before relying on hardware cleanup here.
            switch = self._switch
            try:
                self._driver.delete_network(switch['address'],
                                            switch['username'],
                                            switch['password'],
                                            net_id)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                raise Exception("Brocade plugin raised exception, check logs")

            # now ok to delete the network
            brocade_db.delete_network(context, net_id)

        # relinquish vlan in bitmap
        self._vlan_bitmap.release_vlan(int(vlan_id))
        return result

    def update_network(self, context, id, network):
        session = context.session
        with session.begin(subtransactions=True):
            net = super(BrocadePluginV2, self).update_network(context, id,
                                                              network)
            self._process_l3_update(context, network['network'], id)
            self._extend_network_dict_l3(context, net)
        return net

    def get_network(self, context, id, fields=None):
        session = context.session
        with session.begin(subtransactions=True):
            net = super(BrocadePluginV2, self).get_network(context,
                                                           id, None)
            self._extend_network_dict_l3(context, net)

        return self._fields(net, fields)

    def get_networks(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None, page_reverse=False):
        session = context.session
        with session.begin(subtransactions=True):
            nets = super(BrocadePluginV2,
                         self).get_networks(context, filters, None, sorts,
                                            limit, marker, page_reverse)
            for net in nets:
                self._extend_network_dict_l3(context, net)

        return [self._fields(net, fields) for net in nets]

    def create_port(self, context, port):
        """Create logical port on the switch."""
        tenant_id = port['port']['tenant_id']
        network_id = port['port']['network_id']
        admin_state_up = port['port']['admin_state_up']

        physical_interface = self.physical_interface

        with context.session.begin(subtransactions=True):
            bnet = brocade_db.get_network(context, network_id)
            vlan_id = bnet['vlan']

            quantum_port = super(BrocadePluginV2, self).create_port(context,
                                                                    port)
            interface_mac = quantum_port['mac_address']
            port_id = quantum_port['id']

            switch = self._switch

            # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx
            mac = self.mac_reformat_62to34(interface_mac)
            try:
                self._driver.associate_mac_to_network(switch['address'],
                                                      switch['username'],
                                                      switch['password'],
                                                      vlan_id,
                                                      mac)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                raise Exception("Brocade plugin raised exception, check logs")

            # save to brocade persistent db
            brocade_db.create_port(context, port_id, network_id,
                                   physical_interface,
                                   vlan_id, tenant_id, admin_state_up)

        # apply any extensions
        return self._extend_port_dict_binding(context, quantum_port)

    def delete_port(self, context, port_id):
        with context.session.begin(subtransactions=True):
            super(BrocadePluginV2, self).delete_port(context, port_id)
            brocade_db.delete_port(context, port_id)

    def update_port(self, context, port_id, port):
        original_port = self.get_port(context, port_id)
        session = context.session
        port_updated = False
        with session.begin(subtransactions=True):
            # delete the port binding and read it with the new rules
            if ext_sg.SECURITYGROUPS in port['port']:
                port['port'][ext_sg.SECURITYGROUPS] = (
                    self._get_security_groups_on_port(context, port))
                self._delete_port_security_group_bindings(context, port_id)
                # process_port_create_security_group also needs port id
                port['port']['id'] = port_id
                self._process_port_create_security_group(
                    context,
                    port['port'],
                    port['port'][ext_sg.SECURITYGROUPS])
                port_updated = True
            port = super(BrocadePluginV2, self).update_port(
                context, port_id, port)

        if original_port['admin_state_up'] != port['admin_state_up']:
            port_updated = True

        # Notify agents when fixed IPs or security-group membership changed.
        if (original_port['fixed_ips'] != port['fixed_ips'] or
            not utils.compare_elements(
                original_port.get(ext_sg.SECURITYGROUPS),
                port.get(ext_sg.SECURITYGROUPS))):
            self.notifier.security_groups_member_updated(
                context, port.get(ext_sg.SECURITYGROUPS))

        if port_updated:
            self._notify_port_updated(context, port)

        return self._extend_port_dict_binding(context, port)

    def get_port(self, context, port_id, fields=None):
        with context.session.begin(subtransactions=True):
            port = super(BrocadePluginV2, self).get_port(
                context, port_id, fields)
            self._extend_port_dict_binding(context, port)

        return self._fields(port, fields)

    def get_ports(self, context, filters=None, fields=None):
        res_ports = []
        with context.session.begin(subtransactions=True):
            ports = super(BrocadePluginV2, self).get_ports(context,
                                                           filters,
                                                           fields)
            for port in ports:
                self._extend_port_dict_binding(context, port)
                res_ports.append(self._fields(port, fields))

        return res_ports

    def _notify_port_updated(self, context, port):
        # Push the port change to agents along with its physical binding.
        port_id = port['id']
        bport = brocade_db.get_port(context, port_id)
        self.notifier.port_update(context, port,
                                  bport.physical_interface,
                                  bport.vlan_id)

    def _extend_port_dict_binding(self, context, port):
        # Advertise the bridge VIF type and port-filter capability.
        port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE
        port[portbindings.CAPABILITIES] = {
            portbindings.CAP_PORT_FILTER:
            'security-group' in self.supported_extension_aliases}
        return port

    def get_plugin_version(self):
        """Get version number of the plugin."""
        return PLUGIN_VERSION

    @staticmethod
    def mac_reformat_62to34(interface_mac):
        """Transform MAC address format.

        Transforms from 6 groups of 2 hexadecimal numbers delimited by ":"
        to 3 groups of 4 hexadecimals numbers delimited by ".".

        :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx
        :type interface_mac: string
        :returns: MAC address in the format xxxx.xxxx.xxxx
        :rtype: string
        """
        mac = interface_mac.replace(":", "")
        mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12]
        return mac
| apache-2.0 |
sofianehaddad/ot-svn | python/test/t_ChiSquareFactory_std.py | 2 | 1625 | #! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
distribution = ChiSquare(0.5)
size = 10000
sample = distribution.getSample(size)
factory = ChiSquareFactory()
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
distribution = ChiSquare(1.0)
sample = distribution.getSample(size)
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
distribution = ChiSquare(2.5)
sample = distribution.getSample(size)
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
estimatedDistribution = factory.build()
print "Default distribution=", estimatedDistribution
estimatedDistribution = factory.build(
distribution.getParametersCollection())
print "Distribution from parameters=", estimatedDistribution
estimatedChiSquare = factory.buildAsChiSquare(sample)
print "ChiSquare =", distribution
print "Estimated chiSquare=", estimatedChiSquare
estimatedChiSquare = factory.buildAsChiSquare()
print "Default chiSquare=", estimatedChiSquare
estimatedChiSquare = factory.buildAsChiSquare(
distribution.getParametersCollection())
print "ChiSquare from parameters=", estimatedChiSquare
except:
import sys
print "t_ChiSquareFactory_std.py", sys.exc_type, sys.exc_value
| mit |
rahuldan/sympy | sympy/physics/tests/test_qho_1d.py | 88 | 1552 | from sympy import exp, integrate, oo, Rational, pi, S, simplify, sqrt, Symbol
from sympy.core.compatibility import range
from sympy.abc import omega, m, x
from sympy.physics.qho_1d import psi_n, E_n, coherent_state
from sympy.physics.quantum.constants import hbar
# Characteristic inverse-length-squared parameter of the QHO
# wavefunctions: nu = m*omega/hbar.
nu = m * omega / hbar
def test_wavefunction():
    """Compare psi_n against explicit closed forms for n = 0..3."""
    quarter = S(1) / 4
    gauss = exp(-nu * x**2 / 2)
    norm0 = (nu / pi) ** quarter
    expected = [
        norm0 * gauss,
        norm0 * sqrt(2 * nu) * x * gauss,
        norm0 * (2 * nu * x**2 - 1) / sqrt(2) * gauss,
        norm0 * sqrt(nu / 3) * (2 * nu * x**3 - 3 * x) * gauss,
    ]
    for level, closed_form in enumerate(expected):
        assert simplify(psi_n(level, x, m, omega) - closed_form) == 0
def test_norm(n=1):
    """Each eigenstate up to level *n* is normalized on the real line."""
    for level in range(n + 1):
        wavefunc = psi_n(level, x, 1, 1)
        assert integrate(wavefunc**2, (x, -oo, oo)) == 1
def test_orthogonality(n=1):
    """Distinct eigenstates up to level *n* are mutually orthogonal."""
    for lo in range(n + 1):
        for hi in range(lo + 1, n + 1):
            overlap = integrate(
                psi_n(lo, x, 1, 1) * psi_n(hi, x, 1, 1), (x, -oo, oo))
            assert overlap == 0
def test_energies(n=1):
    """E_n matches hbar*omega*(n + 1/2) for every level up to *n*."""
    for level in range(n + 1):
        expected = hbar * omega * (level + Rational(1, 2))
        assert E_n(level, omega) == expected
def test_coherent_state(n=10):
    """Coherent state is an eigenstate of the annihilation operator.

    a|alpha> = alpha|alpha> is equivalent to the coefficient recurrence
    sqrt(k+1)*c_{k+1} = alpha*c_k, which is checked for every level up
    to *n*.
    """
    alpha = Symbol("alpha")
    # FIX: the loop previously tested the fixed level ``n`` on every
    # iteration (loop variable unused); now each level ``k`` is checked.
    for k in range(n + 1):
        assert simplify(sqrt(k + 1) * coherent_state(k + 1, alpha)) == \
            simplify(alpha * coherent_state(k, alpha))
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.