repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
admcrae/tensorflow | tensorflow/python/framework/device.py | 150 | 9078 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
class DeviceSpec(object):
  """Represents a (possibly partial) specification for a TensorFlow device.

  `DeviceSpec`s are used throughout TensorFlow to describe where state is stored
  and computations occur. Using `DeviceSpec` allows you to parse device spec
  strings to verify their validity, merge them or compose them programmatically.

  Example:

  ```python
  # Place the operations on device "GPU:0" in the "ps" job.
  device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
  with tf.device(device_spec):
    # Both my_var and squared_var will be placed on /job:ps/device:GPU:0.
    my_var = tf.Variable(..., name="my_variable")
    squared_var = tf.square(my_var)
  ```

  If a `DeviceSpec` is partially specified, it will be merged with other
  `DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`
  components defined in inner scopes take precedence over those defined in
  outer scopes.

  ```python
  with tf.device(DeviceSpec(job="train")):
    with tf.device(DeviceSpec(job="ps", device_type="GPU", device_index=0)):
      # Nodes created here will be assigned to /job:ps/device:GPU:0.
    with tf.device(DeviceSpec(device_type="GPU", device_index=1)):
      # Nodes created here will be assigned to /job:train/device:GPU:1.
  ```

  A `DeviceSpec` consists of 5 components -- each of
  which is optionally specified:

  * Job: The job name.
  * Replica: The replica index.
  * Task: The task index.
  * Device type: The device type string (e.g. "CPU" or "GPU").
  * Device index: The device index.
  """

  def __init__(self, job=None, replica=None, task=None, device_type=None,
               device_index=None):
    """Create a new `DeviceSpec` object.

    Args:
      job: string.  Optional job name.
      replica: int.  Optional replica index.
      task: int.  Optional task index.
      device_type: Optional device type string (e.g. "CPU" or "GPU")
      device_index: int.  Optional device index.  If left
        unspecified, device represents 'any' device_index.
    """
    # job/replica/task assignments go through the property setters below,
    # which coerce the values to str/int.
    self.job = job
    self.replica = replica
    self.task = task
    if device_type == "cpu" or device_type == "gpu":
      # For backwards compatibility only, we support lowercase variants of
      # cpu and gpu but turn them into uppercase here.
      # NOTE: only the exact lowercase strings are normalized; mixed case
      # (e.g. "Gpu") is stored verbatim.
      self.device_type = device_type.upper()
    else:
      self.device_type = device_type
    self.device_index = device_index

  def _clear(self):
    # Resets every component.  Assigns the backing fields directly,
    # bypassing the str()/int() coercion in the property setters.
    self._job = None
    self._replica = None
    self._task = None
    self.device_type = None
    self.device_index = None

  @property
  def job(self):
    return self._job

  @job.setter
  def job(self, job):
    # Coerce any non-None value to str.
    if job is not None:
      self._job = str(job)
    else:
      self._job = None

  @property
  def replica(self):
    return self._replica

  @replica.setter
  def replica(self, replica):
    # Coerce any non-None value to int (accepts numeric strings).
    if replica is not None:
      self._replica = int(replica)
    else:
      self._replica = None

  @property
  def task(self):
    return self._task

  @task.setter
  def task(self, task):
    # Coerce any non-None value to int (accepts numeric strings).
    if task is not None:
      self._task = int(task)
    else:
      self._task = None

  def parse_from_string(self, spec):
    """Parse a `DeviceSpec` name into its components.

    Any previously-set components of this object are cleared first, so this
    fully re-initializes the spec from the string.

    Args:
      spec: a string of the form
       /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
       /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
      as cpu and gpu are mutually exclusive.
      All entries are optional.

    Returns:
      The `DeviceSpec` (self, to allow chaining).

    Raises:
      ValueError: if the spec was not valid.
    """
    self._clear()
    # "/job:ps/device:GPU:0" -> [[''], ['job', 'ps'], ['device', 'GPU', '0']]
    splits = [x.split(":") for x in spec.split("/")]
    for y in splits:
      ly = len(y)
      if y:
        # NOTE(touts): we use the property getters here.
        if ly == 2 and y[0] == "job":
          self.job = y[1]
        elif ly == 2 and y[0] == "replica":
          self.replica = y[1]
        elif ly == 2 and y[0] == "task":
          self.task = y[1]
        elif ((ly == 1 or ly == 2) and
              ((y[0].upper() == "GPU") or (y[0].upper() == "CPU"))):
          # Legacy short form, e.g. "/gpu:0" or "/GPU" (no "device:" prefix).
          if self.device_type is not None:
            raise ValueError("Cannot specify multiple device types: %s" % spec)
          self.device_type = y[0].upper()
          if ly == 2 and y[1] != "*":
            # "*" means "any device index" and leaves device_index as None.
            self.device_index = int(y[1])
        elif ly == 3 and y[0] == "device":
          # Canonical form, e.g. "/device:GPU:0".
          if self.device_type is not None:
            raise ValueError("Cannot specify multiple device types: %s" % spec)
          self.device_type = y[1]
          if y[2] != "*":
            self.device_index = int(y[2])
        elif ly and y[0] != "":  # pylint: disable=g-explicit-bool-comparison
          raise ValueError("Unknown attribute: '%s' in '%s'" % (y[0], spec))
    return self

  def merge_from(self, dev):
    """Merge the properties of "dev" into this `DeviceSpec`.

    Components set on `dev` overwrite the corresponding components here;
    components that are None on `dev` are left untouched.

    Args:
      dev: a `DeviceSpec`.
    """
    if dev.job is not None:
      self.job = dev.job
    if dev.replica is not None:
      self.replica = dev.replica
    if dev.task is not None:
      self.task = dev.task
    if dev.device_type is not None:
      self.device_type = dev.device_type
    if dev.device_index is not None:
      self.device_index = dev.device_index

  def to_string(self):
    """Return a string representation of this `DeviceSpec`.

    Unset components are omitted entirely; an unset device_index with a set
    device_type is rendered as "*".

    Returns:
      a string of the form
      /job:<name>/replica:<id>/task:<id>/device:<device_type>:<id>.
    """
    dev = ""
    if self.job is not None:
      dev += "/job:" + self.job
    if self.replica is not None:
      dev += "/replica:" + str(self.replica)
    if self.task is not None:
      dev += "/task:" + str(self.task)
    if self.device_type is not None:
      device_index_string = "*"
      if self.device_index is not None:
        device_index_string = str(self.device_index)
      dev += "/device:%s:%s" % (self.device_type, device_index_string)
    return dev

  @staticmethod
  def from_string(spec):
    """Construct a `DeviceSpec` from a string.

    Args:
      spec: a string of the form
       /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
       /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
      as cpu and gpu are mutually exclusive.
      All entries are optional.

    Returns:
      A DeviceSpec.
    """
    return DeviceSpec().parse_from_string(spec)
def check_valid(spec):
  """Check that a device spec string is valid.

  Args:
    spec: a string.

  Raises:
    ValueError: if the spec is not a valid device specification.
  """
  # Parsing performs all validation; the resulting object is discarded.
  DeviceSpec.from_string(spec)
def canonical_name(device):
  """Returns a canonical name for the given `DeviceSpec` or device name.

  None maps to the empty string; strings are parsed and re-rendered so that
  equivalent specs compare equal.
  """
  if device is None:
    return ""
  if not isinstance(device, DeviceSpec):
    device = DeviceSpec.from_string(device)
  return device.to_string()
def merge_device(spec):
  """Returns a device function that merges devices specifications.

  This can be used to merge partial specifications of devices. The
  innermost setting for a device field takes precedence. For example:

    with tf.device(merge_device("/device:GPU:0"))
      # Nodes created here have device "/device:GPU:0"
      with tf.device(merge_device("/job:worker")):
        # Nodes created here have device "/job:worker/device:GPU:0"
        with tf.device(merge_device("/device:CPU:0")):
          # Nodes created here have device "/job:worker/device:CPU:0"
          with tf.device(merge_device("/job:ps")):
            # Nodes created here have device "/job:ps/device:CPU:0"

  Args:
    spec: A `DeviceSpec` or a device spec string (partially) describing the
      device that should be used for all nodes created in the scope of
      the returned device function's with block.

  Returns:
    A device function with the above-described behavior.

  Raises:
    ValueError: if the spec was not valid.
  """
  if not isinstance(spec, DeviceSpec):
    spec = DeviceSpec.from_string(spec or "")

  def _device_function(node_def):
    # Components already assigned to the node win over the outer `spec`.
    merged = copy.copy(spec)
    merged.merge_from(DeviceSpec.from_string(node_def.device or ""))
    return merged

  return _device_function
| apache-2.0 |
cloudburst/libheap | libheap/pydbg/pygdbpython.py | 1 | 4894 | import sys
from functools import wraps
from libheap.frontend.printutils import print_error
try:
import gdb
except ImportError:
print("Not running inside of GDB, exiting...")
sys.exit()
def gdb_is_running(f):
    """Decorator: run ``f`` only when gdb has a selected thread.

    When no thread is selected (i.e. gdb is not debugging anything), an
    error is printed and the wrapped call returns None.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if gdb.selected_thread() is None:
            print_error("GDB is not running.")
            return None
        return f(*args, **kwargs)
    return wrapper
class pygdbpython:
    """Thin wrapper around gdb's python API used by libheap's frontend.

    Methods decorated with @gdb_is_running silently no-op (printing an
    error) when gdb has no selected thread.
    """

    def __init__(self):
        # Cached gdb.Inferior; resolved lazily by get_inferior().
        self.inferior = None

    @gdb_is_running
    def execute(self, cmd, to_string=True):
        """Run a gdb command; returns its output when to_string is True."""
        return gdb.execute(cmd, to_string=to_string)

    def format_address(self, value):
        """Helper for printing gdb.Value on both python 2 and 3.

        Returns the address as a plain int.
        """
        try:
            ret = int(value)
        except gdb.error:
            # python2 error: Cannot convert value to int.
            # Fall back to parsing the textual form, e.g.
            # "0x7ffff7dd1b20 <main_arena>" -> 0x7ffff7dd1b20.
            ret = int(str(value).split(' ')[0], 16)

        return ret

    @gdb_is_running
    def get_heap_address(self, mp=None):
        """Read heap address from glibc's mp_ structure if available,
        otherwise fall back to /proc/<pid>/task/<tid>/maps which is
        unreliable.

        Returns:
            (start, end) tuple; either element may be None.
        """
        start, end = None, None

        if mp is not None:
            from libheap.ptmalloc.malloc_par import malloc_par
            if isinstance(mp, malloc_par):
                start = mp.sbrk_base
            else:
                print_error("Please specify a valid malloc_par variable")

            # XXX: add end from arena(s).system_mem ?
        else:
            pid, task_id, thread_id = gdb.selected_thread().ptid
            maps_file = "/proc/%d/task/%d/maps"
            # Use a context manager so the maps file is always closed
            # (the previous version leaked the file handle).
            with open(maps_file % (pid, task_id)) as fd:
                maps_data = fd.readlines()

            for line in maps_data:
                if any(x.strip() == '[heap]' for x in line.split(' ')):
                    heap_range = line.split(' ')[0]
                    start, end = [int(h, 16) for h in heap_range.split('-')]
                    break

        return start, end

    @gdb_is_running
    def get_arch(self):
        """Return a one-element list holding gdb's object file format string
        (e.g. ['elf64-x86-64']), or an empty list if unavailable."""
        cmd = self.execute("maintenance info sections ?")
        return cmd.strip().split()[-1:]

    def get_inferior(self):
        """Return (and cache) the first gdb inferior, or -1 on failure."""
        try:
            if self.inferior is None:
                if len(gdb.inferiors()) == 0:
                    print_error("No gdb inferior could be found.")
                    return -1
                else:
                    self.inferior = gdb.inferiors()[0]
                    return self.inferior
            else:
                return self.inferior
        except AttributeError:
            print_error("This gdb's python support is too old.")
            sys.exit()

    @gdb_is_running
    def get_size_sz(self):
        """Return glibc's SIZE_SZ (word size): 8 on elf64, 4 on elf32.

        Returns 0 and prints an error when the format cannot be determined.
        """
        try:
            _machine = self.get_arch()[0]
        except (IndexError, TypeError):
            # IndexError: get_arch() returned an empty list.
            # TypeError: gdb is not running, so get_arch() returned None.
            # (The previous version printed the failure message twice on
            # these paths; it is now printed once, below.)
            _machine = ""

        if "elf64" in _machine:
            return 8
        if "elf32" in _machine:
            return 4
        print_error("Retrieving SIZE_SZ failed.")
        return 0

    @gdb_is_running
    def read_memory(self, address, length):
        """Read `length` bytes of inferior memory at `address`."""
        if self.inferior is None:
            self.inferior = self.get_inferior()

        return self.inferior.read_memory(address, length)

    @gdb_is_running
    def read_variable(self, variable=None):
        """Read a variable from the selected frame; None if not found."""
        if variable is None:
            print_error("Please specify a variable to read")
            return None

        try:
            return gdb.selected_frame().read_var(variable)
        except RuntimeError:
            # Sometimes the frame is not selected on the first attempt;
            # retrying once works around it.
            try:
                return gdb.selected_frame().read_var(variable)
            except RuntimeError:
                # variable was not found
                return None
        except ValueError:
            # variable was not found
            return None

    @gdb_is_running
    def string_to_argv(self, arg=None):
        """Split a gdb command argument string into an argv list."""
        if arg is not None:
            return gdb.string_to_argv(arg)

    @gdb_is_running
    def write_memory(self, address, buf, length=None):
        """Write `buf` (optionally truncated to `length`) to inferior memory."""
        if self.inferior is None:
            self.inferior = self.get_inferior()

        try:
            if length is None:
                self.inferior.write_memory(address, buf)
            else:
                self.inferior.write_memory(address, buf, length)
        except MemoryError:
            print_error("GDB inferior write_memory error")
| mit |
youdonghai/intellij-community | python/lib/Lib/site-packages/django/contrib/localflavor/us/us_states.py | 69 | 7577 | """
A mapping of state misspellings/abbreviations to normalized
abbreviations, and alphabetical lists of US states, territories,
military mail regions and non-US states to which the US provides
postal service.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
# The 48 contiguous states, plus the District of Columbia.
CONTIGUOUS_STATES = (
('AL', 'Alabama'),
('AZ', 'Arizona'),
('AR', 'Arkansas'),
('CA', 'California'),
('CO', 'Colorado'),
('CT', 'Connecticut'),
('DE', 'Delaware'),
('DC', 'District of Columbia'),
('FL', 'Florida'),
('GA', 'Georgia'),
('ID', 'Idaho'),
('IL', 'Illinois'),
('IN', 'Indiana'),
('IA', 'Iowa'),
('KS', 'Kansas'),
('KY', 'Kentucky'),
('LA', 'Louisiana'),
('ME', 'Maine'),
('MD', 'Maryland'),
('MA', 'Massachusetts'),
('MI', 'Michigan'),
('MN', 'Minnesota'),
('MS', 'Mississippi'),
('MO', 'Missouri'),
('MT', 'Montana'),
('NE', 'Nebraska'),
('NV', 'Nevada'),
('NH', 'New Hampshire'),
('NJ', 'New Jersey'),
('NM', 'New Mexico'),
('NY', 'New York'),
('NC', 'North Carolina'),
('ND', 'North Dakota'),
('OH', 'Ohio'),
('OK', 'Oklahoma'),
('OR', 'Oregon'),
('PA', 'Pennsylvania'),
('RI', 'Rhode Island'),
('SC', 'South Carolina'),
('SD', 'South Dakota'),
('TN', 'Tennessee'),
('TX', 'Texas'),
('UT', 'Utah'),
('VT', 'Vermont'),
('VA', 'Virginia'),
('WA', 'Washington'),
('WV', 'West Virginia'),
('WI', 'Wisconsin'),
('WY', 'Wyoming'),
)
# All 50 states, plus the District of Columbia.
US_STATES = (
('AL', 'Alabama'),
('AK', 'Alaska'),
('AZ', 'Arizona'),
('AR', 'Arkansas'),
('CA', 'California'),
('CO', 'Colorado'),
('CT', 'Connecticut'),
('DE', 'Delaware'),
('DC', 'District of Columbia'),
('FL', 'Florida'),
('GA', 'Georgia'),
('HI', 'Hawaii'),
('ID', 'Idaho'),
('IL', 'Illinois'),
('IN', 'Indiana'),
('IA', 'Iowa'),
('KS', 'Kansas'),
('KY', 'Kentucky'),
('LA', 'Louisiana'),
('ME', 'Maine'),
('MD', 'Maryland'),
('MA', 'Massachusetts'),
('MI', 'Michigan'),
('MN', 'Minnesota'),
('MS', 'Mississippi'),
('MO', 'Missouri'),
('MT', 'Montana'),
('NE', 'Nebraska'),
('NV', 'Nevada'),
('NH', 'New Hampshire'),
('NJ', 'New Jersey'),
('NM', 'New Mexico'),
('NY', 'New York'),
('NC', 'North Carolina'),
('ND', 'North Dakota'),
('OH', 'Ohio'),
('OK', 'Oklahoma'),
('OR', 'Oregon'),
('PA', 'Pennsylvania'),
('RI', 'Rhode Island'),
('SC', 'South Carolina'),
('SD', 'South Dakota'),
('TN', 'Tennessee'),
('TX', 'Texas'),
('UT', 'Utah'),
('VT', 'Vermont'),
('VA', 'Virginia'),
('WA', 'Washington'),
('WV', 'West Virginia'),
('WI', 'Wisconsin'),
('WY', 'Wyoming'),
)
# Non-state territories.
US_TERRITORIES = (
    ('AS', 'American Samoa'),
    ('GU', 'Guam'),
    ('MP', 'Northern Mariana Islands'),
    ('PR', 'Puerto Rico'),
    ('VI', 'Virgin Islands'),
)

# Military postal "states". Note that 'AE' actually encompasses
# Europe, Canada, Africa and the Middle East.
ARMED_FORCES_STATES = (
    ('AA', 'Armed Forces Americas'),
    ('AE', 'Armed Forces Europe'),
    ('AP', 'Armed Forces Pacific'),
)

# Non-US locations serviced by USPS (under Compact of Free
# Association).
COFA_STATES = (
    ('FM', 'Federated States of Micronesia'),
    ('MH', 'Marshall Islands'),
    ('PW', 'Palau'),
)

# Obsolete abbreviations (no longer US territories/USPS service, or
# code changed). Kept for reference only; not included in the choice
# tuples below.
OBSOLETE_STATES = (
    ('CM', 'Commonwealth of the Northern Mariana Islands'),  # Is now 'MP'
    ('CZ', 'Panama Canal Zone'),  # Reverted to Panama 1979
    ('PI', 'Philippine Islands'),  # Philippine independence 1946
    ('TT', 'Trust Territory of the Pacific Islands'),  # Became the independent COFA states + Northern Mariana Islands 1979-1994
)

# All US states and territories plus DC and military mail.
STATE_CHOICES = US_STATES + US_TERRITORIES + ARMED_FORCES_STATES

# All US Postal Service locations.
USPS_CHOICES = US_STATES + US_TERRITORIES + ARMED_FORCES_STATES + COFA_STATES
STATES_NORMALIZED = {
'ak': 'AK',
'al': 'AL',
'ala': 'AL',
'alabama': 'AL',
'alaska': 'AK',
'american samao': 'AS',
'american samoa': 'AS',
'ar': 'AR',
'ariz': 'AZ',
'arizona': 'AZ',
'ark': 'AR',
'arkansas': 'AR',
'as': 'AS',
'az': 'AZ',
'ca': 'CA',
'calf': 'CA',
'calif': 'CA',
'california': 'CA',
'co': 'CO',
'colo': 'CO',
'colorado': 'CO',
'conn': 'CT',
'connecticut': 'CT',
'ct': 'CT',
'dc': 'DC',
'de': 'DE',
'del': 'DE',
'delaware': 'DE',
'deleware': 'DE',
'district of columbia': 'DC',
'fl': 'FL',
'fla': 'FL',
'florida': 'FL',
'ga': 'GA',
'georgia': 'GA',
'gu': 'GU',
'guam': 'GU',
'hawaii': 'HI',
'hi': 'HI',
'ia': 'IA',
'id': 'ID',
'idaho': 'ID',
'il': 'IL',
'ill': 'IL',
'illinois': 'IL',
'in': 'IN',
'ind': 'IN',
'indiana': 'IN',
'iowa': 'IA',
'kan': 'KS',
'kans': 'KS',
'kansas': 'KS',
'kentucky': 'KY',
'ks': 'KS',
'ky': 'KY',
'la': 'LA',
'louisiana': 'LA',
'ma': 'MA',
'maine': 'ME',
'marianas islands': 'MP',
'marianas islands of the pacific': 'MP',
'marinas islands of the pacific': 'MP',
'maryland': 'MD',
'mass': 'MA',
'massachusetts': 'MA',
'massachussetts': 'MA',
'md': 'MD',
'me': 'ME',
'mi': 'MI',
'mich': 'MI',
'michigan': 'MI',
'minn': 'MN',
'minnesota': 'MN',
'miss': 'MS',
'mississippi': 'MS',
'missouri': 'MO',
'mn': 'MN',
'mo': 'MO',
'mont': 'MT',
'montana': 'MT',
'mp': 'MP',
'ms': 'MS',
'mt': 'MT',
'n d': 'ND',
'n dak': 'ND',
'n h': 'NH',
'n j': 'NJ',
'n m': 'NM',
'n mex': 'NM',
'nc': 'NC',
'nd': 'ND',
'ne': 'NE',
'neb': 'NE',
'nebr': 'NE',
'nebraska': 'NE',
'nev': 'NV',
'nevada': 'NV',
'new hampshire': 'NH',
'new jersey': 'NJ',
'new mexico': 'NM',
'new york': 'NY',
'nh': 'NH',
'nj': 'NJ',
'nm': 'NM',
'nmex': 'NM',
'north carolina': 'NC',
'north dakota': 'ND',
'northern mariana islands': 'MP',
'nv': 'NV',
'ny': 'NY',
'oh': 'OH',
'ohio': 'OH',
'ok': 'OK',
'okla': 'OK',
'oklahoma': 'OK',
'or': 'OR',
'ore': 'OR',
'oreg': 'OR',
'oregon': 'OR',
'pa': 'PA',
'penn': 'PA',
'pennsylvania': 'PA',
'pr': 'PR',
'puerto rico': 'PR',
'rhode island': 'RI',
'ri': 'RI',
's dak': 'SD',
'sc': 'SC',
'sd': 'SD',
'sdak': 'SD',
'south carolina': 'SC',
'south dakota': 'SD',
'tenn': 'TN',
'tennessee': 'TN',
'territory of hawaii': 'HI',
'tex': 'TX',
'texas': 'TX',
'tn': 'TN',
'tx': 'TX',
'us virgin islands': 'VI',
'usvi': 'VI',
'ut': 'UT',
'utah': 'UT',
'va': 'VA',
'vermont': 'VT',
'vi': 'VI',
'viginia': 'VA',
'virgin islands': 'VI',
'virgina': 'VA',
'virginia': 'VA',
'vt': 'VT',
'w va': 'WV',
'wa': 'WA',
'wash': 'WA',
'washington': 'WA',
'west virginia': 'WV',
'wi': 'WI',
'wis': 'WI',
'wisc': 'WI',
'wisconsin': 'WI',
'wv': 'WV',
'wva': 'WV',
'wy': 'WY',
'wyo': 'WY',
'wyoming': 'WY',
}
| apache-2.0 |
Jusedawg/SickRage | lib/html5lib/treewalkers/__init__.py | 1229 | 2323 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to do
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive). Supported
               values are:

                "dom" - The xml.dom.minidom DOM implementation
                "pulldom" - The xml.dom.pulldom event stream
                "etree" - A generic walker for tree implementations exposing an
                          elementtree-like interface (known to work with
                          ElementTree, cElementTree and lxml.etree).
                "lxml" - Optimized walker for lxml.etree
                "genshi" - a Genshi stream

    implementation - (Currently applies to the "etree" tree type only). A module
                     implementing the tree type e.g. xml.etree.ElementTree or
                     cElementTree."""

    treeType = treeType.lower()

    # "etree" is parameterised by the implementation module, so it is handled
    # up front and never stored in the module-level cache.
    if treeType == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        # XXX: NEVER cache here, caching is done in the etree submodule
        return etree.getETreeModule(implementation, **kwargs).TreeWalker

    if treeType not in treeWalkerCache:
        if treeType in ("dom", "pulldom"):
            name = "%s.%s" % (__name__, treeType)
            __import__(name)
            treeWalkerCache[treeType] = sys.modules[name].TreeWalker
        elif treeType == "genshi":
            from . import genshistream
            treeWalkerCache[treeType] = genshistream.TreeWalker
        elif treeType == "lxml":
            from . import lxmletree
            treeWalkerCache[treeType] = lxmletree.TreeWalker

    # Unknown tree types fall through to None, exactly as before.
    return treeWalkerCache.get(treeType)
| gpl-3.0 |
TalShafir/ansible | lib/ansible/modules/system/seboolean.py | 12 | 10109 | #!/usr/bin/python
# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure.
required: true
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot.
type: bool
default: 'no'
state:
description:
- Desired boolean value
type: bool
required: true
notes:
- Not tested on any Debian based system.
requirements:
- libselinux-python
- libsemanage-python
author:
- Stephen Fromm (@sfromm)
'''
EXAMPLES = '''
- name: Set httpd_can_network_connect flag on and keep it persistent across reboots
seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
'''
import os
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
try:
import semanage
HAVE_SEMANAGE = True
except ImportError:
HAVE_SEMANAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import binary_type
from ansible.module_utils._text import to_bytes
def has_boolean_value(module, name):
    """Return True when `name` is a defined SELinux boolean."""
    bools = []
    try:
        rc, bools = selinux.security_get_boolean_names()
    except OSError:
        module.fail_json(msg="Failed to get list of boolean names")
    # work around for selinux who changed its API, see
    # https://github.com/ansible/ansible/issues/25651
    if bools and isinstance(bools[0], binary_type):
        name = to_bytes(name)
    return name in bools
def get_boolean_value(module, name):
    """Return True when the SELinux boolean `name` is currently active."""
    state = 0
    try:
        state = selinux.security_get_boolean_active(name)
    except OSError:
        module.fail_json(msg="Failed to determine current state for boolean %s" % name)
    return state == 1
def semanage_get_handle(module):
    """Create and connect a semanage library handle.

    Calls module.fail_json (which exits) on any failure, so callers may
    assume a usable, connected handle on return.
    """
    handle = semanage.semanage_handle_create()
    if not handle:
        module.fail_json(msg="Failed to create semanage library handle")
    managed = semanage.semanage_is_managed(handle)
    if managed <= 0:
        # Either an error (< 0) or an unmanaged policy store (== 0); the
        # handle is destroyed before failing in both cases.
        semanage.semanage_handle_destroy(handle)
        if managed < 0:
            # NOTE(review): message reads "policy is manage" — looks like a
            # typo for "managed", but it is a runtime string; left untouched.
            module.fail_json(msg="Failed to determine whether policy is manage")
        if managed == 0:
            # Root gets a different message because re-running as root
            # would not help when the policy store itself is unmanaged.
            if os.getuid() == 0:
                module.fail_json(msg="Cannot set persistent booleans without managed policy")
            else:
                module.fail_json(msg="Cannot set persistent booleans; please try as root")
    if semanage.semanage_connect(handle) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to connect to semanage")
    return handle
def semanage_begin_transaction(module, handle):
    """Start a semanage transaction, failing the module (and freeing the
    handle) on error."""
    rc = semanage.semanage_begin_transaction(handle)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to begin semanage transaction")
def semanage_set_boolean_value(module, handle, name, value):
    """Set a boolean's persistent value (and active value) in the policy store.

    Mirrors the sequence used by setsebool: build a key for `name`, verify
    the boolean exists, query its record, modify it locally and mark it
    active.  Every failure path destroys the handle and exits via fail_json.
    """
    # Temporary record used only to derive a lookup key for `name`.
    rc, t_b = semanage.semanage_bool_create(handle)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to create seboolean with semanage")
    if semanage.semanage_bool_set_name(handle, t_b, name) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set seboolean name with semanage")
    rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to extract boolean key with semanage")
    rc, exists = semanage.semanage_bool_exists(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to check if boolean is defined")
    if not exists:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name)
    # Fetch the real record and update it in the local (pending) store.
    rc, sebool = semanage.semanage_bool_query(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to query boolean in persistent policy")
    semanage.semanage_bool_set_value(sebool, value)
    if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to modify boolean key with semanage")
    # Also apply the value to the running (active) policy.
    if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set boolean key active with semanage")
    # Free the C-level records on the success path.
    semanage.semanage_bool_key_free(boolkey)
    semanage.semanage_bool_free(t_b)
    semanage.semanage_bool_free(sebool)
def semanage_get_boolean_value(module, handle, name):
    """Return the persistent (policy-store) value of boolean `name`.

    Follows the same key-extraction sequence as semanage_set_boolean_value;
    every failure path destroys the handle and exits via fail_json.
    """
    # Temporary record used only to derive a lookup key for `name`.
    rc, t_b = semanage.semanage_bool_create(handle)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to create seboolean with semanage")
    if semanage.semanage_bool_set_name(handle, t_b, name) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set seboolean name with semanage")
    rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to extract boolean key with semanage")
    rc, exists = semanage.semanage_bool_exists(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to check if boolean is defined")
    if not exists:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name)
    rc, sebool = semanage.semanage_bool_query(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to query boolean in persistent policy")
    value = semanage.semanage_bool_get_value(sebool)
    # Free the C-level records on the success path.
    semanage.semanage_bool_key_free(boolkey)
    semanage.semanage_bool_free(t_b)
    semanage.semanage_bool_free(sebool)
    return value
def semanage_commit(module, handle, load=0):
    """Commit the pending semanage transaction.

    `load` controls whether the policy is reloaded after the commit.
    """
    semanage.semanage_set_reload(handle, load)
    if semanage.semanage_commit(handle) >= 0:
        return
    semanage.semanage_handle_destroy(handle)
    module.fail_json(msg="Failed to commit changes to semanage")
def semanage_destroy_handle(module, handle):
    """Disconnect from semanage and free the handle; fail on a bad
    disconnect (after the handle has already been destroyed)."""
    disconnect_rc = semanage.semanage_disconnect(handle)
    semanage.semanage_handle_destroy(handle)
    if disconnect_rc < 0:
        module.fail_json(msg="Failed to disconnect from semanage")
# The following function implements what setsebool.c does to change
# a boolean and make it persist after reboot.
def semanage_boolean_value(module, name, state):
    """Set boolean `name` persistently to `state`; returns True if the
    stored value actually changed (honours check mode)."""
    value = 1 if state else 0
    changed = False
    try:
        handle = semanage_get_handle(module)
        semanage_begin_transaction(module, handle)
        if semanage_get_boolean_value(module, handle, name) != value:
            changed = True
            if not module.check_mode:
                semanage_set_boolean_value(module, handle, name, value)
        # Commit/teardown happen even when nothing changed.
        semanage_commit(module, handle)
        semanage_destroy_handle(module, handle)
    except Exception as e:
        module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
    return changed
def set_boolean_value(module, name, state):
    """Set the runtime (non-persistent) value of boolean `name`.

    Returns True on success, False otherwise.
    """
    target = 1 if state else 0
    rc = 0
    try:
        rc = selinux.security_set_boolean(name, target)
    except OSError:
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, target))
    return rc == 0
def main():
    """Entry point: toggle an SELinux boolean, optionally persistently."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            persistent=dict(type='bool', default=False),
            state=dict(type='bool', required=True),
        ),
        supports_check_mode=True,
    )

    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python support")

    if not HAVE_SEMANAGE:
        module.fail_json(msg="This module requires libsemanage-python support")

    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")

    name = module.params['name']
    persistent = module.params['persistent']
    state = module.params['state']

    result = dict(
        name=name,
        persistent=persistent,
        state=state
    )
    changed = False

    if hasattr(selinux, 'selinux_boolean_sub'):
        # selinux_boolean_sub allows sites to rename a boolean and alias the
        # old name. Feature only available in selinux library since 2012.
        name = selinux.selinux_boolean_sub(name)

    if not has_boolean_value(module, name):
        module.fail_json(msg="SELinux boolean %s does not exist." % name)

    if persistent:
        changed = semanage_boolean_value(module, name, state)
    else:
        cur_value = get_boolean_value(module, name)
        if cur_value != state:
            changed = True
            if not module.check_mode:
                changed = set_boolean_value(module, name, state)
                if not changed:
                    module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))
                try:
                    selinux.security_commit_booleans()
                except Exception:
                    # Was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt; narrowed to Exception.
                    module.fail_json(msg="Failed to commit pending boolean %s value" % name)

    result['changed'] = changed
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
vmax-feihu/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geoip/libgeoip.py | 121 | 1103 | import os
from ctypes import CDLL
from ctypes.util import find_library
from django.conf import settings
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = dict((key, getattr(settings, key))
    for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
    if hasattr(settings, key))
# An explicit GEOIP_LIBRARY_PATH setting takes precedence over a
# find_library() search.
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None)

# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
    lib_name = None
else:
    # TODO: Is this really the library name for Windows?
    lib_name = 'GeoIP'

# Getting the path to the GeoIP library.
# NOTE: this runs at import time; a missing library raises immediately.
if lib_name: lib_path = find_library(lib_name)
if lib_path is None: raise RuntimeError('Could not find the GeoIP library (tried "%s"). '
    'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)

# Getting the C `free` for the platform.
if os.name == 'nt':
    libc = CDLL('msvcrt')
else:
    # CDLL(None) loads symbols from the current process (POSIX).
    libc = CDLL(None)
free = libc.free
| apache-2.0 |
agaurav/ansible | lib/ansible/plugins/action/assemble.py | 9 | 6012 | # (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import pipes
import shutil
import tempfile
import base64
import re
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
    """Action plugin for the `assemble` module.

    When the fragments live on the controller, the fragments are
    concatenated locally and the result is pushed to the target with the
    `copy` module; with `remote_src`, the work is delegated to the remote
    `assemble` module instead.
    """

    TRANSFERS_FILES = True

    def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
        '''Assemble a file from a directory of fragments.

        Returns the path of a temporary file holding the concatenation.
        The caller is responsible for removing it.
        '''
        import codecs

        tmpfd, temp_path = tempfile.mkstemp()
        tmp = os.fdopen(tmpfd, 'w')
        delimit_me = False
        add_newline = False

        # Un-escape things like newlines in the delimiter exactly once,
        # using codecs.decode (str.decode('unicode-escape') is Python 2
        # only). The original decoded the same variable on every loop
        # iteration, progressively un-escaping it for each fragment after
        # the first.
        if delimiter is not None:
            delimiter = codecs.decode(delimiter, 'unicode-escape')

        for f in sorted(os.listdir(src_path)):
            if compiled_regexp and not compiled_regexp.search(f):
                continue
            fragment = "%s/%s" % (src_path, f)
            if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
                continue
            # open() rather than the Python 2-only file() builtin; the
            # context manager also closes the handle promptly.
            with open(fragment) as fragment_fh:
                fragment_content = fragment_fh.read()

            # always put a newline between fragments if the previous fragment didn't end with a newline.
            if add_newline:
                tmp.write('\n')

            # delimiters should only appear between fragments
            if delimit_me:
                if delimiter:
                    tmp.write(delimiter)
                    # always make sure there's a newline after the
                    # delimiter, so lines don't run together
                    if delimiter[-1] != '\n':
                        tmp.write('\n')

            tmp.write(fragment_content)
            delimit_me = True
            if fragment_content.endswith('\n'):
                add_newline = False
            else:
                add_newline = True

        tmp.close()
        return temp_path

    def run(self, tmp=None, task_vars=None):
        """Handle the assemble action; returns the module result dict."""
        # Avoid the shared-mutable-default pitfall of `task_vars=dict()`.
        if task_vars is None:
            task_vars = dict()

        if self._connection_info.check_mode:
            return dict(skipped=True, msg=("skipped, this module does not support check_mode."))

        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        delimiter = self._task.args.get('delimiter', None)
        remote_src = self._task.args.get('remote_src', 'yes')
        regexp = self._task.args.get('regexp', None)
        ignore_hidden = self._task.args.get('ignore_hidden', False)

        if src is None or dest is None:
            return dict(failed=True, msg="src and dest are required")

        if boolean(remote_src):
            # Fragments are already on the target; run the real module there.
            return self._execute_module(tmp=tmp, task_vars=task_vars)
        elif self._task._role is not None:
            src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
        else:
            # the source is local, so expand it here
            src = self._loader.path_dwim(os.path.expanduser(src))

        _re = None
        if regexp is not None:
            _re = re.compile(regexp)

        # Does all work assembling the file
        path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden)
        try:
            path_checksum = checksum_s(path)
            dest = self._remote_expand_user(dest, tmp)
            remote_checksum = self._remote_checksum(tmp, dest)

            if path_checksum != remote_checksum:
                with open(path) as assembled:
                    resultant = assembled.read()

                # FIXME: diff needs to be moved somewhere else
                #if self.runner.diff:
                #    dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), task_vars=task_vars, tmp=tmp, persist_files=True)
                #    if 'content' in dest_result:
                #        dest_contents = dest_result['content']
                #        if dest_result['encoding'] == 'base64':
                #            dest_contents = base64.b64decode(dest_contents)
                #        else:
                #            raise Exception("unknown encoding, failed: %s" % dest_result)

                xfered = self._transfer_data('src', resultant)

                # fix file permissions when the copy is done as a different user
                if self._connection_info.become and self._connection_info.become_user != 'root':
                    self._remote_chmod('a+r', xfered, tmp)

                # run the copy module
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        src=xfered,
                        dest=dest,
                        original_basename=os.path.basename(src),
                    )
                )
                res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
                # FIXME: diff stuff
                #res.diff = dict(after=resultant)
                return res
            else:
                # Destination already matches. BUG FIX: the original passed
                # src=xfered here, but `xfered` is undefined on this branch
                # (nothing was transferred) and raised NameError. Just run
                # the file module against the destination to adjust
                # attributes, without a src argument.
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        dest=dest,
                        original_basename=os.path.basename(src),
                    )
                )
                # 'src' refers to the local fragment directory; the remote
                # file module must not receive it.
                new_module_args.pop('src', None)
                return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
        finally:
            # _assemble_from_fragments leaves a mkstemp() file behind;
            # clean it up so repeated runs don't accumulate temp files.
            try:
                os.remove(path)
            except OSError:
                pass
| gpl-3.0 |
endlessm/chromium-browser | third_party/angle/third_party/vulkan-headers/src/registry/generator.py | 3 | 37029 | #!/usr/bin/python3 -i
#
# Copyright (c) 2013-2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for source/header/doc generators, as well as some utility functions."""
from __future__ import unicode_literals
import io
import os
import pdb
import re
import shutil
import sys
import tempfile
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
from spec_tools.util import getElemName, getElemType
def write(*args, **kwargs):
    """print()-style helper: join `args` with spaces and write to `file`.

    Keyword arguments: `file` (default sys.stdout) and `end` (default
    a newline)."""
    target = kwargs.pop('file', sys.stdout)
    terminator = kwargs.pop('end', '\n')
    pieces = [str(arg) for arg in args]
    target.write(' '.join(pieces) + terminator)
def noneStr(s):
    """Return `s` when truthy, otherwise the empty string.

    Used in converting etree Elements (whose .text/.tail may be None)
    into text."""
    return s if s else ""
def enquote(s):
    """Wrap string argument in single quotes, for serialization into
    Python code. Falsy input yields None."""
    if not s:
        return None
    return "'{}'".format(s)
def regSortCategoryKey(feature):
    """Sort key for regSortFeatures.

    Sorts by category of the feature name string:

    - Core API features (those defined with a `<feature>` tag) -> 0
    - ARB/KHR/OES (Khronos extensions) -> 1
    - other (EXT/vendor extensions) -> 2"""
    if feature.elem.tag == 'feature':
        return 0
    return 1 if feature.category in ('ARB', 'KHR', 'OES') else 2
def regSortOrderKey(feature):
    """Sort key for regSortFeatures - the feature's explicit
    `sortorder` attribute."""
    return feature.sortorder
def regSortFeatureVersionKey(feature):
    """Sort key for regSortFeatures - the feature version as a float.

    `<extension>` elements all have version number 0."""
    return float(feature.versionNumber)
def regSortExtensionNumberKey(feature):
    """Sort key for regSortFeatures - the extension number as an int.

    `<feature>` elements all have extension number 0."""
    return int(feature.number)
def regSortFeatures(featureList):
    """Default sort procedure for features.

    - Sorts by explicit sort order (default 0) relative to other features
    - then by feature category ('feature' or 'extension'),
    - then by version number (for features)
    - then by extension number (for extensions)"""
    # A chain of stable sorts (extension number, then version, then
    # category, then sort order) is equivalent to a single sort on the
    # composite key with the most-significant component first.
    featureList.sort(key=lambda f: (regSortOrderKey(f),
                                    regSortCategoryKey(f),
                                    regSortFeatureVersionKey(f),
                                    regSortExtensionNumberKey(f)))
class GeneratorOptions:
    """Base class for options used during header/documentation production.

    These options are target language independent, and used by
    Registry.apiGen() and by base OutputGenerator objects."""

    def __init__(self,
                 conventions=None,
                 filename=None,
                 directory='.',
                 apiname=None,
                 profile=None,
                 versions='.*',
                 emitversions='.*',
                 defaultExtensions=None,
                 addExtensions=None,
                 removeExtensions=None,
                 emitExtensions=None,
                 reparentEnums=True,
                 sortProcedure=regSortFeatures):
        """Constructor.

        Arguments:

        - conventions - may be mandatory for some generators:
          an object that implements ConventionsBase
        - filename - basename of file to generate, or None to write to stdout.
        - directory - directory in which to generate filename
        - apiname - string matching `<api>` 'apiname' attribute, e.g. 'gl'.
        - profile - string specifying API profile , e.g. 'core', or None.
        - versions - regex matching API versions to process interfaces for.
          Normally `'.*'` or `'[0-9][.][0-9]'` to match all defined versions.
        - emitversions - regex matching API versions to actually emit
          interfaces for (though all requested versions are considered
          when deciding which interfaces to generate). For GL 4.3 glext.h,
          this might be `'1[.][2-5]|[2-4][.][0-9]'`.
        - defaultExtensions - If not None, a string which must in its
          entirety match the pattern in the "supported" attribute of
          the `<extension>`. Defaults to None. Usually the same as apiname.
        - addExtensions - regex matching names of additional extensions
          to include. Defaults to None.
        - removeExtensions - regex matching names of extensions to
          remove (after defaultExtensions and addExtensions). Defaults
          to None.
        - emitExtensions - regex matching names of extensions to actually emit
          interfaces for (though all requested versions are considered when
          deciding which interfaces to generate).
        - reparentEnums - move <enum> elements which extend an enumerated
          type from <feature> or <extension> elements to the target <enums>
          element. This is required for almost all purposes, but the
          InterfaceGenerator relies on the list of interfaces in the <feature>
          or <extension> being complete. Defaults to True.
        - sortProcedure - takes a list of FeatureInfo objects and sorts
          them in place to a preferred order in the generated output.
          Default is core API versions, ARB/KHR/OES extensions, all other
          extensions, by core API version number or extension number in each
          group.

        The regex patterns can be None or empty, in which case they match
        nothing."""
        self.conventions = conventions
        """may be mandatory for some generators:
        an object that implements ConventionsBase"""

        self.filename = filename
        "basename of file to generate, or None to write to stdout."

        self.directory = directory
        "directory in which to generate filename"

        self.apiname = apiname
        "string matching `<api>` 'apiname' attribute, e.g. 'gl'."

        self.profile = profile
        "string specifying API profile , e.g. 'core', or None."

        # Empty patterns are normalized to a never-matching regex so
        # downstream re.match() calls need no None checks.
        self.versions = self.emptyRegex(versions)
        """regex matching API versions to process interfaces for.
        Normally `'.*'` or `'[0-9][.][0-9]'` to match all defined versions."""

        self.emitversions = self.emptyRegex(emitversions)
        """regex matching API versions to actually emit
        interfaces for (though all requested versions are considered
        when deciding which interfaces to generate). For GL 4.3 glext.h,
        this might be `'1[.][2-5]|[2-4][.][0-9]'`."""

        self.defaultExtensions = defaultExtensions
        """If not None, a string which must in its
        entirety match the pattern in the "supported" attribute of
        the `<extension>`. Defaults to None. Usually the same as apiname."""

        self.addExtensions = self.emptyRegex(addExtensions)
        """regex matching names of additional extensions
        to include. Defaults to None."""

        self.removeExtensions = self.emptyRegex(removeExtensions)
        """regex matching names of extensions to
        remove (after defaultExtensions and addExtensions). Defaults
        to None."""

        self.emitExtensions = self.emptyRegex(emitExtensions)
        """regex matching names of extensions to actually emit
        interfaces for (though all requested versions are considered when
        deciding which interfaces to generate)."""

        self.reparentEnums = reparentEnums
        """boolean specifying whether to remove <enum> elements from
        <feature> or <extension> when extending an <enums> type."""

        self.sortProcedure = sortProcedure
        """takes a list of FeatureInfo objects and sorts
        them in place to a preferred order in the generated output.
        Default is core API versions, ARB/KHR/OES extensions, all
        other extensions, alphabetically within each group."""

        # Overridden to True by generator subclasses that emit compilable
        # code (controls e.g. emission of the *_MAX_ENUM padding value).
        self.codeGenerator = False
        """True if this generator makes compilable code"""

    def emptyRegex(self, pat):
        """Substitute a regular expression which matches no version
        or extension names for None or the empty string."""
        if not pat:
            # '^' mid-pattern can never match, so this regex matches nothing.
            return '_nomatch_^'
        return pat
class OutputGenerator:
    """Generate specified API interfaces in a specific style, such as a C header.

    Base class for generating API interfaces.
    Manages basic logic, logging, and output file control.
    Derived classes actually generate formatted output.
    """

    # categoryToPath - map an XML 'category' attribute to the directory
    # name used when generating per-type include files (see derived
    # generators that call makeDir()).
    categoryToPath = {
        'bitmask': 'flags',
        'enum': 'enums',
        'funcpointer': 'funcpointers',
        'handle': 'handles',
        'define': 'defines',
        'basetype': 'basetypes',
    }
    def __init__(self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout):
        """Constructor

        - errFile, warnFile, diagFile - file handles to write errors,
          warnings, diagnostics to. May be None to not write."""
        self.outFile = None
        self.errFile = errFile
        self.warnFile = warnFile
        self.diagFile = diagFile
        # Internal state
        self.featureName = None        # name of the <feature>/<extension> currently being generated
        self.genOpts = None            # GeneratorOptions, set in beginFile()
        self.registry = None           # Registry object; set externally before generation
        self.featureDictionary = {}
        # Used for extension enum value generation
        self.extBase = 1000000000      # base value for extension enum offsets
        self.extBlockSize = 1000       # enum values reserved per extension
        self.madeDirs = {}             # directories already created by makeDir()
def logMsg(self, level, *args):
"""Write a message of different categories to different
destinations.
- `level`
- 'diag' (diagnostic, voluminous)
- 'warn' (warning)
- 'error' (fatal error - raises exception after logging)
- `*args` - print()-style arguments to direct to corresponding log"""
if level == 'error':
strfile = io.StringIO()
write('ERROR:', *args, file=strfile)
if self.errFile is not None:
write(strfile.getvalue(), file=self.errFile)
raise UserWarning(strfile.getvalue())
elif level == 'warn':
if self.warnFile is not None:
write('WARNING:', *args, file=self.warnFile)
elif level == 'diag':
if self.diagFile is not None:
write('DIAG:', *args, file=self.diagFile)
else:
raise UserWarning(
'*** FATAL ERROR in Generator.logMsg: unknown level:' + level)
    def enumToValue(self, elem, needsNum):
        """Parse and convert an `<enum>` tag into a value.

        Returns a list:

        - first element - integer representation of the value, or None
          if needsNum is False. The value must be a legal number
          if needsNum is True.
        - second element - string representation of the value

        There are several possible representations of values.

        - A 'value' attribute simply contains the value.
        - A 'bitpos' attribute defines a value by specifying the bit
          position which is set in that value.
        - An 'offset','extbase','extends' triplet specifies a value
          as an offset to a base value defined by the specified
          'extbase' extension name, which is then cast to the
          typename specified by 'extends'. This requires probing
          the registry database, and imbeds knowledge of the
          API extension enum scheme in this function.
        - An 'alias' attribute contains the name of another enum
          which this is an alias of. The other enum must be
          declared first when emitting this enum."""
        name = elem.get('name')
        numVal = None
        if 'value' in elem.keys():
            value = elem.get('value')
            # print('About to translate value =', value, 'type =', type(value))
            if needsNum:
                # base-0 int() accepts decimal, hex (0x...) and octal forms
                numVal = int(value, 0)
            # If there's a non-integer, numeric 'type' attribute (e.g. 'u' or
            # 'ull'), append it to the string value.
            # t = enuminfo.elem.get('type')
            # if t is not None and t != '' and t != 'i' and t != 's':
            #     value += enuminfo.type
            self.logMsg('diag', 'Enum', name, '-> value [', numVal, ',', value, ']')
            return [numVal, value]
        if 'bitpos' in elem.keys():
            value = elem.get('bitpos')
            bitpos = int(value, 0)
            numVal = 1 << bitpos
            value = '0x%08x' % numVal
            if not self.genOpts.conventions.valid_flag_bit(bitpos):
                msg = 'Enum {} uses bit position {}, which may result in undefined behavior or unexpected enumerant scalar data type'
                self.logMsg('warn', msg.format(name, bitpos))
            if bitpos >= 32:
                # Value no longer fits a 32-bit flag type; force a 64-bit literal.
                value = value + 'ULL'
            self.logMsg('diag', 'Enum', name, '-> bitpos [', numVal, ',', value, ']')
            return [numVal, value]
        if 'offset' in elem.keys():
            # Obtain values in the mapping from the attributes
            enumNegative = False
            offset = int(elem.get('offset'), 0)
            extnumber = int(elem.get('extnumber'), 0)
            extends = elem.get('extends')
            if 'dir' in elem.keys():
                # presence of 'dir' marks a negative-direction offset
                enumNegative = True
            self.logMsg('diag', 'Enum', name, 'offset =', offset,
                        'extnumber =', extnumber, 'extends =', extends,
                        'enumNegative =', enumNegative)
            # Now determine the actual enumerant value, as defined
            # in the "Layers and Extensions" appendix of the spec.
            numVal = self.extBase + (extnumber - 1) * self.extBlockSize + offset
            if enumNegative:
                numVal *= -1
            value = '%d' % numVal
            # More logic needed!
            self.logMsg('diag', 'Enum', name, '-> offset [', numVal, ',', value, ']')
            return [numVal, value]
        if 'alias' in elem.keys():
            # Aliases have no numeric value of their own.
            return [None, elem.get('alias')]
        return [None, None]
def checkDuplicateEnums(self, enums):
"""Sanity check enumerated values.
- enums - list of `<enum>` Elements
returns the list with duplicates stripped"""
# Dictionaries indexed by name and numeric value.
# Entries are [ Element, numVal, strVal ] matching name or value
nameMap = {}
valueMap = {}
stripped = []
for elem in enums:
name = elem.get('name')
(numVal, strVal) = self.enumToValue(elem, True)
if name in nameMap:
# Duplicate name found; check values
(name2, numVal2, strVal2) = nameMap[name]
# Duplicate enum values for the same name are benign. This
# happens when defining the same enum conditionally in
# several extension blocks.
if (strVal2 == strVal or (numVal is not None
and numVal == numVal2)):
True
# self.logMsg('info', 'checkDuplicateEnums: Duplicate enum (' + name +
# ') found with the same value:' + strVal)
else:
self.logMsg('warn', 'checkDuplicateEnums: Duplicate enum (' + name
+ ') found with different values:' + strVal
+ ' and ' + strVal2)
# Don't add the duplicate to the returned list
continue
elif numVal in valueMap:
# Duplicate value found (such as an alias); report it, but
# still add this enum to the list.
(name2, numVal2, strVal2) = valueMap[numVal]
msg = 'Two enums found with the same value: {} = {} = {}'.format(
name, name2.get('name'), strVal)
self.logMsg('error', msg)
# Track this enum to detect followon duplicates
nameMap[name] = [elem, numVal, strVal]
if numVal is not None:
valueMap[numVal] = [elem, numVal, strVal]
# Add this enum to the list
stripped.append(elem)
# Return the list
return stripped
def buildEnumCDecl(self, expand, groupinfo, groupName):
"""Generate the C declaration for an enum"""
groupElem = groupinfo.elem
if self.genOpts.conventions.constFlagBits and groupElem.get('type') == 'bitmask':
return self.buildEnumCDecl_Bitmask(groupinfo, groupName)
else:
return self.buildEnumCDecl_Enum(expand, groupinfo, groupName)
def buildEnumCDecl_Bitmask(self, groupinfo, groupName):
"""Generate the C declaration for an "enum" that is actually a
set of flag bits"""
groupElem = groupinfo.elem
flagTypeName = groupinfo.flagType.elem.get('name')
# Prefix
body = "// Flag bits for " + flagTypeName + "\n"
# Loop over the nested 'enum' tags.
for elem in groupElem.findall('enum'):
# Convert the value to an integer and use that to track min/max.
# Values of form -(number) are accepted but nothing more complex.
# Should catch exceptions here for more complex constructs. Not yet.
(_, strVal) = self.enumToValue(elem, True)
name = elem.get('name')
body += "static const {} {} = {};\n".format(flagTypeName, name, strVal)
# Postfix
return ("bitmask", body)
    def buildEnumCDecl_Enum(self, expand, groupinfo, groupName):
        """Generate the C declaration for an enumerated type"""
        groupElem = groupinfo.elem

        # Break the group name into prefix and suffix portions for range
        # enum generation
        expandName = re.sub(r'([0-9a-z_])([A-Z0-9])', r'\1_\2', groupName).upper()
        expandPrefix = expandName
        expandSuffix = ''
        expandSuffixMatch = re.search(r'[A-Z][A-Z]+$', groupName)
        if expandSuffixMatch:
            expandSuffix = '_' + expandSuffixMatch.group()
            # Strip off the suffix from the prefix
            expandPrefix = expandName.rsplit(expandSuffix, 1)[0]

        # Prefix
        body = ["typedef enum %s {" % groupName]

        # @@ Should use the type="bitmask" attribute instead
        isEnum = ('FLAG_BITS' not in expandPrefix)

        # Get a list of nested 'enum' tags.
        enums = groupElem.findall('enum')

        # Check for and report duplicates, and return a list with them
        # removed.
        enums = self.checkDuplicateEnums(enums)

        # Loop over the nested 'enum' tags. Keep track of the minimum and
        # maximum numeric values, if they can be determined; but only for
        # core API enumerants, not extension enumerants. This is inferred
        # by looking for 'extends' attributes.
        minName = None

        # Accumulate non-numeric enumerant values separately and append
        # them following the numeric values, to allow for aliases.
        # NOTE: this doesn't do a topological sort yet, so aliases of
        # aliases can still get in the wrong order.
        aliasText = []

        for elem in enums:
            # Convert the value to an integer and use that to track min/max.
            # Values of form -(number) are accepted but nothing more complex.
            # Should catch exceptions here for more complex constructs. Not yet.
            (numVal, strVal) = self.enumToValue(elem, True)
            name = elem.get('name')

            # Extension enumerants are only included if they are required
            if self.isEnumRequired(elem):
                # NOTE(review): the leading whitespace in these literals may
                # have been collapsed in this copy of the file - confirm the
                # intended indentation against the canonical source.
                decl = " {} = {},".format(name, strVal)
                if numVal is not None:
                    body.append(decl)
                else:
                    aliasText.append(decl)

            # Don't track min/max for non-numbers (numVal is None)
            if isEnum and numVal is not None and elem.get('extends') is None:
                if minName is None:
                    minName = maxName = name
                    minValue = maxValue = numVal
                elif numVal < minValue:
                    minName = name
                    minValue = numVal
                elif numVal > maxValue:
                    maxName = name
                    maxValue = numVal

        # Now append the non-numeric enumerant values
        body.extend(aliasText)

        # Generate min/max value tokens - legacy use case.
        # NOTE(review): if `expand` is set but no core numeric enumerant was
        # seen above, minName/maxName/minValue/maxValue are unbound here -
        # presumably real groups always contain at least one; confirm.
        if isEnum and expand:
            body.extend((" {}_BEGIN_RANGE{} = {},".format(expandPrefix, expandSuffix, minName),
                         " {}_END_RANGE{} = {},".format(
                             expandPrefix, expandSuffix, maxName),
                         " {}_RANGE_SIZE{} = ({} - {} + 1),".format(expandPrefix, expandSuffix, maxName, minName)))

        # Generate a range-padding value to ensure the enum is 32 bits, but
        # only in code generators, so it doesn't appear in documentation
        if (self.genOpts.codeGenerator or
                self.conventions.generate_max_enum_in_docs):
            body.append(" {}_MAX_ENUM{} = 0x7FFFFFFF".format(
                expandPrefix, expandSuffix))

        # Postfix
        body.append("} %s;" % groupName)

        # Determine appropriate section for this declaration
        if groupElem.get('type') == 'bitmask':
            section = 'bitmask'
        else:
            section = 'group'

        return (section, '\n'.join(body))
def makeDir(self, path):
"""Create a directory, if not already done.
Generally called from derived generators creating hierarchies."""
self.logMsg('diag', 'OutputGenerator::makeDir(' + path + ')')
if path not in self.madeDirs:
# This can get race conditions with multiple writers, see
# https://stackoverflow.com/questions/273192/
if not os.path.exists(path):
os.makedirs(path)
self.madeDirs[path] = None
    def beginFile(self, genOpts):
        """Start a new interface file

        - genOpts - GeneratorOptions controlling what's generated and how"""
        self.genOpts = genOpts
        # Cache the conventions decision so per-parameter formatting code
        # doesn't repeat the call.
        self.should_insert_may_alias_macro = \
            self.genOpts.conventions.should_insert_may_alias_macro(self.genOpts)
        self.conventions = genOpts.conventions

        # Open a temporary file for accumulating output; endFile() moves it
        # to the real target path on success. Without a filename, write
        # directly to stdout.
        if self.genOpts.filename is not None:
            self.outFile = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', newline='\n', delete=False)
        else:
            self.outFile = sys.stdout
    def endFile(self):
        """Finish the current interface file.

        Flushes all log streams, closes the accumulating output file and,
        when a target filename was configured, moves the temporary file
        into place (creating the directory first on Windows)."""
        if self.errFile:
            self.errFile.flush()
        if self.warnFile:
            self.warnFile.flush()
        if self.diagFile:
            self.diagFile.flush()
        self.outFile.flush()
        # Don't close the process-wide standard streams.
        if self.outFile != sys.stdout and self.outFile != sys.stderr:
            self.outFile.close()

        # On successfully generating output, move the temporary file to the
        # target file.
        if self.genOpts.filename is not None:
            if sys.platform == 'win32':
                # NOTE(review): directory creation is done only on win32
                # here - presumably other platforms are expected to have the
                # directory already; confirm.
                directory = Path(self.genOpts.directory)
                if not Path.exists(directory):
                    os.makedirs(directory)
            shutil.move(self.outFile.name, self.genOpts.directory + '/' + self.genOpts.filename)
        self.genOpts = None
def beginFeature(self, interface, emit):
"""Write interface for a feature and tag generated features as having been done.
- interface - element for the `<version>` / `<extension>` to generate
- emit - actually write to the header only when True"""
self.emit = emit
self.featureName = interface.get('name')
# If there's an additional 'protect' attribute in the feature, save it
self.featureExtraProtect = interface.get('protect')
def endFeature(self):
"""Finish an interface file, closing it when done.
Derived classes responsible for emitting feature"""
self.featureName = None
self.featureExtraProtect = None
def validateFeature(self, featureType, featureName):
"""Validate we're generating something only inside a `<feature>` tag"""
if self.featureName is None:
raise UserWarning('Attempt to generate', featureType,
featureName, 'when not in feature')
    def genType(self, typeinfo, name, alias):
        """Generate interface for a type

        - typeinfo - TypeInfo for a type

        Extend to generate as desired in your derived class.
        The base implementation only checks we are inside a feature."""
        self.validateFeature('type', name)
def genStruct(self, typeinfo, typeName, alias):
"""Generate interface for a C "struct" type.
- typeinfo - TypeInfo for a type interpreted as a struct
Extend to generate as desired in your derived class."""
self.validateFeature('struct', typeName)
# The mixed-mode <member> tags may contain no-op <comment> tags.
# It is convenient to remove them here where all output generators
# will benefit.
for member in typeinfo.elem.findall('.//member'):
for comment in member.findall('comment'):
member.remove(comment)
    def genGroup(self, groupinfo, groupName, alias):
        """Generate interface for a group of enums (C "enum")

        - groupinfo - GroupInfo for a group.

        Extend to generate as desired in your derived class.
        The base implementation only checks we are inside a feature."""
        self.validateFeature('group', groupName)
    def genEnum(self, enuminfo, typeName, alias):
        """Generate interface for an enum (constant).

        - enuminfo - EnumInfo for an enum
        - name - enum name

        Extend to generate as desired in your derived class.
        The base implementation only checks we are inside a feature."""
        self.validateFeature('enum', typeName)
    def genCmd(self, cmd, cmdinfo, alias):
        """Generate interface for a command.

        - cmdinfo - CmdInfo for a command

        Extend to generate as desired in your derived class.
        NOTE(review): unlike the sibling gen* methods this passes the
        `cmdinfo` object, not a plain name, to validateFeature - harmless
        (it is only interpolated into an error message) but inconsistent."""
        self.validateFeature('command', cmdinfo)
def makeProtoName(self, name, tail):
"""Turn a `<proto>` `<name>` into C-language prototype
and typedef declarations for that name.
- name - contents of `<name>` tag
- tail - whatever text follows that tag in the Element"""
return self.genOpts.apientry + name + tail
def makeTypedefName(self, name, tail):
"""Make the function-pointer typedef name for a command."""
return '(' + self.genOpts.apientryp + 'PFN_' + name + tail + ')'
    def makeCParamDecl(self, param, aligncol):
        """Return a string which is an indented, formatted
        declaration for a `<param>` or `<member>` block (e.g. function parameter
        or structure/union member).

        - param - Element (`<param>` or `<member>`) to format
        - aligncol - if non-zero, attempt to align the nested `<name>` element
          at this column"""
        # NOTE(review): the whitespace inside this literal may have been
        # collapsed in this copy of the file - confirm against the
        # canonical source.
        indent = ' '
        paramdecl = indent + noneStr(param.text)
        for elem in param:
            text = noneStr(elem.text)
            tail = noneStr(elem.tail)

            if self.should_insert_may_alias_macro and self.genOpts.conventions.is_voidpointer_alias(elem.tag, text, tail):
                # OpenXR-specific macro insertion - but not in apiinc for the spec
                tail = self.genOpts.conventions.make_voidpointer_alias(tail)
            if elem.tag == 'name' and aligncol > 0:
                self.logMsg('diag', 'Aligning parameter', elem.text, 'to column', self.genOpts.alignFuncParam)
                # Align at specified column, if possible
                paramdecl = paramdecl.rstrip()
                oldLen = len(paramdecl)
                # This works around a problem where very long type names -
                # longer than the alignment column - would run into the tail
                # text.
                paramdecl = paramdecl.ljust(aligncol - 1) + ' '
                newLen = len(paramdecl)
                self.logMsg('diag', 'Adjust length of parameter decl from', oldLen, 'to', newLen, ':', paramdecl)
            paramdecl += text + tail

        if aligncol == 0:
            # Squeeze out multiple spaces other than the indentation
            paramdecl = indent + ' '.join(paramdecl.split())
        return paramdecl
    def getCParamTypeLength(self, param):
        """Return the length of the type field is an indented, formatted
        declaration for a `<param>` or `<member>` block (e.g. function parameter
        or structure/union member).

        - param - Element (`<param>` or `<member>`) to identify"""
        # Allow for missing <name> tag
        newLen = 0
        # Mirrors makeCParamDecl's formatting so the measured length
        # matches what would actually be emitted.
        paramdecl = ' ' + noneStr(param.text)
        for elem in param:
            text = noneStr(elem.text)
            tail = noneStr(elem.tail)

            if self.should_insert_may_alias_macro and self.genOpts.conventions.is_voidpointer_alias(elem.tag, text, tail):
                # OpenXR-specific macro insertion
                tail = self.genOpts.conventions.make_voidpointer_alias(tail)
            if elem.tag == 'name':
                # Align at specified column, if possible
                newLen = len(paramdecl.rstrip())
                self.logMsg('diag', 'Identifying length of', elem.text, 'as', newLen)
            paramdecl += text + tail

        return newLen
def getMaxCParamTypeLength(self, info):
"""Return the length of the longest type field for a member/parameter.
- info - TypeInfo or CommandInfo.
"""
lengths = (self.getCParamTypeLength(member)
for member in info.getMembers())
return max(lengths)
def getHandleParent(self, typename):
"""Get the parent of a handle object."""
info = self.registry.typedict.get(typename)
if info is None:
return None
elem = info.elem
if elem is not None:
return elem.get('parent')
return None
def iterateHandleAncestors(self, typename):
"""Iterate through the ancestors of a handle type."""
current = self.getHandleParent(typename)
while current is not None:
yield current
current = self.getHandleParent(current)
def getHandleAncestors(self, typename):
"""Get the ancestors of a handle object."""
return list(self.iterateHandleAncestors(typename))
def getTypeCategory(self, typename):
"""Get the category of a type."""
info = self.registry.typedict.get(typename)
if info is None:
return None
elem = info.elem
if elem is not None:
return elem.get('category')
return None
    def isStructAlwaysValid(self, structname):
        """Try to do check if a structure is always considered valid (i.e. there's no rules to its acceptance)."""
        # A conventions object is required for this call.
        if not self.conventions:
            raise RuntimeError("To use isStructAlwaysValid, be sure your options include a Conventions object.")

        if self.conventions.type_always_valid(structname):
            return True

        category = self.getTypeCategory(structname)
        if self.conventions.category_requires_validation(category):
            return False

        info = self.registry.typedict.get(structname)
        assert(info is not None)

        members = info.getMembers()

        for member in members:
            member_name = getElemName(member)
            # Structures carrying an sType/pNext-style member always need
            # validation of those members.
            if member_name in (self.conventions.structtype_member_name,
                               self.conventions.nextpointer_member_name):
                return False

            if member.get('noautovalidity'):
                return False

            member_type = getElemType(member)

            # NOTE(review): paramIsArray/paramIsPointer are defined later in
            # this class (outside this chunk) - presumably they classify the
            # member's declaration; confirm.
            if member_type in ('void', 'char') or self.paramIsArray(member) or self.paramIsPointer(member):
                return False

            if self.conventions.type_always_valid(member_type):
                continue

            member_category = self.getTypeCategory(member_type)

            if self.conventions.category_requires_validation(member_category):
                return False

            # Recurse into nested structs/unions.
            if member_category in ('struct', 'union'):
                if self.isStructAlwaysValid(member_type) is False:
                    return False

        return True
def isEnumRequired(self, elem):
"""Return True if this `<enum>` element is
required, False otherwise
- elem - `<enum>` element to test"""
required = elem.get('required') is not None
self.logMsg('diag', 'isEnumRequired:', elem.get('name'),
'->', required)
return required
# @@@ This code is overridden by equivalent code now run in
# @@@ Registry.generateFeature
required = False
extname = elem.get('extname')
if extname is not None:
# 'supported' attribute was injected when the <enum> element was
# moved into the <enums> group in Registry.parseTree()
if self.genOpts.defaultExtensions == elem.get('supported'):
required = True
elif re.match(self.genOpts.addExtensions, extname) is not None:
required = True
elif elem.get('version') is not None:
required = re.match(self.genOpts.emitversions, elem.get('version')) is not None
else:
required = True
return required
def makeCDecls(self, cmd):
"""Return C prototype and function pointer typedef for a
`<command>` Element, as a two-element list of strings.
- cmd - Element containing a `<command>` tag"""
proto = cmd.find('proto')
params = cmd.findall('param')
# Begin accumulating prototype and typedef strings
pdecl = self.genOpts.apicall
tdecl = 'typedef '
# Insert the function return type/name.
# For prototypes, add APIENTRY macro before the name
# For typedefs, add (APIENTRY *<name>) around the name and
# use the PFN_cmdnameproc naming convention.
# Done by walking the tree for <proto> element by element.
# etree has elem.text followed by (elem[i], elem[i].tail)
# for each child element and any following text
# Leading text
pdecl += noneStr(proto.text)
tdecl += noneStr(proto.text)
# For each child element, if it's a <name> wrap in appropriate
# declaration. Otherwise append its contents and tail contents.
for elem in proto:
text = noneStr(elem.text)
tail = noneStr(elem.tail)
if elem.tag == 'name':
pdecl += self.makeProtoName(text, tail)
tdecl += self.makeTypedefName(text, tail)
else:
pdecl += text + tail
tdecl += text + tail
if self.genOpts.alignFuncParam == 0:
# Squeeze out multiple spaces - there is no indentation
pdecl = ' '.join(pdecl.split())
tdecl = ' '.join(tdecl.split())
# Now add the parameter declaration list, which is identical
# for prototypes and typedefs. Concatenate all the text from
# a <param> node without the tags. No tree walking required
# since all tags are ignored.
# Uses: self.indentFuncProto
# self.indentFuncPointer
# self.alignFuncParam
n = len(params)
# Indented parameters
if n > 0:
indentdecl = '(\n'
indentdecl += ',\n'.join(self.makeCParamDecl(p, self.genOpts.alignFuncParam)
for p in params)
indentdecl += ');'
else:
indentdecl = '(void);'
# Non-indented parameters
paramdecl = '('
if n > 0:
paramnames = (''.join(t for t in p.itertext())
for p in params)
paramdecl += ', '.join(paramnames)
else:
paramdecl += 'void'
paramdecl += ");"
return [pdecl + indentdecl, tdecl + paramdecl]
def newline(self):
"""Print a newline to the output file (utility function)"""
write('', file=self.outFile)
def setRegistry(self, registry):
self.registry = registry
| bsd-3-clause |
tchernomax/ansible | lib/ansible/module_utils/known_hosts.py | 73 | 6919 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import hmac
import os
import re

from ansible.module_utils.six.moves.urllib.parse import urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1
HASHED_KEY_MAGIC = "|1|"
def is_ssh_url(url):
    """ check if url is ssh """
    # scp-style syntax (user@host:path) has an '@' but no scheme separator.
    if "@" in url and "://" not in url:
        return True
    # Otherwise only explicit ssh-flavoured schemes count.
    return url.startswith(("ssh://", "git+ssh://", "ssh+git://"))
def get_fqdn_and_port(repo_url):
    """ chop the hostname and port out of a url """
    fqdn = None
    port = None
    # Matches a bracketed IPv6 literal, optionally followed by ':port'.
    ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')

    if "@" in repo_url and "://" not in repo_url:
        # most likely an user@host:path or user@host/path type URL
        rest = repo_url.split("@", 1)[1]
        bracket = ipv6_re.match(rest)
        # For this type of URL, colon specifies the path, not the port
        if bracket:
            fqdn = bracket.group(1)
        elif ":" in rest:
            fqdn = rest.split(":")[0]
        elif "/" in rest:
            fqdn = rest.split("/")[0]
    elif "://" in repo_url:
        # this should be something we can parse with urlparse
        netloc = urlparse(repo_url)[1]
        # netloc will be empty on python2.4 for ssh:// or git:// urls, so
        # ensure we actually have one before continuing.
        if netloc != '':
            if "@" in netloc:
                netloc = netloc.split("@", 1)[1]
            bracket = ipv6_re.match(netloc)
            if bracket:
                fqdn, port = bracket.groups()
            elif ":" in netloc:
                fqdn, port = netloc.split(":")[0:2]
            else:
                fqdn = netloc
    return fqdn, port
def check_hostkey(module, fqdn):
    """Return True when fqdn already appears in one of the known_hosts files."""
    missing = not_in_host_file(module, fqdn)
    return not missing
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
    """Return True if `host` has no entry in any checked known_hosts file.

    Both plain entries and hashed entries of the form
    ``|1|base64(salt)|base64(hmac_sha1(salt, host))`` are handled.
    Missing or unreadable files are skipped silently.
    """
    if 'USER' in os.environ:
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_host_file = "~/.ssh/known_hosts"
    user_host_file = os.path.expanduser(user_host_file)

    host_file_list = [
        user_host_file,
        "/etc/ssh/ssh_known_hosts",
        "/etc/ssh/ssh_known_hosts2",
        "/etc/openssh/ssh_known_hosts",
    ]

    # Tracked but unused; retained to stay parallel with the paramiko variant.
    hfiles_not_found = 0
    for hf in host_file_list:
        if not os.path.exists(hf):
            hfiles_not_found += 1
            continue
        try:
            host_fh = open(hf)
        except IOError:
            hfiles_not_found += 1
            continue
        else:
            data = host_fh.read()
            host_fh.close()

        for line in data.split("\n"):
            # Skip blank/malformed lines. (The previous `line is None` test
            # was dead code: str.split() never yields None.)
            if " " not in line:
                continue
            tokens = line.split()
            if tokens[0].find(HASHED_KEY_MAGIC) == 0:
                # this is a hashed known host entry
                try:
                    (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
                    # base64.b64decode works on both Python 2 and 3; the old
                    # str.decode('base64') idiom was Python 2 only.
                    hash = hmac.new(base64.b64decode(kn_salt), digestmod=sha1)
                    # hmac requires bytes on Python 3.
                    hash.update(host.encode('utf-8') if isinstance(host, str) else host)
                    if hash.digest() == base64.b64decode(kn_host):
                        return False
                except Exception:
                    # invalid hashed host key, skip it. (Narrowed from a bare
                    # `except:` that also swallowed SystemExit and
                    # KeyboardInterrupt.)
                    continue
            else:
                # standard host file entry
                if host in tokens[0]:
                    return False
    return True
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
    """ use ssh-keyscan to add the hostkey

    - module - object providing get_bin_path/run_command/fail_json/append_to_file
      (an AnsibleModule — assumed from usage; confirm at call sites)
    - fqdn - host to scan
    - port - port passed to ssh-keyscan -p; a falsy value omits the flag
    - key_type - key type passed to ssh-keyscan -t
    - create_dir - create ~/.ssh (mode 0700) when missing instead of failing

    Returns the (rc, stdout, stderr) triple from running ssh-keyscan.
    """
    keyscan_cmd = module.get_bin_path('ssh-keyscan', True)

    if 'USER' in os.environ:
        user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_ssh_dir = "~/.ssh/"
        user_host_file = "~/.ssh/known_hosts"
    user_ssh_dir = os.path.expanduser(user_ssh_dir)

    if not os.path.exists(user_ssh_dir):
        if create_dir:
            try:
                os.makedirs(user_ssh_dir, int('700', 8))
            except OSError:
                # Narrowed from a bare `except:`; os.makedirs failures
                # (permissions, races) surface as OSError.
                module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
        else:
            module.fail_json(msg="%s does not exist" % user_ssh_dir)
    elif not os.path.isdir(user_ssh_dir):
        module.fail_json(msg="%s is not a directory" % user_ssh_dir)

    if port:
        this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
    else:
        this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)

    rc, out, err = module.run_command(this_cmd)
    # ssh-keyscan gives a 0 exit code and prints nothing on timeout
    if rc != 0 or not out:
        msg = 'failed to retrieve hostkey'
        if not out:
            msg += '. "%s" returned no matches.' % this_cmd
        else:
            msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)
        if err:
            msg += ' [stderr]: %s' % err
        module.fail_json(msg=msg)

    module.append_to_file(user_host_file, out)

    return rc, out, err
| gpl-3.0 |
mlee92/Programming | Econ/supply_demand_elasticity/demand_elasticity.py | 2 | 1413 | # Elasticity of demand is a measure of how strongly consumers respond to a change in the price of a good
# Formally, % change in demand / % change in price
# Problem: Graph the histogram of average-elasticity for a linear-demand good with random coefficients (a, b)
import random
import matplotlib.pyplot as plt
import numpy as np
SIM = 1000;
UNIT_RANGE = range(1, 50)
AVGS = list()
COEF = [0, 0]
def generate_coefficients():
    """Draw random demand-curve coefficients into the global COEF.

    Demand is price(q) = b - a*q, with slope a drawn from [1, 25] and
    intercept b drawn from [a*50, 25*50]."""
    global COEF
    slope = random.randint(1, 25)
    intercept = random.randint(slope * 50, 25 * 50)
    COEF = [slope, intercept]
def price(unit):
    """Return the demand price at the given quantity, using the global COEF."""
    slope, intercept = COEF
    return intercept - slope * unit
def graph_price():
    """Plot the linear demand curve price(q) over quantities 1..50."""
    quantities = np.linspace(1, 50, 50)
    plt.plot(quantities, price(quantities))
    plt.show()
def elasticity(d1, d2):
    """Return the absolute elasticity of demand between quantities d1 and d2.

    Computed as |(% change in demand) / (% change in price)|, using d1 and
    price(d1) as the bases for the percentage changes."""
    base_price = price(d1)
    pct_price = (price(d2) - base_price) / base_price
    pct_demand = (d2 - d1) / d1
    return abs(pct_demand / pct_price)
def simulate():
    """Run one simulation: draw fresh coefficients, average the elasticity
    over every ordered pair of distinct quantities, and record the mean."""
    global AVGS, COEF, UNIT_RANGE
    generate_coefficients()
    elasticities = [elasticity(i, j)
                    for i in UNIT_RANGE
                    for j in UNIT_RANGE
                    if i != j]
    mu = np.mean(elasticities)
    print(COEF, mu)
    AVGS.append(mu)
def init():
    """Run SIM independent simulations, accumulating results in AVGS."""
    for _ in range(SIM):
        simulate()
# Run the experiment, report the number of simulations, and show the
# distribution of per-simulation mean elasticities.
init()
print(SIM)
plt.hist(AVGS)
plt.show()
| gpl-2.0 |
rajiteh/taiga-back | taiga/base/storage.py | 17 | 1413 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.core.files import storage
import django_sites as sites
class FileSystemStorage(storage.FileSystemStorage):
    """FileSystemStorage that rewrites a site-relative MEDIA_URL into an
    absolute URL built from the current django-sites site."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # An already-absolute MEDIA_URL needs no rewriting.
        if not settings.MEDIA_URL.startswith("/"):
            return
        site = sites.get_current()
        scheme_prefix = "{0}:".format(site.scheme) if site.scheme else ""
        self.base_url = "{scheme}//{domain}{url}".format(
            scheme=scheme_prefix, domain=site.domain, url=settings.MEDIA_URL)
| agpl-3.0 |
jasonwee/asus-rt-n14uhp-mrtg | tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/site-packages/django/contrib/gis/feeds.py | 336 | 5978 | from __future__ import unicode_literals
from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin(object):
    """
    This mixin provides the necessary routines for SyndicationFeed subclasses
    to produce simple GeoRSS or W3C Geo elements.
    """

    def georss_coords(self, coords):
        """
        In GeoRSS coordinate pairs are ordered by lat/lon and separated by
        a single white space. Given a tuple of coordinates, this will return
        a unicode GeoRSS representation.
        """
        return ' '.join('%f %f' % (pair[1], pair[0]) for pair in coords)

    def add_georss_point(self, handler, coords, w3c_geo=False):
        """
        Adds a GeoRSS point with the given coords using the given handler.
        Handles the differences between simple GeoRSS and the more popular
        W3C Geo specification.
        """
        if not w3c_geo:
            handler.addQuickElement('georss:point', self.georss_coords((coords,)))
            return
        # W3C Geo uses separate lat/lon elements instead of one pair.
        lon, lat = coords[:2]
        handler.addQuickElement('geo:lat', '%f' % lat)
        handler.addQuickElement('geo:lon', '%f' % lon)

    def add_georss_element(self, handler, item, w3c_geo=False):
        """
        This routine adds a GeoRSS XML element using the given item and handler.
        """
        geom = item.get('geometry')
        if geom is None:
            return
        if isinstance(geom, (list, tuple)):
            # A bare coordinate sequence was passed in: either a point or a box.
            box_coords = None
            if isinstance(geom[0], (list, tuple)):
                # Box: ( (X0, Y0), (X1, Y1) )
                if len(geom) != 2:
                    raise ValueError('Only should be two sets of coordinates.')
                box_coords = geom
            elif len(geom) == 2:
                # Point: (X, Y)
                self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
            elif len(geom) == 4:
                # Box: (X0, Y0, X1, Y1)
                box_coords = (geom[:2], geom[2:])
            else:
                raise ValueError('Only should be 2 or 4 numeric elements.')
            # If a GeoRSS box was given via tuple.
            if box_coords is not None:
                if w3c_geo:
                    raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
                handler.addQuickElement('georss:box', self.georss_coords(box_coords))
        else:
            # A Geometry object: dispatch on its lower-cased geometry type.
            gtype = str(geom.geom_type).lower()
            if gtype == 'point':
                self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
                return
            if w3c_geo:
                raise ValueError('W3C Geo only supports Point geometries.')
            # For formatting consistent w/the GeoRSS simple standard:
            # http://georss.org/1.0#simple
            if gtype in ('linestring', 'linearring'):
                handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
            elif gtype == 'polygon':
                # Only support the exterior ring.
                handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
            else:
                raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
# ### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
    """RSS 2.01 feed generator that also emits simple GeoRSS elements."""

    def rss_attributes(self):
        # Declare the georss namespace on the root element.
        attrs = super(GeoRSSFeed, self).rss_attributes()
        attrs['xmlns:georss'] = 'http://www.georss.org/georss'
        return attrs

    def add_item_elements(self, handler, item):
        # Emit the standard item elements, then the item's geometry (if any).
        super(GeoRSSFeed, self).add_item_elements(handler, item)
        self.add_georss_element(handler, item)

    def add_root_elements(self, handler):
        # The feed itself may also carry a geometry.
        super(GeoRSSFeed, self).add_root_elements(handler)
        self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
    """Atom 1.0 feed generator that also emits simple GeoRSS elements."""

    def root_attributes(self):
        # Declare the georss namespace on the root element.
        attrs = super(GeoAtom1Feed, self).root_attributes()
        attrs['xmlns:georss'] = 'http://www.georss.org/georss'
        return attrs

    def add_item_elements(self, handler, item):
        # Emit the standard item elements, then the item's geometry (if any).
        super(GeoAtom1Feed, self).add_item_elements(handler, item)
        self.add_georss_element(handler, item)

    def add_root_elements(self, handler):
        # The feed itself may also carry a geometry.
        super(GeoAtom1Feed, self).add_root_elements(handler)
        self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
    """RSS 2.01 feed generator that emits W3C Geo elements (points only)."""

    def rss_attributes(self):
        # Declare the W3C Geo namespace on the root element.
        attrs = super(W3CGeoFeed, self).rss_attributes()
        attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
        return attrs

    def add_item_elements(self, handler, item):
        # w3c_geo=True selects geo:lat/geo:lon output in the mixin.
        super(W3CGeoFeed, self).add_item_elements(handler, item)
        self.add_georss_element(handler, item, w3c_geo=True)

    def add_root_elements(self, handler):
        super(W3CGeoFeed, self).add_root_elements(handler)
        self.add_georss_element(handler, self.feed, w3c_geo=True)
# ### Feed subclass ###
class Feed(BaseFeed):
    """
    This is a subclass of the `Feed` from `django.contrib.syndication`.
    This allows users to define a `geometry(obj)` and/or `item_geometry(item)`
    methods on their own subclasses so that geo-referenced information may be
    placed in the feed.
    """
    feed_type = GeoRSSFeed

    def feed_extra_kwargs(self, obj):
        # `self.__get_dynamic_attr` name-mangles to `_Feed__get_dynamic_attr`,
        # which resolves to the base class's private helper because the base
        # class is also named `Feed` (imported here as BaseFeed).
        return {'geometry': self.__get_dynamic_attr('geometry', obj)}

    def item_extra_kwargs(self, item):
        return {'geometry': self.__get_dynamic_attr('item_geometry', item)}
| apache-2.0 |
tdsimao/tt | django/db/models/fields/__init__.py | 33 | 43560 | import datetime
import decimal
import re
import time
import math
from itertools import tee
import django.utils.copycompat as copy
from django.db import connection
from django.db.models.fields.subclassing import LegacyConnection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils import datetime_safe
class NOT_PROVIDED:
    # Sentinel used as the default for Field's `default` argument, so that
    # None itself remains a usable explicit default value.
    pass
# The values to use for "blank" in SelectFields. Will be appended to the start of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
    """Raised when a lookup for a named model field fails (raisers are
    outside this module chunk)."""
    pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
    """Base class for all field types"""
    __metaclass__ = LegacyConnection

    # Designates whether empty strings fundamentally are allowed at the
    # database level.
    empty_strings_allowed = True

    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that Django implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1

    default_validators = [] # Default set of validators
    default_error_messages = {
        'invalid_choice': _(u'Value %r is not a valid choice.'),
        'null': _(u'This field cannot be null.'),
        'blank': _(u'This field cannot be blank.'),
    }

    # Generic field type description, usually overridden by subclasses
    def _description(self):
        return _(u'Field of type: %(field_type)s') % {
            'field_type': self.__class__.__name__
        }
    description = property(_description)

    def __init__(self, verbose_name=None, name=None, primary_key=False,
            max_length=None, unique=False, blank=False, null=False,
            db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
            serialize=True, unique_for_date=None, unique_for_month=None,
            unique_for_year=None, choices=None, help_text='', db_column=None,
            db_tablespace=None, auto_created=False, validators=[],
            error_messages=None):
        """Store field options and assign this field its creation order.

        NOTE(review): the mutable default `validators=[]` is never mutated in
        place (self.validators builds a new list below), so it is safe here.
        """
        self.name = name
        self.verbose_name = verbose_name
        self.primary_key = primary_key
        self.max_length, self._unique = max_length, unique
        self.blank, self.null = blank, null
        # Oracle treats the empty string ('') as null, so coerce the null
        # option whenever '' is a possible value.
        if self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls:
            self.null = True
        self.rel = rel
        self.default = default
        self.editable = editable
        self.serialize = serialize
        self.unique_for_date, self.unique_for_month = unique_for_date, unique_for_month
        self.unique_for_year = unique_for_year
        self._choices = choices or []
        self.help_text = help_text
        self.db_column = db_column
        self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
        self.auto_created = auto_created

        # Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
        self.db_index = db_index

        # Adjust the appropriate creation counter, and save our local copy.
        if auto_created:
            self.creation_counter = Field.auto_creation_counter
            Field.auto_creation_counter -= 1
        else:
            self.creation_counter = Field.creation_counter
            Field.creation_counter += 1

        self.validators = self.default_validators + validators

        # Merge default_error_messages dicts from the whole MRO (base classes
        # first) so subclass messages override, then apply per-instance ones.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages

    def __cmp__(self, other):
        # This is needed because bisect does not take a comparison function.
        return cmp(self.creation_counter, other.creation_counter)

    def __deepcopy__(self, memodict):
        # We don't have to deepcopy very much here, since most things are not
        # intended to be altered after initial creation.
        obj = copy.copy(self)
        if self.rel:
            obj.rel = copy.copy(self.rel)
        memodict[id(self)] = obj
        return obj

    def to_python(self, value):
        """
        Converts the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value. Subclasses should override this.
        """
        return value

    def run_validators(self, value):
        """Run all attached validators, collecting errors into a single
        ValidationError; empty values are skipped."""
        if value in validators.EMPTY_VALUES:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except exceptions.ValidationError, e:
                # Prefer this field's own message for the validator's code.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    message = self.error_messages[e.code]
                    if e.params:
                        message = message % e.params
                    errors.append(message)
                else:
                    errors.extend(e.messages)
        if errors:
            raise exceptions.ValidationError(errors)

    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self._choices and value:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            raise exceptions.ValidationError(self.error_messages['invalid_choice'] % value)

        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'])

        if not self.blank and value in validators.EMPTY_VALUES:
            raise exceptions.ValidationError(self.error_messages['blank'])

    def clean(self, value, model_instance):
        """
        Convert the value's type and run validation. Validation errors from to_python
        and validate are propagated. The correct value is returned if no error is
        raised.
        """
        value = self.to_python(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return value

    def db_type(self, connection):
        """
        Returns the database column data type for this field, for the provided
        connection.
        """
        # The default implementation of this method looks at the
        # backend-specific DATA_TYPES dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # a custom field might be represented by a TEXT column type, which is the
        # same as the TextField Django field type, which means the custom field's
        # get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.creation.data_types[self.get_internal_type()] % data
        except KeyError:
            return None

    def unique(self):
        # A primary key is implicitly unique.
        return self._unique or self.primary_key
    unique = property(unique)

    def set_attributes_from_name(self, name):
        """Derive name, attname, column and a default verbose_name from the
        attribute name the field was declared under."""
        self.name = name
        self.attname, self.column = self.get_attname_column()
        if self.verbose_name is None and name:
            self.verbose_name = name.replace('_', ' ')

    def contribute_to_class(self, cls, name):
        """Register this field on the model class `cls` under `name`."""
        self.set_attributes_from_name(name)
        self.model = cls
        cls._meta.add_field(self)
        if self.choices:
            setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self))

    def get_attname(self):
        """Return the model attribute name used to store this field's value."""
        return self.name

    def get_attname_column(self):
        """Return the (attname, column) pair; column honors db_column."""
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column

    def get_cache_name(self):
        """Return the attribute name used to cache related-object lookups."""
        return '_%s_cache' % self.name

    def get_internal_type(self):
        """Return the built-in field-type name used for db_type() lookups."""
        return self.__class__.__name__

    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        return getattr(model_instance, self.attname)

    def get_prep_value(self, value):
        "Perform preliminary non-db specific value checks and conversions."
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        """Returns field's value prepared for interacting with the database
        backend.

        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
        """
        if not prepared:
            value = self.get_prep_value(value)
        return value

    def get_db_prep_save(self, value, connection):
        "Returns field's value prepared for saving into a database."
        return self.get_db_prep_value(value, connection=connection, prepared=False)

    def get_prep_lookup(self, lookup_type, value):
        "Perform preliminary non-db specific lookup checks and conversions"
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()

        if lookup_type in (
                'regex', 'iregex', 'month', 'day', 'week_day', 'search',
                'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
                'endswith', 'iendswith', 'isnull'
            ):
            return value
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return self.get_prep_value(value)
        elif lookup_type in ('range', 'in'):
            return [self.get_prep_value(v) for v in value]
        elif lookup_type == 'year':
            try:
                return int(value)
            except ValueError:
                raise ValueError("The __year lookup type requires an integer argument")

        raise TypeError("Field has invalid lookup: %s" % lookup_type)

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        "Returns field's value prepared for database lookup."
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)

        if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'):
            return [value]
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return [self.get_db_prep_value(value, connection=connection, prepared=prepared)]
        elif lookup_type in ('range', 'in'):
            return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value]
        elif lookup_type in ('contains', 'icontains'):
            return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'iexact':
            return [connection.ops.prep_for_iexact_query(value)]
        elif lookup_type in ('startswith', 'istartswith'):
            return ["%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type in ('endswith', 'iendswith'):
            return ["%%%s" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'isnull':
            return []
        elif lookup_type == 'year':
            if self.get_internal_type() == 'DateField':
                return connection.ops.year_lookup_bounds_for_date_field(value)
            else:
                return connection.ops.year_lookup_bounds(value)

    def has_default(self):
        "Returns a boolean of whether this field has a default value."
        return self.default is not NOT_PROVIDED

    def get_default(self):
        "Returns the default value for this field."
        if self.has_default():
            if callable(self.default):
                return self.default()
            return force_unicode(self.default, strings_only=True)
        if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
            return None
        return ""

    def get_validator_unique_lookup_type(self):
        return '%s__exact' % self.name

    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        """Returns choices with a default blank choices included, for use
        as SelectField choices for this field."""
        first_choice = include_blank and blank_choice or []
        if self.choices:
            return first_choice + list(self.choices)
        rel_model = self.rel.to
        if hasattr(self.rel, 'get_related_field'):
            lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        else:
            lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        return first_choice + lst

    def get_choices_default(self):
        """Alias for get_choices() with default arguments."""
        return self.get_choices()

    def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        "Returns flattened choices with a default blank choice included."
        first_choice = include_blank and blank_choice or []
        return first_choice + list(self.flatchoices)

    def _get_val_from_obj(self, obj):
        """Return this field's value on obj, or the field default when obj is
        None."""
        if obj is not None:
            return getattr(obj, self.attname)
        else:
            return self.get_default()

    def value_to_string(self, obj):
        """
        Returns a string value of this field from the passed obj.
        This is used by the serialization framework.
        """
        return smart_unicode(self._get_val_from_obj(obj))

    def bind(self, fieldmapping, original, bound_field_class):
        """Wrap this field in the given bound-field class."""
        return bound_field_class(self, fieldmapping, original)

    def _get_choices(self):
        # If choices is an iterator, tee it so it can be consumed repeatedly.
        if hasattr(self._choices, 'next'):
            choices, self._choices = tee(self._choices)
            return choices
        else:
            return self._choices
    choices = property(_get_choices)

    def _get_flatchoices(self):
        """Flattened version of choices tuple."""
        flat = []
        for choice, value in self.choices:
            if isinstance(value, (list, tuple)):
                # Optgroup: splice its inner (value, label) pairs in directly.
                flat.extend(value)
            else:
                flat.append((choice,value))
        return flat
    flatchoices = property(_get_flatchoices)

    def save_form_data(self, instance, data):
        """Assign a cleaned form value onto the model instance."""
        setattr(instance, self.name, data)

    def formfield(self, form_class=forms.CharField, **kwargs):
        "Returns a django.forms.Field instance for this database Field."
        defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            # Fields with choices get special treatment.
            include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in kwargs.keys():
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial'):
                    del kwargs[k]
        defaults.update(kwargs)
        return form_class(**defaults)

    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname)
class AutoField(Field):
    """Automatically-managed integer field; must be the model's primary key."""
    description = _("Integer")

    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be an integer.'),
    }

    def __init__(self, *args, **kwargs):
        assert kwargs.get('primary_key', False) is True, "%ss must have primary_key=True." % self.__class__.__name__
        kwargs['blank'] = True
        Field.__init__(self, *args, **kwargs)

    def get_internal_type(self):
        return "AutoField"

    def to_python(self, value):
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def validate(self, value, model_instance):
        # The database assigns the value, so there is nothing to validate.
        pass

    def get_prep_value(self, value):
        if value is None:
            return None
        return int(value)

    def contribute_to_class(self, cls, name):
        assert not cls._meta.has_auto_field, "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name)
        cls._meta.has_auto_field = True
        cls._meta.auto_field = self

    def formfield(self, **kwargs):
        # AutoFields are never user-editable, so no form field is produced.
        return None
class BooleanField(Field):
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be either True or False.'),
    }
    description = _("Boolean (Either True or False)")

    def __init__(self, *args, **kwargs):
        kwargs['blank'] = True
        # Non-nullable booleans default to False unless told otherwise.
        if 'default' not in kwargs and not kwargs.get('null'):
            kwargs['default'] = False
        Field.__init__(self, *args, **kwargs)

    def get_internal_type(self):
        return "BooleanField"

    def to_python(self, value):
        if value in (True, False):
            # if value is 1 or 0 than it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(self.error_messages['invalid'])

    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = self.null or not (self.has_default() or 'initial' in kwargs)
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
    """String field, validated against ``max_length``."""
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        self.validators.append(validators.MaxLengthValidator(self.max_length))

    def get_internal_type(self):
        return "CharField"

    def to_python(self, value):
        if value is None or isinstance(value, basestring):
            return value
        return smart_unicode(value)

    def get_prep_value(self, value):
        return self.to_python(value)

    def formfield(self, **kwargs):
        # Passing max_length means the value's length is validated twice
        # (model + form); accepted so the form field can hand the limit
        # on to its widget.
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
    """CharField holding a comma-separated list of integers."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")

    def formfield(self, **kwargs):
        defaults = {
            'error_messages': {
                'invalid': _(u'Enter only digits separated by commas.'),
            }
        }
        defaults.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**defaults)
# Matches 'YYYY-M-D' style dates; month and day may be one or two digits.
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
class DateField(Field):
    """A date field (no time component).

    ``auto_now`` replaces the value with today's date on every save;
    ``auto_now_add`` does so only when the object is first created.
    Either flag makes the field non-editable and blank-allowed.
    """
    description = _("Date (without time)")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Enter a valid date in YYYY-MM-DD format.'),
        'invalid_date': _('Invalid date: %s'),
    }

    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        #HACKs : auto_now_add/auto_now should be done as a default or a pre_save.
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        Field.__init__(self, verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "DateField"

    def to_python(self, value):
        """Convert ``value`` to a ``datetime.date`` (or None).

        Accepts date/datetime objects or a 'YYYY-MM-DD' string; raises
        ValidationError for anything else.
        """
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            # Drop the time portion.
            return value.date()
        if isinstance(value, datetime.date):
            return value

        if not ansi_date_re.search(value):
            raise exceptions.ValidationError(self.error_messages['invalid'])
        # Now that we have the date string in YYYY-MM-DD format, check to make
        # sure it's a valid date.
        # We could use time.strptime here and catch errors, but datetime.date
        # produces much friendlier error messages.
        year, month, day = map(int, value.split('-'))
        try:
            return datetime.date(year, month, day)
        except ValueError, e:
            msg = self.error_messages['invalid_date'] % _(str(e))
            raise exceptions.ValidationError(msg)

    def pre_save(self, model_instance, add):
        # Auto-populate with today's date when auto_now/auto_now_add applies.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)

    def contribute_to_class(self, cls, name):
        super(DateField,self).contribute_to_class(cls, name)
        if not self.null:
            # Navigation helpers, e.g. instance.get_next_by_<field>().
            setattr(cls, 'get_next_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True))
            setattr(cls, 'get_previous_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False))

    def get_prep_lookup(self, lookup_type, value):
        # For "__month", "__day", and "__week_day" lookups, convert the value
        # to an int so the database backend always sees a consistent type.
        if lookup_type in ('month', 'day', 'week_day'):
            return int(value)
        return super(DateField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_date(value)

    def value_to_string(self, obj):
        # Serialize as ISO 'YYYY-MM-DD'; None becomes the empty string.
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            data = datetime_safe.new_date(val).strftime("%Y-%m-%d")
        return data

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """A date-and-time field; inherits auto_now handling from DateField."""
    default_error_messages = {
        'invalid': _(u'Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'),
    }
    description = _("Date (with time)")

    def get_internal_type(self):
        return "DateTimeField"

    def to_python(self, value):
        """Convert ``value`` to a ``datetime.datetime`` (or None).

        Accepts datetime objects, date objects (midnight assumed), or
        strings of the form 'YYYY-MM-DD[ HH:MM[:SS[.uuuuuu]]]'.
        """
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day)

        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        # Progressively relax the format: with seconds, without seconds,
        # finally date-only.
        try: # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
                                     **kwargs)
        except ValueError:
            try: # Try without seconds.
                return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
                                         **kwargs)
            except ValueError: # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
                                             **kwargs)
                except ValueError:
                    raise exceptions.ValidationError(self.error_messages['invalid'])

    def pre_save(self, model_instance, add):
        # Auto-populate with the current datetime when requested.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.datetime.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)

    def get_prep_value(self, value):
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_datetime(value)

    def value_to_string(self, obj):
        # Serialize as 'YYYY-MM-DD HH:MM:SS'; None becomes the empty string.
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            d = datetime_safe.new_datetime(val)
            data = d.strftime('%Y-%m-%d %H:%M:%S')
        return data

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
    """Fixed-precision decimal number field."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be a decimal number.'),
    }
    description = _("Decimal number")

    def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        Field.__init__(self, verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "DecimalField"

    def to_python(self, value):
        if value is None:
            return value
        try:
            converted = decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(self.error_messages['invalid'])
        return converted

    def _format(self, value):
        # Strings and None pass through untouched; numbers get formatted.
        if value is None or isinstance(value, basestring):
            return value
        return self.format_number(value)

    def format_number(self, value):
        """Format ``value`` with the configured digits/decimal places.

        The implementation moved to django.db.backends.util; this wrapper
        survives because the Oracle backend (django.db.backends.oracle.query)
        and external code still call it.
        """
        from django.db.backends import util
        return util.format_number(value, self.max_digits, self.decimal_places)

    def get_db_prep_save(self, value, connection):
        return connection.ops.value_to_db_decimal(self.to_python(value),
                self.max_digits, self.decimal_places)

    def get_prep_value(self, value):
        return self.to_python(value)

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.DecimalField,
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
    """CharField specialized for e-mail addresses (default max_length 75)."""
    default_validators = [validators.validate_email]
    description = _("E-mail address")

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 75)
        CharField.__init__(self, *args, **kwargs)

    def formfield(self, **kwargs):
        # As with CharField, e-mail validation runs twice (model + form).
        defaults = {'form_class': forms.EmailField}
        defaults.update(kwargs)
        return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
    """Stores a filesystem path selected from the files under ``path``."""
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        kwargs['max_length'] = kwargs.get('max_length', 100)
        Field.__init__(self, verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "FilePathField"

    def formfield(self, **kwargs):
        # Hand our path/match/recursive configuration to the form field.
        defaults = {
            'form_class': forms.FilePathField,
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
        }
        defaults.update(kwargs)
        return super(FilePathField, self).formfield(**defaults)
class FloatField(Field):
    """Floating point number field."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be a float."),
    }
    description = _("Floating point number")

    def get_internal_type(self):
        return "FloatField"

    def get_prep_value(self, value):
        return None if value is None else float(value)

    def to_python(self, value):
        if value is None:
            return value
        try:
            converted = float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])
        return converted

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FloatField}
        defaults.update(kwargs)
        return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
    """Integer field."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be an integer."),
    }
    description = _("Integer")

    def get_internal_type(self):
        return "IntegerField"

    def get_prep_value(self, value):
        return None if value is None else int(value)

    def get_prep_lookup(self, lookup_type, value):
        # Round float bounds up for gte/lt lookups against an integer column.
        if isinstance(value, float) and lookup_type in ('gte', 'lt'):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)

    def to_python(self, value):
        if value is None:
            return value
        try:
            converted = int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])
        return converted

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IntegerField}
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
    """64-bit integer field."""
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    MAX_BIGINT = 9223372036854775807  # 2**63 - 1

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        # Constrain the form field to the signed 64-bit range.
        limits = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
                  'max_value': BigIntegerField.MAX_BIGINT}
        limits.update(kwargs)
        return super(BigIntegerField, self).formfield(**limits)
class IPAddressField(Field):
    """IP address stored as text (up to 15 characters)."""
    empty_strings_allowed = False
    description = _("IP address")

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = 15
        Field.__init__(self, *args, **kwargs)

    def get_internal_type(self):
        return "IPAddressField"

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IPAddressField}
        defaults.update(kwargs)
        return super(IPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
    """Boolean field that also admits None/NULL."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")

    def __init__(self, *args, **kwargs):
        kwargs['null'] = True
        kwargs['blank'] = True
        Field.__init__(self, *args, **kwargs)

    def get_internal_type(self):
        return "NullBooleanField"

    def to_python(self, value):
        if value is None or value == 'None':
            return None
        if value in (True, False):
            # Normalize 1/0 to a genuine bool.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(self.error_messages['invalid'])

    def get_prep_lookup(self, lookup_type, value):
        # Web requests (e.g. the admin) send '1'/'0' scalars for booleans;
        # coerce them here.  Lists must already contain proper bools.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        return None if value is None else bool(value)

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text}
        defaults.update(kwargs)
        return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
    """Integer restricted to non-negative values at the form level."""
    description = _("Integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def formfield(self, **kwargs):
        bounds = {'min_value': 0}
        bounds.update(kwargs)
        return super(PositiveIntegerField, self).formfield(**bounds)
class PositiveSmallIntegerField(IntegerField):
    """Small integer restricted to non-negative values at the form level."""
    description = _("Integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def formfield(self, **kwargs):
        bounds = {'min_value': 0}
        bounds.update(kwargs)
        return super(PositiveSmallIntegerField, self).formfield(**bounds)
class SlugField(CharField):
    """Short-label field (default max_length 50), indexed by default."""
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 50)
        # Index slugs unless the caller explicitly opted out.
        kwargs.setdefault('db_index', True)
        super(SlugField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "SlugField"

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.SlugField}
        defaults.update(kwargs)
        return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
    """Small integer; differs from IntegerField only in the column type."""
    description = _("Integer")

    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """Unbounded text field."""
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def get_prep_value(self, value):
        if value is None or isinstance(value, basestring):
            return value
        return smart_unicode(value)

    def formfield(self, **kwargs):
        defaults = {'widget': forms.Textarea}
        defaults.update(kwargs)
        return super(TextField, self).formfield(**defaults)
class TimeField(Field):
    """A time-of-day field.

    ``auto_now``/``auto_now_add`` mirror DateField's behavior (update on
    every save vs. only on creation) and make the field non-editable.
    """
    description = _("Time")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'),
    }

    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            kwargs['editable'] = False
        Field.__init__(self, verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "TimeField"

    def to_python(self, value):
        """Convert ``value`` to a ``datetime.time`` (or None).

        Accepts time/datetime objects or 'HH:MM[:SS[.uuuuuu]]' strings.
        """
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()

        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        # Progressively relax the format: with seconds, then without.
        try: # Seconds are optional, so try converting seconds first.
            return datetime.time(*time.strptime(value, '%H:%M:%S')[3:6],
                                 **kwargs)
        except ValueError:
            try: # Try without seconds.
                return datetime.time(*time.strptime(value, '%H:%M')[3:5],
                                     **kwargs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])

    def pre_save(self, model_instance, add):
        # Auto-populate with the current time when requested.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)

    def get_prep_value(self, value):
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_time(value)

    def value_to_string(self, obj):
        # Serialize as 'HH:MM:SS'; None becomes the empty string.
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            data = val.strftime("%H:%M:%S")
        return data

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
    """CharField for URLs (default max_length 200, optional existence check)."""
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, verify_exists=False, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 200)
        CharField.__init__(self, verbose_name, name, **kwargs)
        self.validators.append(validators.URLValidator(verify_exists=verify_exists))

    def formfield(self, **kwargs):
        # As with CharField, URL validation runs twice (model + form).
        defaults = {'form_class': forms.URLField}
        defaults.update(kwargs)
        return super(URLField, self).formfield(**defaults)
class XMLField(TextField):
    """Deprecated XML text field; warns on instantiation."""
    description = _("XML text")

    def __init__(self, verbose_name=None, name=None, schema_path=None, **kwargs):
        import warnings
        warnings.warn("Use of XMLField has been deprecated; please use TextField instead.",
                      DeprecationWarning)
        self.schema_path = schema_path
        # NOTE: calls Field.__init__ directly, bypassing TextField.__init__.
        Field.__init__(self, verbose_name, name, **kwargs)
| gpl-2.0 |
Panos512/inspire-next | inspirehep/modules/records/receivers.py | 1 | 12132 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Pre-record receivers."""
from flask import current_app
from invenio_indexer.signals import before_record_index
from invenio_records.signals import (
before_record_insert,
before_record_update,
)
from inspirehep.utils.date import create_valid_date
from inspirehep.dojson.utils import get_recid_from_ref, classify_field
from inspirehep.dojson.utils import get_recid_from_ref
from inspirehep.utils.date import create_valid_date
from invenio_indexer.signals import before_record_index
import six
from .signals import after_record_enhanced
@before_record_index.connect
def enhance_record(sender, json, *args, **kwargs):
    """Runs all the record enhancers and fires the after_record_enhanced signals
    to allow receivers work with a fully populated record."""
    enhancers = (
        populate_inspire_subjects,
        populate_inspire_document_type,
        match_valid_experiments,
        dates_validator,
        add_recids_and_validate,
    )
    for enhancer in enhancers:
        enhancer(sender, json, *args, **kwargs)
    after_record_enhanced.send(json)
@before_record_insert.connect
@before_record_update.connect
def normalize_field_categories(sender, *args, **kwargs):
    """Normalize field_categories."""
    for position, category in enumerate(sender.get('field_categories', [])):
        # Entries carrying '_scheme'/'_term' (or already INSPIRE-schemed)
        # have been normalized before; leave them alone.
        if (category.get('scheme') == "INSPIRE" or
                '_scheme' in category or '_term' in category):
            continue
        term = category.get('term')
        inspire_term = classify_field(term)
        scheme = category.get('scheme')
        if isinstance(scheme, (list, tuple)):
            scheme = scheme[0]
        # Keep the original scheme/term under the underscored keys.
        replacement = {
            '_scheme': scheme,
            'scheme': 'INSPIRE' if inspire_term else None,
            '_term': term,
            'term': inspire_term,
        }
        source = category.get('source')
        if source:
            if 'automatically' in source:
                source = 'INSPIRE'
            replacement['source'] = source
        sender['field_categories'][position].update(replacement)
def populate_inspire_subjects(recid, json, *args, **kwargs):
    """
    Populate a json record before indexing it to elastic.
    Adds a field for faceting INSPIRE subjects
    """
    json['facet_inspire_subjects'] = [
        category['term']
        for category in json.get('field_categories', [])
        if category.get('scheme', '') == 'INSPIRE' and category.get('term')
    ]
def populate_inspire_document_type(recid, json, *args, **kwargs):
    """Populate ``facet_inspire_doc_type`` before indexing to Elasticsearch.

    Derives a coarse document type from the record's ``collections`` (the
    first matching primary collection wins).  If no collection matched and
    the publication info carries neither a start page nor an article id,
    the record is treated as a preprint.  "review" and "lectures"
    collections are appended as additional facet values.
    """
    # Maps a lowercased primary collection name to its facet label.
    primary_to_facet = {
        'published': 'peer reviewed',
        'thesis': 'thesis',
        'book': 'book',
        'bookchapter': 'book chapter',
        'proceedings': 'proceedings',
        'conferencepaper': 'conference paper',
        'note': 'note',
        'report': 'report',
        'activityreport': 'activity report',
    }
    inspire_doc_type = []
    for element in json.get('collections', []):
        primary = element.get('primary')
        if primary:
            facet = primary_to_facet.get(primary.lower())
            if facet:
                inspire_doc_type.append(facet)
                break
    if not inspire_doc_type:
        # Collect every key present in the publication info entries.
        complete_pub_info = []
        for field in json.get('publication_info', []):
            complete_pub_info.extend(field)
        # BUG FIX: the original tested "'artid' not in 'complete_pub_info'"
        # (a string literal), which is always true, so records with an
        # article id but no start page were wrongly facetted as preprints.
        if ('page_start' not in complete_pub_info and
                'artid' not in complete_pub_info):
            inspire_doc_type.append('preprint')
    inspire_doc_type.extend(
        s['primary'].lower() for s in json.get('collections', [])
        if s.get('primary') is not None and
        s['primary'].lower() in ('review', 'lectures'))
    json['facet_inspire_doc_type'] = inspire_doc_type
def match_valid_experiments(recid, json, *args, **kwargs):
    """Matches misspelled experiment names with valid experiments.

    Tries to match with valid experiments by matching lowercased and
    whitespace-free versions of known experiments.
    """
    experiments = json.get("accelerator_experiments")
    if not experiments:
        return
    # FIXME: These lists are temporary. We should have a list of experiment
    # names that is generated from the current state of our data.
    # Imported lazily so the list is only loaded when a record actually has
    # experiments; hoisted out of the loops (the original re-imported it for
    # every experiment entry).
    from .experiment_list import EXPERIMENTS_NAMES as experiments_list_original, experiments_list
    for exp in experiments:
        facet_experiments_list = []
        # NOTE: renamed from 'experiments' in the original, which shadowed
        # the outer variable being iterated.
        exp_names = exp.get("experiment")
        if exp_names:
            if not isinstance(exp_names, list):
                exp_names = [exp_names]
            for experiment in exp_names:
                # Normalize: lowercase and strip all whitespace.
                normalized = experiment.lower().replace(' ', '')
                try:
                    # Check if normalized form of experiment is in the list
                    # of valid experiments.
                    index = experiments_list.index(normalized)
                    facet_experiment = experiments_list_original[index]
                except ValueError:
                    # If the experiment cannot be matched it is considered
                    # valid as-is.
                    facet_experiment = exp.get("experiment")
                facet_experiments_list.append(facet_experiment)
        exp.update({"facet_experiment": [facet_experiments_list]})
def dates_validator(recid, json, *args, **kwargs):
    """Find and assign the correct dates in a record.

    For each known date field, replaces the stored value with its validated
    form and logs a warning when the stored value was malformed.
    """
    dates_to_check = ['opening_date', 'closing_date', 'deadline_date']
    for date_key in dates_to_check:
        if date_key in json:
            valid_date = create_valid_date(json[date_key])
            if valid_date != json[date_key]:
                # BUG FIX: the format string used {3} with only three
                # arguments (indexes 0-2), so logging a malformed date
                # raised IndexError instead of warning.
                current_app.logger.warning(
                    'MALFORMED: {0} value in {1}: {2}'.format(
                        date_key, recid, json[date_key]
                    )
                )
            json[date_key] = valid_date
def references_validator(recid, json, *args, **kwargs):
    """Find and assign the correct references in a record."""
    for reference in json.get('references', []):
        ref_recid = reference.get('recid')
        if ref_recid and not six.text_type(ref_recid).isdigit():
            # Non-numeric recid: log it and drop the field entirely.
            current_app.logger.warning(
                'MALFORMED: recid value found in references of {0}: {1}'.format(recid, ref_recid))
            del reference['recid']
def populate_recid_from_ref(recid, json, *args, **kwargs):
    """Extracts recids from all reference fields and adds them to ES.
    For every field that has as a value a reference object to another record,
    add a sibling after extracting the record id. e.g.
    {"record": {"$ref": "http://x/y/2}}
    is transformed to:
    {"record": {"$ref": "http://x/y/2},
    "recid": 2}
    Siblings are renamed using the following scheme:
    Remove "record" occurrences and append _recid without doubling or
    prepending underscores to the original name.
    For every known list of object references add a new list with the
    corresponding recids. e.g.
    {"records": [{"$ref": "http://x/y/1"}, {"$ref": "http://x/y/2"}]}
    is transformed to:
    {"records": [{"$ref": "http://x/y/1"}, {"$ref": "http://x/y/2"}]
    "recids": [1, 2]}
    """
    # Known list-valued reference fields and the name of the recid list
    # that mirrors each of them.
    list_ref_fields_translations = {
        'deleted_records': 'deleted_recids'
    }
    # Walk the whole JSON tree, adding recid siblings in place.
    # NOTE(review): the name is misspelled ("recusive"); kept as-is since
    # this is a documentation-only pass.
    def _recusive_find_refs(json_root):
        if isinstance(json_root, list):
            items = enumerate(json_root)
        elif isinstance(json_root, dict):
            # Note that items have to be generated before altering the dict.
            # In this case, iteritems might break during iteration.
            # NOTE(review): relies on Python 2 dict.items() returning a
            # list; under Python 3 items() is a live view and adding keys
            # below would raise RuntimeError — revisit on porting.
            items = json_root.items()
        else:
            items = []
        for key, value in items:
            if (isinstance(json_root, dict) and isinstance(value, dict) and
                    '$ref' in value):
                # Append '_recid' and remove 'record' from the key name.
                key_basename = key.replace('record', '').rstrip('_')
                new_key = '{}_recid'.format(key_basename).lstrip('_')
                json_root[new_key] = get_recid_from_ref(value)
            elif (isinstance(json_root, dict) and isinstance(value, list) and
                    key in list_ref_fields_translations):
                # Mirror the list of reference objects with a list of recids.
                new_list = [get_recid_from_ref(v) for v in value]
                new_key = list_ref_fields_translations[key]
                json_root[new_key] = new_list
            else:
                # Recurse into nested structures.
                _recusive_find_refs(value)
    _recusive_find_refs(json)
def add_recids_and_validate(recid, json, *args, **kwargs):
    """Ensure that recids are generated before being validated."""
    # Order matters: recids must exist before they can be validated.
    for step in (populate_recid_from_ref, references_validator):
        step(recid, json, *args, **kwargs)
# NOTE(review): this is an exact duplicate of the normalize_field_categories
# defined earlier in this module (the imports at the top of the file are
# duplicated as well).  The second definition rebinds the module-level name
# and registers the signal receivers again, so normalization may run twice
# per signal.  Consider removing one of the two copies.
@before_record_insert.connect
@before_record_update.connect
def normalize_field_categories(sender, *args, **kwargs):
    """Normalize field_categories."""
    for idx, field in enumerate(sender.get('field_categories', [])):
        # Entries already carrying '_scheme'/'_term' (or INSPIRE-schemed)
        # were normalized before; skip them.
        if field.get('scheme') == "INSPIRE" or '_scheme' in field or '_term' in field:
            # Already normalized form
            continue
        original_term = field.get('term')
        normalized_term = classify_field(original_term)
        scheme = 'INSPIRE' if normalized_term else None
        original_scheme = field.get('scheme')
        if isinstance(original_scheme, (list, tuple)):
            original_scheme = original_scheme[0]
        # Preserve the originals under the underscored keys.
        updated_field = {
            '_scheme': original_scheme,
            'scheme': scheme,
            '_term': original_term,
            'term': normalized_term,
        }
        source = field.get('source')
        if source:
            if 'automatically' in source:
                source = 'INSPIRE'
            updated_field['source'] = source
        sender['field_categories'][idx].update(updated_field)
| gpl-2.0 |
vitan/hue | desktop/core/src/desktop/lib/conf_test.py | 38 | 7692 | #!/usr/bin/python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configobj
from cStringIO import StringIO
import logging
import re
from desktop.lib.conf import *
from nose.tools import assert_true, assert_false, assert_equals, assert_raises
def my_dynamic_default():
    """
    Calculates a sum
    """
    total = 3 + 4
    return total
class TestConfig(object):
"""Unit tests for the configuration module."""
# Some test configurations to load
CONF_ONE="""
foo = 123
list=a,b,c
"""
CONF_TWO = """
req = 345
hello.greeting = "hello world"
[clusters]
[[clustera]]
host="localhost"
[[clusterb]]
host="philipscomputer"
"""
@classmethod
def setup_class(cls):
    # Build one ConfigSection exercising every Config flavour under test:
    # plain typed (FOO), defaulted (BAR), required present/absent (REQ,
    # REQ_NOT_THERE), optional absent (OPT_NOT_THERE), private, dynamic
    # default, a private nested section, a list-typed value, and an
    # UnspecifiedConfigSection keyed by cluster name.
    logging.basicConfig(level=logging.DEBUG)
    cls.conf = ConfigSection(
        members=dict(
            FOO = Config("foo",
                         help="A vanilla configuration param",
                         type=int),
            BAR = Config("bar", default=456,
                         help="Config with default",
                         type=int),
            REQ = Config("req", required=True,
                         help="A required config",
                         type=int),
            OPT_NOT_THERE = Config("blahblah"),
            REQ_NOT_THERE = Config("blah", required=True, help="Another required"),
            PRIVATE_CONFIG= Config("dontseeme",private=True),
            DYNAMIC_DEF = Config("dynamic_default", dynamic_default=my_dynamic_default,
                                 type=int),
            SOME_SECTION = ConfigSection(
                "some_section",
                private=True,
                members=dict(BAZ = Config("baz", default="baz_default"))),
            LIST = Config("list", type=list),
            CLUSTERS = UnspecifiedConfigSection(
                "clusters",
                help="Details about your Hadoop cluster(s)",
                each=ConfigSection(
                    help="Details about a cluster - one section for each.",
                    members=dict(HOST = Config("host", help="Hostname for the NN",
                                               required=True),
                                 PORT = Config("port", help="Thrift port for the NN",
                                               type=int, default=10090))))))
    # Bind the section against both sample config files; later files
    # (CONF_TWO) are merged over earlier ones (CONF_ONE).
    cls.conf = cls.conf.bind(
        load_confs([configobj.ConfigObj(infile=StringIO(cls.CONF_ONE)),
                    configobj.ConfigObj(infile=StringIO(cls.CONF_TWO))]),
        prefix='')
def test_type_safety(self):
    # Invalid `type` arguments and defaults contradicting the declared
    # type must be rejected at construction time.
    assert_raises(ValueError, Config, key="test_type", type=42)
    assert_raises(ValueError, Config, key="test_type", type=str, default=42)
    assert_raises(ValueError, Config, key="test_type", default=False)
    # bool-typed configs transparently use the coerce_bool helper.
    truthy_conf = Config("bool_conf", type=bool)
    assert_true(truthy_conf.type == coerce_bool)
def test_dynamic_default(self):
    # my_dynamic_default computes 3 + 4.
    assert_equals(self.conf.DYNAMIC_DEF.get(), 7)
def test_load(self):
    conf = self.conf
    assert_equals(conf.FOO.get(), 123)            # from CONF_ONE
    assert_equals(conf.BAR.get(), 456)            # falls back to default
    assert_equals(conf.REQ.get(), 345)            # from CONF_TWO
    assert_equals(conf.OPT_NOT_THERE.get(), None) # optional, absent
    assert_raises(KeyError, conf.REQ_NOT_THERE.get)
def test_list_values(self):
    # 'list=a,b,c' in CONF_ONE parses into a three-element list.
    assert_equals(self.conf.LIST.get(), ["a", "b", "c"])
def test_sections(self):
assert_equals(2, len(self.conf.CLUSTERS))
assert_equals(['clustera', 'clusterb'], sorted(self.conf.CLUSTERS.keys()))
assert_true("clustera" in self.conf.CLUSTERS)
assert_equals("localhost", self.conf.CLUSTERS['clustera'].HOST.get())
assert_equals(10090, self.conf.CLUSTERS['clustera'].PORT.get())
def test_full_key_name(self):
assert_equals(self.conf.REQ.get_fully_qualifying_key(), 'req')
assert_equals(self.conf.CLUSTERS.get_fully_qualifying_key(), 'clusters')
assert_equals(self.conf.CLUSTERS['clustera'].get_fully_qualifying_key(),
'clusters.clustera')
assert_equals(self.conf.CLUSTERS['clustera'].HOST.get_fully_qualifying_key(),
'clusters.clustera.host')
def test_set_for_testing(self):
# Test base case
assert_equals(123, self.conf.FOO.get())
# Override with 456
close_foo = self.conf.FOO.set_for_testing(456)
try:
assert_equals(456, self.conf.FOO.get())
# Check nested overriding
close_foo2 = self.conf.FOO.set_for_testing(789)
try:
assert_equals(789, self.conf.FOO.get())
finally:
close_foo2()
# Check that we pop the stack appropriately.
assert_equals(456, self.conf.FOO.get())
# Check default values
close_foo3 = self.conf.FOO.set_for_testing(present=False)
try:
assert_equals(None, self.conf.FOO.get())
finally:
close_foo3()
finally:
close_foo()
# Check that it got set back correctly
assert_equals(123, self.conf.FOO.get())
# Test something inside an unspecified config setting with a default
close = self.conf.CLUSTERS['clustera'].PORT.set_for_testing(123)
try:
assert_equals(123, self.conf.CLUSTERS['clustera'].PORT.get())
finally:
close()
assert_equals(10090, self.conf.CLUSTERS['clustera'].PORT.get())
# Test something inside a config section that wasn't provided in conf file
assert_equals("baz_default", self.conf.SOME_SECTION.BAZ.get())
close = self.conf.SOME_SECTION.BAZ.set_for_testing("hello")
try:
assert_equals("hello", self.conf.SOME_SECTION.BAZ.get())
finally:
close()
assert_equals("baz_default", self.conf.SOME_SECTION.BAZ.get())
def test_coerce_bool(self):
assert_equals(False, coerce_bool(False))
assert_equals(False, coerce_bool("FaLsE"))
assert_equals(False, coerce_bool("no"))
assert_equals(False, coerce_bool("0"))
assert_equals(True, coerce_bool("TrUe"))
assert_equals(True, coerce_bool("YES"))
assert_equals(True, coerce_bool("1"))
assert_equals(True, coerce_bool(True))
assert_raises(Exception, coerce_bool, tuple("foo"))
def test_print_help(self):
out = StringIO()
self.conf.print_help(out=out, skip_header=True)
out = out.getvalue().strip()
assert_false("dontseeme" in out)
assert_equals(re.sub("^ (?m)", "", """
Key: bar (optional)
Default: 456
Config with default
Key: blah (required)
Another required
Key: blahblah (optional)
[no help text provided]
[clusters]
Details about your Hadoop cluster(s)
Consists of some number of sections like:
[<user specified name>]
Details about a cluster - one section for each.
Key: host (required)
Hostname for the NN
Key: port (optional)
Default: 10090
Thrift port for the NN
Key: dynamic_default (optional)
Dynamic default: Calculates a sum
[no help text provided]
Key: foo (optional)
A vanilla configuration param
Key: list (optional)
[no help text provided]
Key: req (required)
A required config
""").strip(), out)
| apache-2.0 |
zoufishanmehdi/Uncharted | node_modules/node-gyp/gyp/pylib/gyp/generator/xcode.py | 426 | 56534 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
# Name of the Xcode build setting that holds library search paths.
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
# Values gyp substitutes for generator variables when targeting Xcode.
# Consumed by gyp's input processing; each value is either a literal or an
# Xcode $(...) build-setting reference resolved at build time.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.dylib',
  # INTERMEDIATE_DIR is a place for targets to build up intermediate products.
  # It is specific to each build environment.  It is only guaranteed to exist
  # and be constant within the context of a project, corresponding to a single
  # input file.  Some build environments may allow their intermediate directory
  # to be shared on a wider scale, but this is not guaranteed.
  'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
  'OS': 'mac',
  'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
  'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
  'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
  'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
  'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
  'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
  'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
  'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
  'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
  # 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
  'ios_app_extension',
  'ios_watch_app',
  'ios_watchkit_extension',
  'mac_bundle',
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
  'mac_xctest_bundle',
  'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
  '$(SDKROOT)/usr/lib',
  '$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
  """Builds an XCConfigurationList with one XCBuildConfiguration per name.

  An empty name list yields a single 'Default' configuration.  The first
  name becomes the defaultConfigurationName.
  """
  config_list = gyp.xcodeproj_file.XCConfigurationList(
      {'buildConfigurations': []})
  if len(configuration_names) == 0:
    configuration_names = ['Default']
  for name in configuration_names:
    build_config = gyp.xcodeproj_file.XCBuildConfiguration({'name': name})
    config_list.AppendProperty('buildConfigurations', build_config)
  config_list.SetProperty('defaultConfigurationName', configuration_names[0])
  return config_list
class XcodeProject(object):
  """Wrapper around one generated .xcodeproj, backed by a PBXProject.

  Lifecycle: construct (creates the .xcodeproj directory on disk), add
  targets externally, then Finalize1, Finalize2, and Write in that order.
  """
  def __init__(self, gyp_path, path, build_file_dict):
    # gyp_path: path to the .gyp input file this project is generated from.
    # path: the .xcodeproj directory to create.
    # build_file_dict: the parsed gyp build file dictionary.
    self.gyp_path = gyp_path
    self.path = path
    self.project = gyp.xcodeproj_file.PBXProject(path=path)
    projectDirPath = gyp.common.RelativePath(
                         os.path.dirname(os.path.abspath(self.gyp_path)),
                         os.path.dirname(path) or '.')
    self.project.SetProperty('projectDirPath', projectDirPath)
    self.project_file = \
        gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
    self.build_file_dict = build_file_dict
    # TODO(mark): add destructor that cleans up self.path if created_dir is
    # True and things didn't complete successfully.  Or do something even
    # better with "try"?
    self.created_dir = False
    try:
      os.makedirs(self.path)
      self.created_dir = True
    except OSError, e:
      if e.errno != errno.EEXIST:
        raise
  def Finalize1(self, xcode_targets, serialize_all_tests):
    """First finalization pass (must run before Finalize2 on every project).

    Harmonizes build configuration names across targets, applies
    project-wide xcode_settings/config files, orders targets to match the
    input, creates per-target 'Run ...' runners for targets with 'run_as',
    and synthesizes the 'All' and 'Run All Tests' aggregate targets.
    """
    # Collect a list of all of the build configuration names used by the
    # various targets in the file.  It is very heavily advised to keep each
    # target in an entire project (even across multiple project files) using
    # the same set of configuration names.
    configurations = []
    for xct in self.project.GetProperty('targets'):
      xccl = xct.GetProperty('buildConfigurationList')
      xcbcs = xccl.GetProperty('buildConfigurations')
      for xcbc in xcbcs:
        name = xcbc.GetProperty('name')
        if name not in configurations:
          configurations.append(name)
    # Replace the XCConfigurationList attached to the PBXProject object with
    # a new one specifying all of the configuration names used by the various
    # targets.
    try:
      xccl = CreateXCConfigurationList(configurations)
      self.project.SetProperty('buildConfigurationList', xccl)
    except:
      sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
      raise
    # The need for this setting is explained above where _intermediate_var is
    # defined.  The comments below about wanting to avoid project-wide build
    # settings apply here too, but this needs to be set on a project-wide basis
    # so that files relative to the _intermediate_var setting can be displayed
    # properly in the Xcode UI.
    #
    # Note that for configuration-relative files such as anything relative to
    # _intermediate_var, for the purposes of UI tree view display, Xcode will
    # only resolve the configuration name once, when the project file is
    # opened.  If the active build configuration is changed, the project file
    # must be closed and reopened if it is desired for the tree view to update.
    # This is filed as Apple radar 6588391.
    xccl.SetBuildSetting(_intermediate_var,
                         '$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
    xccl.SetBuildSetting(_shared_intermediate_var,
                         '$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
    # Set user-specified project-wide build settings and config files.  This
    # is intended to be used very sparingly.  Really, almost everything should
    # go into target-specific build settings sections.  The project-wide
    # settings are only intended to be used in cases where Xcode attempts to
    # resolve variable references in a project context as opposed to a target
    # context, such as when resolving sourceTree references while building up
    # the tree tree view for UI display.
    # Any values set globally are applied to all configurations, then any
    # per-configuration values are applied.
    for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
      xccl.SetBuildSetting(xck, xcv)
    if 'xcode_config_file' in self.build_file_dict:
      config_ref = self.project.AddOrGetFileInRootGroup(
          self.build_file_dict['xcode_config_file'])
      xccl.SetBaseConfiguration(config_ref)
    build_file_configurations = self.build_file_dict.get('configurations', {})
    if build_file_configurations:
      for config_name in configurations:
        build_file_configuration_named = \
            build_file_configurations.get(config_name, {})
        if build_file_configuration_named:
          xcc = xccl.ConfigurationNamed(config_name)
          for xck, xcv in build_file_configuration_named.get('xcode_settings',
                                                             {}).iteritems():
            xcc.SetBuildSetting(xck, xcv)
          if 'xcode_config_file' in build_file_configuration_named:
            config_ref = self.project.AddOrGetFileInRootGroup(
                build_file_configurations[config_name]['xcode_config_file'])
            xcc.SetBaseConfiguration(config_ref)
    # Sort the targets based on how they appeared in the input.
    # TODO(mark): Like a lot of other things here, this assumes internal
    # knowledge of PBXProject - in this case, of its "targets" property.
    # ordinary_targets are ordinary targets that are already in the project
    # file. run_test_targets are the targets that run unittests and should be
    # used for the Run All Tests target.  support_targets are the action/rule
    # targets used by GYP file targets, just kept for the assert check.
    ordinary_targets = []
    run_test_targets = []
    support_targets = []
    # targets is full list of targets in the project.
    targets = []
    # does the it define it's own "all"?
    has_custom_all = False
    # targets_for_all is the list of ordinary_targets that should be listed
    # in this project's "All" target.  It includes each non_runtest_target
    # that does not have suppress_wildcard set.
    targets_for_all = []
    for target in self.build_file_dict['targets']:
      target_name = target['target_name']
      toolset = target['toolset']
      qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
                                                    toolset)
      xcode_target = xcode_targets[qualified_target]
      # Make sure that the target being added to the sorted list is already in
      # the unsorted list.
      assert xcode_target in self.project._properties['targets']
      targets.append(xcode_target)
      ordinary_targets.append(xcode_target)
      if xcode_target.support_target:
        support_targets.append(xcode_target.support_target)
        targets.append(xcode_target.support_target)
      if not int(target.get('suppress_wildcard', False)):
        targets_for_all.append(xcode_target)
      if target_name.lower() == 'all':
        has_custom_all = True;
      # If this target has a 'run_as' attribute, add its target to the
      # targets, and add it to the test targets.
      if target.get('run_as'):
        # Make a target to run something.  It should have one
        # dependency, the parent xcode target.
        xccl = CreateXCConfigurationList(configurations)
        run_target = gyp.xcodeproj_file.PBXAggregateTarget({
              'name':                   'Run ' + target_name,
              'productName':            xcode_target.GetProperty('productName'),
              'buildConfigurationList': xccl,
            },
            parent=self.project)
        run_target.AddDependency(xcode_target)
        command = target['run_as']
        script = ''
        if command.get('working_directory'):
          script = script + 'cd "%s"\n' % \
                   gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                       command.get('working_directory'))
        if command.get('environment'):
          script = script + "\n".join(
            ['export %s="%s"' %
             (key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
             for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some test end up using sockets, files on disk, etc. and can get
        # confused if more then one test runs at a time.  The generator
        # flag 'xcode_serialize_all_test_runs' controls the forcing of all
        # tests serially.  It defaults to True.  To get serial runs this
        # little bit of python does the same as the linux flock utility to
        # make sure only one runs at a time.
        command_prefix = ''
        if serialize_all_tests:
          command_prefix = \
              """python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
        # If we were unable to exec for some reason, we want to exit
        # with an error, and fixup variable references to be shell
        # syntax instead of xcode syntax.
        script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
                 gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                     gyp.common.EncodePOSIXShellList(command.get('action')))
        ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
              'shellScript':      script,
              'showEnvVarsInLog': 0,
            })
        run_target.AppendProperty('buildPhases', ssbp)
        # Add the run target to the project file.
        targets.append(run_target)
        run_test_targets.append(run_target)
        xcode_target.test_runner = run_target
    # Make sure that the list of targets being replaced is the same length as
    # the one replacing it, but allow for the added test runner targets.
    assert len(self.project._properties['targets']) == \
      len(ordinary_targets) + len(support_targets)
    self.project._properties['targets'] = targets
    # Get rid of unnecessary levels of depth in groups like the Source group.
    self.project.RootGroupsTakeOverOnlyChildren(True)
    # Sort the groups nicely.  Do this after sorting the targets, because the
    # Products group is sorted based on the order of the targets.
    self.project.SortGroups()
    # Create an "All" target if there's more than one target in this project
    # file and the project didn't define its own "All" target.  Put a generated
    # "All" target first so that people opening up the project for the first
    # time will build everything by default.
    if len(targets_for_all) > 1 and not has_custom_all:
      xccl = CreateXCConfigurationList(configurations)
      all_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name':                   'All',
          },
          parent=self.project)
      for target in targets_for_all:
        all_target.AddDependency(target)
      # TODO(mark): This is evil because it relies on internal knowledge of
      # PBXProject._properties.  It's important to get the "All" target first,
      # though.
      self.project._properties['targets'].insert(0, all_target)
    # The same, but for run_test_targets.
    if len(run_test_targets) > 1:
      xccl = CreateXCConfigurationList(configurations)
      run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name':                   'Run All Tests',
          },
          parent=self.project)
      for run_test_target in run_test_targets:
        run_all_tests_target.AddDependency(run_test_target)
      # Insert after the "All" target, which must exist if there is more than
      # one run_test_target.
      self.project._properties['targets'].insert(1, run_all_tests_target)
  def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
    """Second finalization pass; run only after Finalize1 on ALL projects.

    Builds 'Run <target> Tests' aggregates for targets marked with
    xcode_create_dependents_test_runner, refreshes remote project
    references, and assigns unique IDs to every project object.
    """
    # Finalize2 needs to happen in a separate step because the process of
    # updating references to other projects depends on the ordering of targets
    # within remote project files.  Finalize1 is responsible for sorting duty,
    # and once all project files are sorted, Finalize2 can come in and update
    # these references.
    # To support making a "test runner" target that will run all the tests
    # that are direct dependents of any given target, we look for
    # xcode_create_dependents_test_runner being set on an Aggregate target,
    # and generate a second target that will run the tests runners found under
    # the marked target.
    for bf_tgt in self.build_file_dict['targets']:
      if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
        tgt_name = bf_tgt['target_name']
        toolset = bf_tgt['toolset']
        qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
                                                      tgt_name, toolset)
        xcode_target = xcode_targets[qualified_target]
        if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
          # Collect all the run test targets.
          all_run_tests = []
          pbxtds = xcode_target.GetProperty('dependencies')
          for pbxtd in pbxtds:
            pbxcip = pbxtd.GetProperty('targetProxy')
            dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
            if hasattr(dependency_xct, 'test_runner'):
              all_run_tests.append(dependency_xct.test_runner)
          # Directly depend on all the runners as they depend on the target
          # that builds them.
          if len(all_run_tests) > 0:
            run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
                  'name':        'Run %s Tests' % tgt_name,
                  'productName': tgt_name,
                },
                parent=self.project)
            for run_test_target in all_run_tests:
              run_all_target.AddDependency(run_test_target)
            # Insert the test runner after the related target.
            idx = self.project._properties['targets'].index(xcode_target)
            self.project._properties['targets'].insert(idx + 1, run_all_target)
    # Update all references to other projects, to make sure that the lists of
    # remote products are complete.  Otherwise, Xcode will fill them in when
    # it opens the project file, which will result in unnecessary diffs.
    # TODO(mark): This is evil because it relies on internal knowledge of
    # PBXProject._other_pbxprojects.
    for other_pbxproject in self.project._other_pbxprojects.keys():
      self.project.AddOrGetProjectReference(other_pbxproject)
    self.project.SortRemoteProductReferences()
    # Give everything an ID.
    self.project_file.ComputeIDs()
    # Make sure that no two objects in the project file have the same ID.  If
    # multiple objects wind up with the same ID, upon loading the file, Xcode
    # will only recognize one object (the last one in the file?) and the
    # results are unpredictable.
    self.project_file.EnsureNoIDCollisions()
  def Write(self):
    """Atomically writes project.pbxproj, touching it only if it changed."""
    # Write the project file to a temporary location first.  Xcode watches for
    # changes to the project file and presents a UI sheet offering to reload
    # the project when it does change.  However, in some cases, especially when
    # multiple projects are open or when Xcode is busy, things don't work so
    # seamlessly.  Sometimes, Xcode is able to detect that a project file has
    # changed but can't unload it because something else is referencing it.
    # To mitigate this problem, and to avoid even having Xcode present the UI
    # sheet when an open project is rewritten for inconsequential changes, the
    # project file is written to a temporary file in the xcodeproj directory
    # first.  The new temporary file is then compared to the existing project
    # file, if any.  If they differ, the new file replaces the old; otherwise,
    # the new project file is simply deleted.  Xcode properly detects a file
    # being renamed over an open project file as a change and so it remains
    # able to present the "project file changed" sheet under this system.
    # Writing to a temporary file first also avoids the possible problem of
    # Xcode rereading an incomplete project file.
    (output_fd, new_pbxproj_path) = \
        tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
                         dir=self.path)
    try:
      output_file = os.fdopen(output_fd, 'wb')
      self.project_file.Print(output_file)
      output_file.close()
      pbxproj_path = os.path.join(self.path, 'project.pbxproj')
      same = False
      try:
        same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
      except OSError, e:
        if e.errno != errno.ENOENT:
          raise
      if same:
        # The new file is identical to the old one, just get rid of the new
        # one.
        os.unlink(new_pbxproj_path)
      else:
        # The new file is different from the old one, or there is no old one.
        # Rename the new file to the permanent name.
        #
        # tempfile.mkstemp uses an overly restrictive mode, resulting in a
        # file that can only be read by the owner, regardless of the umask.
        # There's no reason to not respect the umask here, which means that
        # an extra hoop is required to fetch it and reset the new file's mode.
        #
        # No way to get the umask without setting a new one?  Set a safe one
        # and then set it back to the old value.
        umask = os.umask(077)
        os.umask(umask)
        os.chmod(new_pbxproj_path, 0666 & ~umask)
        os.rename(new_pbxproj_path, pbxproj_path)
    except Exception:
      # Don't leave turds behind.  In fact, if this code was responsible for
      # creating the xcodeproj directory, get rid of that too.
      os.unlink(new_pbxproj_path)
      if self.created_dir:
        shutil.rmtree(self.path, True)
      raise
def AddSourceToTarget(source, type, pbxp, xct):
  """Routes a source file into the right build phase of the Xcode target xct.

  Compilable sources go to the Sources phase, linkable artifacts to the
  Frameworks phase, and everything else (or anything on a type='none'
  target) is merely added to the project's file tree via pbxp.
  """
  # TODO(mark): Perhaps source_extensions and library_extensions can be made a
  # little bit fancier.
  compile_extensions = ('c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift')
  # .o is conceptually more of a "source" than a "library," but Xcode thinks
  # of "sources" as things to compile and "libraries" (or "frameworks") as
  # things to link with.  Adding an object file to an Xcode target's frameworks
  # phase works properly.
  link_extensions = ('a', 'dylib', 'framework', 'o')
  ext = posixpath.splitext(posixpath.basename(source))[1]
  if ext:
    ext = ext[1:].lower()
  if type != 'none' and ext in compile_extensions:
    xct.SourcesPhase().AddFile(source)
  elif type != 'none' and ext in link_extensions:
    xct.FrameworksPhase().AddFile(source)
  else:
    # Files that aren't added to a sources or frameworks build phase can still
    # go into the project file, just not as part of a build phase.
    pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
  """Adds resource to the Resources build phase of the Xcode target xct."""
  # TODO(mark): Combine with AddSourceToTarget above?  Or just inline this call
  # where it's used.
  resources_phase = xct.ResourcesPhase()
  resources_phase.AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
  """Adds header to xct's Headers phase, marked Public or Private."""
  # TODO(mark): Combine with AddSourceToTarget above?  Or just inline this call
  # where it's used.
  # Tuple indexing by the boolean preserves the historical 0->Private,
  # 1->Public mapping.
  visibility = ('Private', 'Public')[is_public]
  attribute_settings = '{ATTRIBUTES = (%s, ); }' % visibility
  xct.HeadersPhase().AddFile(header, attribute_settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
  """Expands Xcode-style $(VARIABLES) in string per the expansions dict.

  In some rare cases, it is appropriate to expand Xcode variables when a
  project file is generated.  For any substring $(VAR) in string, if VAR is a
  key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
  Any $(VAR) substring in string for which VAR is not a key in the expansions
  dict will remain in the returned string.
  """
  matches = _xcode_variable_re.findall(string)
  # re.findall always returns a list (possibly empty), never None, so the
  # old "matches == None" guard was dead code and has been dropped.
  matches.reverse()
  for (to_replace, variable) in matches:
    if not variable in expansions:
      continue
    # Use plain str.replace rather than re.sub: re.sub interprets backslash
    # escapes (e.g. \1, \g<...>) in the replacement text, which corrupts
    # expansion values that legitimately contain backslashes.
    string = string.replace(to_replace, expansions[variable])
  return string
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
  """We must escape the defines that we give to XCode so that it knows not to
  split on spaces and to respect backslash and quote literals.  However, we
  must not quote the define, or Xcode will incorrectly intepret variables
  especially $(inherited)."""
  # Prefix every backslash, quote and space character with a backslash.
  return _xcode_define_re.sub(r'\\\1', s)
def PerformBuild(data, configurations, params):
  """Builds each generated .xcodeproj with xcodebuild (gyp's --build hook).

  For every .gyp input file in data, invokes
  `xcodebuild -project <xcodeproj> -configuration <config>` once per
  requested configuration.  subprocess.check_call raises if a build fails.
  """
  options = params['options']
  # build_file_dict is unused here; only the file name matters for locating
  # the generated project.
  for build_file, build_file_dict in data.iteritems():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
    if options.generator_output:
      xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
    for config in configurations:
      arguments = ['xcodebuild', '-project', xcodeproj_path]
      arguments += ['-configuration', config]
      print "Building [%s]: %s" % (config, arguments)
      subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle': 'com.apple.product-type.application.watchapp',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
support_xccl = CreateXCConfigurationList(configuration_names);
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-prioroty problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaning $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibilty for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
code_sign = int(copy_group.get('xcode_code_sign', 0))
settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign];
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file, settings)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| mit |
matmutant/sl4a | python-build/python-libs/xmpppy/xmpp/commands.py | 200 | 16116 | ## $Id: commands.py,v 1.17 2007/08/28 09:54:15 normanr Exp $
## Ad-Hoc Command manager
## Mike Albon (c) 5th January 2005
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
"""This module is a ad-hoc command processor for xmpppy. It uses the plug-in mechanism like most of the core library. It depends on a DISCO browser manager.
There are 3 classes here, a command processor Commands like the Browser, and a command template plugin Command, and an example command.
To use this module:
Instantiate the module with the parent transport and disco browser manager as parameters.
'Plug in' commands using the command template.
The command feature must be added to existing disco replies where necessary.
What it supplies:
Automatic command registration with the disco browser manager.
Automatic listing of commands in the public command list.
A means of handling requests, by redirection though the command manager.
"""
from protocol import *
from client import PlugIn
class Commands(PlugIn):
    """Commands is an ancestor of PlugIn and can be attached to any session.

    The commands class provides a lookup and browse mechanism. It follows the
    same principle as the Browser class for Service Discovery: to provide the
    list of commands, it adds the 'list' disco type to your existing disco
    handler function.

    How it works:
    The commands are added into the existing Browser on the correct nodes.
    When the command list is built the supplied discovery handler function
    needs to have a 'list' option in type. This then gets enumerated; all
    results returned as None are ignored.
    The command executed is then called using its Execute method. All session
    management is handled by the command itself.
    """
    def __init__(self, browser):
        """Initialises class and sets up local variables"""
        PlugIn.__init__(self)
        # Fixed: the original assigned DBG_LINE to a dead local variable; it
        # must be an instance attribute to label this plugin's debug output.
        self.DBG_LINE = 'commands'
        self._exported_methods = []
        # Maps jid -> node name -> {'disco': handler, 'execute': handler}.
        # The '' jid entry holds commands advertised for every jid.
        self._handlers = {'': {}}
        self._browser = browser

    def plugin(self, owner):
        """Makes handlers within the session"""
        # Plug into the session and the disco manager.
        # We only need get and set; results are not needed by a service
        # provider, only a service user.
        owner.RegisterHandler('iq', self._CommandHandler, typ='set', ns=NS_COMMANDS)
        owner.RegisterHandler('iq', self._CommandHandler, typ='get', ns=NS_COMMANDS)
        self._browser.setDiscoHandler(self._DiscoHandler, node=NS_COMMANDS, jid='')

    def plugout(self):
        """Removes handlers from the session"""
        # unPlug from the session and the disco manager
        self._owner.UnregisterHandler('iq', self._CommandHandler, ns=NS_COMMANDS)
        for jid in self._handlers:
            # Fixed: pass jid so the disco handler registered for EACH jid
            # (see addCommand) is removed, not just the default '' one.
            self._browser.delDiscoHandler(self._DiscoHandler, node=NS_COMMANDS, jid=jid)

    def _CommandHandler(self, conn, request):
        """The internal method to process the routing of command execution requests"""
        # Pass command execution on to the handler registered for the target
        # jid/node. Session management is done by the command itself.
        jid = str(request.getTo())
        try:
            node = request.getTagAttr('command', 'node')
        except:
            conn.send(Error(request, ERR_BAD_REQUEST))
            raise NodeProcessed
        if jid in self._handlers:
            if node in self._handlers[jid]:
                self._handlers[jid][node]['execute'](conn, request)
            else:
                conn.send(Error(request, ERR_ITEM_NOT_FOUND))
                raise NodeProcessed
        elif node in self._handlers['']:
            # Fall back to the commands advertised for every jid.
            self._handlers[''][node]['execute'](conn, request)
        else:
            conn.send(Error(request, ERR_ITEM_NOT_FOUND))
            raise NodeProcessed

    def _DiscoHandler(self, conn, request, typ):
        """The internal method to process service discovery requests"""
        # This is the disco manager handler.
        if typ == 'items':
            # Generate the list of commands for the requested jid and reply.
            # This handler does not handle individual commands' disco requests.
            # Each command's own disco handler supports a pseudo 'list' type
            # which returns a (jid, node, name) tuple, or None if the command
            # does not wish to be advertised.
            replies = []
            items = []
            jid = str(request.getTo())
            # Get specific jid based results
            if jid in self._handlers:
                for each in self._handlers[jid].keys():
                    items.append((jid, each))
            else:
                # Get generic results
                for each in self._handlers[''].keys():
                    items.append(('', each))
            if items != []:
                for each in items:
                    i = self._handlers[each[0]][each[1]]['disco'](conn, request, 'list')
                    if i != None:
                        replies.append(Node(tag='item', attrs={'jid': i[0], 'node': i[1], 'name': i[2]}))
                iq = request.buildReply('result')
                if request.getQuerynode():
                    iq.setQuerynode(request.getQuerynode())
                iq.setQueryPayload(replies)
                conn.send(iq)
            else:
                conn.send(Error(request, ERR_ITEM_NOT_FOUND))
            raise NodeProcessed
        elif typ == 'info':
            return {'ids': [{'category': 'automation', 'type': 'command-list'}], 'features': []}

    def addCommand(self, name, cmddisco, cmdexecute, jid=''):
        """The method to call if adding a new command to the session. The
        required parameters cmddisco and cmdexecute are the methods that
        enable that command to be discovered and executed."""
        # Register the command in both the command list and disco.
        if jid not in self._handlers:
            self._handlers[jid] = {}
            self._browser.setDiscoHandler(self._DiscoHandler, node=NS_COMMANDS, jid=jid)
        if name in self._handlers[jid]:
            raise NameError('Command Exists')
        else:
            self._handlers[jid][name] = {'disco': cmddisco, 'execute': cmdexecute}
        # Advertise the command node itself through disco.
        self._browser.setDiscoHandler(cmddisco, node=name, jid=jid)

    def delCommand(self, name, jid=''):
        """Removed command from the session"""
        # Remove the command from both the command list and disco.
        if jid not in self._handlers:
            raise NameError('Jid not found')
        if name not in self._handlers[jid]:
            raise NameError('Command not found')
        else:
            # Do disco removal here
            command = self.getCommand(name, jid)['disco']
            del self._handlers[jid][name]
            self._browser.delDiscoHandler(command, node=name, jid=jid)

    def getCommand(self, name, jid=''):
        """Returns the command handler dict ({'disco': ..., 'execute': ...})
        registered under name for the given jid."""
        if jid not in self._handlers:
            raise NameError('Jid not found')
        elif name not in self._handlers[jid]:
            raise NameError('Command not found')
        else:
            return self._handlers[jid][name]
class Command_Handler_Prototype(PlugIn):
    """This is a prototype command handler. As each command uses a disco
    method and an execute method you can implement it any way you like;
    however, this is my first attempt at making a generic handler that you
    can hang process stages on too. There is an example command below.

    The parameters are as follows:
    name : the name of the command within the jabber environment
    description : the natural language description
    discofeatures : the features supported by the command
    initial : the initial command in the form of {'execute':commandname}

    All stages set the 'actions' dictionary for each session to represent
    the possible options available.
    """
    name = 'examplecommand'
    count = 0
    description = 'an example command'
    discofeatures = [NS_COMMANDS, NS_DATA]
    # This is the command template
    def __init__(self, jid=''):
        """Set up the class"""
        PlugIn.__init__(self)
        # Fixed: the original assigned DBG_LINE to a dead local variable; it
        # must be an instance attribute to label this plugin's debug output.
        self.DBG_LINE = 'command'
        self.sessioncount = 0
        self.sessions = {}
        # Disco information for the command list, pre-formatted as a dict
        # (not a tuple, contrary to the original comment).
        self.discoinfo = {'ids': [{'category': 'automation', 'type': 'command-node', 'name': self.description}], 'features': self.discofeatures}
        self._jid = jid

    def plugin(self, owner):
        """Plug command into the commands class"""
        # The owner in this instance is the Command Processor
        self._commands = owner
        self._owner = owner._owner
        self._commands.addCommand(self.name, self._DiscoHandler, self.Execute, jid=self._jid)

    def plugout(self):
        """Remove command from the commands class"""
        self._commands.delCommand(self.name, self._jid)

    def getSessionID(self):
        """Returns an id for the command session"""
        # count starts out as a class attribute; this rebinds it on the
        # instance, so the counter is per handler instance.
        self.count = self.count + 1
        return 'cmd-%s-%d' % (self.name, self.count)

    def Execute(self, conn, request):
        """The method that handles all the commands, and routes them to the
        correct method for that stage."""
        # New request or old?
        try:
            session = request.getTagAttr('command', 'sessionid')
        except:
            session = None
        try:
            action = request.getTagAttr('command', 'action')
        except:
            action = None
        if action is None:
            action = 'execute'
        # Check session is in session list
        if session in self.sessions:
            if self.sessions[session]['jid'] == request.getFrom():
                # Check the action is valid for the session's current stage
                if action in self.sessions[session]['actions']:
                    # Execute next action
                    self.sessions[session]['actions'][action](conn, request)
                else:
                    # Stage not presented as an option
                    self._owner.send(Error(request, ERR_BAD_REQUEST))
                    raise NodeProcessed
            else:
                # Jid and session don't match. Go away imposter
                self._owner.send(Error(request, ERR_BAD_REQUEST))
                raise NodeProcessed
        elif session is not None:
            # Unknown session id: refuse rather than start a new session.
            self._owner.send(Error(request, ERR_BAD_REQUEST))
            raise NodeProcessed
        else:
            # New session
            self.initial[action](conn, request)

    def _DiscoHandler(self, conn, request, type):
        """The handler for discovery events"""
        if type == 'list':
            # (jid, node, name) tuple used by Commands._DiscoHandler.
            return (request.getTo(), self.name, self.description)
        elif type == 'items':
            return []
        elif type == 'info':
            return self.discoinfo
class TestCommand(Command_Handler_Prototype):
    """Example class. You should read the source if you wish to understand how
    it works. Generally, it presents a "master" that guides the user through
    to calculate something.
    """
    name = 'testcommand'
    description = 'a noddy example command'
    def __init__(self, jid=''):
        """Init internal constants."""
        Command_Handler_Prototype.__init__(self, jid)
        # Entry point used by Execute() for brand-new sessions.
        self.initial = {'execute': self.cmdFirstStage}
    def cmdFirstStage(self, conn, request):
        """First stage: ask which calculation the user wants to perform."""
        # This is the only place this should be repeated as all other stages
        # should have SessionIDs.
        try:
            session = request.getTagAttr('command', 'sessionid')
        except Exception:
            session = None
        if session is None:
            session = self.getSessionID()
            self.sessions[session] = {'jid': request.getFrom(), 'actions': {'cancel': self.cmdCancel, 'next': self.cmdSecondStage, 'execute': self.cmdSecondStage}, 'data': {'type': None}}
        # As this is the first stage we only send a form
        reply = request.buildReply('result')
        form = DataForm(title='Select type of operation', data=['Use the combobox to select the type of calculation you would like to do, then click Next', DataField(name='calctype', desc='Calculation Type', value=self.sessions[session]['data']['type'], options=[['circlediameter', 'Calculate the Diameter of a circle'], ['circlearea', 'Calculate the area of a circle']], typ='list-single', required=1)])
        replypayload = [Node('actions', attrs={'execute': 'next'}, payload=[Node('next')]), form]
        reply.addChild(name='command', namespace=NS_COMMANDS, attrs={'node': request.getTagAttr('command', 'node'), 'sessionid': session, 'status': 'executing'}, payload=replypayload)
        self._owner.send(reply)
        raise NodeProcessed
    def cmdSecondStage(self, conn, request):
        """Second stage: record the chosen operation and ask for the radius."""
        form = DataForm(node=request.getTag(name='command').getTag(name='x', namespace=NS_DATA))
        self.sessions[request.getTagAttr('command', 'sessionid')]['data']['type'] = form.getField('calctype').getValue()
        self.sessions[request.getTagAttr('command', 'sessionid')]['actions'] = {'cancel': self.cmdCancel, None: self.cmdThirdStage, 'previous': self.cmdFirstStage, 'execute': self.cmdThirdStage, 'next': self.cmdThirdStage}
        # The form generation is split out to another method as it may be
        # called by cmdThirdStage
        self.cmdSecondStageReply(conn, request)
    def cmdSecondStageReply(self, conn, request):
        """Send the radius-entry form (also used to re-prompt on bad input)."""
        reply = request.buildReply('result')
        form = DataForm(title='Enter the radius', data=['Enter the radius of the circle (numbers only)', DataField(desc='Radius', name='radius', typ='text-single')])
        replypayload = [Node('actions', attrs={'execute': 'complete'}, payload=[Node('complete'), Node('prev')]), form]
        reply.addChild(name='command', namespace=NS_COMMANDS, attrs={'node': request.getTagAttr('command', 'node'), 'sessionid': request.getTagAttr('command', 'sessionid'), 'status': 'executing'}, payload=replypayload)
        self._owner.send(reply)
        raise NodeProcessed
    def cmdThirdStage(self, conn, request):
        """Third stage: compute the requested value and complete the session."""
        form = DataForm(node=request.getTag(name='command').getTag(name='x', namespace=NS_DATA))
        try:
            num = float(form.getField('radius').getValue())
        except Exception:
            # Invalid input: re-prompt. cmdSecondStageReply always raises
            # NodeProcessed, so control never falls through with ``num``
            # unbound.
            self.cmdSecondStageReply(conn, request)
        from math import pi
        if self.sessions[request.getTagAttr('command', 'sessionid')]['data']['type'] == 'circlearea':
            result = (num ** 2) * pi
        else:
            result = num * 2 * pi
        reply = request.buildReply('result')
        form = DataForm(typ='result', data=[DataField(desc='result', name='result', value=result)])
        reply.addChild(name='command', namespace=NS_COMMANDS, attrs={'node': request.getTagAttr('command', 'node'), 'sessionid': request.getTagAttr('command', 'sessionid'), 'status': 'completed'}, payload=[form])
        self._owner.send(reply)
        raise NodeProcessed
    def cmdCancel(self, conn, request):
        """Cancel the session and discard its state."""
        reply = request.buildReply('result')
        reply.addChild(name='command', namespace=NS_COMMANDS, attrs={'node': request.getTagAttr('command', 'node'), 'sessionid': request.getTagAttr('command', 'sessionid'), 'status': 'cancelled'})
        self._owner.send(reply)
        del self.sessions[request.getTagAttr('command', 'sessionid')]
| apache-2.0 |
woodpecker1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py | 119 | 18940 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import time
from webkitpy.layout_tests.controllers import test_result_writer
from webkitpy.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_results import TestResult
_log = logging.getLogger(__name__)
def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done):
    # Convenience wrapper: construct the runner and execute it in one call.
    return SingleTestRunner(port, options, results_directory, worker_name,
                            driver, test_input, stop_when_done).run()
class SingleTestRunner(object):
    # Symbolic names for where a newly-written baseline belongs; see
    # _save_baseline_data() for the mapping from token to directory.
    (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
    def __init__(self, port, options, results_directory, worker_name, driver, test_input, stop_when_done):
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._results_directory = results_directory
        self._driver = driver
        self._timeout = test_input.timeout
        self._worker_name = worker_name
        self._test_name = test_input.test_name
        self._should_run_pixel_test = test_input.should_run_pixel_test
        self._reference_files = test_input.reference_files
        self._stop_when_done = stop_when_done
        if self._reference_files:
            # Detect and report a test which has a wrong combination of expectation files.
            # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
            # 'foo-expected.txt', we should warn users. One test file must be used exclusively
            # in either layout tests or reftests, but not in both.
            for suffix in ('.txt', '.png', '.wav'):
                expected_filename = self._port.expected_filename(self._test_name, suffix)
                if self._filesystem.exists(expected_filename):
                    _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
                        self._test_name, expected_filename)
    def _expected_driver_output(self):
        # Bundle the on-disk text/image/checksum/audio expectations into a
        # DriverOutput so they can be diffed against the actual run.
        return DriverOutput(self._port.expected_text(self._test_name),
                            self._port.expected_image(self._test_name),
                            self._port.expected_checksum(self._test_name),
                            self._port.expected_audio(self._test_name))
    def _should_fetch_expected_checksum(self):
        # When rebaselining there is no point comparing against the old hash.
        return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
    def _driver_input(self):
        # The image hash is used to avoid doing an image dump if the
        # checksums match, so it should be set to a blank value if we
        # are generating a new baseline. (Otherwise, an image from a
        # previous run will be copied into the baseline."""
        image_hash = None
        if self._should_fetch_expected_checksum():
            image_hash = self._port.expected_checksum(self._test_name)
        return DriverInput(self._test_name, self._timeout, image_hash, self._should_run_pixel_test)
    def run(self):
        # Dispatch to the reftest / rebaseline / normal-compare path.
        if self._reference_files:
            if self._port.get_option('no_ref_tests') or self._options.reset_results:
                # Reftests have no expectation files to reset, so they are
                # skipped outright in these modes.
                reftest_type = set([reference_file[0] for reference_file in self._reference_files])
                result = TestResult(self._test_name, reftest_type=reftest_type)
                result.type = test_expectations.SKIP
                return result
            return self._run_reftest()
        if self._options.reset_results:
            return self._run_rebaseline()
        return self._run_compare_test()
    def _run_compare_test(self):
        # Normal path: run the test and diff against stored expectations.
        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        expected_driver_output = self._expected_driver_output()
        if self._options.ignore_metrics:
            expected_driver_output.strip_metrics()
            driver_output.strip_metrics()
        test_result = self._compare_output(expected_driver_output, driver_output)
        if self._options.new_test_results:
            self._add_missing_baselines(test_result, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result
    def _run_rebaseline(self):
        # Rebaseline path: run the test and write its output out as the new
        # expectation files instead of comparing.
        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        failures = self._handle_error(driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
        # FIXME: It the test crashed or timed out, it might be better to avoid
        # to write new baselines.
        self._overwrite_baselines(driver_output)
        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
    # Matches the first line of a render-tree dump; used to decide whether a
    # .txt result is platform-specific output.
    _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
    def _add_missing_baselines(self, test_result, driver_output):
        # Write out any expectation files that the comparison reported missing.
        missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
        if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
            self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
        if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
            self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav'))
        if missingImage:
            self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png'))
    def _location_for_new_baseline(self, data, extension):
        # Decide where a brand-new baseline should live, by result type.
        if self._options.add_platform_exceptions:
            return self.VERSION_DIR
        if extension == '.png':
            return self.PLATFORM_DIR
        if extension == '.wav':
            return self.ALONGSIDE_TEST
        if extension == '.txt' and self._render_tree_dump_pattern.match(data):
            # Render-tree dumps vary by platform, so keep them per-platform.
            return self.PLATFORM_DIR
        return self.ALONGSIDE_TEST
    def _overwrite_baselines(self, driver_output):
        location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
        self._save_baseline_data(driver_output.text, '.txt', location)
        self._save_baseline_data(driver_output.audio, '.wav', location)
        if self._should_run_pixel_test:
            self._save_baseline_data(driver_output.image, '.png', location)
    def _save_baseline_data(self, data, extension, location):
        # Write ``data`` as the "-expected" file for this test; ``location``
        # is one of the tokens declared at the top of the class.
        if data is None:
            return
        port = self._port
        fs = self._filesystem
        if location == self.ALONGSIDE_TEST:
            output_dir = fs.dirname(port.abspath_for_test(self._test_name))
        elif location == self.VERSION_DIR:
            output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._test_name))
        elif location == self.PLATFORM_DIR:
            output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._test_name))
        elif location == self.UPDATE:
            # Overwrite whichever existing expectation file the port resolves.
            output_dir = fs.dirname(port.expected_filename(self._test_name, extension))
        else:
            raise AssertionError('unrecognized baseline location: %s' % location)
        fs.maybe_make_directory(output_dir)
        output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension)
        output_path = fs.join(output_dir, output_basename)
        _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
        port.update_baseline(output_path, data)
    def _handle_error(self, driver_output, reference_filename=None):
        """Returns test failures if some unusual errors happen in driver's run.
        Args:
            driver_output: The output from the driver.
            reference_filename: The full path to the reference file which produced the driver_output.
                This arg is optional and should be used only in reftests until we have a better way to know
                which html file is used for producing the driver_output.
        """
        failures = []
        fs = self._filesystem
        if driver_output.timeout:
            failures.append(test_failures.FailureTimeout(bool(reference_filename)))
        if reference_filename:
            testname = self._port.relative_test_filename(reference_filename)
        else:
            testname = self._test_name
        if driver_output.crash:
            failures.append(test_failures.FailureCrash(bool(reference_filename),
                                                       driver_output.crashed_process_name,
                                                       driver_output.crashed_pid))
            if driver_output.error:
                _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
            else:
                _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
        elif driver_output.error:
            _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
        for line in driver_output.error.splitlines():
            _log.debug("  %s" % line)
        return failures
    def _compare_output(self, expected_driver_output, driver_output):
        failures = []
        failures.extend(self._handle_error(driver_output))
        if driver_output.crash:
            # Don't continue any more if we already have a crash.
            # In case of timeouts, we continue since we still want to see the text and image output.
            return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
        failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
        failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
        if self._should_run_pixel_test:
            failures.extend(self._compare_image(expected_driver_output, driver_output))
        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
    def _compare_text(self, expected_text, actual_text):
        failures = []
        if (expected_text and actual_text and
            # Assuming expected_text is already normalized.
            self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
            failures.append(test_failures.FailureTextMismatch())
        elif actual_text and not expected_text:
            # Output produced but no expectation on disk: a missing baseline.
            failures.append(test_failures.FailureMissingResult())
        return failures
    def _compare_audio(self, expected_audio, actual_audio):
        failures = []
        if (expected_audio and actual_audio and
            self._port.do_audio_results_differ(expected_audio, actual_audio)):
            failures.append(test_failures.FailureAudioMismatch())
        elif actual_audio and not expected_audio:
            failures.append(test_failures.FailureMissingAudio())
        return failures
    def _get_normalized_output_text(self, output):
        """Returns the normalized text output, i.e. the output in which
        the end-of-line characters are normalized to "\n"."""
        # Running tests on Windows produces "\r\n".  The "\n" part is helpfully
        # changed to "\r\n" by our system (Python/Cygwin), resulting in
        # "\r\r\n", when, in fact, we wanted to compare the text output with
        # the normalized text expectation files.
        return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")
    # FIXME: This function also creates the image diff. Maybe that work should
    # be handled elsewhere?
    def _compare_image(self, expected_driver_output, driver_output):
        failures = []
        # If we didn't produce a hash file, this test must be text-only.
        if driver_output.image_hash is None:
            return failures
        if not expected_driver_output.image:
            failures.append(test_failures.FailureMissingImage())
        elif not expected_driver_output.image_hash:
            failures.append(test_failures.FailureMissingImageHash())
        elif driver_output.image_hash != expected_driver_output.image_hash:
            diff_result = self._port.diff_image(expected_driver_output.image, driver_output.image)
            err_str = diff_result[2]
            if err_str:
                _log.warning('  %s : %s' % (self._test_name, err_str))
                failures.append(test_failures.FailureImageHashMismatch())
                driver_output.error = (driver_output.error or '') + err_str
            else:
                driver_output.image_diff = diff_result[0]
                if driver_output.image_diff:
                    failures.append(test_failures.FailureImageHashMismatch(diff_result[1]))
                else:
                    # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
                    _log.warning('  %s -> pixel hash failed (but diff passed)' % self._test_name)
        return failures
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = 0
        reference_output = None
        test_result = None
        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time
        assert(reference_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
        reftest_type = set([reference_file[0] for reference_file in self._reference_files])
        # NOTE(review): if the loop above finishes without ``break``, the last
        # iteration's run time was already added to total_test_time and is
        # added again here -- the final reference's time is double-counted.
        # Confirm before relying on reftest timing data.
        return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names)
    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
        total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
        failures = []
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            # Don't continue any more if we already have crash or timeout.
            return TestResult(self._test_name, failures, total_test_time, has_stderr)
        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
        if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
        elif mismatch:
            # For a "!=" reference, identical hashes are the failure case.
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0)
                if not diff_result[0]:
                    failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
                else:
                    _log.warning("  %s -> ref test hashes matched but diff failed" % self._test_name)
        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0)
            if diff_result[0]:
                failures.append(test_failures.FailureReftestMismatch(reference_filename))
            else:
                _log.warning("  %s -> ref test hashes didn't match but diff passed" % self._test_name)
        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
| bsd-3-clause |
Shekharrajak/Django-facebook | setup.py | 22 | 5672 | #!/usr/bin/env python
from distutils.util import convert_path
from django_facebook import __version__, __maintainer__, __email__
from fnmatch import fnmatchcase
import os
import sys
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
# File patterns that are never shipped as package data.
standard_exclude = ['*.py', '*.pyc', '*~', '.*', '*.bak']
# Directory names/paths that are skipped entirely while scanning.
standard_exclude_directories = [
    '.*', 'CVS', '_darcs', './build', './docs',
    './dist', 'EGG-INFO', '*.egg-info', 'facebook_profiles'
]
def find_package_data(where='.', package='', exclude=standard_exclude,
                      exclude_directories=standard_exclude_directories,
                      only_in_packages=True, show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.
    The dictionary looks like::
        {'package': [files]}
    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.
    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).
    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.
    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).
    Note patterns use wildcards, or can be exact paths (including
    leading ``./``). Name matching (``fnmatchcase``) is case-sensitive;
    only the whole-path comparison against a pattern is case-insensitive.
    """
    out = {}
    # Breadth-first walk. Each stack entry is (directory, path prefix,
    # dotted package name, whether files outside a package are skipped).
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            # The original used the Python 2-only statement
                            # ``print >> sys.stderr``; sys.stderr.write()
                            # behaves identically on Python 2 and 3.
                            sys.stderr.write(
                                'Directory %s ignored by pattern %s\n'
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                if (os.path.isfile(os.path.join(fn, '__init__.py'))
                        and not prefix):
                    # A sub-package: descend with an extended dotted name.
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # A plain data directory: keep growing the path prefix.
                    stack.append(
                        (fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            sys.stderr.write(
                                'File %s ignored by pattern %s\n'
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
# Extend the default exclusions with repo-local tooling directories.
excluded_directories = standard_exclude_directories + ['./requirements',
                                                       './scripts']
# Collect every non-excluded data file found inside the package tree.
package_data = find_package_data(exclude_directories=excluded_directories)
# NOTE(review): both reads assume setup.py is executed from the project root.
license_text = open('LICENSE.txt').read()
long_description = open('README.rest').read()
# Advertise pre-releases correctly on PyPI.
if 'alpha' in __version__:
    development_status = 'Development Status :: 3 - Alpha'
else:
    development_status = 'Development Status :: 5 - Production/Stable'
CLASSIFIERS = [
    development_status,
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Natural Language :: English',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Topic :: Scientific/Engineering :: Mathematics',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Framework :: Django',
    'Environment :: Web Environment',
]
DESCRIPTION = """Facebook open graph API client in python. Enables django applications to register users using facebook.
Fixes issues with the official but unsupported Facebook python-sdk. Enables mobile facebook authentication.
Canvas page authentication for facebook applications. FQL access via the server side api.
"""
download_url = 'https://github.com/tschellenbach/Django-facebook/archive/v%s.tar.gz' % __version__
setup(
    name='django-facebook',
    version=__version__,
    url='http://github.com/tschellenbach/Django-facebook',
    author=__maintainer__,
    author_email=__email__,
    license=license_text,
    packages=find_packages(),
    package_data=package_data,
    description=DESCRIPTION,
    long_description=long_description,
    classifiers=CLASSIFIERS,
    install_requires=['unidecode'],
    tests_require=[
        'django',
        'python-memcached',
        'pil',
        'mock',
        'pytest',
        'pytest-django'
    ],
    test_suite='runtests.runtests',
    zip_safe=False,  # South can't run migrations on zipped eggs.
)
| bsd-3-clause |
Daniel-CA/odoo-addons | account_invoice_attachment_download/wizard/wiz_save_invoice_attachment.py | 4 | 1771 | # -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
from subprocess import Popen, PIPE
import base64
import os
class WizSaveInvoiceAttachment(models.TransientModel):
    _name = 'wiz.save.invoice.attachment'
    _description = 'Wizard for save attachtments in folder'
    # Zip archive content and its filename, exposed read-only to the user.
    data = fields.Binary('File', readonly=True)
    name = fields.Char('Filename', readonly=True)
    @api.model
    def default_get(self, var_fields):
        # Builds a zip of the selected ir.attachment records (ids taken from
        # the ``active_ids`` context key) and returns it base64-encoded in
        # the wizard defaults.
        attachment_obj = self.env['ir.attachment']
        res = super(WizSaveInvoiceAttachment, self).default_get(var_fields)
        # ``mktemp -d`` prints the new directory plus a trailing newline; the
        # ``stdout[:-1]`` slices below strip that newline.
        process = Popen(['mktemp', '-d'], stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()
        my_path = stdout[:-1]
        for a in attachment_obj.browse(self.env.context.get('active_ids')):
            file_path = "{}/{}".format(stdout[:-1], a.name)
            result = a._data_get('datas', None)
            b = result.get(a.id)
            # NOTE(review): ``file`` shadows the Python 2 builtin of the same
            # name; the attachment payload is base64 in ``datas``.
            with open(file_path, "a+") as file:
                file.write(base64.b64decode(b))
        zip_name = "{}-{}".format(_('invoices_attachments'),
                                  fields.Date.context_today(self))
        # HACK: shell=True with interpolated paths -- tolerable only because
        # the paths come from mktemp, not from user input; attachment names
        # ending up in the directory could still surprise the glob.
        params = "zip {}/{} {}/*".format(stdout[:-1], zip_name, stdout[:-1])
        process = Popen([params], stdout=PIPE, stderr=PIPE, shell=True)
        stdout, stderr = process.communicate()
        path = "{}/{}.zip".format(my_path, zip_name)
        if os.path.exists(path):
            f = open(path, 'rb')
            file_data = f.read()
            f.close()
            res.update({'data': base64.b64encode(file_data),
                        'name': "{}.zip".format(zip_name)})
        return res
| agpl-3.0 |
dharmabumstead/ansible | lib/ansible/utils/module_docs_fragments/ldap.py | 17 | 1334 | # -*- coding: utf-8 -*-
# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
# Copyright: (c) 2017-2018 Keller Fuchs (@kellerfuchs) <kellerfuchs@hashbang.sh>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    # Standard LDAP documentation fragment
    # Merged into a module's docs via ``extends_documentation_fragment``;
    # the string below is YAML, so it must remain valid YAML verbatim.
    DOCUMENTATION = '''
options:
  bind_dn:
    description:
      - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
      - If this is blank, we'll use an anonymous bind.
  bind_pw:
    description:
      - The password to use with I(bind_dn).
  dn:
    required: true
    description:
      - The DN of the entry to add or remove.
  server_uri:
    default: ldapi:///
    description:
      - A URI to the LDAP server.
      - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
  start_tls:
    default: 'no'
    type: bool
    description:
      - If true, we'll use the START_TLS LDAP extension.
  validate_certs:
    default: 'yes'
    type: bool
    description:
      - If set to C(no), SSL certificates will not be validated.
      - This should only be used on sites using self-signed certificates.
    version_added: "2.4"
'''
| gpl-3.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/encodings/euc_jp.py | 816 | 1027 | #
# euc_jp.py: Python Unicode Codec for EUC_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# Fetch the C-implemented euc_jp codec object; every class below delegates
# to it.
codec = _codecs_jp.getcodec('euc_jp')
class Codec(codecs.Codec):
    # Stateless codec entry points, bound straight to the C implementation.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte machinery only needs to know which codec object to drive.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # The multibyte machinery only needs to know which codec object to drive.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reading wrapper over the same C codec object.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writing wrapper over the same C codec object.
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register the 'euc_jp' codec."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='euc_jp',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
wgrose/leanto | google/appengine/api/urlfetch_stub.py | 2 | 7959 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the urlfetch API, based on httplib."""
import httplib
import logging
import socket
import urllib
import urlparse
from google.appengine.api import apiproxy_stub
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.api import urlfetch_service_pb
from google.appengine.runtime import apiproxy_errors
# Hard cap on the size of a fetched response body (16 MB).
MAX_RESPONSE_SIZE = 2 ** 24
# Mirror the redirect limit of the real urlfetch API.
MAX_REDIRECTS = urlfetch.MAX_REDIRECTS
# HTTP status codes that are treated as redirects to follow.
REDIRECT_STATUSES = frozenset([
  httplib.MOVED_PERMANENTLY,
  httplib.FOUND,
  httplib.SEE_OTHER,
  httplib.TEMPORARY_REDIRECT,
])
# Outbound ports production App Engine permits; None covers "no port given".
PORTS_ALLOWED_IN_PRODUCTION = (
    None, '80', '443', '4443', '8080', '8081', '8082', '8083', '8084', '8085',
    '8086', '8087', '8088', '8089', '8188', '8444', '8990')
# Per-fetch deadline, in seconds.
_API_CALL_DEADLINE = 5.0
# Request headers stripped before fetching so user code cannot spoof them.
_UNTRUSTED_REQUEST_HEADERS = frozenset([
  'content-length',
  'host',
  'referer',
  'user-agent',
  'vary',
  'via',
  'x-forwarded-for',
  ])
class URLFetchServiceStub(apiproxy_stub.APIProxyStub):
"""Stub version of the urlfetch API to be used with apiproxy_stub_map."""
  def __init__(self, service_name='urlfetch'):
    """Initializer.

    Args:
      service_name: Service name expected for all calls.
    """
    # Delegates to APIProxyStub, which validates that incoming calls use
    # the expected service name.
    super(URLFetchServiceStub, self).__init__(service_name)
def _Dynamic_Fetch(self, request, response):
"""Trivial implementation of URLFetchService::Fetch().
Args:
request: the fetch to perform, a URLFetchRequest
response: the fetch response, a URLFetchResponse
"""
(protocol, host, path, parameters, query, fragment) = urlparse.urlparse(request.url())
payload = ''
if request.method() == urlfetch_service_pb.URLFetchRequest.GET:
method = 'GET'
elif request.method() == urlfetch_service_pb.URLFetchRequest.POST:
method = 'POST'
payload = request.payload()
elif request.method() == urlfetch_service_pb.URLFetchRequest.HEAD:
method = 'HEAD'
elif request.method() == urlfetch_service_pb.URLFetchRequest.PUT:
method = 'PUT'
payload = request.payload()
elif request.method() == urlfetch_service_pb.URLFetchRequest.DELETE:
method = 'DELETE'
else:
logging.error('Invalid method: %s', request.method())
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.UNSPECIFIED_ERROR)
if not (protocol == 'http' or protocol == 'https'):
logging.error('Invalid protocol: %s', protocol)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.INVALID_URL)
sanitized_headers = self._SanitizeHttpHeaders(_UNTRUSTED_REQUEST_HEADERS,
request.header_list())
request.clear_header()
request.header_list().extend(sanitized_headers)
self._RetrieveURL(request.url(), payload, method,
request.header_list(), response,
follow_redirects=request.followredirects())
def _RetrieveURL(self, url, payload, method, headers, response,
follow_redirects=True):
"""Retrieves a URL.
Args:
url: String containing the URL to access.
payload: Request payload to send, if any.
method: HTTP method to use (e.g., 'GET')
headers: List of additional header objects to use for the request.
response: Response object
follow_redirects: optional setting (defaulting to True) for whether or not
we should transparently follow redirects (up to MAX_REDIRECTS)
Raises:
Raises an apiproxy_errors.ApplicationError exception with FETCH_ERROR
in cases where:
- MAX_REDIRECTS is exceeded
- The protocol of the redirected URL is bad or missing.
"""
last_protocol = ''
last_host = ''
for redirect_number in xrange(MAX_REDIRECTS + 1):
parsed = urlparse.urlparse(url)
protocol, host, path, parameters, query, fragment = parsed
port = urllib.splitport(urllib.splituser(host)[1])[1]
if port not in PORTS_ALLOWED_IN_PRODUCTION:
logging.warning(
'urlfetch received %s ; port %s is not allowed in production!' %
(url, port))
if host == '' and protocol == '':
host = last_host
protocol = last_protocol
adjusted_headers = {
'Content-Length': len(payload),
'Host': host,
'Accept': '*/*',
}
if method == 'POST' and payload:
adjusted_headers['Content-Type'] = 'application/x-www-form-urlencoded'
for header in headers:
adjusted_headers[header.key().title()] = header.value()
logging.debug('Making HTTP request: host = %s, '
'url = %s, payload = %s, headers = %s',
host, url, payload, adjusted_headers)
try:
if protocol == 'http':
connection = httplib.HTTPConnection(host)
elif protocol == 'https':
connection = httplib.HTTPSConnection(host)
else:
error_msg = 'Redirect specified invalid protocol: "%s"' % protocol
logging.error(error_msg)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, error_msg)
last_protocol = protocol
last_host = host
if query != '':
full_path = path + '?' + query
else:
full_path = path
orig_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(_API_CALL_DEADLINE)
connection.request(method, full_path, payload, adjusted_headers)
http_response = connection.getresponse()
http_response_data = http_response.read()
finally:
socket.setdefaulttimeout(orig_timeout)
connection.close()
except (httplib.error, socket.error, IOError), e:
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, str(e))
if http_response.status in REDIRECT_STATUSES and follow_redirects:
url = http_response.getheader('Location', None)
if url is None:
error_msg = 'Redirecting response was missing "Location" header'
logging.error(error_msg)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, error_msg)
else:
response.set_statuscode(http_response.status)
response.set_content(http_response_data[:MAX_RESPONSE_SIZE])
for header_key, header_value in http_response.getheaders():
header_proto = response.add_header()
header_proto.set_key(header_key)
header_proto.set_value(header_value)
if len(http_response_data) > MAX_RESPONSE_SIZE:
response.set_contentwastruncated(True)
break
else:
error_msg = 'Too many repeated redirects'
logging.error(error_msg)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, error_msg)
def _SanitizeHttpHeaders(self, untrusted_headers, headers):
"""Cleans "unsafe" headers from the HTTP request/response.
Args:
untrusted_headers: set of untrusted headers names
headers: list of string pairs, first is header name and the second is header's value
"""
return (h for h in headers if h.key().lower() not in untrusted_headers)
| apache-2.0 |
so-sure/tagged-route53 | tagged-route53.py | 1 | 10149 | #!/usr/bin/python
import requests
import boto3
import argparse
class Dns(object):
    """Registers an EC2 instance in Route 53 based on its role/env/index tags.

    Workflow (see run_update_dns): read the instance's tags, assign it the
    first free numeric index among its role/env peers, optionally tag it with
    that index and a Name, build a hostname like `<role>-<index>.<env>.<domain>`
    and UPSERT an A record for it in the matching Route 53 hosted zone.
    """
    # Default constructor of the class.
    def __init__(self):
        self.ec2_client = boto3.client('ec2')
        self.dns_client = boto3.client('route53')
        self.role = None                    # value of the role tag (or --role override)
        self.env = None                     # value of the env tag (or --env override)
        self.instance_id = None             # EC2 instance id being processed
        self.instances = None               # {instance_id: index} for all running peers
        self.indexes = None                 # list of index tag values already in use (strings)
        self.instance_count = None          # index chosen for this instance
        self.hostname = None                # fully qualified DNS name to register
        self.ip = None                      # IP address to publish in the A record
        self.use_public_ip = None           # publish public instead of private IP
        self.domain = None                  # hosted zone / DNS suffix
        self.set_tag_name = True            # also set the EC2 'Name' tag
        self.set_dns_registration = True    # actually perform the Route 53 update
        self.force_dns_registration = False # update DNS even if the index already existed
        self.tag_env = None                 # name of the env tag (default 'env')
        self.tag_role = None                # name of the role tag (default 'role')
        self.tag_index = None               # name of the index tag (default 'index')
        self.name = None                    # explicit hostname override (--name)
        self.update_dns = True              # cleared when the instance already has an index
        self.quiet = False                  # suppress progress output
        self.update_index = True            # cleared when the index tag is already set
    def current_instance(self):
        """Look up this machine's instance id from the EC2 metadata service."""
        response = requests.get('http://169.254.169.254/latest/meta-data/instance-id')
        self.instance_id = response.text
        if not self.quiet:
            print 'Instance: %s' % (self.instance_id)
    def current_public_ip(self):
        """Set self.ip to the instance's public IP (DescribeInstances)."""
        response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
        instances = response['Reservations']
        self.ip = instances[0]['Instances'][0]['PublicIpAddress']
        if not self.quiet:
            print 'IP: %s' % (self.ip)
    def current_private_ip(self):
        """Set self.ip to the instance's private IP (DescribeInstances)."""
        response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
        instances = response['Reservations']
        self.ip = instances[0]['Instances'][0]['PrivateIpAddress']
        if not self.quiet:
            print 'IP: %s' % (self.ip)
    def current_role_env(self):
        """Fill self.env/self.role from the instance's tags (unless overridden)."""
        if self.instance_id is None:
            self.current_instance()
        response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
        instances = response['Reservations']
        # Only 1 instance
        tags = instances[0]['Instances'][0]['Tags']
        for tag in tags:
            # Tag values never overwrite values passed on the command line.
            if self.env is None and tag['Key'] == self.tag_env:
                self.env = tag['Value']
            elif self.role is None and tag['Key'] == self.tag_role:
                self.role = tag['Value']
        if not self.quiet:
            print 'Env: %s Role: %s' % (self.env, self.role)
    def get_instance_ids(self):
        """Collect all running peers with the same env/role tags.

        Populates self.instances ({instance_id: int index, -1 if untagged})
        and self.indexes (list of index tag values in use, as strings).
        """
        if self.env is None or self.role is None:
            self.current_role_env()
        filters = [
            { 'Name':'tag:%s' % (self.tag_env), 'Values':[self.env]},
            { 'Name':'tag:%s' % (self.tag_role), 'Values':[self.role]}
        ]
        response = self.ec2_client.describe_instances(Filters=filters)
        instances = response['Reservations']
        if not self.quiet:
            print 'Checking tags'
        self.instances = {}
        self.indexes = []
        for instance in instances:
            index = -1
            if instance['Instances'][0]['State']['Name'] == 'running':
                instance_id = instance['Instances'][0]['InstanceId']
                tags = instance['Instances'][0]['Tags']
                for tag in tags:
                    if tag['Key'] == self.tag_index:
                        index = tag['Value']
                        self.indexes.append(index)
                self.instances[instance_id] = int(index)
    def get_instance_count(self):
        """Choose this instance's index and (optionally) tag it.

        Reuses an existing index tag if present (and then disables further
        DNS/index updates); otherwise picks the lowest free index, tags the
        instance with it, and optionally sets the 'Name' tag.
        """
        if self.instances is None:
            self.get_instance_ids()
        # the current instance will be in the list, but as we want to start at 1, that's good
        self.instance_count = len(self.instances)
        if not self.quiet:
            print 'Instance count: %d' % (self.instance_count)
        if self.instances.has_key(self.instance_id) and self.instances[self.instance_id] >= 0:
            # Index tag already present: keep it and skip re-registration.
            self.instance_count = self.instances[self.instance_id]
            if not self.quiet:
                print 'Index is already set %s' % (self.instance_count)
            self.update_dns = False
            self.update_index = False
        if self.instance_count < 1:
            raise Exception('Instance count must be 1 or more')
        if not self.quiet:
            print self.indexes
        if self.update_index:
            # May be replacing a previous server
            for i in range(1, self.instance_count + 2):
                if str(i) not in self.indexes:
                    self.instance_count = i
                    break
        if not self.quiet:
            print 'Using index: %d' % (self.instance_count)
        if self.update_index:
            self.ec2_client.create_tags(
                Resources=[self.instance_id],
                Tags=[{'Key': self.tag_index, 'Value': str(self.instance_count) }]
            )
        if self.set_tag_name:
            name = '%s-%s-%d' % (self.env, self.role, self.instance_count)
            if not self.quiet:
                print 'Setting instance name: %s' % (name)
            self.ec2_client.create_tags(
                Resources=[self.instance_id],
                Tags=[{'Key': 'Name', 'Value': name }]
            )
    def get_hostname(self):
        """Build the FQDN: --name if given, else <role>-<index>.<env>.<domain>.

        In quiet mode the hostname is the script's only stdout output.
        """
        if self.instance_count is None:
            self.get_instance_count()
        if self.name is None:
            self.hostname = '%s-%d.%s.%s' % (self.role, self.instance_count, self.env, self.domain)
        else:
            self.hostname = "%s.%s" % (self.name, self.domain)
        if not self.quiet:
            print 'Hostname: %s' % (self.hostname)
        else:
            print self.hostname
    def run_update_all(self):
        """Run the DNS update for every running peer matching env/role."""
        self.get_instance_ids()
        if not self.quiet:
            print self.instances
        for instance_id in self.instances.keys():
            if not self.quiet:
                print 'Updating instance %s' % (instance_id)
            self.instance_id = instance_id
            self.run_update_dns()
            # Reset per-instance state so the next iteration recomputes it;
            # remember the index just consumed so it is not reused.
            self.indexes.append(str(self.instance_count))
            self.hostname = None
            self.ip = None
            self.instance_count = None
            self.update_dns = True
    def run_update_dns(self):
        """UPSERT an A record for this instance in the domain's hosted zone."""
        if self.hostname is None:
            self.get_hostname()
        if not self.update_dns and not self.force_dns_registration:
            if not self.quiet:
                print 'Skipping dns update as server already exists'
            return
        if not self.set_dns_registration:
            if not self.quiet:
                print 'Skipping dns registration as per request'
            return
        if self.ip is None:
            if self.use_public_ip:
                self.current_public_ip()
            else:
                self.current_private_ip()
        # NOTE(review): assumes the first zone returned for the domain is the
        # right one (e.g. public vs private zones) — confirm for multi-zone setups.
        response = self.dns_client.list_hosted_zones_by_name(
            DNSName=self.domain
        )
        zone_id = response['HostedZones'][0]['Id'].replace('/hostedzone/', '')
        response = self.dns_client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                'Changes': [
                    {
                        'Action': 'UPSERT',
                        'ResourceRecordSet': {
                            'Name': self.hostname,
                            'Type': 'A',
                            'TTL': 60,
                            'ResourceRecords': [
                                {
                                    'Value': self.ip
                                },
                            ]
                        }
                    },
                ]
            }
        )
        if not self.quiet:
            print response
    def main(self):
        """Parse command-line arguments and run the single or all-peers update."""
        parser = argparse.ArgumentParser(description='Update route 53 dns based on server tags')
        parser.add_argument('domain', help='Domain name')
        parser.add_argument('--skip-tag-name', action='store_true', default=False, help='Skip setting the tag name')
        parser.add_argument('--skip-dns-registration', action='store_true', default=False, help='If set, only display the dns entry and do run any dns updates')
        parser.add_argument('--force-dns-registration', action='store_true', default=False, help='If set, only display the dns entry and do run any dns updates')
        parser.add_argument('--quiet', action='store_true', default=False, help='If set, only output the hostname')
        parser.add_argument('--tag-role', default='role', help='Role tag name (default: %(default)s)')
        parser.add_argument('--tag-env', default='env', help='Environment tag name (default: %(default)s)')
        parser.add_argument('--tag-index', default='index', help='Index tag name (default: %(default)s)')
        parser.add_argument('--public-ip', action='store_true', default=False, help='Use public ip instead of private ip')
        parser.add_argument('--name', default=None, help='Ignore tags and just set name')
        parser.add_argument('--role', default=None, help='Ignore tags and use given role')
        parser.add_argument('--env', default=None, help='Ignore tags and use given env')
        parser.add_argument('--instance-id', default=None, help='If given, use instance id given rather than local instance')
        parser.add_argument('--all-tags', action='store_true', default=False, help='If given, run for all instances that match tags for role/env. Can be used with --role and/or --env.')
        args = parser.parse_args()
        self.domain = args.domain
        self.set_tag_name = not args.skip_tag_name
        self.set_dns_registration = not args.skip_dns_registration
        self.force_dns_registration = args.force_dns_registration
        self.quiet = args.quiet
        self.tag_env = args.tag_env
        self.tag_role = args.tag_role
        self.role = args.role
        self.env = args.env
        self.tag_index = args.tag_index
        self.name = args.name
        self.use_public_ip = args.public_ip
        self.instance_id = args.instance_id
        if args.all_tags:
            self.run_update_all()
        else:
            self.run_update_dns()
# Script entry point: build the helper and run the argparse-driven update.
if __name__ == '__main__':
    launcher = Dns()
    launcher.main()
| apache-2.0 |
El-Nath/bidji-find5 | scripts/gcc-wrapper.py | 234 | 4095 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
"f_qdss.c:586",
"mipi_tc358764_dsi2lvds.c:746",
"dynamic_debug.h:75",
"hci_conn.c:407",
"f_qdss.c:740",
"mipi_novatek.c:569",
"swab.h:34",
])
# Capture the name of the output object file (from '-o') so the partially
# built object can be removed if a forbidden warning is seen.
ofile = None
# Matches "path/file.ext:LINE:[COL:] warning:"; group(2) is "file.ext:LINE",
# the key used against allowed_warnings above.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc. The messages we care about have a filename, and a warning.

    If the warning's "file.ext:line" key is not whitelisted in
    allowed_warnings, delete the output object (if any) and abort the build
    with exit status 1.
    """
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)
        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        # Fail the compile so the build system treats the warning as an error.
        sys.exit(1)
def run_gcc():
    """Run the real compiler with our argv, scanning stderr for warnings.

    Returns the compiler's exit status, or an errno value if it could not
    be launched at all. interpret_warning() may terminate the process early.
    """
    args = sys.argv[1:]
    # Look for -o so interpret_warning() knows which object file to delete.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    # NOTE(review): 'compiler' is assigned but never used in this function.
    compiler = sys.argv[0]
    try:
        # Echo stderr line by line while checking each line for warnings.
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            print line,
            interpret_warning(line)
        result = proc.wait()
    except OSError as e:
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)
    return result
# Propagate the wrapped compiler's exit status to the caller (e.g. make).
if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
OpenBfS/dokpool-plone | Plone/src/docpool.config/docpool/config/local/elan_de.py | 1 | 8161 | # -*- coding: utf-8 -*-
from ..utils import set_local_roles
from datetime import datetime
from docpool.base.content.documentpool import APPLICATIONS_KEY
from docpool.config import _
from docpool.config.general.elan import connectTypesAndCategories
from docpool.config.local.base import CONTENT_AREA
from docpool.config.utils import CHILDREN
from docpool.config.utils import createPloneObjects
from docpool.config.utils import ID
from docpool.config.utils import TITLE
from docpool.config.utils import TYPE
from docpool.elan.config import ELAN_APP
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import log_exc
from zExceptions import BadRequest
from zope.annotation.interfaces import IAnnotations
import transaction
# ELAN specific structures
def dpAdded(self):
    """Set up the ELAN application inside a document pool.

    Marks the pool's annotations with the ELAN app, creates the archive and
    content-configuration structure, wires up the placeful workflow on the
    archive, then creates the ELAN users/groups and local roles. Commits
    after each structural step. `fresh` is True on first-time setup only.
    """
    annotations = IAnnotations(self)
    fresh = ELAN_APP not in annotations[APPLICATIONS_KEY]
    if fresh:
        annotations[APPLICATIONS_KEY].append(ELAN_APP)
    copyCurrentSituation(self, fresh)
    transaction.commit()
    createBasicPortalStructure(self, fresh)
    transaction.commit()
    createContentConfig(self, fresh)
    transaction.commit()
    if fresh:
        self.esd.correctAllDocTypes()
        transaction.commit()
    connectTypesAndCategories(self)
    placeful_wf = getToolByName(self, 'portal_placeful_workflow')
    try:
        self.archive.manage_addProduct[
            'CMFPlacefulWorkflow'
        ].manage_addWorkflowPolicyConfig()
    except BadRequest as e:
        # A BadRequest here means the workflow policy config already exists
        # (re-run); log and continue with the existing config.
        # print type(e)
        log_exc(e)
    # Apply the 'elan-archive' workflow policy to the archive and everything
    # below it, without recomputing security settings.
    config = placeful_wf.getWorkflowPolicyConfig(self.archive)
    placefulWfName = 'elan-archive'
    config.setPolicyIn(policy=placefulWfName, update_security=False)
    config.setPolicyBelow(policy=placefulWfName, update_security=False)
    createELANUsers(self)
    createELANGroups(self)
    setELANLocalRoles(self)
    self.reindexAll()
# Declarative content trees consumed by createPloneObjects(); each node maps
# the shared TYPE/TITLE/ID/CHILDREN keys to a Plone object to create.
# Top-level structure: the archive container (with the 'elan' behavior).
BASICSTRUCTURE = [
    {TYPE: 'ELANArchives', TITLE: u'Archive', ID: 'archive', CHILDREN: [], 'local_behaviors': ['elan']}
]
# Structure created inside each archive: the current situation plus the
# shared content area.
ARCHIVESTRUCTURE = [
    {
        TYPE: 'ELANCurrentSituation',
        TITLE: 'Elektronische Lagedarstellung',
        ID: 'esd',
        CHILDREN: [],
    },
    CONTENT_AREA,
]
# Stand-alone pages that also get content-admin local roles
# (see setELANLocalRoles).
SPECIAL_PAGES = [
    {TYPE: 'Text', TITLE: u'Hilfe', ID: 'help', CHILDREN: []},
]
# Content-configuration area: events (with a pre-created routine-mode event),
# ticker, help text and dashboard config, followed by the special pages.
# NOTE(review): "TimeOfEvent": datetime.now() is evaluated once at module
# import time, not when the event object is created — confirm this is intended.
ADMINSTRUCTURE = [
    {
        TYPE: 'ELANContentConfig',
        TITLE: 'Konfiguration Inhalte',
        ID: 'contentconfig',
        CHILDREN: [
            {
                TYPE: 'DPEvents',
                TITLE: u'Ereignisse',
                ID: 'scen',
                CHILDREN: [
                    {
                        TYPE: 'DPEvent',
                        TITLE: u'Normalfall',
                        ID: 'routinemode',
                        "Status": "active",
                        "TimeOfEvent": datetime.now(),
                        CHILDREN: [],
                    }
                ],
            },
            {TYPE: 'Text', TITLE: u'Ticker', ID: 'ticker', CHILDREN: []},
            {TYPE: 'Text', TITLE: u'Hilfe', ID: 'help', CHILDREN: []},
            {
                TYPE: 'DashboardsConfig',
                TITLE: u'Dokumentsammlungen Pinnwand',
                ID: 'dbconfig',
                CHILDREN: [],
            },
        ],
    }
] + SPECIAL_PAGES
def createBasicPortalStructure(plonesite, fresh):
    """Create the archive container (BASICSTRUCTURE) in the given site.

    `fresh` is forwarded to createPloneObjects to distinguish first-time
    creation from a re-run.
    """
    createPloneObjects(plonesite, BASICSTRUCTURE, fresh)
def createContentConfig(plonesite, fresh):
    """Create the content-configuration tree (ADMINSTRUCTURE) in the site.

    `fresh` is forwarded to createPloneObjects to distinguish first-time
    creation from a re-run.
    """
    createPloneObjects(plonesite, ADMINSTRUCTURE, fresh)
def createELANUsers(self):
    """Create the per-pool '<prefix>_elanadmin' and '<prefix>_contentadmin' members.

    Both members are tagged with the document pool's UID via the 'dp'
    property so they can be associated with this pool.

    NOTE(review): both accounts get the hardcoded password "admin" — this
    must not reach a production deployment unchanged.
    """
    # Set type for user folders
    mtool = getToolByName(self, "portal_membership")
    prefix = self.prefix or self.getId()
    prefix = str(prefix)
    title = self.Title()
    mtool.addMember(
        '%s_elanadmin' % prefix, 'ELAN Administrator (%s)' % title, [
            'Member'], []
    )
    elanadmin = mtool.getMemberById('%s_elanadmin' % prefix)
    elanadmin.setMemberProperties(
        {"fullname": 'ELAN Administrator (%s)' % title, "dp": self.UID()}
    )
    elanadmin.setSecurityProfile(password="admin")
    mtool.addMember(
        '%s_contentadmin' % prefix, 'Content Admin (%s)' % title, [
            'Member'], []
    )
    contentadmin = mtool.getMemberById('%s_contentadmin' % prefix)
    contentadmin.setMemberProperties(
        {"fullname": 'Content Admin (%s)' % title, "dp": self.UID()}
    )
    contentadmin.setSecurityProfile(password="admin")
def setELANLocalRoles(self):
    """Grant ELAN local roles on the pool's sub-objects to the per-pool groups.

    Group-name templates contain "{0}", filled with the pool prefix by
    set_local_roles.

    NOTE(review): the role list below ("Normal local members: Reader", ...)
    does not match the roles actually granted in the body — treat the code,
    not this legacy summary, as authoritative:
    Normal local members: Reader
    Administrators: Site Administrator
    ContentAdministrators: Reviewer
    Receivers: Owner, Editor
    Senders: Contributor
    """
    contentadmin = "{0}_ContentAdministrators"
    set_local_roles(self, self, "{0}_SituationReportAdmins", ["SituationReportAdmin"])
    set_local_roles(self, self.contentconfig, contentadmin, ["ContentAdmin"])
    # Special pages (e.g. 'help') are managed by the content admins too.
    for pagedef in SPECIAL_PAGES:
        name = pagedef[ID]
        set_local_roles(self, self[name], contentadmin, ["ContentAdmin"])
    set_local_roles(self, self.archive, contentadmin, ["DocPoolAdmin"])
    set_local_roles(self, self.content.Groups, contentadmin, ["Site Administrator"])
    set_local_roles(self, self.esd, contentadmin, ["ContentAdmin"])
    set_local_roles(self, self, "{0}_ELANUsers", ["ELANUser"])
    set_local_roles(self, self.config, contentadmin, ["Owner"])
def createELANGroups(self):
    """Create the per-pool ELAN groups and assign the admin users to them.

    Creates '<prefix>_ContentAdministrators', '<prefix>_ELANUsers' and
    '<prefix>_SituationReportAdmins' (each tagged with the pool UID via
    'dp'), and adds the elanadmin/contentadmin/dpadmin users created
    elsewhere to the appropriate groups.
    """
    # We need local groups for
    # - General access to the ESD
    # - Administration
    # - Content Administration
    # - Receiving content from others
    # - Sending content to others
    prefix = self.prefix or self.getId()
    prefix = str(prefix)
    title = self.Title()
    gtool = getToolByName(self, 'portal_groups')
    gtool.addPrincipalToGroup('%s_elanadmin' % prefix, '%s_Members' % prefix)
    gtool.addPrincipalToGroup(
        '%s_contentadmin' %
        prefix,
        '%s_Members' %
        prefix)
    gtool.addPrincipalToGroup(
        '%s_elanadmin' %
        prefix,
        '%s_Administrators' %
        prefix)
    # Content administrator group
    props = {
        'allowedDocTypes': [],
        'title': 'Content Administrators (%s)' % title,
        'description': 'Responsible for the definition of scenarios, ticker texts and additional content.',
        'dp': self.UID(),
    }
    gtool.addGroup("%s_ContentAdministrators" % prefix, properties=props)
    gtool.addPrincipalToGroup(
        '%s_contentadmin' % prefix, '%s_ContentAdministrators' % prefix
    )
    # Group for ELAN application rights
    props = {
        'allowedDocTypes': [],
        'title': 'ELAN Users (%s)' % title,
        'description': 'Users with access to ELAN functions.',
        'dp': self.UID(),
    }
    gtool.addGroup("%s_ELANUsers" % prefix, properties=props)
    gtool.addPrincipalToGroup('%s_elanadmin' % prefix, '%s_ELANUsers' % prefix)
    gtool.addPrincipalToGroup(
        '%s_contentadmin' %
        prefix,
        '%s_ELANUsers' %
        prefix)
    gtool.addPrincipalToGroup('%s_dpadmin' % prefix, '%s_ELANUsers' % prefix)
    # Group for Situation Report users
    props = {
        'allowedDocTypes': [],
        'title': 'Situation Report Admins/Lagebild (%s)' % title,
        'description': 'Users who can manage situation reports.',
        'dp': self.UID(),
    }
    gtool.addGroup("%s_SituationReportAdmins" % prefix, properties=props)
    gtool.addPrincipalToGroup(
        '%s_contentadmin' % prefix, '%s_SituationReportAdmins' % prefix)
    gtool.addPrincipalToGroup(
        '%s_dpadmin' % prefix, '%s_SituationReportAdmins' % prefix)
    gtool.addPrincipalToGroup(
        '%s_elanadmin' % prefix, '%s_SituationReportAdmins' % prefix)
def copyCurrentSituation(self, fresh):
    """On first-time setup, copy the ESD into the pool as 'Aktuelle Lage'.

    No-op unless `fresh` is True. The copy is retitled, reindexed, and
    moved to the first position in the pool.
    """
    if not fresh:
        return
    esd = self.esd
    # Imported locally (only needed on the fresh-setup path).
    from docpool.base.utils import _copyPaste
    _copyPaste(esd, self, safe=False)
    self.esd.setTitle(_("Aktuelle Lage"))
    self.esd.reindexObject()
    # make sure the current situation is first
    self.moveObject("esd", 0)
def dpRemoved(self):
    """Hook called when the ELAN application is removed from a document pool.

    ELAN needs no teardown, so this hook is a deliberate no-op.

    @param self: the document pool the application was removed from
    @return: None
    """
    return None
| gpl-3.0 |
RazorLove/cloaked-octo-spice | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# Interactive Python 2 command-line front end for a bitcoind JSON-RPC server:
# takes one command name on argv[1], prompts for its arguments with
# raw_input, and prints the RPC result. For calls with optional arguments,
# an inner try first attempts the call with the entered values and falls
# back to the no-argument form if that fails.
# NOTE(review): every handler uses a bare `except:` that swallows all errors
# (including typos in arguments) behind a generic message — kept as-is.
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local bitcoind RPC port, with credentials if configured.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Bitcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Bitcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
    try:
        data = raw_input("Data (optional): ")
        # NOTE(review): this branch calls gettransaction, not getwork —
        # looks like a copy/paste bug inherited from upstream; confirm.
        try:
            print access.gettransaction(data)
        except:
            print access.gettransaction()
    except:
        print "\n---An error occurred---\n"
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        # Unlock the wallet for 60 seconds.
        pwd = raw_input("Enter wallet passphrase: ")
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
    print
else:
    print "Command not found or not supported"
mycodeday/crm-platform | portal_gamification/__openerp__.py | 381 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the Portal Gamification bridge module.
# NOTE: the original manifest declared 'category' twice ('Tools' near the
# top and 'Hidden' at the bottom); in a Python dict literal the later key
# silently wins, so only the effective value ('Hidden') is kept here.
{
    'name': 'Portal Gamification',
    'version': '1',
    'complexity': 'easy',
    'description': """
This module adds security rules for gamification to allow portal users to participate to challenges
===================================================================================================
    """,
    'author': 'OpenERP SA',
    # Bridge module: depends on both apps and auto-installs when both are
    # present (see 'auto_install' below).
    'depends': ['gamification', 'portal'],
    'data': [
        'security/ir.model.access.csv',
        'security/portal_security.xml',
    ],
    'installable': True,
    'auto_install': True,
    'category': 'Hidden',
}
| gpl-3.0 |
thresholdsoftware/asylum-v2.0 | openerp/addons/portal/acquirer.py | 22 | 5378 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from urllib import quote as quote
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools import float_repr
_logger = logging.getLogger(__name__)
try:
from mako.template import Template as MakoTemplate
except ImportError:
_logger.warning("payment_acquirer: mako templates not available, payment acquirer will not work!")
class acquirer(osv.Model):
    """Online payment acquirer whose HTML payment form is produced from a
    per-acquirer Mako template and embedded into portal documents (customer
    invoices, etc.)."""
    _name = 'portal.payment.acquirer'
    _description = 'Online Payment Acquirer'
    _columns = {
        'name': fields.char('Name', required=True),
        'form_template': fields.text('Payment form template (HTML)', translate=True, required=True),
        'visible': fields.boolean('Visible', help="Make this payment acquirer available in portal forms (Customer invoices, etc.)"),
    }
    _defaults = {
        'visible': True,
    }
    def render(self, cr, uid, id, object, reference, currency, amount, context=None, **kwargs):
        """Render this acquirer's ``form_template`` as a Mako template.

        :param id: id (or list of ids; only the first is used) of the acquirer
        :param object: browse record of the document being paid
        :param reference: payment reference passed to the template
        :param currency: currency browse record for ``amount``
        :param amount: amount to pay
        :return: rendered and stripped unicode HTML, or None when rendering
                 fails (rendering is best-effort, see except branch)
        """
        if not isinstance(id, (int,long)):
            # accept both a bare id and a list of ids
            id = id[0]
        this = self.browse(cr, uid, id)
        if context is None:
            context = {}
        try:
            i18n_kind = _(object._description) # may fail to translate, but at least we try
            result = MakoTemplate(this.form_template).render_unicode(object=object,
                                                                     reference=reference,
                                                                     currency=currency,
                                                                     amount=amount,
                                                                     kind=i18n_kind,
                                                                     quote=quote,
                                                                     # context kw would clash with mako internals
                                                                     ctx=context,
                                                                     format_exceptions=True)
            return result.strip()
        except Exception:
            # a broken template must not break the document's form view:
            # log the full traceback and render nothing for this acquirer
            _logger.exception("failed to render mako template value for payment.acquirer %s: %r", this.name, this.form_template)
            return
    def _wrap_payment_block(self, cr, uid, html_block, amount, currency, context=None):
        # Wrap the concatenated acquirer forms into the common "pay online"
        # HTML block. When no acquirer produced any form: portal users get an
        # empty string (no block at all), while back-end users get a
        # "not configured" header linking to the Bank & Cash settings
        # (``amount`` is reused to carry that message).
        if not html_block:
            link = '#action=account.action_account_config'
            payment_header = _('You can finish the configuration in the <a href="%s">Bank&Cash settings</a>') % link
            amount = _('No online payment acquirers configured')
            # read the current user's groups as superuser to avoid access errors
            group_ids = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).groups_id
            if any(group.is_portal for group in group_ids):
                return ''
        else:
            payment_header = _('Pay safely online')
            amount_str = float_repr(amount, self.pool.get('decimal.precision').precision_get(cr, uid, 'Account'))
            currency_str = currency.symbol or currency.name
            amount = u"%s %s" % ((currency_str, amount_str) if currency.position == 'before' else (amount_str, currency_str))
        # ``%%s`` survives the first substitution as a literal %s, which the
        # final ``result % html_block`` below then fills with the forms
        result = """<div class="payment_acquirers">
                        <div class="payment_header">
                            <div class="payment_amount">%s</div>
                            %s
                        </div>
                        %%s
                    </div>""" % (amount, payment_header)
        return result % html_block
    def render_payment_block(self, cr, uid, object, reference, currency, amount, context=None, **kwargs):
        """ Renders all visible payment acquirer forms for the given rendering context, and
            return them wrapped in an appropriate HTML block, ready for direct inclusion
            in an OpenERP v7 form view """
        acquirer_ids = self.search(cr, uid, [('visible', '=', True)])
        # no visible acquirer at all: return None (nothing to embed)
        if not acquirer_ids:
            return
        html_forms = []
        for this in self.browse(cr, uid, acquirer_ids):
            content = this.render(object, reference, currency, amount, context=context, **kwargs)
            if content:
                html_forms.append(content)
        html_block = '\n'.join(filter(None,html_forms))
        return self._wrap_payment_block(cr, uid, html_block, amount, currency, context=context)
| agpl-3.0 |
mdietrichc2c/OCB | addons/account/wizard/account_period_close.py | 341 | 2646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_period_close(osv.osv_memory):
    """Wizard closing the accounting periods selected in the active view."""
    _name = "account.period.close"
    _description = "period close"
    _columns = {
        'sure': fields.boolean('Check this box'),
    }

    def data_save(self, cr, uid, ids, context=None):
        """Close every period in ``context['active_ids']``.

        A period still holding draft journal entries cannot be closed; the
        related journal.period rows and the period itself are moved to the
        'done' state via direct SQL, then the ORM cache is invalidated.
        """
        move_pool = self.pool.get('account.move')
        closed_state = 'done'
        for wizard in self.read(cr, uid, ids, context=context):
            if not wizard['sure']:
                # user did not tick the confirmation box: skip silently
                continue
            for period_id in context['active_ids']:
                draft_move_ids = move_pool.search(
                    cr, uid,
                    [('period_id', '=', period_id), ('state', '=', "draft")],
                    context=context)
                if draft_move_ids:
                    raise osv.except_osv(_('Invalid Action!'), _('In order to close a period, you must first post related journal entries.'))
                # direct SQL bypasses the ORM, hence the cache invalidation below
                cr.execute('update account_journal_period set state=%s where period_id=%s', (closed_state, period_id))
                cr.execute('update account_period set state=%s where id=%s', (closed_state, period_id))
        self.invalidate_cache(cr, uid, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mayankcu/Django-social | venv/Lib/encodings/iso8859_2.py | 593 | 13660 | """ Python Character Mapping Codec iso8859_2 generated from 'MAPPINGS/ISO8859/8859-2.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-2 codec backed by the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding is context-free, so each chunk
    is encoded independently and ``final`` needs no special handling."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; each byte maps to exactly one character, so no
    state is carried between chunks and ``final`` is ignored."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Codec supplies encode(); codecs.StreamWriter supplies the stream plumbing.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Codec supplies decode(); codecs.StreamReader supplies the stream plumbing.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record registered for the 'iso8859-2' encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-2',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u02d8' # 0xA2 -> BREVE
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u013d' # 0xA5 -> LATIN CAPITAL LETTER L WITH CARON
u'\u015a' # 0xA6 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u0164' # 0xAB -> LATIN CAPITAL LETTER T WITH CARON
u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u02db' # 0xB2 -> OGONEK
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\u013e' # 0xB5 -> LATIN SMALL LETTER L WITH CARON
u'\u015b' # 0xB6 -> LATIN SMALL LETTER S WITH ACUTE
u'\u02c7' # 0xB7 -> CARON
u'\xb8' # 0xB8 -> CEDILLA
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u0165' # 0xBB -> LATIN SMALL LETTER T WITH CARON
u'\u017a' # 0xBC -> LATIN SMALL LETTER Z WITH ACUTE
u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Inverse mapping (unicode character -> byte) built from decoding_table above.
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause |
hifly/OpenUpgrade | addons/hr_holidays/hr_holidays.py | 159 | 33482 | # -*- coding: utf-8 -*-
##################################################################################
#
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
# and 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# $Id: hr.py 4656 2006-11-24 09:58:42Z Cyp $
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import math
import time
from operator import attrgetter
from openerp.exceptions import Warning
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_holidays_status(osv.osv):
    """Leave type (legal leave, sickness, ...) with per-employee counters."""
    _name = "hr.holidays.status"
    _description = "Leave Type"

    def get_days(self, cr, uid, ids, employee_id, context=None):
        """Compute the four leave counters of each given leave type for one employee.

        :return: {leave_type_id: {'max_leaves', 'leaves_taken',
                  'remaining_leaves', 'virtual_remaining_leaves'}} aggregated
                  over the employee's confirmed/validated requests; the
                  'virtual' counter also includes requests awaiting approval.
        """
        result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,
                                virtual_remaining_leaves=0)) for id in ids)
        holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id),
                                                                ('state', 'in', ['confirm', 'validate1', 'validate']),
                                                                ('holiday_status_id', 'in', ids)
                                                                ], context=context)
        for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context):
            status_dict = result[holiday.holiday_status_id.id]
            if holiday.type == 'add':
                # allocation: increases the available total
                status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp
                if holiday.state == 'validate':
                    status_dict['max_leaves'] += holiday.number_of_days_temp
                    status_dict['remaining_leaves'] += holiday.number_of_days_temp
            elif holiday.type == 'remove':  # number of days is negative
                status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp
                if holiday.state == 'validate':
                    status_dict['leaves_taken'] += holiday.number_of_days_temp
                    status_dict['remaining_leaves'] -= holiday.number_of_days_temp
        return result

    def _user_left_days(self, cr, uid, ids, name, args, context=None):
        """Function-field getter for the leave counters.

        Uses context['employee_id'] when provided, otherwise the employee
        linked to the current user; falls back to all-zero counters when no
        employee can be resolved.
        """
        employee_id = False
        if context and 'employee_id' in context:
            employee_id = context['employee_id']
        else:
            employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
            if employee_ids:
                employee_id = employee_ids[0]
        if employee_id:
            res = self.get_days(cr, uid, ids, employee_id, context=context)
        else:
            # BUGFIX: 'virtual_remaining_leaves' was missing from this
            # fallback, although it is one of the multi='user_left_days'
            # function fields declared below -- every field of the group must
            # be present in the returned dicts.
            res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0,
                                 'max_leaves': 0, 'virtual_remaining_leaves': 0}) for res_id in ids)
        return res

    _columns = {
        'name': fields.char('Leave Type', size=64, required=True, translate=True),
        'categ_id': fields.many2one('calendar.event.type', 'Meeting Type',
            help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'),
        'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'),
        'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'),
        'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."),
        'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holidays requests with a positive value.', multi='user_left_days'),
        'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holidays requests with a negative value.', multi='user_left_days'),
        'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'),
        'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'),
        'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."),
    }
    _defaults = {
        'color_name': 'red',
        'active': True,
    }

    def name_get(self, cr, uid, ids, context=None):
        """Append '(taken/max)' to the type name when the counters are
        meaningful, i.e. an employee is known and the type is limited."""
        if context is None:
            context = {}
        if not context.get('employee_id',False):
            # leave counts is based on employee_id, would be inaccurate if not based on correct employee
            return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context)
        res = []
        for record in self.browse(cr, uid, ids, context=context):
            name = record.name
            if not record.limit:
                name = name + ('  (%g/%g)' % (record.leaves_taken or 0.0, record.max_leaves or 0.0))
            res.append((record.id, name))
        return res
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Leave"
_order = "type desc, date_from asc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'hr_holidays.mt_holidays_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'validate',
'hr_holidays.mt_holidays_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'refuse',
'hr_holidays.mt_holidays_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
def _employee_get(self, cr, uid, context=None):
emp_id = context.get('default_employee_id', False)
if emp_id:
return emp_id
ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
def _compute_number_of_days(self, cr, uid, ids, name, args, context=None):
result = {}
for hol in self.browse(cr, uid, ids, context=context):
if hol.type=='remove':
result[hol.id] = -hol.number_of_days_temp
else:
result[hol.id] = hol.number_of_days_temp
return result
def _get_can_reset(self, cr, uid, ids, name, arg, context=None):
"""User can reset a leave request if it is its own leave request or if
he is an Hr Manager. """
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1]
if group_hr_manager_id in [g.id for g in user.groups_id]:
return dict.fromkeys(ids, True)
result = dict.fromkeys(ids, False)
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid:
result[holiday.id] = True
return result
def _check_date(self, cr, uid, ids, context=None):
for holiday in self.browse(cr, uid, ids, context=context):
domain = [
('date_from', '<=', holiday.date_to),
('date_to', '>=', holiday.date_from),
('employee_id', '=', holiday.employee_id.id),
('id', '!=', holiday.id),
('state', 'not in', ['cancel', 'refuse']),
]
nholidays = self.search_count(cr, uid, domain, context=context)
if nholidays:
return False
return True
_check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context)
_columns = {
'name': fields.char('Description', size=64),
'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')],
'Status', readonly=True, track_visibility='onchange', copy=False,
help='The status is set to \'To Submit\', when a holiday request is created.\
\nThe status is \'To Approve\', when holiday request is confirmed by user.\
\nThe status is \'Refused\', when holiday request is refused by manager.\
\nThe status is \'Approved\', when holiday request is approved by manager.'),
'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),
'date_from': fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False),
'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False,
help='This area is automatically filled by the user who validate the leave'),
'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True),
'meeting_id': fields.many2one('calendar.event', 'Meeting'),
'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. \nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True),
'parent_id': fields.many2one('hr.holidays', 'Parent'),
'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',),
'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True),
'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True),
'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False,
help='This area is automaticly filled by the user who validate the leave with second level (If Leave type need second validation)'),
'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'),
'can_reset': fields.function(
_get_can_reset,
type='boolean'),
}
_defaults = {
'employee_id': _employee_get,
'state': 'confirm',
'type': 'remove',
'user_id': lambda obj, cr, uid, context: uid,
'holiday_type': 'employee'
}
_constraints = [
(_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from','date_to']),
(_check_holidays, 'The number of remaining leaves is not sufficient for this leave type', ['state','number_of_days_temp'])
]
_sql_constraints = [
('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) or (holiday_type='category' AND category_id IS NOT NULL))",
"The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."),
('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."),
('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."),
]
def _create_resource_leave(self, cr, uid, leaves, context=None):
'''This method will create entry in resource calendar leave object at the time of holidays validated '''
obj_res_leave = self.pool.get('resource.calendar.leaves')
for leave in leaves:
vals = {
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_id.calendar_id.id
}
obj_res_leave.create(cr, uid, vals, context=context)
return True
def _remove_resource_leave(self, cr, uid, ids, context=None):
'''This method will create entry in resource calendar leave object at the time of holidays cancel/removed'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context)
return obj_res_leave.unlink(cr, uid, leave_ids, context=context)
def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None):
result = {}
if holiday_type == 'employee' and not employee_id:
ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
if ids_employee:
result['value'] = {
'employee_id': ids_employee[0]
}
elif holiday_type != 'employee':
result['value'] = {
'employee_id': False
}
return result
def onchange_employee(self, cr, uid, ids, employee_id):
result = {'value': {'department_id': False}}
if employee_id:
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)
result['value'] = {'department_id': employee.department_id.id}
return result
# TODO: can be improved using resource calendar method
def _get_number_of_days(self, date_from, date_to):
"""Returns a float equals to the timedelta between two dates given as string."""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT)
to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT)
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
return diff_day
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel', 'confirm']:
raise osv.except_osv(_('Warning!'),_('You cannot delete a leave which is in %s state.')%(rec.state))
return super(hr_holidays, self).unlink(cr, uid, ids, context)
def onchange_date_from(self, cr, uid, ids, date_to, date_from):
"""
If there are no date set for date_to, automatically set one 8 hours later than
the date_from.
Also update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# No date_to set so far: automatically compute one 8 hours later
if date_from and not date_to:
date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)
result['value']['date_to'] = str(date_to_with_delta)
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def onchange_date_to(self, cr, uid, ids, date_to, date_from):
"""
Update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def create(self, cr, uid, values, context=None):
""" Override to avoid automatic logging of creation """
if context is None:
context = {}
context = dict(context, mail_create_nolog=True)
if values.get('state') and values['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
return super(hr_holidays, self).create(cr, uid, values, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % vals.get('state'))
return super(hr_holidays, self).write(cr, uid, ids, vals, context=context)
def holidays_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'draft',
'manager_id': False,
'manager_id2': False,
})
to_unlink = []
for record in self.browse(cr, uid, ids, context=context):
for record2 in record.linked_request_ids:
self.holidays_reset(cr, uid, [record2.id], context=context)
to_unlink.append(record2.id)
if to_unlink:
self.unlink(cr, uid, to_unlink, context=context)
return True
def holidays_first_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.holidays_first_validate_notificate(cr, uid, ids, context=context)
return self.write(cr, uid, ids, {'state':'validate1', 'manager_id': manager})
    def holidays_validate(self, cr, uid, ids, context=None):
        """Final (or second-level) approval of the leave requests.

        Stamps the approving manager, books a confidential calendar meeting
        for plain employee leave removals, and expands 'category' requests
        into one child leave per employee of the category, driving each
        child through the full validation workflow.
        """
        obj_emp = self.pool.get('hr.employee')
        # The approver is the employee linked to the current user (if any).
        ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
        manager = ids2 and ids2[0] or False
        self.write(cr, uid, ids, {'state':'validate'})
        data_holiday = self.browse(cr, uid, ids)
        for record in data_holiday:
            # With double validation the current user is the *second* approver.
            if record.double_validation:
                self.write(cr, uid, [record.id], {'manager_id2': manager})
            else:
                self.write(cr, uid, [record.id], {'manager_id': manager})
            if record.holiday_type == 'employee' and record.type == 'remove':
                meeting_obj = self.pool.get('calendar.event')
                # NOTE(review): duration assumes an 8-hour working day — confirm.
                meeting_vals = {
                    'name': record.name or _('Leave Request'),
                    'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [],
                    'duration': record.number_of_days_temp * 8,
                    'description': record.notes,
                    'user_id': record.user_id.id,
                    'start': record.date_from,
                    'stop': record.date_to,
                    'allday': False,
                    'state': 'open',            # to block that meeting date in the calendar
                    'class': 'confidential'
                }
                # Add the partner_id (if it exists) as an attendee.
                if record.user_id and record.user_id.partner_id:
                    meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)]
                # no_email avoids sending meeting invitations for the leave.
                ctx_no_email = dict(context or {}, no_email=True)
                meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email)
                self._create_resource_leave(cr, uid, [record], context=context)
                self.write(cr, uid, ids, {'meeting_id': meeting_id})
            elif record.holiday_type == 'category':
                # One child leave per employee belonging to the category.
                emp_ids = obj_emp.search(cr, uid, [('category_ids', 'child_of', [record.category_id.id])])
                leave_ids = []
                for emp in obj_emp.browse(cr, uid, emp_ids):
                    vals = {
                        'name': record.name,
                        'type': record.type,
                        'holiday_type': 'employee',
                        'holiday_status_id': record.holiday_status_id.id,
                        'date_from': record.date_from,
                        'date_to': record.date_to,
                        'notes': record.notes,
                        'number_of_days_temp': record.number_of_days_temp,
                        'parent_id': record.id,
                        'employee_id': emp.id
                    }
                    leave_ids.append(self.create(cr, uid, vals, context=None))
                for leave_id in leave_ids:
                    # TODO is it necessary to interleave the calls?
                    for sig in ('confirm', 'validate', 'second_validate'):
                        self.signal_workflow(cr, uid, [leave_id], sig)
        return True
def holidays_confirm(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.employee_id and record.employee_id.parent_id and record.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [record.id], user_ids=[record.employee_id.parent_id.user_id.id], context=context)
return self.write(cr, uid, ids, {'state': 'confirm'})
def holidays_refuse(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.state == 'validate1':
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager})
else:
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager})
self.holidays_cancel(cr, uid, ids, context=context)
return True
def holidays_cancel(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids):
# Delete the meeting
if record.meeting_id:
record.meeting_id.unlink()
# If a category that created several holidays, cancel all related
self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse')
self._remove_resource_leave(cr, uid, ids, context=context)
return True
def check_holidays(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit:
continue
leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id]
if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0:
# Raising a warning gives a more user-friendly feedback than the default constraint error
raise Warning(_('The number of remaining leaves is not sufficient for this leave type.\n'
'Please verify also the leaves waiting for validation.'))
return True
# -----------------------------
# OpenChatter and notifications
# -----------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
# if this user is a hr.manager, he should do second validations
if self.pool.get('res.users').has_group(cr, uid, 'base.group_hr_manager'):
dom = ['|'] + dom + [('state', '=', 'validate1')]
return dom
def holidays_first_validate_notificate(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
self.message_post(cr, uid, [obj.id],
_("Request approved, waiting second validation."), context=context)
# Extends resource.calendar.leaves with a back-reference to the hr.holidays
# record that generated the resource leave.
class resource_calendar_leaves(osv.osv):
    _inherit = "resource.calendar.leaves"
    _description = "Leave Detail"
    _columns = {
        # Originating leave request (set by hr_holidays._create_resource_leave).
        'holiday_id': fields.many2one("hr.holidays", "Leave Request"),
    }
# Extends hr.employee with leave-balance fields computed from hr.holidays.
class hr_employee(osv.osv):
    _inherit="hr.employee"
    def create(self, cr, uid, vals, context=None):
        # don't pass the value of remaining leave if it's 0 at the creation time, otherwise it will trigger the inverse
        # function _set_remaining_days and the system may not be configured for. Note that we don't have this problem on
        # the write because the clients only send the fields that have been modified.
        if 'remaining_leaves' in vals and not vals['remaining_leaves']:
            del(vals['remaining_leaves'])
        return super(hr_employee, self).create(cr, uid, vals, context=context)
    def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None):
        # Inverse of _get_remaining_days: turn an increase of the computed
        # 'remaining_leaves' field into an allocation request and push it
        # through the whole validation workflow.  Decreases are refused.
        employee = self.browse(cr, uid, empl_id, context=context)
        diff = value - employee.remaining_leaves
        type_obj = self.pool.get('hr.holidays.status')
        holiday_obj = self.pool.get('hr.holidays')
        # Find for holidays status
        status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context)
        # The update is only unambiguous with exactly one limited leave type.
        if len(status_ids) != 1 :
            raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids)))
        status_id = status_ids and status_ids[0] or False
        if not status_id:
            return False
        if diff > 0:
            leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context)
        elif diff < 0:
            raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests'))
        else:
            return False
        # Auto-validate the freshly created allocation.
        for sig in ('confirm', 'validate', 'second_validate'):
            holiday_obj.signal_workflow(cr, uid, [leave_id], sig)
        return True
    def _get_remaining_days(self, cr, uid, ids, name, args, context=None):
        # Sum of validated leave days (allocations minus removals, sign is
        # carried by h.number_of_days) on limited leave types, per employee.
        cr.execute("""SELECT
                sum(h.number_of_days) as days,
                h.employee_id
            from
                hr_holidays h
                join hr_holidays_status s on (s.id=h.holiday_status_id)
            where
                h.state='validate' and
                s.limit=False and
                h.employee_id in %s
            group by h.employee_id""", (tuple(ids),))
        res = cr.dictfetchall()
        remaining = {}
        for r in res:
            remaining[r['employee_id']] = r['days']
        # Employees without any validated leave default to 0.0.
        for employee_id in ids:
            if not remaining.get(employee_id):
                remaining[employee_id] = 0.0
        return remaining
    def _get_leave_status(self, cr, uid, ids, name, args, context=None):
        # Leave removal requests overlapping the current day; the date_to
        # bound uses 23:59:59 so a leave ending today still counts.
        holidays_obj = self.pool.get('hr.holidays')
        holidays_id = holidays_obj.search(cr, uid,
           [('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')),
           ('date_to','>=',time.strftime('%Y-%m-%d 23:59:59')),('type','=','remove'),('state','not in',('cancel','refuse'))],
           context=context)
        result = {}
        for id in ids:
            result[id] = {
                'current_leave_state': False,
                'current_leave_id': False,
                'leave_date_from':False,
                'leave_date_to':False,
            }
        # NOTE(review): if several leaves overlap today, the last one browsed
        # wins — confirm this is acceptable.
        for holiday in self.pool.get('hr.holidays').browse(cr, uid, holidays_id, context=context):
            result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from
            result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to
            result[holiday.employee_id.id]['current_leave_state'] = holiday.state
            result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id
        return result
    def _leaves_count(self, cr, uid, ids, field_name, arg, context=None):
        # Number of leave removal requests (any state) per employee.
        Holidays = self.pool['hr.holidays']
        return {
            employee_id: Holidays.search_count(cr,uid, [('employee_id', '=', employee_id), ('type', '=', 'remove')], context=context)
            for employee_id in ids
        }
    _columns = {
        'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. Total based on all the leave types without overriding limit.'),
        'current_leave_state': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Status", type="selection",
            selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'),
            ('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]),
        'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type",type='many2one', relation='hr.holidays.status'),
        'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'),
        'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'),
        'leaves_count': fields.function(_leaves_count, type='integer', string='Leaves'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
diegojromerolopez/djanban | src/djanban/apps/password_reseter/email_sender.py | 1 | 1645 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from crequest.middleware import CrequestMiddleware
from django.core.mail import send_mail
from django.template.loader import get_template
from django.conf import settings
from django.urls import reverse
def send_password_request_link(password_request, user):
    """Email *user* an absolute link to reset their password.

    The link embeds the password_request UUID and is built against the
    host of the current HTTP request.
    """
    current_request = CrequestMiddleware.get_request()
    reset_url = current_request.build_absolute_uri(
        reverse('password_reseter:reset_password', args=(password_request.uuid,))
    )
    template_context = {"user": user, "absolute_reset_password_url": reset_url}
    text_body = get_template('password_reseter/emails/request_password_reset.txt').render(template_context)
    html_body = get_template('password_reseter/emails/request_password_reset.html').render(template_context)
    subject = "Djanban :: Request password reset"
    return send_mail(subject, text_body, settings.EMAIL_HOST_USER, recipient_list=[user.email],
                     fail_silently=False, html_message=html_body)
# The password has been reset successfully
def send_password_reset_successfully_email(user):
    """Notify *user* by email that their password was reset."""
    template_context = {"user": user}
    text_body = get_template('password_reseter/emails/password_reset_successfully.txt').render(template_context)
    html_body = get_template('password_reseter/emails/password_reset_successfully.html').render(template_context)
    subject = "Djanban :: Password reset successfully"
    return send_mail(subject, text_body, settings.EMAIL_HOST_USER, recipient_list=[user.email],
                     fail_silently=False, html_message=html_body)
| mit |
southpawtech/TACTIC-DEV | src/context/client/create_set_asset.py | 6 | 2651 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import re, sys, cStringIO, xmlrpclib
from common import *
import checkin, load
def create(set_name, cat_name, context):
    '''Create a set asset from the nodes currently selected in the Maya
    session, build the returned session XML, check the set contents in,
    publish a snapshot under *context* and update the session node info.

    Raises TacticException when nothing is selected.'''
    info = TacticInfo.get()
    ticket = info.get_ticket()
    server = info.get_xmlrpc_server()
    app = info.get_app()
    # get all of the top level assets
    selected = app.mel("ls -sl -as")
    if not selected:
        raise TacticException('At least 1 asset (top node) needs to be selected in the session!')
    # Server returns the session XML to apply plus the code of the new asset.
    xml, asset_code = server.create_set(ticket, set_name, cat_name, selected)
    builder = info.get_builder()
    builder.execute(xml)
    if asset_code:
        # checkin_set and upload the files
        checkin.checkin_set(set_name, asset_code)
        # publish the set
        snapshot_code = server.checkin_set(ticket, asset_code, context)
        # update tacticNodeInfo
        #context = "publish"
        load.update(snapshot_code, asset_code, set_name, context)
'''
#OLD CODE
class CreateSetAsset:
def __init__(my, set_code):
my.set_code = set_code
my.info = TacticInfo.get()
my.ticket = my.info.get_ticket()
def execute(my):
# get all of the top level assets
selected = mel("ls -sl -as")
for instance in selected:
print instance
# export the node
mel("select %s" % instance)
path = mel("file -rename %s" % instance )
mel("file -f -es -type mayaAscii")
# for some reason file -rename always returns .mb extension?!?
p = re.compile(r'\.mb$')
path = p.sub('.ma', path)
my.upload(path)
mel("select %s" % " ".join(selected) )
# now create all of the assets through xmlrpc
server = my.info.get_xmlrpc_server()
asset_codes = server.create_assets(my.ticket, my.set_code, selected)
# rename nodes
count = 0
for instance in selected:
mel("rename %s %s" % (instance, asset_codes[count]) )
count += 1
def upload(my, from_path):
my.info.upload(from_path)
if __name__ == '__main__':
executable = sys.argv[0]
args = sys.argv[1:]
set_code = args[0]
cmd = CreateSetAsset(set_code)
cmd.execute()
'''
| epl-1.0 |
seanxwzhang/LeetCode | 148 Sort List/solution.py | 1 | 1263 | #! /usr/bin/env python
# Sort a linked list in O(n log n) time using constant space complexity.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# a merge sort implementation
class Solution(object):
    """Sort a singly linked list with merge sort: O(n log n) time."""

    def sortList(self, head):
        """Return the head of the list sorted ascending by node value."""
        # Zero or one node: already sorted.
        if not head or not head.next:
            return head
        # Find the node before the middle with fast/slow pointers, then cut.
        slow, fast = head, head.next
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        second = slow.next
        slow.next = None
        # Sort both halves recursively.
        left = self.sortList(head)
        right = self.sortList(second)
        # Merge the two sorted halves behind a dummy head.
        dummy = ListNode(0)
        tail = dummy
        while left and right:
            if left.val < right.val:
                tail.next, left = left, left.next
            else:
                tail.next, right = right, right.next
            tail = tail.next
        # Exactly one of the halves may still have nodes; append it.
        tail.next = left if left else right
        return dummy.next
| mit |
xavfernandez/pip | src/pip/_vendor/distlib/_backport/tarfile.py | 422 | 92628 | #-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
    """Convert a string to a null-terminated bytes object.

    The encoded string is truncated to *length* bytes, or padded with
    NUL (b"\\0") bytes up to *length*.
    """
    encoded = s.encode(encoding, errors)[:length]
    return encoded.ljust(length, b"\0")
def nts(s, encoding, errors):
    """Convert a null-terminated bytes object to a string.

    Everything from the first NUL byte onwards is discarded.
    """
    return s.split(b"\0", 1)[0].decode(encoding, errors)
def nti(s):
    """Convert a number field to a python number.

    There are two possible encodings for a number field, see itn():
    octal ASCII digits terminated by NUL, or GNU tar's big-endian
    base-256 encoding flagged by a leading 0o200 byte.

    Raises InvalidHeaderError when the octal field is malformed.
    """
    # Compare a one-byte slice so the test works on both Python 2 (str)
    # and Python 3 (bytes); the original `s[0] != chr(0o200)` only worked
    # on Python 2 because bytes indexing yields an int on Python 3.
    if s[0:1] != b"\x80":
        try:
            n = int(nts(s, "ascii", "strict") or "0", 8)
        except ValueError:
            raise InvalidHeaderError("invalid header")
    else:
        # GNU base-256: big-endian accumulation of the bytes after the
        # marker; bytearray yields ints on both Python 2 and 3.
        n = 0
        for byte in bytearray(s[1:]):
            n = (n << 8) + byte
    return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
    """Convert a python number to a number field.
    """
    # POSIX 1003.1-1988 stores numbers as octal ASCII digits terminated
    # by NUL, which caps the value at 8**(digits-1) - 1.  GNU tar extends
    # this with a base-256 big-endian encoding flagged by a leading
    # 0o200 byte, good for values up to 256**(digits-1) - 1.
    if 0 <= n < 8 ** (digits - 1):
        return ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
    if format != GNU_FORMAT or n >= 256 ** (digits - 1):
        raise ValueError("overflow in number field")
    if n < 0:
        # XXX Mimic GNU tar's behaviour with negative numbers by
        # reinterpreting the value as unsigned; this could raise
        # OverflowError.
        n = struct.unpack("L", struct.pack("l", n))[0]
    buf = bytearray()
    for _ in range(digits - 1):
        buf.insert(0, n & 0o377)
        n >>= 8
    buf.insert(0, 0o200)
    return buf
def calc_chksums(buf):
    """Calculate both checksums of a 512-byte member header.

    The 8-byte chksum field (offsets 148-155) is treated as if it were
    filled with spaces, so it is skipped here and 256 (= 8 * ord(' '))
    is added instead.  According to the GNU tar sources, some tars
    (Sun and NeXT) summed *signed* chars, which differs when bytes have
    the high bit set — so both the unsigned and the signed sum are
    returned.
    """
    data = bytearray(buf[:148]) + bytearray(buf[156:512])
    unsigned_chksum = 256 + sum(data)
    signed_chksum = 256 + sum(b - 256 if b > 127 else b for b in data)
    return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
    """Copy length bytes from fileobj src to fileobj dst.
    If length is None, copy the entire content.

    Raises IOError when src runs out of data before *length* bytes
    have been copied.
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: stream 16 KiB chunks until EOF.
        while True:
            chunk = src.read(16 * 1024)
            if not chunk:
                return
            dst.write(chunk)
    bufsize = 16 * 1024
    full_blocks, tail = divmod(length, bufsize)
    for _ in range(full_blocks):
        chunk = src.read(bufsize)
        if len(chunk) < bufsize:
            raise IOError("end of file reached")
        dst.write(chunk)
    if tail:
        chunk = src.read(tail)
        if len(chunk) < tail:
            raise IOError("end of file reached")
        dst.write(chunk)
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
# Exception hierarchy: TarError is the package root; the HeaderError
# subclasses distinguish the ways a 512-byte member header can fail to
# parse, so callers can treat empty/EOF blocks differently from corrupt
# ones.
class TarError(Exception):
    """Base exception."""
    pass
class ExtractError(TarError):
    """General exception for extract errors."""
    pass
class ReadError(TarError):
    """Exception for unreadable tar archives."""
    pass
class CompressionError(TarError):
    """Exception for unavailable compression methods."""
    pass
class StreamError(TarError):
    """Exception for unsupported operations on stream-like TarFiles."""
    pass
class HeaderError(TarError):
    """Base exception for header errors."""
    pass
class EmptyHeaderError(HeaderError):
    """Exception for empty headers."""
    pass
class TruncatedHeaderError(HeaderError):
    """Exception for truncated headers."""
    pass
class EOFHeaderError(HeaderError):
    """Exception for end of file headers."""
    pass
class InvalidHeaderError(HeaderError):
    """Exception for invalid headers."""
    pass
class SubsequentHeaderError(HeaderError):
    """Exception for missing and invalid extended headers."""
    pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
    """Return the stream's file pointer position.

    This is the logical (uncompressed) position, not the offset in
    the underlying compressed file object.
    """
    return self.pos
def seek(self, pos=0):
    """Advance the stream's file pointer to pos.

    Streams cannot rewind, so only forward seeks are possible; they
    are implemented by reading and discarding data in bufsize-sized
    chunks.  Raises StreamError on an attempt to seek backwards.
    """
    delta = pos - self.pos
    if delta < 0:
        raise StreamError("seeking backwards is not allowed")
    whole, partial = divmod(delta, self.bufsize)
    for _ in range(whole):
        self.read(self.bufsize)
    self.read(partial)
    return self.pos
def read(self, size=None):
    """Return the next size number of bytes from the stream.
    If size is not defined, return all bytes of the stream
    up to EOF.

    Updates self.pos by the number of bytes actually returned.
    """
    if size is None:
        t = []
        while True:
            buf = self._read(self.bufsize)
            if not buf:
                break
            t.append(buf)
        # BUG FIX: the chunks are bytes objects, so they must be joined
        # with a bytes separator; "".join(t) raised TypeError whenever
        # read() was called without a size.
        buf = b"".join(t)
    else:
        buf = self._read(size)
    self.pos += len(buf)
    return buf
def _read(self, size):
    """Return size bytes from the stream.

    For plain tar streams this is a raw read; otherwise compressed
    blocks are read and decompressed into self.dbuf until at least
    size decompressed bytes are available (or EOF).  May return fewer
    than size bytes at end of stream.
    """
    if self.comptype == "tar":
        return self.__read(size)
    c = len(self.dbuf)
    while c < size:
        buf = self.__read(self.bufsize)
        if not buf:
            break
        try:
            buf = self.cmp.decompress(buf)
        except IOError:
            # Corrupt gzip/bz2 data surfaces as a tarfile-level error.
            raise ReadError("invalid compressed data")
        self.dbuf += buf
        c += len(buf)
    buf = self.dbuf[:size]
    self.dbuf = self.dbuf[size:]
    return buf
def __read(self, size):
    """Return size bytes from stream. If internal buffer is empty,
    read another block from the stream.

    Operates on the raw (still compressed) byte stream; may return
    fewer than size bytes at EOF.
    """
    c = len(self.buf)
    while c < size:
        buf = self.fileobj.read(self.bufsize)
        if not buf:
            break
        self.buf += buf
        c += len(buf)
    buf = self.buf[:size]
    self.buf = self.buf[size:]
    return buf
# class _Stream
class _StreamProxy(object):
    """Small proxy class that enables transparent compression
    detection for the Stream interface (mode 'r|*').

    The first block is read eagerly so the magic bytes can be
    inspected; it is replayed from memory on the first read() call,
    after which reads go straight to the underlying file object.
    """

    def __init__(self, fileobj):
        self.fileobj = fileobj
        self.buf = self.fileobj.read(BLOCKSIZE)

    def read(self, size):
        # Replace this bound method so subsequent calls bypass the
        # buffered first block.
        self.read = self.fileobj.read
        return self.buf

    def getcomptype(self):
        """Guess the compression type from the stream's magic bytes."""
        if self.buf.startswith(b"\037\213\010"):
            return "gz"
        # BUG FIX: bzip2 streams start with "BZh" followed by the block
        # size digit '1'..'9'.  Matching the full b"BZh91" prefix only
        # recognized streams compressed with block size 9, so e.g. a
        # "BZh5..." archive silently fell through to "tar".
        if self.buf.startswith(b"BZh"):
            return "bz2"
        return "tar"

    def close(self):
        self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
    """Small proxy class that enables external file object
    support for "r:bz2" and "w:bz2" modes. This is actually
    a workaround for a limitation in bz2 module's BZ2File
    class which (unlike gzip.GzipFile) has no support for
    a file object argument.
    """

    blocksize = 16 * 1024

    def __init__(self, fileobj, mode):
        self.fileobj = fileobj
        self.mode = mode
        self.name = getattr(self.fileobj, "name", None)
        self.init()

    def init(self):
        """(Re-)create the compressor/decompressor and reset state."""
        import bz2
        self.pos = 0
        if self.mode == "r":
            self.bz2obj = bz2.BZ2Decompressor()
            self.fileobj.seek(0)
            self.buf = b""
        else:
            self.bz2obj = bz2.BZ2Compressor()

    def read(self, size):
        """Return up to size decompressed bytes."""
        # Decompress raw chunks until the buffer covers the request
        # or the underlying file runs dry.
        available = len(self.buf)
        while available < size:
            chunk = self.fileobj.read(self.blocksize)
            if not chunk:
                break
            expanded = self.bz2obj.decompress(chunk)
            self.buf += expanded
            available += len(expanded)
        result, self.buf = self.buf[:size], self.buf[size:]
        self.pos += len(result)
        return result

    def seek(self, pos):
        """Seek to pos; backward seeks restart decompression from 0."""
        if pos < self.pos:
            self.init()
        self.read(pos - self.pos)

    def tell(self):
        return self.pos

    def write(self, data):
        self.pos += len(data)
        self.fileobj.write(self.bz2obj.compress(data))

    def close(self):
        if self.mode == "w":
            self.fileobj.write(self.bz2obj.flush())
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
    """A thin wrapper around an existing file object that
    provides a part of its data as an individual file
    object.

    Used to expose a single (possibly sparse) archive member as a
    seekable, read-only file.
    """

    def __init__(self, fileobj, offset, size, blockinfo=None):
        self.fileobj = fileobj
        self.offset = offset      # start of the member's data in fileobj
        self.size = size          # logical size of the member
        self.position = 0
        if blockinfo is None:
            # Non-sparse member: a single data block covers everything.
            blockinfo = [(0, size)]
        # Construct a map with data and zero blocks.  Each entry is
        # (is_data, logical_start, logical_stop, real_offset_or_None).
        self.map_index = 0
        self.map = []
        lastpos = 0
        realpos = self.offset
        for offset, size in blockinfo:
            if offset > lastpos:
                # Gap between data blocks: a hole that reads as zeros.
                self.map.append((False, lastpos, offset, None))
            self.map.append((True, offset, offset + size, realpos))
            realpos += size
            lastpos = offset + size
        if lastpos < self.size:
            # Trailing hole up to the declared size.
            self.map.append((False, lastpos, self.size, None))

    def seekable(self):
        if not hasattr(self.fileobj, "seekable"):
            # XXX gzip.GzipFile and bz2.BZ2File
            return True
        return self.fileobj.seekable()

    def tell(self):
        """Return the current file position.
        """
        return self.position

    def seek(self, position):
        """Seek to a position in the file.
        """
        self.position = position

    def read(self, size=None):
        """Read data from the file.

        Data blocks are read from the underlying file object; holes
        are synthesized as NUL bytes.
        """
        if size is None:
            size = self.size - self.position
        else:
            size = min(size, self.size - self.position)
        buf = b""
        while size > 0:
            # Find the map entry containing self.position; the index
            # wraps around so backward seeks also terminate.
            while True:
                data, start, stop, offset = self.map[self.map_index]
                if start <= self.position < stop:
                    break
                else:
                    self.map_index += 1
                    if self.map_index == len(self.map):
                        self.map_index = 0
            length = min(size, stop - self.position)
            if data:
                self.fileobj.seek(offset + (self.position - start))
                buf += self.fileobj.read(length)
            else:
                buf += NUL * length
            size -= length
            self.position += length
        return buf
#class _FileInFile
class ExFileObject(object):
    """File-like object for reading an archive member.
    Is returned by TarFile.extractfile().
    """

    # Chunk size used by readline() when refilling self.buffer.
    blocksize = 1024

    def __init__(self, tarfile, tarinfo):
        # Wrap the member's data region (honoring sparse maps) as a
        # standalone file object.
        self.fileobj = _FileInFile(tarfile.fileobj,
                                   tarinfo.offset_data,
                                   tarinfo.size,
                                   tarinfo.sparse)
        self.name = tarinfo.name
        self.mode = "r"
        self.closed = False
        self.size = tarinfo.size
        self.position = 0
        # Read-ahead buffer used by readline().
        self.buffer = b""

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        return self.fileobj.seekable()

    def read(self, size=None):
        """Read at most size bytes from the file. If size is not
        present or None, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        buf = b""
        if self.buffer:
            # Serve buffered readline() leftovers first.
            if size is None:
                buf = self.buffer
                self.buffer = b""
            else:
                buf = self.buffer[:size]
                self.buffer = self.buffer[size:]
        if size is None:
            buf += self.fileobj.read()
        else:
            buf += self.fileobj.read(size - len(buf))
        self.position += len(buf)
        return buf

    # XXX TextIOWrapper uses the read1() method.
    read1 = read

    def readline(self, size=-1):
        """Read one entire line from the file. If size is present
        and non-negative, return a string with at most that
        size, which may be an incomplete line.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        # pos is one past the newline, or 0 if none is buffered yet.
        pos = self.buffer.find(b"\n") + 1
        if pos == 0:
            # no newline found.
            while True:
                buf = self.fileobj.read(self.blocksize)
                self.buffer += buf
                if not buf or b"\n" in buf:
                    pos = self.buffer.find(b"\n") + 1
                    if pos == 0:
                        # no newline found.
                        pos = len(self.buffer)
                    break
        if size != -1:
            pos = min(size, pos)
        buf = self.buffer[:pos]
        self.buffer = self.buffer[pos:]
        self.position += len(buf)
        return buf

    def readlines(self):
        """Return a list with all remaining lines.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result

    def tell(self):
        """Return the current file position.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self.position

    def seek(self, pos, whence=os.SEEK_SET):
        """Seek to a position in the file.

        The position is clamped to [0, self.size]; the readline
        buffer is discarded since it no longer matches the position.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if whence == os.SEEK_SET:
            self.position = min(max(pos, 0), self.size)
        elif whence == os.SEEK_CUR:
            if pos < 0:
                self.position = max(self.position + pos, 0)
            else:
                self.position = min(self.position + pos, self.size)
        elif whence == os.SEEK_END:
            self.position = max(min(self.size + pos, self.size), 0)
        else:
            raise ValueError("Invalid argument")
        self.buffer = b""
        self.fileobj.seek(self.position)

    def close(self):
        """Close the file object.
        """
        self.closed = True

    def __iter__(self):
        """Get an iterator over the file's lines.
        """
        while True:
            line = self.readline()
            if not line:
                break
            yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
    """Informational class which holds the details about an
    archive member given by a tar header block.
    TarInfo objects are returned by TarFile.getmember(),
    TarFile.getmembers() and TarFile.gettarinfo() and are
    usually created internally.
    """

    # __slots__ keeps per-member memory small; archives can contain
    # many thousands of members.
    __slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
                 "chksum", "type", "linkname", "uname", "gname",
                 "devmajor", "devminor",
                 "offset", "offset_data", "pax_headers", "sparse",
                 "tarfile", "_sparse_structs", "_link_target")

    def __init__(self, name=""):
        """Construct a TarInfo object. name is the optional name
        of the member.
        """
        self.name = name        # member name
        self.mode = 0o644       # file permissions
        self.uid = 0            # user id
        self.gid = 0            # group id
        self.size = 0           # file size
        self.mtime = 0          # modification time
        self.chksum = 0         # header checksum
        self.type = REGTYPE     # member type
        self.linkname = ""      # link name
        self.uname = ""         # user name
        self.gname = ""         # group name
        self.devmajor = 0       # device major number
        self.devminor = 0       # device minor number

        self.offset = 0         # the tar header starts here
        self.offset_data = 0    # the file's data starts here

        self.sparse = None      # sparse member information
        self.pax_headers = {}   # pax header information

    # In pax headers the "name" and "linkname" field are called
    # "path" and "linkpath".
    def _getpath(self):
        return self.name

    def _setpath(self, name):
        self.name = name

    path = property(_getpath, _setpath)

    def _getlinkpath(self):
        return self.linkname

    def _setlinkpath(self, linkname):
        self.linkname = linkname

    linkpath = property(_getlinkpath, _setlinkpath)

    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__, self.name, id(self))

    def get_info(self):
        """Return the TarInfo's attributes as a dictionary.
        """
        info = {
            "name":     self.name,
            "mode":     self.mode & 0o7777,
            "uid":      self.uid,
            "gid":      self.gid,
            "size":     self.size,
            "mtime":    self.mtime,
            "chksum":   self.chksum,
            "type":     self.type,
            "linkname": self.linkname,
            "uname":    self.uname,
            "gname":    self.gname,
            "devmajor": self.devmajor,
            "devminor": self.devminor
        }
        # Directory names always carry a trailing slash on disk format.
        if info["type"] == DIRTYPE and not info["name"].endswith("/"):
            info["name"] += "/"
        return info

    def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
        """Return a tar header as a string of 512 byte blocks.
        """
        info = self.get_info()
        if format == USTAR_FORMAT:
            return self.create_ustar_header(info, encoding, errors)
        elif format == GNU_FORMAT:
            return self.create_gnu_header(info, encoding, errors)
        elif format == PAX_FORMAT:
            return self.create_pax_header(info, encoding)
        else:
            raise ValueError("invalid format")

    def create_ustar_header(self, info, encoding, errors):
        """Return the object as a ustar header block.
        """
        info["magic"] = POSIX_MAGIC
        if len(info["linkname"]) > LENGTH_LINK:
            raise ValueError("linkname is too long")
        if len(info["name"]) > LENGTH_NAME:
            # Overlong names may be split across the prefix field.
            info["prefix"], info["name"] = self._posix_split_name(info["name"])
        return self._create_header(info, USTAR_FORMAT, encoding, errors)

    def create_gnu_header(self, info, encoding, errors):
        """Return the object as a GNU header block sequence.
        """
        info["magic"] = GNU_MAGIC
        buf = b""
        # Overlong names/linknames are emitted as extra pseudo-members
        # preceding the real header.
        if len(info["linkname"]) > LENGTH_LINK:
            buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
        if len(info["name"]) > LENGTH_NAME:
            buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
        return buf + self._create_header(info, GNU_FORMAT, encoding, errors)

    def create_pax_header(self, info, encoding):
        """Return the object as a ustar header block. If it cannot be
        represented this way, prepend a pax extended header sequence
        with supplement information.
        """
        info["magic"] = POSIX_MAGIC
        pax_headers = self.pax_headers.copy()

        # Test string fields for values that exceed the field length or cannot
        # be represented in ASCII encoding.
        for name, hname, length in (
                ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
                ("uname", "uname", 32), ("gname", "gname", 32)):

            if hname in pax_headers:
                # The pax header has priority.
                continue

            # Try to encode the string as ASCII.
            try:
                info[name].encode("ascii", "strict")
            except UnicodeEncodeError:
                pax_headers[hname] = info[name]
                continue

            if len(info[name]) > length:
                pax_headers[hname] = info[name]

        # Test number fields for values that exceed the field limit or values
        # that like to be stored as float.
        for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
            if name in pax_headers:
                # The pax header has priority. Avoid overflow.
                info[name] = 0
                continue

            val = info[name]
            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
                pax_headers[name] = str(val)
                info[name] = 0

        # Create a pax extended header if necessary.
        if pax_headers:
            buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
        else:
            buf = b""

        return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")

    @classmethod
    def create_pax_global_header(cls, pax_headers):
        """Return the object as a pax global header block sequence.
        """
        return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")

    def _posix_split_name(self, name):
        """Split a name longer than 100 chars into a prefix
        and a name part.

        The split must fall on a "/" so the two pieces can be
        rejoined on extraction.
        """
        prefix = name[:LENGTH_PREFIX + 1]
        while prefix and prefix[-1] != "/":
            prefix = prefix[:-1]

        name = name[len(prefix):]
        prefix = prefix[:-1]

        if not prefix or len(name) > LENGTH_NAME:
            raise ValueError("name is too long")
        return prefix, name

    @staticmethod
    def _create_header(info, format, encoding, errors):
        """Return a header block. info is a dictionary with file
        information, format must be one of the *_FORMAT constants.
        """
        parts = [
            stn(info.get("name", ""), 100, encoding, errors),
            itn(info.get("mode", 0) & 0o7777, 8, format),
            itn(info.get("uid", 0), 8, format),
            itn(info.get("gid", 0), 8, format),
            itn(info.get("size", 0), 12, format),
            itn(info.get("mtime", 0), 12, format),
            # NOTE(review): the checksum placeholder must be exactly 8
            # spaces per the tar spec; it is overwritten below.
            b"        ",            # checksum field
            info.get("type", REGTYPE),
            stn(info.get("linkname", ""), 100, encoding, errors),
            info.get("magic", POSIX_MAGIC),
            stn(info.get("uname", ""), 32, encoding, errors),
            stn(info.get("gname", ""), 32, encoding, errors),
            itn(info.get("devmajor", 0), 8, format),
            itn(info.get("devminor", 0), 8, format),
            stn(info.get("prefix", ""), 155, encoding, errors)
        ]

        buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
        chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
        # Splice the computed checksum into bytes 148..155 of the block.
        buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
        return buf

    @staticmethod
    def _create_payload(payload):
        """Return the string payload filled with zero bytes
        up to the next 512 byte border.
        """
        blocks, remainder = divmod(len(payload), BLOCKSIZE)
        if remainder > 0:
            payload += (BLOCKSIZE - remainder) * NUL
        return payload

    @classmethod
    def _create_gnu_long_header(cls, name, type, encoding, errors):
        """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
        for name.
        """
        name = name.encode(encoding, errors) + NUL

        info = {}
        info["name"] = "././@LongLink"
        info["type"] = type
        info["size"] = len(name)
        info["magic"] = GNU_MAGIC

        # create extended header + name blocks.
        return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
                cls._create_payload(name)

    @classmethod
    def _create_pax_generic_header(cls, pax_headers, type, encoding):
        """Return a POSIX.1-2008 extended or global header sequence
        that contains a list of keyword, value pairs. The values
        must be strings.
        """
        # Check if one of the fields contains surrogate characters and thereby
        # forces hdrcharset=BINARY, see _proc_pax() for more information.
        binary = False
        for keyword, value in pax_headers.items():
            try:
                value.encode("utf8", "strict")
            except UnicodeEncodeError:
                binary = True
                break

        records = b""
        if binary:
            # Put the hdrcharset field at the beginning of the header.
            records += b"21 hdrcharset=BINARY\n"

        for keyword, value in pax_headers.items():
            keyword = keyword.encode("utf8")
            if binary:
                # Try to restore the original byte representation of `value'.
                # Needless to say, that the encoding must match the string.
                value = value.encode(encoding, "surrogateescape")
            else:
                value = value.encode("utf8")

            # Each record is "%d %s=%s\n" where the leading number is the
            # total record length *including itself* — solved by iterating
            # to a fixed point.
            l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
            n = p = 0
            while True:
                n = l + len(str(p))
                if n == p:
                    break
                p = n
            records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"

        # We use a hardcoded "././@PaxHeader" name like star does
        # instead of the one that POSIX recommends.
        info = {}
        info["name"] = "././@PaxHeader"
        info["type"] = type
        info["size"] = len(records)
        info["magic"] = POSIX_MAGIC

        # Create pax header + record blocks.
        return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
                cls._create_payload(records)

    @classmethod
    def frombuf(cls, buf, encoding, errors):
        """Construct a TarInfo object from a 512 byte bytes object.
        """
        if len(buf) == 0:
            raise EmptyHeaderError("empty header")
        if len(buf) != BLOCKSIZE:
            raise TruncatedHeaderError("truncated header")
        if buf.count(NUL) == BLOCKSIZE:
            raise EOFHeaderError("end of file header")

        chksum = nti(buf[148:156])
        if chksum not in calc_chksums(buf):
            raise InvalidHeaderError("bad checksum")

        obj = cls()
        # Fixed ustar field offsets (see the POSIX header layout).
        obj.name = nts(buf[0:100], encoding, errors)
        obj.mode = nti(buf[100:108])
        obj.uid = nti(buf[108:116])
        obj.gid = nti(buf[116:124])
        obj.size = nti(buf[124:136])
        obj.mtime = nti(buf[136:148])
        obj.chksum = chksum
        obj.type = buf[156:157]
        obj.linkname = nts(buf[157:257], encoding, errors)
        obj.uname = nts(buf[265:297], encoding, errors)
        obj.gname = nts(buf[297:329], encoding, errors)
        obj.devmajor = nti(buf[329:337])
        obj.devminor = nti(buf[337:345])
        prefix = nts(buf[345:500], encoding, errors)

        # Old V7 tar format represents a directory as a regular
        # file with a trailing slash.
        if obj.type == AREGTYPE and obj.name.endswith("/"):
            obj.type = DIRTYPE

        # The old GNU sparse format occupies some of the unused
        # space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
        if obj.type == GNUTYPE_SPARSE:
            pos = 386
            structs = []
            for i in range(4):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[482])
            origsize = nti(buf[483:495])
            obj._sparse_structs = (structs, isextended, origsize)

        # Remove redundant slashes from directories.
        if obj.isdir():
            obj.name = obj.name.rstrip("/")

        # Reconstruct a ustar longname.
        if prefix and obj.type not in GNU_TYPES:
            obj.name = prefix + "/" + obj.name
        return obj

    @classmethod
    def fromtarfile(cls, tarfile):
        """Return the next TarInfo object from TarFile object
        tarfile.
        """
        buf = tarfile.fileobj.read(BLOCKSIZE)
        obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
        obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
        return obj._proc_member(tarfile)

    #--------------------------------------------------------------------------
    # The following are methods that are called depending on the type of a
    # member. The entry point is _proc_member() which can be overridden in a
    # subclass to add custom _proc_*() methods. A _proc_*() method MUST
    # implement the following
    # operations:
    # 1. Set self.offset_data to the position where the data blocks begin,
    #    if there is data that follows.
    # 2. Set tarfile.offset to the position where the next member's header will
    #    begin.
    # 3. Return self or another valid TarInfo object.
    def _proc_member(self, tarfile):
        """Choose the right processing method depending on
        the type and call it.
        """
        if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
            return self._proc_gnulong(tarfile)
        elif self.type == GNUTYPE_SPARSE:
            return self._proc_sparse(tarfile)
        elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
            return self._proc_pax(tarfile)
        else:
            return self._proc_builtin(tarfile)

    def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which
        will be treated as a regular file.
        """
        self.offset_data = tarfile.fileobj.tell()
        offset = self.offset_data
        if self.isreg() or self.type not in SUPPORTED_TYPES:
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset

        # Patch the TarInfo object with saved global
        # header information.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)

        return self

    def _proc_gnulong(self, tarfile):
        """Process the blocks that hold a GNU longname
        or longlink member.
        """
        buf = tarfile.fileobj.read(self._block(self.size))

        # Fetch the next header and process it.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        # Patch the TarInfo object from the next header with
        # the longname information.
        next.offset = self.offset
        if self.type == GNUTYPE_LONGNAME:
            next.name = nts(buf, tarfile.encoding, tarfile.errors)
        elif self.type == GNUTYPE_LONGLINK:
            next.linkname = nts(buf, tarfile.encoding, tarfile.errors)

        return next

    def _proc_sparse(self, tarfile):
        """Process a GNU sparse header plus extra headers.
        """
        # We already collected some sparse structures in frombuf().
        structs, isextended, origsize = self._sparse_structs
        del self._sparse_structs

        # Collect sparse structures from extended header blocks.
        while isextended:
            buf = tarfile.fileobj.read(BLOCKSIZE)
            pos = 0
            # Each extension block holds up to 21 (offset, numbytes) pairs.
            for i in range(21):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                if offset and numbytes:
                    structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[504])
        self.sparse = structs

        self.offset_data = tarfile.fileobj.tell()
        tarfile.offset = self.offset_data + self._block(self.size)
        self.size = origsize
        return self

    def _proc_pax(self, tarfile):
        """Process an extended or global header as described in
        POSIX.1-2008.
        """
        # Read the header information.
        buf = tarfile.fileobj.read(self._block(self.size))

        # A pax header stores supplemental information for either
        # the following file (extended) or all following files
        # (global).
        if self.type == XGLTYPE:
            pax_headers = tarfile.pax_headers
        else:
            pax_headers = tarfile.pax_headers.copy()

        # Check if the pax header contains a hdrcharset field. This tells us
        # the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded but since POSIX.1-2008 tar
        # implementations are allowed to store them as raw binary strings if
        # the translation to UTF-8 fails.
        match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
        if match is not None:
            pax_headers["hdrcharset"] = match.group(1).decode("utf8")

        # For the time being, we don't care about anything other than "BINARY".
        # The only other value that is currently allowed by the standard is
        # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
        hdrcharset = pax_headers.get("hdrcharset")
        if hdrcharset == "BINARY":
            encoding = tarfile.encoding
        else:
            encoding = "utf8"

        # Parse pax header information. A record looks like that:
        # "%d %s=%s\n" % (length, keyword, value). length is the size
        # of the complete record including the length field itself and
        # the newline. keyword and value are both UTF-8 encoded strings.
        regex = re.compile(br"(\d+) ([^=]+)=")
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break

            length, keyword = match.groups()
            length = int(length)
            value = buf[match.end(2) + 1:match.start(1) + length - 1]

            # Normally, we could just use "utf8" as the encoding and "strict"
            # as the error handler, but we better not take the risk. For
            # example, GNU tar <= 1.23 is known to store filenames it cannot
            # translate to UTF-8 as raw strings (unfortunately without a
            # hdrcharset=BINARY header).
            # We first try the strict standard encoding, and if that fails we
            # fall back on the user's encoding and error handler.
            keyword = self._decode_pax_field(keyword, "utf8", "utf8",
                                             tarfile.errors)
            if keyword in PAX_NAME_FIELDS:
                value = self._decode_pax_field(value, encoding, tarfile.encoding,
                                               tarfile.errors)
            else:
                value = self._decode_pax_field(value, "utf8", "utf8",
                                               tarfile.errors)

            pax_headers[keyword] = value
            pos += length

        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        # Process GNU sparse information.
        if "GNU.sparse.map" in pax_headers:
            # GNU extended sparse format version 0.1.
            self._proc_gnusparse_01(next, pax_headers)

        elif "GNU.sparse.size" in pax_headers:
            # GNU extended sparse format version 0.0.
            self._proc_gnusparse_00(next, pax_headers, buf)

        elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
            # GNU extended sparse format version 1.0.
            self._proc_gnusparse_10(next, pax_headers, tarfile)

        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset

            if "size" in pax_headers:
                # If the extended header replaces the size field,
                # we need to recalculate the offset where the next
                # header starts.
                offset = next.offset_data
                if next.isreg() or next.type not in SUPPORTED_TYPES:
                    offset += next._block(next.size)
                tarfile.offset = offset

        return next

    def _proc_gnusparse_00(self, next, pax_headers, buf):
        """Process a GNU tar extended sparse header, version 0.0.
        """
        offsets = []
        for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
            offsets.append(int(match.group(1)))
        numbytes = []
        for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
            numbytes.append(int(match.group(1)))
        next.sparse = list(zip(offsets, numbytes))

    def _proc_gnusparse_01(self, next, pax_headers):
        """Process a GNU tar extended sparse header, version 0.1.
        """
        sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
        next.sparse = list(zip(sparse[::2], sparse[1::2]))

    def _proc_gnusparse_10(self, next, pax_headers, tarfile):
        """Process a GNU tar extended sparse header, version 1.0.
        """
        fields = None
        sparse = []
        buf = tarfile.fileobj.read(BLOCKSIZE)
        # First line holds the number of (offset, numbytes) pairs.
        fields, buf = buf.split(b"\n", 1)
        fields = int(fields)
        while len(sparse) < fields * 2:
            if b"\n" not in buf:
                buf += tarfile.fileobj.read(BLOCKSIZE)
            number, buf = buf.split(b"\n", 1)
            sparse.append(int(number))
        next.offset_data = tarfile.fileobj.tell()
        next.sparse = list(zip(sparse[::2], sparse[1::2]))

    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous
        pax extended or global header.
        """
        for keyword, value in pax_headers.items():
            if keyword == "GNU.sparse.name":
                setattr(self, "path", value)
            elif keyword == "GNU.sparse.size":
                setattr(self, "size", int(value))
            elif keyword == "GNU.sparse.realsize":
                setattr(self, "size", int(value))
            elif keyword in PAX_FIELDS:
                if keyword in PAX_NUMBER_FIELDS:
                    try:
                        value = PAX_NUMBER_FIELDS[keyword](value)
                    except ValueError:
                        value = 0
                if keyword == "path":
                    value = value.rstrip("/")
                setattr(self, keyword, value)

        self.pax_headers = pax_headers.copy()

    def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
        """Decode a single field from a pax record.

        Tries the strict primary encoding first and falls back to the
        user-supplied encoding/error handler on failure.
        """
        try:
            return value.decode(encoding, "strict")
        except UnicodeDecodeError:
            return value.decode(fallback_encoding, fallback_errors)

    def _block(self, count):
        """Round up a byte count by BLOCKSIZE and return it,
        e.g. _block(834) => 1024.
        """
        blocks, remainder = divmod(count, BLOCKSIZE)
        if remainder:
            blocks += 1
        return blocks * BLOCKSIZE

    # Type predicates for the member kinds defined by the tar format.
    def isreg(self):
        return self.type in REGULAR_TYPES
    def isfile(self):
        return self.isreg()
    def isdir(self):
        return self.type == DIRTYPE
    def issym(self):
        return self.type == SYMTYPE
    def islnk(self):
        return self.type == LNKTYPE
    def ischr(self):
        return self.type == CHRTYPE
    def isblk(self):
        return self.type == BLKTYPE
    def isfifo(self):
        return self.type == FIFOTYPE
    def issparse(self):
        return self.sparse is not None
    def isdev(self):
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
    """The TarFile Class provides an interface to tar archives.
    """

    debug = 0                   # May be set from 0 (no msgs) to 3 (all msgs)

    dereference = False         # If true, add content of linked file to the
                                # tar file, else the link.

    ignore_zeros = False        # If true, skips empty or invalid blocks and
                                # continues processing.

    errorlevel = 1              # If 0, fatal errors only appear in debug
                                # messages (if debug >= 0). If > 0, errors
                                # are passed to the caller as exceptions.

    format = DEFAULT_FORMAT     # The format to use when creating an archive.

    encoding = ENCODING         # Encoding for 8-bit character strings.

    errors = None               # Error handler for unicode conversion.

    tarinfo = TarInfo           # The default TarInfo class to use.

    fileobject = ExFileObject   # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
        tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
        errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
    """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
    read from an existing archive, 'a' to append data to an existing
    file or 'w' to create a new file overwriting an existing one. `mode'
    defaults to 'r'.
    If `fileobj' is given, it is used for reading or writing data. If it
    can be determined, `mode' is overridden by `fileobj's mode.
    `fileobj' is not closed, when TarFile is closed.
    """
    if len(mode) > 1 or mode not in "raw":
        raise ValueError("mode must be 'r', 'a' or 'w'")
    self.mode = mode
    self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]

    if not fileobj:
        if self.mode == "a" and not os.path.exists(name):
            # Create nonexistent files in append mode.
            self.mode = "w"
            self._mode = "wb"
        fileobj = bltn_open(name, self._mode)
        self._extfileobj = False
    else:
        if name is None and hasattr(fileobj, "name"):
            name = fileobj.name
        if hasattr(fileobj, "mode"):
            self._mode = fileobj.mode
        # Externally supplied file objects are never closed by us.
        self._extfileobj = True
    self.name = os.path.abspath(name) if name else None
    self.fileobj = fileobj

    # Init attributes.  Explicit arguments override the class-level
    # defaults defined on TarFile.
    if format is not None:
        self.format = format
    if tarinfo is not None:
        self.tarinfo = tarinfo
    if dereference is not None:
        self.dereference = dereference
    if ignore_zeros is not None:
        self.ignore_zeros = ignore_zeros
    if encoding is not None:
        self.encoding = encoding
    self.errors = errors

    if pax_headers is not None and self.format == PAX_FORMAT:
        self.pax_headers = pax_headers
    else:
        self.pax_headers = {}

    if debug is not None:
        self.debug = debug
    if errorlevel is not None:
        self.errorlevel = errorlevel

    # Init datastructures.
    self.closed = False
    self.members = []       # list of members as TarInfo objects
    self._loaded = False    # flag if all members have been read
    self.offset = self.fileobj.tell()
                            # current position in the archive file
    self.inodes = {}        # dictionary caching the inodes of
                            # archive members already added

    try:
        if self.mode == "r":
            self.firstmember = None
            self.firstmember = self.next()

        if self.mode == "a":
            # Move to the end of the archive,
            # before the first empty block.
            while True:
                self.fileobj.seek(self.offset)
                try:
                    tarinfo = self.tarinfo.fromtarfile(self)
                    self.members.append(tarinfo)
                except EOFHeaderError:
                    self.fileobj.seek(self.offset)
                    break
                except HeaderError as e:
                    raise ReadError(str(e))

        if self.mode in "aw":
            self._loaded = True

            if self.pax_headers:
                buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
                self.fileobj.write(buf)
                self.offset += len(buf)
    except:
        # On any failure, release the file we opened ourselves so the
        # constructor does not leak a file handle.
        if not self._extfileobj:
            self.fileobj.close()
        self.closed = True
        raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
    """Open a tar archive for reading, writing or appending. Return
    an appropriate TarFile class.

    mode:
    'r' or 'r:*' open for reading with transparent compression
    'r:'         open for reading exclusively uncompressed
    'r:gz'       open for reading with gzip compression
    'r:bz2'      open for reading with bzip2 compression
    'a' or 'a:'  open for appending, creating the file if necessary
    'w' or 'w:'  open for writing without compression
    'w:gz'       open for writing with gzip compression
    'w:bz2'      open for writing with bzip2 compression

    'r|*'        open a stream of tar blocks with transparent compression
    'r|'         open an uncompressed stream of tar blocks for reading
    'r|gz'       open a gzip compressed stream of tar blocks
    'r|bz2'      open a bzip2 compressed stream of tar blocks
    'w|'         open an uncompressed stream for writing
    'w|gz'       open a gzip compressed stream for writing
    'w|bz2'      open a bzip2 compressed stream for writing
    """

    if not name and not fileobj:
        raise ValueError("nothing to open")

    if mode in ("r", "r:*"):
        # Find out which *open() is appropriate for opening the file.
        # Each candidate opener is tried in turn; the file position is
        # restored between attempts.
        for comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
            if fileobj is not None:
                saved_pos = fileobj.tell()
            try:
                return func(name, "r", fileobj, **kwargs)
            except (ReadError, CompressionError) as e:
                if fileobj is not None:
                    fileobj.seek(saved_pos)
                continue
        raise ReadError("file could not be opened successfully")

    elif ":" in mode:
        filemode, comptype = mode.split(":", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        # Select the *open() function according to
        # given compression.
        if comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
        else:
            raise CompressionError("unknown compression type %r" % comptype)
        return func(name, filemode, fileobj, **kwargs)

    elif "|" in mode:
        filemode, comptype = mode.split("|", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        if filemode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")

        stream = _Stream(name, filemode, comptype, fileobj, bufsize)
        try:
            t = cls(name, filemode, stream, **kwargs)
        except:
            # Make sure a failed construction does not leak the stream.
            stream.close()
            raise
        # The TarFile owns the stream and must close it on close().
        t._extfileobj = False
        return t

    elif mode in "aw":
        return cls.taropen(name, mode, fileobj, **kwargs)

    raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
    """Open uncompressed tar archive name for reading or writing.

    Raises:
        ValueError: if `mode' is not exactly one of "r", "a" or "w".
    """
    # `len(mode) != 1` also rejects the empty string, which the old
    # `len(mode) > 1` test let through ("" in "raw" is True), producing
    # a confusing KeyError deep in the constructor instead of the
    # intended ValueError.
    if len(mode) != 1 or mode not in "raw":
        raise ValueError("mode must be 'r', 'a' or 'w'")
    return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open gzip compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'")

    try:
        import gzip
        # Touch GzipFile so a crippled gzip module (e.g. missing zlib)
        # is detected here rather than later.
        gzip.GzipFile
    except (ImportError, AttributeError):
        raise CompressionError("gzip module is not available")

    # Remember whether the caller supplied the file object; if so, it
    # must not be closed by us on error, nor by TarFile.close().
    extfileobj = fileobj is not None
    try:
        fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except IOError:
        if not extfileobj and fileobj is not None:
            fileobj.close()
        if fileobj is None:
            # GzipFile creation itself failed; re-raise the original error.
            raise
        raise ReadError("not a gzip file")
    except:
        if not extfileobj and fileobj is not None:
            fileobj.close()
        raise
    t._extfileobj = extfileobj
    return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open bzip2 compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'.")

    try:
        import bz2
    except ImportError:
        raise CompressionError("bz2 module is not available")

    if fileobj is not None:
        # Wrap an externally supplied file object; bz2 itself cannot
        # operate on arbitrary file objects in this code base.
        fileobj = _BZ2Proxy(fileobj, mode)
    else:
        fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)

    try:
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except (IOError, EOFError):
        fileobj.close()
        raise ReadError("not a bzip2 file")
    # NOTE(review): _extfileobj is always False here, even when the
    # caller supplied fileobj (it is wrapped in _BZ2Proxy above), so
    # close() closes the proxy — presumably _BZ2Proxy.close leaves the
    # underlying object open; confirm against _BZ2Proxy.
    t._extfileobj = False
    return t
# All *open() methods are registered here.
# Maps the compression suffix used in mode strings ("r:gz", "w|bz2", ...)
# to the name of the classmethod that handles it; open() dispatches
# through this table, and subclasses may extend it.
OPEN_METH = {
    "tar": "taropen",   # uncompressed tar
    "gz":  "gzopen",    # gzip compressed tar
    "bz2": "bz2open"    # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
    """Close the TarFile. In write-mode, two finishing zero blocks are
    appended to the archive.
    """
    if self.closed:
        return

    # Substring test on the one-character mode: only "a" and "w" write.
    if self.mode in "aw":
        # Two zero blocks mark the end of the archive.
        self.fileobj.write(NUL * (BLOCKSIZE * 2))
        self.offset += (BLOCKSIZE * 2)
        # fill up the end with zero-blocks
        # (like option -b20 for tar does)
        blocks, remainder = divmod(self.offset, RECORDSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (RECORDSIZE - remainder))

    # Only close the underlying file if we created it ourselves.
    if not self._extfileobj:
        self.fileobj.close()
    self.closed = True
def getmember(self, name):
    """Return a TarInfo object for member `name'. If `name' can not be
    found in the archive, KeyError is raised. If a member occurs more
    than once in the archive, its last occurrence is assumed to be the
    most up-to-date version.
    """
    info = self._getmember(name)
    if info is not None:
        return info
    raise KeyError("filename %r not found" % name)
def getmembers(self):
    """Return the members of the archive as a list of TarInfo objects. The
    list has the same order as the members in the archive.
    """
    self._check()
    if not self._loaded:    # if we want to obtain a list of
        self._load()        # all members, we first have to
                            # scan the whole archive.
    return self.members
def getnames(self):
    """Return the members of the archive as a list of their names. It has
    the same order as the list returned by getmembers().
    """
    names = []
    for member in self.getmembers():
        names.append(member.name)
    return names
def gettarinfo(self, name=None, arcname=None, fileobj=None):
    """Create a TarInfo object for either the file `name' or the file
    object `fileobj' (using os.fstat on its file descriptor). You can
    modify some of the TarInfo's attributes before you add it using
    addfile(). If given, `arcname' specifies an alternative name for the
    file in the archive.

    Returns None for file types that cannot be archived (sockets etc.).
    """
    self._check("aw")

    # When fileobj is given, replace name by
    # fileobj's real name.
    if fileobj is not None:
        name = fileobj.name

    # Building the name of the member in the archive.
    # Backward slashes are converted to forward slashes,
    # Absolute paths are turned to relative paths.
    if arcname is None:
        arcname = name
    drv, arcname = os.path.splitdrive(arcname)
    arcname = arcname.replace(os.sep, "/")
    arcname = arcname.lstrip("/")

    # Now, fill the TarInfo object with
    # information specific for the file.
    tarinfo = self.tarinfo()
    tarinfo.tarfile = self

    # Use os.stat or os.lstat, depending on platform
    # and if symlinks shall be resolved.
    if fileobj is None:
        if hasattr(os, "lstat") and not self.dereference:
            statres = os.lstat(name)
        else:
            statres = os.stat(name)
    else:
        statres = os.fstat(fileobj.fileno())
    linkname = ""

    stmd = statres.st_mode
    if stat.S_ISREG(stmd):
        inode = (statres.st_ino, statres.st_dev)
        if not self.dereference and statres.st_nlink > 1 and \
                inode in self.inodes and arcname != self.inodes[inode]:
            # Is it a hardlink to an already
            # archived file?
            type = LNKTYPE
            linkname = self.inodes[inode]
        else:
            # The inode is added only if its valid.
            # For win32 it is always 0.
            type = REGTYPE
            if inode[0]:
                self.inodes[inode] = arcname
    elif stat.S_ISDIR(stmd):
        type = DIRTYPE
    elif stat.S_ISFIFO(stmd):
        type = FIFOTYPE
    elif stat.S_ISLNK(stmd):
        type = SYMTYPE
        linkname = os.readlink(name)
    elif stat.S_ISCHR(stmd):
        type = CHRTYPE
    elif stat.S_ISBLK(stmd):
        type = BLKTYPE
    else:
        # Sockets and other unsupported types cannot be archived.
        return None

    # Fill the TarInfo object with all
    # information we can get.
    tarinfo.name = arcname
    tarinfo.mode = stmd
    tarinfo.uid = statres.st_uid
    tarinfo.gid = statres.st_gid
    if type == REGTYPE:
        tarinfo.size = statres.st_size
    else:
        # Non-regular members carry no data payload.
        tarinfo.size = 0
    tarinfo.mtime = statres.st_mtime
    tarinfo.type = type
    tarinfo.linkname = linkname
    # Resolve numeric ids to names where the platform provides
    # pwd/grp (both are None on e.g. Windows).
    if pwd:
        try:
            tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
        except KeyError:
            pass
    if grp:
        try:
            tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
        except KeyError:
            pass

    if type in (CHRTYPE, BLKTYPE):
        if hasattr(os, "major") and hasattr(os, "minor"):
            tarinfo.devmajor = os.major(statres.st_rdev)
            tarinfo.devminor = os.minor(statres.st_rdev)
    return tarinfo
def list(self, verbose=True):
    """Print a table of contents to sys.stdout. If `verbose' is False, only
    the names of the members are printed. If it is True, an `ls -l'-like
    output is produced.
    """
    self._check()

    for tarinfo in self:
        if verbose:
            # Permission bits, owner/group, size (or device numbers),
            # and mtime -- mimicking `ls -l`.
            print(filemode(tarinfo.mode), end=' ')
            print("%s/%s" % (tarinfo.uname or tarinfo.uid,
                             tarinfo.gname or tarinfo.gid), end=' ')
            if tarinfo.ischr() or tarinfo.isblk():
                print("%10s" % ("%d,%d" \
                                % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
            else:
                print("%10d" % tarinfo.size, end=' ')
            print("%d-%02d-%02d %02d:%02d:%02d" \
                  % time.localtime(tarinfo.mtime)[:6], end=' ')

        # Directories get a trailing slash, like `ls -p`.
        print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')

        if verbose:
            if tarinfo.issym():
                print("->", tarinfo.linkname, end=' ')
            if tarinfo.islnk():
                print("link to", tarinfo.linkname, end=' ')
        print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
    """Add the file `name' to the archive. `name' may be any type of file
    (directory, fifo, symbolic link, etc.). If given, `arcname'
    specifies an alternative name for the file in the archive.
    Directories are added recursively by default. This can be avoided by
    setting `recursive' to False. `exclude' is a function that should
    return True for each filename to be excluded. `filter' is a function
    that expects a TarInfo object argument and returns the changed
    TarInfo object, if it returns None the TarInfo object will be
    excluded from the archive.
    """
    self._check("aw")

    if arcname is None:
        arcname = name

    # Exclude pathnames.
    if exclude is not None:
        # `exclude' is deprecated in favour of `filter'.
        import warnings
        warnings.warn("use the filter argument instead",
                      DeprecationWarning, 2)
        if exclude(name):
            self._dbg(2, "tarfile: Excluded %r" % name)
            return

    # Skip if somebody tries to archive the archive...
    if self.name is not None and os.path.abspath(name) == self.name:
        self._dbg(2, "tarfile: Skipped %r" % name)
        return

    self._dbg(1, name)

    # Create a TarInfo object from the file.
    tarinfo = self.gettarinfo(name, arcname)

    if tarinfo is None:
        self._dbg(1, "tarfile: Unsupported type %r" % name)
        return

    # Change or exclude the TarInfo object.
    if filter is not None:
        tarinfo = filter(tarinfo)
        if tarinfo is None:
            self._dbg(2, "tarfile: Excluded %r" % name)
            return

    # Append the tar header and data to the archive.
    if tarinfo.isreg():
        f = bltn_open(name, "rb")
        self.addfile(tarinfo, f)
        f.close()

    elif tarinfo.isdir():
        self.addfile(tarinfo)
        if recursive:
            # Recurse into the directory, preserving exclude/filter.
            for f in os.listdir(name):
                self.add(os.path.join(name, f), os.path.join(arcname, f),
                         recursive, exclude, filter=filter)

    else:
        # Links, fifos, devices: header only, no data payload.
        self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
    """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
    given, tarinfo.size bytes are read from it and added to the archive.
    You can create TarInfo objects using gettarinfo().
    On Windows platforms, `fileobj' should always be opened with mode
    'rb' to avoid irritation about the file size.
    """
    self._check("aw")

    # Copy so later mutations by the caller don't affect the stored
    # member list.
    tarinfo = copy.copy(tarinfo)

    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)

    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size)
        blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
        if remainder > 0:
            # Pad the last data block with NULs to a full BLOCKSIZE.
            self.fileobj.write(NUL * (BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * BLOCKSIZE

    self.members.append(tarinfo)
def extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    directories = []

    if members is None:
        members = self

    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 0o700
        # Do not set_attrs directories, as we will do that further down
        self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())

    # Reverse sort directories: deepest first, so attributes of a
    # parent are set only after its children are finished.
    directories.sort(key=lambda a: a.name)
    directories.reverse()

    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError as e:
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
    """Extract a member from the archive to the current working directory,
    using its full name. Its file information is extracted as accurately
    as possible. `member' may be a filename or a TarInfo object. You can
    specify a different directory using `path'. File attributes (owner,
    mtime, mode) are set unless `set_attrs' is False.
    """
    self._check("r")

    if isinstance(member, str):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member

    # Prepare the link target for makelink().
    if tarinfo.islnk():
        tarinfo._link_target = os.path.join(path, tarinfo.linkname)

    try:
        self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
                             set_attrs=set_attrs)
    except EnvironmentError as e:
        # OS-level failures obey errorlevel > 0; extraction-specific
        # failures (below) obey errorlevel > 1.
        if self.errorlevel > 0:
            raise
        else:
            if e.filename is None:
                self._dbg(1, "tarfile: %s" % e.strerror)
            else:
                self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
    except ExtractError as e:
        if self.errorlevel > 1:
            raise
        else:
            self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
    """Extract a member from the archive as a file object. `member' may be
    a filename or a TarInfo object. If `member' is a regular file, a
    file-like object is returned. If `member' is a link, a file-like
    object is constructed from the link's target. If `member' is none of
    the above, None is returned.
    The file-like object is read-only and provides the following
    methods: read(), readline(), readlines(), seek() and tell()
    """
    self._check("r")

    if isinstance(member, str):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member

    if tarinfo.isreg():
        return self.fileobject(self, tarinfo)

    elif tarinfo.type not in SUPPORTED_TYPES:
        # If a member's type is unknown, it is treated as a
        # regular file.
        return self.fileobject(self, tarinfo)

    elif tarinfo.islnk() or tarinfo.issym():
        if isinstance(self.fileobj, _Stream):
            # A small but ugly workaround for the case that someone tries
            # to extract a (sym)link as a file-object from a non-seekable
            # stream of tar blocks.
            raise StreamError("cannot extract (sym)link as file object")
        else:
            # A (sym)link's file object is its target's file object.
            return self.extractfile(self._find_link_target(tarinfo))
    else:
        # If there's no data associated with the member (directory, chrdev,
        # blkdev, etc.), return None instead of a file object.
        return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
    """Extract the TarInfo object tarinfo to a physical
    file called targetpath.

    Dispatches to the make*() methods according to the member type;
    those methods may be overridden by subclasses.
    """
    # Fetch the TarInfo object for the given name
    # and build the destination pathname, replacing
    # forward slashes to platform specific separators.
    targetpath = targetpath.rstrip("/")
    targetpath = targetpath.replace("/", os.sep)

    # Create all upper directories.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        # Create directories that are not part of the archive with
        # default permissions.
        os.makedirs(upperdirs)

    if tarinfo.islnk() or tarinfo.issym():
        self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
    else:
        self._dbg(1, tarinfo.name)

    if tarinfo.isreg():
        self.makefile(tarinfo, targetpath)
    elif tarinfo.isdir():
        self.makedir(tarinfo, targetpath)
    elif tarinfo.isfifo():
        self.makefifo(tarinfo, targetpath)
    elif tarinfo.ischr() or tarinfo.isblk():
        self.makedev(tarinfo, targetpath)
    elif tarinfo.islnk() or tarinfo.issym():
        self.makelink(tarinfo, targetpath)
    elif tarinfo.type not in SUPPORTED_TYPES:
        self.makeunknown(tarinfo, targetpath)
    else:
        self.makefile(tarinfo, targetpath)

    if set_attrs:
        self.chown(tarinfo, targetpath)
        if not tarinfo.issym():
            # chmod/utime on the symlink itself would follow the link.
            self.chmod(tarinfo, targetpath)
            self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
    """Create the directory `targetpath'.

    The directory is created with a safe temporary mode (0o700); the
    real permissions are applied later in _extract_member(). An
    already existing directory is silently accepted.
    """
    try:
        os.mkdir(targetpath, 0o700)
    except EnvironmentError as err:
        if err.errno == errno.EEXIST:
            return
        raise
def makefile(self, tarinfo, targetpath):
    """Make a file called targetpath.
    """
    # The archive itself is the data source; position it at the start
    # of this member's data area.
    source = self.fileobj
    source.seek(tarinfo.offset_data)
    target = bltn_open(targetpath, "wb")
    if tarinfo.sparse is not None:
        # GNU sparse member: write each data segment at its recorded
        # offset, leaving holes between segments.
        for offset, size in tarinfo.sparse:
            target.seek(offset)
            copyfileobj(source, target, size)
    else:
        copyfileobj(source, target, tarinfo.size)
    # Force the file to exactly tarinfo.size bytes (creates the final
    # hole of a sparse file, if any).
    target.seek(tarinfo.size)
    target.truncate()
    target.close()
def makeunknown(self, tarinfo, targetpath):
    """Make a file from a TarInfo object with an unknown type
    at targetpath.

    Unknown member types are extracted as regular files, with a
    debug-level notice.
    """
    self.makefile(tarinfo, targetpath)
    self._dbg(1, "tarfile: Unknown file type %r, " \
                 "extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
    """Create a FIFO (named pipe) at `targetpath'.

    Raises ExtractError on platforms without os.mkfifo.
    """
    if not hasattr(os, "mkfifo"):
        raise ExtractError("fifo not supported by system")
    os.mkfifo(targetpath)
def makedev(self, tarinfo, targetpath):
    """Make a character or block device called targetpath.

    Requires os.mknod/os.makedev (and usually root privileges).
    """
    if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
        raise ExtractError("special devices not supported by system")

    mode = tarinfo.mode
    if tarinfo.isblk():
        mode |= stat.S_IFBLK
    else:
        mode |= stat.S_IFCHR

    os.mknod(targetpath, mode,
             os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
    """Make a (symbolic) link called targetpath. If it cannot be created
    (platform limitation), we try to make a copy of the referenced file
    instead of a link.
    """
    try:
        # For systems that support symbolic and hard links.
        if tarinfo.issym():
            os.symlink(tarinfo.linkname, targetpath)
        else:
            # See extract().
            if os.path.exists(tarinfo._link_target):
                os.link(tarinfo._link_target, targetpath)
            else:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
    except symlink_exception:
        # BUGFIX: this fallback used to live in an `else:` clause of the
        # try statement, i.e. it ran after a *successful* link creation
        # and overwrote every extracted link with a copy of its target.
        # It belongs here, in the platform-can't-link handler: extract
        # the member the link points to in place of the link itself.
        try:
            self._extract_member(self._find_link_target(tarinfo),
                                 targetpath)
        except KeyError:
            raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
    """Set owner of targetpath according to tarinfo.

    Only attempted when running as root on a platform with pwd/grp;
    otherwise this is a silent no-op.
    """
    if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
        # We have to be root to do so.
        # Prefer symbolic names from the archive; fall back to the
        # numeric ids when they cannot be resolved locally.
        try:
            g = grp.getgrnam(tarinfo.gname)[2]
        except KeyError:
            g = tarinfo.gid
        try:
            u = pwd.getpwnam(tarinfo.uname)[2]
        except KeyError:
            u = tarinfo.uid
        try:
            if tarinfo.issym() and hasattr(os, "lchown"):
                # lchown changes the link itself, not its target.
                os.lchown(targetpath, u, g)
            else:
                if sys.platform != "os2emx":
                    os.chown(targetpath, u, g)
        except EnvironmentError as e:
            raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
    """Set file permissions of targetpath according to tarinfo.

    No-op on platforms without os.chmod.
    """
    if hasattr(os, 'chmod'):
        try:
            os.chmod(targetpath, tarinfo.mode)
        except EnvironmentError as e:
            raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
    """Set modification time of targetpath according to tarinfo.

    Both atime and mtime are set to the archived mtime.
    No-op on platforms without os.utime.
    """
    if not hasattr(os, 'utime'):
        return
    try:
        os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
    except EnvironmentError as e:
        raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
    """Return the next member of the archive as a TarInfo object, when
    TarFile is opened for reading. Return None if there is no more
    available.
    """
    self._check("ra")
    # firstmember is set when the first header was already consumed
    # (e.g. while probing the archive type); hand it out first.
    if self.firstmember is not None:
        m = self.firstmember
        self.firstmember = None
        return m

    # Read the next block.
    self.fileobj.seek(self.offset)
    tarinfo = None
    while True:
        try:
            tarinfo = self.tarinfo.fromtarfile(self)
        except EOFHeaderError as e:
            # A zero block: with ignore_zeros, skip it and keep
            # scanning; otherwise treat it as end of archive.
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
        except InvalidHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
            elif self.offset == 0:
                # A bad header at offset 0 means this isn't a tar file.
                raise ReadError(str(e))
        except EmptyHeaderError:
            if self.offset == 0:
                raise ReadError("empty file")
        except TruncatedHeaderError as e:
            if self.offset == 0:
                raise ReadError(str(e))
        except SubsequentHeaderError as e:
            raise ReadError(str(e))
        break

    if tarinfo is not None:
        self.members.append(tarinfo)
    else:
        # End of archive reached: the member list is now complete.
        self._loaded = True

    return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
    """Find an archive member by name from bottom to top.
    If tarinfo is given, it is used as the starting point.

    Returns the TarInfo object, or None if no member matches.
    `normalize' compares os.path.normpath()-ed names.
    """
    # Ensure that all members have been loaded.
    members = self.getmembers()

    # Limit the member search list up to tarinfo.
    if tarinfo is not None:
        members = members[:members.index(tarinfo)]

    if normalize:
        name = os.path.normpath(name)

    # Bottom-to-top: the last occurrence of a name wins.
    for member in reversed(members):
        if normalize:
            member_name = os.path.normpath(member.name)
        else:
            member_name = member.name

        if name == member_name:
            return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
    """Find the target member of a symlink or hardlink member in the
    archive.

    Raises KeyError when the link target is not in the archive.
    """
    if tarinfo.issym():
        # Always search the entire archive.
        linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
        limit = None
    else:
        # Search the archive before the link, because a hard link is
        # just a reference to an already archived file.
        linkname = tarinfo.linkname
        limit = tarinfo

    member = self._getmember(linkname, tarinfo=limit, normalize=True)
    if member is None:
        raise KeyError("linkname %r not found" % linkname)
    return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
    """Context manager entry: verify the archive is open and return it."""
    self._check()
    return self
def __exit__(self, type, value, traceback):
    """Context manager exit: finish the archive cleanly on success,
    or just release the file on error.
    """
    if type is None:
        self.close()
    else:
        # An exception occurred. We must not call close() because
        # it would try to write end-of-archive blocks and padding.
        if not self._extfileobj:
            self.fileobj.close()
        self.closed = True
# class TarFile
class TarIter(object):
    """Iterator over the members of a TarFile.

    While the archive is not fully scanned, new members are pulled on
    demand via TarFile.next(); once the member list is completely
    loaded, members are served straight from TarFile.members.

        for tarinfo in TarFile(...):
            suite...
    """

    def __init__(self, tarfile):
        """Construct a TarIter object positioned at the first member."""
        self.tarfile = tarfile
        self.index = 0

    def __iter__(self):
        """Return iterator object (this object is its own iterator)."""
        return self

    def __next__(self):
        """Return the next member; raise StopIteration at the end.

        Fix for SF #1100429: getmembers() may run during iteration and
        load the whole archive, so a loaded archive is indexed directly
        instead of calling next() again (which would return None early).
        """
        if self.tarfile._loaded:
            try:
                info = self.tarfile.members[self.index]
            except IndexError:
                raise StopIteration
        else:
            info = self.tarfile.next()
            if not info:
                self.tarfile._loaded = True
                raise StopIteration
        self.index += 1
        return info

    next = __next__  # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
    """Return True if `name' points to a tar archive that this module
    is able to handle, else return False.
    """
    try:
        t = open(name)
        t.close()
    except TarError:
        return False
    return True
bltn_open = open        # keep a reference to the builtin open()
open = TarFile.open     # the module-level open() is TarFile.open
| mit |
crazcalm/chat-server | server.py | 1 | 11418 | import help_text
import asyncio
import argparse
import logging
from random import randint
clients = []
class SimpleChatClientProtocol(asyncio.Protocol):
    """
    This class is the heart of the Chat Server. For each client that
    connects to the server, an instance of this class is created. These
    instances are saved in the global `clients` list.
    """
    def __init__(self, name):
        # Name of the chatroom this server was started with.
        self.chatroom_name = name

    def _send_msg(self, client, msg, format=True):
        """
        This method sends messages clients to other clients
        in the chatroom.

        Args:
            client (SimpleChatClientProtocol): A chat server client
            msg (str): message to be sent
            format (bool): when True, prefix the message with this
                client's name
        """
        if format:
            client.transport.write("{}: {}\n".format(self.name,
                                                     msg).encode())
        else:
            client.transport.write("{}\n".format(msg).encode())

    def _send_to_self(self, msg, client=False):
        """
        This method sends messages to self. Typically used for
        help dialogs and other interactions that are meant only
        for this client.

        Args:
            msg (str): message to be sent
            client (bool): when True, use the machine-readable
                "CLIENT**:" framing (no trailing newline)
        """
        if client:
            self.transport.write("CLIENT**: {}".format(msg).encode())
        else:
            self.transport.write("{}\n".format(msg).encode())

    def _unique_name(self, name):
        """
        This method checks to see if the name that was passed
        in as a parameter is unique among the names of the
        clients in the chatroom.

        Args:
            name (str): a potential name

        Return:
            bool: True when no *other* client already uses the name
        """
        logging.debug("Is the name {} unique?".format(name))
        result = True
        for client in clients:
            logging.debug("Checking against: {}".format(client.name))
            if name == client.name and self != client:
                result = False
                break
        logging.debug("unique: {}".format(result))
        return result

    def connection_made(self, transport):
        """
        This method designates what will happen when a client
        makes a connection to the server.

        Args:
            transport (socket): The incoming socket from the client
        """
        self.transport = transport
        self.peername = transport.get_extra_info("peername")
        self.name = "No Name"
        # Append random digits until the placeholder name is unique.
        while not self._unique_name(self.name):
            self.name += str(randint(0, 9))
        self.description = "None"
        logging.info("connection_made: {}".format(self.peername).encode())
        clients.append(self)
        self._send_to_self("Welcome to {}!".format(self.chatroom_name))
        self._send_to_self("To see the options available to you type `/help`")
        self._send_to_self("Your username name is: {}".format(self.name))
        self.send_to_everyone("<--- {} joined the room".format(self.name),
                              format=False)

    def send_to_everyone(self, msg, format=True):
        """
        This method sends a message to everyone in the chatroom.

        Args:
            msg (str): The message to be sent
            format (bool): when True, prefix the message with the
                sender's name
        """
        for client in clients:
            self._send_msg(client, msg, format=format)

    def find_client_by_name(self, name):
        """
        This method attempts to find a client that has a
        name that matches the name passed into the method.

        Args:
            name (str): The name used in the search

        Returns:
            SimpleChatClientProtocol or None: the matching client,
            or None when no client has that name
        """
        found = None
        for client in clients:
            if client.name.strip() == name:
                found = client
                break
        return found

    def send_to_list_of_people(self, people, msg):
        """
        This method sends a message to a list of people.

        Args:
            people (list): list of clients
            msg (str): The message to be sent
        """
        # Currently not used. If I dediced to add groups
        # to the app, then I will use this method.
        for client in people:
            self._send_msg(client, msg)

    def data_received(self, data):
        """
        This method is in charge of receiving the data that
        has been sent from the client. The rules for how
        this data is dealt with exist here.

        Args:
            data (byte): The data received over the socket connection
        """
        msg = data.decode().strip()
        logging.debug("data_received: {}".format(msg))
        if msg == "/disconnect":
            self.send_to_everyone("---> {} left the room".format(self.name),
                                  format=False)
            self.transport.close()
            logging.info("command: /quit")
        elif msg == "/whoami":
            logging.info("command: /whoami")
            self._send_to_self("You are {}\n".format(self.name))
            self._send_to_self("Description: {}\n".format(
                self.description))
        elif msg == "/people":
            logging.info("command: /people")
            people = [client for client in clients if client != self]
            if not people:
                self._send_to_self("****No one else is in the room....*****")
            for index, client in enumerate(people):
                self._send_to_self("{}: {}\n".format(index, client.name))
        elif msg == "/chatroom":
            logging.info("command: /chatroom")
            self._send_to_self("Chatroom name: {}".format(
                self.chatroom_name))
        elif msg == "/help":
            logging.info("command: /help")
            self._send_to_self("{}".format(help_text.HELP_GENERAL))
        elif msg.startswith("/whois "):
            if len(msg.split(' ')) >= 2:
                command, name = msg.split(' ', 1)
                # BUGFIX: the log template used "\A" (a literal
                # backslash-A) where a newline was intended.
                logging.info("command: {}\nArgs: {}".format(
                    command, name))
                found = self.find_client_by_name(name.strip())
                if found:
                    self._send_to_self('Name: {}\nDescription: {}'.format(
                        found.name, found.description))
                else:
                    self._send_to_self("I don't know")
            else:
                self._send_to_self(help_text.HELP_WHOIS)
        elif msg.startswith("/msg "):
            # BUGFIX: the guard used to be `len(msg.split(' '))`, which
            # is always >= 1 and therefore always truthy; mirror the
            # `/whois` handler and require an actual argument.
            if len(msg.split(' ')) >= 2 and ',' in msg:
                args = msg.split(' ', 1)[1]
                name, direct_msg = args.split(',', 1)
                logging.info("command: /msg-{}, {}".format(name, direct_msg))
                found = self.find_client_by_name(name.strip())
                if found:
                    direct_msg = ''.join(direct_msg.strip())
                    self._send_msg(found, "*{}".format(direct_msg))
                    self._send_to_self('msg sent')
                else:
                    logging.debug("Not Found: {}".format(name))
                    self._send_to_self('Could not find {}'.format(name))
            else:
                self._send_to_self(help_text.HELP_MSG)
        elif msg.startswith("/help "):
            command_args = msg.split(' ')[:2]
            logging.info("command: {}".format(command_args))
            error_msg = "{} is not a valid command".format(command_args[1])
            msg = help_text.HELP_DICT.get(command_args[1], error_msg)
            self._send_to_self(msg)
        elif msg.startswith("/set "):
            command_args = msg.strip().split(' ')
            logging.info("command: {}\n".format(command_args))
            key, value = None, None
            if len(command_args) >= 3 and\
               command_args[1] in ['name', 'description']:
                key, *value = command_args[1:]
            if key == 'name':
                name = ' '.join(value)
                if self._unique_name(name):
                    logging.debug('setting name to {}'.format(value))
                    self.name = name
                    self._send_to_self("Name: {}".format(self.name))
                else:
                    self._send_to_self(
                        "The name you selected is all ready in use."
                        "\nPlease select another name.")
            elif key == 'description':
                logging.debug('setting description to {}'.format(value))
                self.description = ' '.join(value)
                self._send_to_self("Description: {}".format(
                    self.description))
            else:
                self._send_to_self(help_text.HELP_SET)
        elif msg.startswith("/CLIENT**: USER LIST"):
            logging.debug("/CLIENT**: USER LIST")
            user_list = [client.name for client in clients]
            self._send_to_self(",".join(user_list), client=True)
        else:
            self.send_to_everyone(msg)

    def connection_lost(self, ex):
        """
        This method fires when the connection between
        the client and server is lost.

        Args:
            ex (Exception or None): exception that caused the loss,
                or None on a clean EOF/close
        """
        logging.info("connection_lost: {}".format(self.peername))
        clients.remove(self)
def cli_parser():
    """
    Build and return the command line argument parser for the
    chat server (--host, --port, --name).
    """
    parser = argparse.ArgumentParser(
        description=help_text.CLI.get('description'),
        epilog=help_text.CLI.get('epilog'))
    parser.add_argument("--host", type=str, default="localhost",
                        help=help_text.CLI.get('host'))
    parser.add_argument("--port", type=int, default=3333,
                        help=help_text.CLI.get('port'))
    parser.add_argument("--name", type=str, default="Chat Room",
                        help=help_text.CLI.get('name'))
    return parser
def run_server(host, port, name):
    """
    This function is charge of running the server.

    Args:
        host (str): host name/ip address
        port (int): port to which the app will run on
        name (str): the name of the chatroom

    Blocks forever in loop.run_forever().
    """
    logging.info("starting up..")
    print("Server running on {}:{}".format(host, port))
    # asyncio binds "localhost" literally; normalize to the loopback ip.
    host = "127.0.0.1" if host == "localhost" else host
    loop = asyncio.get_event_loop()
    # One protocol instance is created per incoming connection.
    coro = loop.create_server(lambda: SimpleChatClientProtocol(name),
                              port=port, host=host)
    server = loop.run_until_complete(coro)
    for socket in server.sockets:
        logging.info("serving on {}".format(socket.getsockname()))
    loop.run_forever()
def main():
    """
    This function contains the logic for the logger
    and is in charge of running this application.
    """
    # Note: filemode "w" truncates the previous log on every start.
    logging.basicConfig(
        filename="server_log",
        filemode="w",
        level=logging.DEBUG,
        format='%(asctime)s--%(levelname)a--%(funcName)s--%(name)s:%(message)s'
    )
    cli_args = cli_parser().parse_args()
    run_server(cli_args.host, cli_args.port, cli_args.name)
if __name__ == '__main__':
    # main() builds the parser and parses the arguments itself; the
    # extra parse that used to happen here was dead code.
    main()
| mit |
youprofit/NewsBlur | vendor/feedvalidator/content.py | 16 | 6021 | """$Id: content.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
#
# item element.
#
#
# item element.
#
class textConstruct(validatorBase,rfc2396,nonhtml):
    # Validator for Atom text constructs (title/summary/content/...):
    # checks the type/src attributes and the enclosed text or markup.
    # NOTE: Python 2 era code (has_key, <>, u'' literals) kept as-is.
    from validators import mime_re
    import re

    def getExpectedAttrNames(self):
        # Only un-namespaced 'type' and 'src' are legal on a text construct.
        return [(None, u'type'),(None, u'src')]

    def normalizeWhitespace(self):
        # Whitespace is significant inside a text construct: keep it.
        pass

    def maptype(self):
        # Base rule: a plain text construct may not carry a full MIME type
        # (subclasses such as 'content' override this with laxer rules).
        if self.type.find('/') > -1:
            self.log(InvalidTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))

    def prevalidate(self):
        # Establish the effective type: empty when out-of-line (src given),
        # defaulting to 'text' for inline content.
        if self.attrs.has_key((None,"src")):
            self.type=''
        else:
            self.type='text'
        if self.getFeedType() == TYPE_RSS2 and self.name != 'atom_summary':
            self.log(DuplicateDescriptionSemantics({"element":self.name}))
        if self.attrs.has_key((None,"type")):
            self.type=self.attrs.getValue((None,"type"))
            if not self.type:
                self.log(AttrNotBlank({"parent":self.parent.name, "element":self.name, "attr":"type"}))
        self.maptype()
        if self.attrs.has_key((None,"src")):
            self.children.append(True) # force warnings about "mixed" content
            # Temporarily stash the src value so the URI mixin can check it.
            self.value=self.attrs.getValue((None,"src"))
            rfc2396.validate(self, errorClass=InvalidURIAttribute, extraParams={"attr": "src"})
            self.value=""
            if not self.attrs.has_key((None,"type")):
                self.log(MissingTypeAttr({"parent":self.parent.name, "element":self.name, "attr":"type"}))
        # type must be one of the three keywords (inline) or a valid MIME type.
        if self.type in ['text','html','xhtml'] and not self.attrs.has_key((None,"src")):
            pass
        elif self.type and not self.mime_re.match(self.type):
            self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
        else:
            self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
        if not self.xmlLang:
            self.log(MissingDCLanguage({"parent":self.name, "element":"xml:lang"}))

    def validate(self):
        if self.type in ['text','xhtml']:
            if self.type=='xhtml':
                nonhtml.validate(self, NotInline)
            else:
                nonhtml.validate(self, ContainsUndeclaredHTML)
        else:
            # Non-text, non-XML media types are expected to be base64 encoded.
            if self.type.find('/') > -1 and not (
               self.type.endswith('+xml') or self.type.endswith('/xml') or
               self.type.startswith('text/')):
                import base64
                try:
                    self.value=base64.decodestring(self.value)
                    if self.type.endswith('/html'): self.type='html'
                except:
                    self.log(NotBase64({"parent":self.parent.name, "element":self.name,"value":self.value}))
            if self.type=='html' or self.type.endswith("/html"):
                self.validateSafe(self.value)
                if self.type.endswith("/html"):
                    # A full text/html document should contain an <html> root.
                    if self.value.find("<html")<0 and not self.attrs.has_key((None,"src")):
                        self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
            else:
                nonhtml.validate(self, ContainsUndeclaredHTML)
        if not self.value and len(self.children)==0 and not self.attrs.has_key((None,"src")):
            self.log(NotBlank({"parent":self.parent.name, "element":self.name}))

    def textOK(self):
        # Mixed character data is only acceptable once child elements exist.
        if self.children: validatorBase.textOK(self)

    def characters(self, string):
        # Flag C1 control characters and the Unicode replacement character.
        for c in string:
            if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
                from validators import BadCharacters
                self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
        # xhtml constructs must start with a div, not bare text.
        if (self.type=='xhtml') and string.strip() and not self.value.strip():
            self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
        validatorBase.characters(self,string)

    def startElementNS(self, name, qname, attrs):
        # Child elements are only defined for xhtml or XML media types.
        if (self.type<>'xhtml') and not (
           self.type.endswith('+xml') or self.type.endswith('/xml')):
            self.log(UndefinedElement({"parent":self.name, "element":name}))
        if self.type=="xhtml":
            if name<>'div' and not self.value.strip():
                self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
            elif qname not in ["http://www.w3.org/1999/xhtml"]:
                self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace: %s" % qname}))
        if self.type=="application/xhtml+xml":
            if name<>'html':
                self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
            elif qname not in ["http://www.w3.org/1999/xhtml"]:
                self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace: %s" % qname}))
        if self.attrs.has_key((None,"mode")):
            if self.attrs.getValue((None,"mode")) == 'escaped':
                self.log(NotEscaped({"parent":self.parent.name, "element":self.name}))
        # xhtml div content is collected (diveater); everything else is skipped.
        if name=="div" and qname=="http://www.w3.org/1999/xhtml":
            handler=diveater()
        else:
            handler=eater()
        self.children.append(handler)
        self.push(handler, name, attrs)
# treat xhtml:div as part of the content for purposes of detecting escaped html
class diveater(eater):
    # Text-collecting variant of eater used for the xhtml:div wrapper.
    def __init__(self):
        eater.__init__(self)
        # becomes True once any child element is seen (mixed content)
        self.mixed = False
    def textOK(self):
        # character data is always acceptable inside the div
        pass
    def characters(self, string):
        # accumulate the div's text via the base validator
        validatorBase.characters(self, string)
    def startElementNS(self, name, qname, attrs):
        if not qname:
            self.log(MissingNamespace({"parent":"xhtml:div", "element":name}))
        self.mixed = True
        eater.startElementNS(self, name, qname, attrs)
    def validate(self):
        # Pure-text divs contribute their text to the parent construct so
        # the escaped-html heuristics can inspect it.
        if not self.mixed: self.parent.value += self.value
class content(textConstruct):
    # atom:content - behaves like any text construct, but additionally
    # rejects multipart/alternative as a media type.
    def maptype(self):
        if self.type == 'multipart/alternative':
            self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
| mit |
goddardl/cortex | test/IECore/ConfigLoaderTest.py | 2 | 5626 | ##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
class ConfigLoaderTest( unittest.TestCase ) :
    """Tests for IECore.loadConfig.

    The deprecated unittest aliases failUnless/failIf have been replaced
    with their modern equivalents (assertIn/assertNotIn), which also give
    better failure messages.
    """

    def testLoadConfig( self ) :
        """A config file on the search path runs and populates the dict."""
        contextDict = {}
        IECore.loadConfig(
            IECore.SearchPath( os.path.dirname( __file__ ) + "/config/orderOne", ":" ),
            contextDict,
        )
        self.assertEqual( contextDict["a"], 1 )

    def testOrder( self ) :
        """Later search path entries override earlier ones (last write wins)."""
        contextDict = {}
        IECore.loadConfig(
            IECore.SearchPath(
                os.path.dirname( __file__ ) + "/config/orderTwo:" +
                os.path.dirname( __file__ ) + "/config/orderOne",
                ":"
            ),
            contextDict,
        )
        self.assertEqual( contextDict["a"], 2 )

    def testIgnoreExceptions( self ) :
        """With raiseExceptions=False, errors are reported but loading continues."""
        contextDict = {}
        m = IECore.CapturingMessageHandler()
        with m :
            IECore.loadConfig(
                IECore.SearchPath(
                    os.path.dirname( __file__ ) + "/config/orderOne:" +
                    os.path.dirname( __file__ ) + "/config/exceptions",
                    ":"
                ),
                contextDict,
                raiseExceptions = False
            )
        errors = [ msg for msg in m.messages if msg.level == IECore.Msg.Level.Error ]
        self.assertEqual( len( errors ), 1 )
        self.assertEqual( errors[0].level, IECore.Msg.Level.Error )
        # was self.failUnless( ... in ... ) - deprecated alias
        self.assertIn( "I am a very naughty boy", errors[0].message )
        self.assertEqual( contextDict["a"], 1 )

    def testThrowExceptions( self ) :
        """With raiseExceptions=True, a broken config aborts loading."""
        contextDict = {}
        self.assertRaises(
            RuntimeError,
            IECore.loadConfig,
            IECore.SearchPath(
                os.path.dirname( __file__ ) + "/config/orderOne:" +
                os.path.dirname( __file__ ) + "/config/exceptions",
                ":"
            ),
            contextDict,
            raiseExceptions = True
        )
        # was self.failIf( "a" in contextDict ) - deprecated alias
        self.assertNotIn( "a", contextDict )

    def testScope( self ) :
        """Functions defined by a config remain callable after loading."""
        contextDict = {}
        IECore.loadConfig(
            IECore.SearchPath( os.path.dirname( __file__ ) + "/config/scope", ":" ),
            contextDict,
            raiseExceptions = True
        )
        contextDict["functionToCallLater"]()

    def testIgnoreFiles( self ) :
        """Backup (~) files and non-.py files must not be executed."""
        contextDict = {}
        IECore.loadConfig(
            IECore.SearchPath( os.path.dirname( __file__ ) + "/config/ignoreFiles", ":" ),
            contextDict,
        )
        # were self.failIf( ... in ... ) - deprecated alias
        self.assertNotIn( "tildeConfigRan", contextDict )
        self.assertNotIn( "notDotPyRan", contextDict )
        self.assertEqual( contextDict["a"], 1000 )

    def testOrderWithinDirectory( self ) :
        """Files in one directory run in name order, not mtime order."""
        # Touch a.py so that, if mtime ordering were (wrongly) used,
        # a.py would run last and the assertion below would fail.
        os.utime( os.path.dirname( __file__ ) + "/config/orderDir/a.py", None )
        contextDict = {}
        IECore.loadConfig(
            IECore.SearchPath( os.path.dirname( __file__ ) + "/config/orderDir", ":" ),
            contextDict,
        )
        self.assertEqual( contextDict["lastRun"], "b" )

    def testSubdirectory( self ) :
        """The subdirectory argument restricts loading to that subdirectory."""
        contextDict = {}
        IECore.loadConfig(
            IECore.SearchPath( os.path.dirname( __file__ ) + "/config", ":" ),
            contextDict,
            subdirectory = "orderDir",
        )
        self.assertTrue( "lastRun" in contextDict )
        self.assertFalse( "a" in contextDict )

    def testSearchPathAsEnvVar( self ) :
        """A string argument names an environment variable holding the paths."""
        os.environ["IECORE_CONFIGLOADERTEST_PATHS"] = "%s:%s" % (
            os.path.dirname( __file__ ) + "/config/orderOne",
            os.path.dirname( __file__ ) + "/config/orderTwo"
        )
        contextDict = {}
        IECore.loadConfig(
            "IECORE_CONFIGLOADERTEST_PATHS",
            contextDict,
        )
        self.assertEqual( contextDict["a"], 1 )
        os.environ["IECORE_CONFIGLOADERTEST_PATHS"] = "%s:%s" % (
            os.path.dirname( __file__ ) + "/config/orderTwo",
            os.path.dirname( __file__ ) + "/config/orderOne"
        )
        contextDict = {}
        IECore.loadConfig(
            "IECORE_CONFIGLOADERTEST_PATHS",
            contextDict,
        )
        self.assertEqual( contextDict["a"], 2 )

    def testFile( self ) :
        """A running config can discover the path of its own file."""
        contextDict = {}
        path = os.path.dirname( __file__ ) + "/config/getFile"
        IECore.loadConfig(
            IECore.SearchPath( path, ":" ),
            contextDict,
        )
        expectedFile = os.path.abspath( os.path.join( path, "config.py" ) )
        self.assertEqual( contextDict["myFile"], expectedFile )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
hchen1202/django-react | virtualenv/lib/python3.6/site-packages/django/template/context_processors.py | 55 | 2497 | """
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the 'context_processors' option of the configuration
of a DjangoTemplates backend and used by RequestContext.
"""
from __future__ import unicode_literals
import itertools
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils.encoding import smart_text
from django.utils.functional import SimpleLazyObject, lazy
def csrf(request):
    """
    Context processor exposing ``csrf_token``.

    The token resolves lazily; if neither a view decorator nor the
    middleware supplied one, the sentinel string 'NOTPROVIDED' is used
    so misconfiguration can be diagnosed from the rendered output.
    """
    def _resolve():
        token = get_token(request)
        if token is None:
            # Sentinel value instead of an empty dict entry: makes the
            # missing-token case visible in templates for debugging.
            return 'NOTPROVIDED'
        return smart_text(token)
    return {'csrf_token': SimpleLazyObject(_resolve)}
def debug(request):
    """
    Returns context variables helpful for debugging.

    Only populated when DEBUG is on and the request comes from an
    address listed in INTERNAL_IPS.
    """
    extras = {}
    if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
        extras['debug'] = True
        from django.db import connections

        def _all_queries():
            # Evaluated lazily on access so queries triggered after this
            # processor runs are still included.
            return list(itertools.chain(
                *[connections[alias].queries for alias in connections]))

        extras['sql_queries'] = lazy(_all_queries, list)
    return extras
def i18n(request):
    """Expose the configured languages and the active language state."""
    from django.utils import translation
    context = {'LANGUAGES': settings.LANGUAGES}
    context['LANGUAGE_CODE'] = translation.get_language()
    context['LANGUAGE_BIDI'] = translation.get_language_bidi()
    return context
def tz(request):
    """Expose the name of the currently active time zone."""
    from django.utils import timezone
    zone_name = timezone.get_current_timezone_name()
    return {'TIME_ZONE': zone_name}
def static(request):
    """Add the STATIC_URL setting to the template context."""
    return dict(STATIC_URL=settings.STATIC_URL)
def media(request):
    """Add the MEDIA_URL setting to the template context."""
    return dict(MEDIA_URL=settings.MEDIA_URL)
def request(request):
    """Make the current request object available in the template context."""
    return dict(request=request)
| mit |
ptemplier/ansible | test/units/modules/network/f5/test_bigip_gtm_wide_ip.py | 51 | 8377 | # -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import F5ModuleError
try:
from library.bigip_gtm_wide_ip import Parameters
from library.bigip_gtm_wide_ip import ModuleManager
from library.bigip_gtm_wide_ip import ArgumentSpec
from library.bigip_gtm_wide_ip import UntypedManager
from library.bigip_gtm_wide_ip import TypedManager
except ImportError:
try:
from ansible.modules.network.f5.bigip_gtm_wide_ip import Parameters
from ansible.modules.network.f5.bigip_gtm_wide_ip import ModuleManager
from ansible.modules.network.f5.bigip_gtm_wide_ip import ArgumentSpec
from ansible.modules.network.f5.bigip_gtm_wide_ip import UntypedManager
from ansible.modules.network.f5.bigip_gtm_wide_ip import TypedManager
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def set_module_args(args):
    """Serialize *args* the way Ansible hands arguments to a module."""
    payload = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(payload)
def load_fixture(name):
    """Load a fixture file, JSON-decoding it when possible, with caching."""
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            raw = handle.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # not JSON; fall back to the raw text
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    # Unit tests for the Parameters adapter of bigip_gtm_wide_ip.

    def test_module_parameters(self):
        # Module-style argument names map straight onto the properties.
        args = dict(
            name='foo.baz.bar',
            lb_method='round-robin'
        )
        p = Parameters(args)
        assert p.name == 'foo.baz.bar'
        assert p.lb_method == 'round-robin'

    def test_api_parameters(self):
        # API-style names (poolLbMode) are translated to module names.
        args = dict(
            name='foo.baz.bar',
            poolLbMode='round-robin'
        )
        p = Parameters(args)
        assert p.name == 'foo.baz.bar'
        assert p.lb_method == 'round-robin'

    def test_api_not_fqdn_name(self):
        # Non-FQDN wide IP names must be rejected with a clear error.
        args = dict(
            name='foo.baz',
            poolLbMode='round-robin'
        )
        with pytest.raises(F5ModuleError) as excinfo:
            p = Parameters(args)
            assert p.name == 'foo.baz'
        assert 'The provided name must be a valid FQDN' in str(excinfo)
# Patch out the REST connection so no real BIG-IP device is contacted.
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
       return_value=True)
class TestUntypedManager(unittest.TestCase):
    # Tests for the pre-12.x (untyped) wide IP manager.

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_wideip(self, *args):
        # *args receives the mock injected by the class-level @patch.
        set_module_args(dict(
            name='foo.baz.bar',
            lb_method='round-robin',
            password='passsword',
            server='localhost',
            user='admin'
        ))
        client = AnsibleF5Client(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            f5_product_name=self.spec.f5_product_name
        )
        # Override methods in the specific type of manager
        tm = UntypedManager(client)
        tm.exists = Mock(return_value=False)
        tm.create_on_device = Mock(return_value=True)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(client)
        mm.version_is_less_than_12 = Mock(return_value=True)
        mm.get_manager = Mock(return_value=tm)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['name'] == 'foo.baz.bar'
        assert results['state'] == 'present'
        assert results['lb_method'] == 'round-robin'
# Patch out the REST connection so no real BIG-IP device is contacted.
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
       return_value=True)
class TestTypedManager(unittest.TestCase):
    """Tests for the 12.x+ (typed) wide IP manager.

    The three creation tests only differed in the ``lb_method`` spelling
    passed in and the normalized value expected back, so the shared
    machinery now lives in :meth:`_create_wideip`.
    """

    def setUp(self):
        self.spec = ArgumentSpec()

    def _create_wideip(self, lb_method, expected_lb_method):
        """Run a wide IP creation through a mocked TypedManager and
        check the normalized result."""
        set_module_args(dict(
            name='foo.baz.bar',
            lb_method=lb_method,
            type='a',
            password='passsword',
            server='localhost',
            user='admin'
        ))
        client = AnsibleF5Client(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            f5_product_name=self.spec.f5_product_name
        )
        # Override methods in the specific type of manager
        tm = TypedManager(client)
        tm.exists = Mock(return_value=False)
        tm.create_on_device = Mock(return_value=True)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(client)
        mm.version_is_less_than_12 = Mock(return_value=False)
        mm.get_manager = Mock(return_value=tm)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['name'] == 'foo.baz.bar'
        assert results['state'] == 'present'
        assert results['lb_method'] == expected_lb_method

    def test_create_wideip(self, *args):
        self._create_wideip('round-robin', 'round-robin')

    def test_create_wideip_deprecated_lb_method1(self, *args):
        # Legacy underscore spelling is accepted and normalized.
        self._create_wideip('round_robin', 'round-robin')

    def test_create_wideip_deprecated_lb_method2(self, *args):
        # Legacy global_availability spelling is accepted and normalized.
        self._create_wideip('global_availability', 'global-availability')
| gpl-3.0 |
clausqr/HTPC-Manager | libs/cherrypy/test/test_wsgi_ns.py | 22 | 2900 | import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy.test import helper
class WSGI_Namespace_Test(helper.CPWebCase):
    # Checks that the 'wsgi.' config namespace builds a middleware pipeline
    # and that pipeline members run in the configured order.

    def setup_server():
        class WSGIResponse(object):
            # Wraps a WSGI app's iterable result, forwarding iteration and
            # close() so middleware below can subclass and transform chunks.
            def __init__(self, appresults):
                self.appresults = appresults
                self.iter = iter(appresults)

            def __iter__(self):
                return self

            def next(self):
                # Python 2 iteration protocol
                return self.iter.next()

            def __next__(self):
                # Python 3 iteration protocol
                return next(self.iter)

            def close(self):
                if hasattr(self.appresults, "close"):
                    self.appresults.close()

        class ChangeCase(object):
            # Middleware applying a str method named by 'to' (e.g. 'upper')
            # to every response chunk.
            def __init__(self, app, to=None):
                self.app = app
                self.to = to

            def __call__(self, environ, start_response):
                res = self.app(environ, start_response)

                class CaseResults(WSGIResponse):
                    # 'this' is used for the inner self so the enclosing
                    # middleware's 'self.to' stays reachable.
                    def next(this):
                        return getattr(this.iter.next(), self.to)()

                    def __next__(this):
                        return getattr(next(this.iter), self.to)()
                return CaseResults(res)

        class Replacer(object):
            # Middleware performing byte substitutions on every chunk.
            def __init__(self, app, map={}):
                self.app = app
                self.map = map

            def __call__(self, environ, start_response):
                res = self.app(environ, start_response)

                class ReplaceResults(WSGIResponse):
                    def next(this):
                        # Python 2 path (iteritems)
                        line = this.iter.next()
                        for k, v in self.map.iteritems():
                            line = line.replace(k, v)
                        return line

                    def __next__(this):
                        # Python 3 path (items)
                        line = next(this.iter)
                        for k, v in self.map.items():
                            line = line.replace(k, v)
                        return line
                return ReplaceResults(res)

        class Root(object):
            def index(self):
                return "HellO WoRlD!"
            index.exposed = True

        # Configure Replacer via config; append ChangeCase programmatically.
        root_conf = {'wsgi.pipeline': [('replace', Replacer)],
                     'wsgi.replace.map': {ntob('L'): ntob('X'),
                                          ntob('l'): ntob('r')},
                     }

        app = cherrypy.Application(Root())
        app.wsgiapp.pipeline.append(('changecase', ChangeCase))
        app.wsgiapp.config['changecase'] = {'to': 'upper'}
        cherrypy.tree.mount(app, config={'/': root_conf})
    setup_server = staticmethod(setup_server)

    def test_pipeline(self):
        if not cherrypy.server.httpserver:
            return self.skip()

        self.getPage("/")
        # If body is "HEXXO WORXD!", the middleware was applied out of order.
        self.assertBody("HERRO WORRD!")
| mit |
ImaginaryLandscape/django-filer | filer/admin/imageadmin.py | 35 | 1672 | #-*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from filer import settings as filer_settings, settings
from filer.admin.fileadmin import FileAdmin
from filer.models import Image
class ImageAdminForm(forms.ModelForm):
    # Admin form for Image files; adds the focal point ("subject location")
    # editing support backed by the javascript below.
    subject_location = forms.CharField(
        max_length=64, required=False,
        label=_('Subject location'),
        help_text=_('Location of the main subject of the scene.'))

    def sidebar_image_ratio(self):
        # Returns the instance's sidebar image ratio formatted for javascript.
        if self.instance:
            # this is very important. It forces the value to be returned as a
            # string and always with a "." as seperator. If the conversion
            # from float to string is done in the template, the locale will
            # be used and in some cases there would be a "," instead of ".".
            # javascript would parse that to an integer.
            return "%.6F" % self.instance.sidebar_image_ratio()
        else:
            return ''

    class Meta:
        model = Image

    class Media:
        css = {
            #'all': (settings.MEDIA_URL + 'filer/css/focal_point.css',)
        }
        js = (
            filer_settings.FILER_STATICMEDIA_PREFIX + 'js/raphael.js',
            filer_settings.FILER_STATICMEDIA_PREFIX + 'js/focal_point.js',
        )
class ImageAdmin(FileAdmin):
    # File admin specialised for images; fieldsets are attached below.
    form = ImageAdminForm
# Rebuild the admin fieldsets so the image specific fields (alt text,
# caption) and a collapsible "Subject Location" panel are appended to the
# ones inherited from FileAdmin.
ImageAdmin.fieldsets = ImageAdmin.build_fieldsets(
    extra_main_fields=('default_alt_text', 'default_caption',),
    extra_fieldsets=(
        ('Subject Location', {
            'fields': ('subject_location',),
            'classes': ('collapse',),
        }),
    )
)
| bsd-3-clause |
LaoZhongGu/kbengine | kbe/src/lib/python/Lib/test/test_bufio.py | 64 | 2654 | import unittest
from test import support
import io # C implementation.
import _pyio as pyio # Python implementation.
# Simple test to ensure that optimizations in the IO library deliver the
# expected results. For best testing, run this under a debug-build Python too
# (to exercise asserts in the C code).
# Buffer sizes to exercise: every small size plus assorted powers of two
# and large odd sizes, to hit likely stdio buffer sizes and off-by-ones.
lengths = list(range(1, 257)) + [512, 1000, 1024, 2048, 4096, 8192, 10000,
                                 16384, 32768, 65536, 1000000]
class BufferSizeTest(unittest.TestCase):
    """Round-trips strings of many lengths through buffered file IO.

    Subclasses set ``open`` to either the C (io) or pure Python (_pyio)
    implementation; both the write AND the read path must go through it.
    """

    def try_one(self, s):
        # Write s + "\n" + s to file, then open it and ensure that successive
        # .readline()s deliver what we wrote.

        # Ensure we can open TESTFN for writing.
        support.unlink(support.TESTFN)

        # Since C doesn't guarantee we can write/read arbitrary bytes in text
        # files, use binary mode.
        f = self.open(support.TESTFN, "wb")
        try:
            # write once with \n and once without
            f.write(s)
            f.write(b"\n")
            f.write(s)
            f.close()
            # fix: the read path previously used the builtin open(), which
            # bypassed the io/_pyio parametrization this class exists for.
            f = self.open(support.TESTFN, "rb")
            line = f.readline()
            self.assertEqual(line, s + b"\n")
            line = f.readline()
            self.assertEqual(line, s)
            line = f.readline()
            self.assertFalse(line)  # Must be at EOF
            f.close()
        finally:
            support.unlink(support.TESTFN)

    def drive_one(self, pattern):
        for length in lengths:
            # Repeat string 'pattern' as often as needed to reach total length
            # 'length'. Then call try_one with that string, a string one larger
            # than that, and a string one smaller than that. Try this with all
            # small sizes and various powers of 2, so we exercise all likely
            # stdio buffer sizes, and "off by one" errors on both sides.
            q, r = divmod(length, len(pattern))
            teststring = pattern * q + pattern[:r]
            self.assertEqual(len(teststring), length)
            self.try_one(teststring)
            self.try_one(teststring + b"x")
            self.try_one(teststring[:-1])

    def test_primepat(self):
        # A pattern with prime length, to avoid simple relationships with
        # stdio buffer sizes.
        self.drive_one(b"1234567890\00\01\02\03\04\05\06")

    def test_nullpat(self):
        self.drive_one(bytes(1000))
class CBufferSizeTest(BufferSizeTest):
    # Exercise the C implementation of the io module.
    open = io.open
class PyBufferSizeTest(BufferSizeTest):
    # Exercise the pure Python implementation (_pyio).
    open = staticmethod(pyio.open)
def test_main():
    # Run both the C and the pure Python buffered IO variants.
    support.run_unittest(CBufferSizeTest, PyBufferSizeTest)
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
jonwright/ImageD11 | ImageD11/sparseframe.py | 1 | 10124 |
from __future__ import print_function, division
import time, sys
import h5py, scipy.sparse, numpy as np, pylab as pl
from ImageD11 import cImageD11
# see also sandbox/harvest_pixels.py
# Human readable descriptions of the named per-pixel / metadata entries
# used by sparse_frame below.
NAMES = {
    "filename" : "original filename used to create a sparse frame",
    "intensity" : "corrected pixel values",
    "nlabel": "Number of unique labels for an image labelling",
    "threshold" : "Cut off used for thresholding",
}
class sparse_frame( object ):
    """
    A sparse 2D image: row/col pixel indices into a dense shape plus any
    number of named per-pixel data arrays (intensities, labels, ...).
    """

    def __init__(self, row, col, shape, itype=np.uint16, pixels=None):
        """ row = slow direction
        col = fast direction
        shape = size of full image
        itype = the integer type to store the indices
            our c codes currently use unsigned short...
        nnz is implicit as len(row)==len(col)
        pixels = numpy arrays in a dict to name them
            throw in a ary.attrs if you want to save some
        """
        self.check( row, col, shape, itype )
        self.shape = shape
        self.row = np.asarray(row, dtype = itype )
        self.col = np.asarray(col, dtype = itype )
        self.nnz = len(self.row)
        # Named per-pixel arrays we might attach to these indices:
        # raw / corrected / smoothed intensities, labellings, ...
        self.pixels = {}
        self.meta = {}
        if pixels is not None:
            for name, val in pixels.items():
                assert len(val) == self.nnz
                self.pixels[name] = val

    def check(self, row, col, shape, itype):
        """ Ensure the index data makes sense and fits """
        lo = np.iinfo(itype).min
        hi = np.iinfo(itype).max
        assert len(shape) == 2
        assert shape[0] >= lo and shape[0] < hi
        assert shape[1] >= lo and shape[1] < hi
        assert np.min(row) >= lo and np.max(row) < hi
        assert np.min(col) >= lo and np.max(col) < hi
        assert len(row) == len(col)

    def is_sorted(self):
        """ Tests whether the data are sorted into slow/fast order
        rows are slow direction, columns are fast """
        # TODO: non uint16 cases
        assert self.row.dtype == np.uint16 and \
            cImageD11.sparse_is_sorted( self.row, self.col ) == 0

    def to_dense(self, data=None, out=None):
        """ returns the full 2D image
        data = name in self.pixels or 1D array matching self.nnz
        Does not handle repeated indices
        e.g. obj.to_dense( obj.pixels['raw_intensity'] )
        """
        if data in self.pixels:
            data = self.pixels[data] # give back this array
        else:
            ks = list( self.pixels.keys() )
            if len(ks) == 1:
                data = self.pixels[ks[0]] # default for only one
            else:
                # fix: np.bool alias was removed from numpy (>=1.24);
                # the builtin bool is what it meant.
                data = np.ones( self.nnz, bool ) # give a mask
        if out is None:
            out = np.zeros( self.shape, data.dtype )
        else:
            assert out.shape == self.shape
        assert len(data) == self.nnz
        # flat addressing: row * row_stride + col
        adr = self.row.astype(np.intp) * self.shape[1] + self.col
        out.flat[adr] = data
        return out

    def mask( self, msk ):
        """ returns a subset of itself """
        spf = sparse_frame( self.row[msk],
                            self.col[msk],
                            self.shape, self.row.dtype )
        for name, px in self.pixels.items():
            if name in self.meta:
                m = self.meta[name].copy()
            else:
                m = None
            spf.set_pixels( name, px[msk], meta = m )
        return spf

    def set_pixels( self, name, values, meta=None ):
        """ Named arrays sharing these labels """
        assert len(values) == self.nnz
        self.pixels[name] = values
        if meta is not None:
            self.meta[name] = meta

    def sort_by( self, name ):
        """ Sort the pixels by a named per-pixel array
        (e.g. a peak labelling, to group pixels per peak) """
        assert name in self.pixels
        order = np.argsort( self.pixels[name] )
        # bugfix: reorder is a bound method - the old call
        # self.reorder(self, order) passed self twice and raised TypeError
        self.reorder( order )

    def sort( self ):
        """ Puts you into slow / fast looping order """
        order = np.lexsort( ( self.col, self.row ) )
        # bugfix: same double-self call as in sort_by
        self.reorder( order )

    def reorder( self, order ):
        """ Put the pixels into a different order (in place) """
        assert len(order) == self.nnz
        self.row[:] = self.row[order]
        self.col[:] = self.col[order]
        for name, px in self.pixels.items():
            px[:] = px[order]

    def threshold(self, threshold, name='intensity'):
        """
        returns a new sparse frame with pixels > threshold
        """
        return self.mask( self.pixels[name] > threshold )
def to_hdf_group( frame, group ):
    """ Save a 2D sparse frame to a hdf group
    Makes 1 single frame per group
    """
    itype = np.dtype( frame.row.dtype )
    meta = { "itype" : itype.name,
             "shape0" : frame.shape[0],
             "shape1" : frame.shape[1] }
    for name, value in meta.items():
        group.attrs[name] = value
    opts = { "compression": "lzf",
             "shuffle" : True,
             }
    group.require_dataset( "row", shape=(frame.nnz,),
                           dtype=itype, **opts )
    group.require_dataset( "col", shape=(frame.nnz,),
                           dtype=itype, **opts )
    group['row'][:] = frame.row
    group['col'][:] = frame.col
    for pxname, px in frame.pixels.items():
        group.require_dataset( pxname, shape=(frame.nnz,),
                               dtype=px.dtype,
                               **opts )
        group[pxname][:] = px
        # bugfix: this module-level function referenced self.meta, which
        # raised NameError; frame.meta is what was intended. Also, h5py
        # attrs cannot be replaced wholesale by assignment, so copy the
        # metadata key by key instead.
        if pxname in frame.meta:
            for k, v in frame.meta[pxname].items():
                group[pxname].attrs[k] = v
def from_data_mask( mask, data, header ):
    """
    Create a sparse frame from a dense array plus a mask of pixels to
    keep. header is stored as metadata on the "intensity" array.
    """
    assert mask.shape == data.shape
    # using uint16 here - perhaps make this general in the future
    # ... but not for now
    assert data.shape[0] < pow(2,16)-1
    assert data.shape[1] < pow(2,16)-1
    nnz = (mask>0).sum()
    tmp = np.empty( data.shape[0],'i') # tmp hold px per row cumsums
    row = np.empty( nnz, np.uint16 )
    col = np.empty( nnz, np.uint16 )
    # C helper fills row/col with the coordinates of the masked pixels
    cImageD11.mask_to_coo( mask, row, col, tmp )
    intensity = data[ mask > 0 ]
    # intensity.attrs = dict(header) # FIXME USE xarray ?
    spf = sparse_frame( row, col, data.shape, itype=np.uint16 )
    spf.set_pixels( "intensity" , intensity, dict( header ) )
    return spf
def from_hdf_group( group ):
    """Rebuild a sparse_frame from a hdf group written by to_hdf_group."""
    itype = np.dtype( group.attrs['itype'] )
    shape = group.attrs['shape0'], group.attrs['shape1']
    spf = sparse_frame( group['row'][:], group['col'][:], shape, itype=itype )
    for pxname in list(group):
        # row/col are the indices themselves, not per-pixel data
        if pxname not in ("row", "col"):
            spf.set_pixels( pxname, group[pxname][:],
                            dict( group[pxname].attrs ) )
    return spf
def sparse_moments( frame, intensity_name, labels_name ):
    """ Compute 2D blob properties for each label via the C kernel.
    We rely on a labelling array carrying nlabel metadata (==labels.data.max())"""
    nl = frame.meta[ labels_name ][ "nlabel" ]
    return cImageD11.sparse_blob2Dproperties(
        frame.pixels[intensity_name],
        frame.row,
        frame.col,
        frame.pixels[labels_name],
        nl )
def overlaps(frame1, labels1, frame2, labels2):
    """
    figures out which label of self matches which label of other
    Assumes the zero label does not exist (background)
    Returns sparse array of:
        label in self (row)
        label in other (col)
        number of shared pixels (data)
    """
    # ki/kj: indices of the pixels common to both frames
    ki = np.empty( frame1.nnz, 'i' )
    kj = np.empty( frame2.nnz, 'i' )
    npx = cImageD11.sparse_overlaps( frame1.row, frame1.col, ki,
                                     frame2.row, frame2.col, kj)
    # self.data and other.data filled during init
    row = frame1.pixels[labels1][ ki[:npx] ] # my labels
    col = frame2.pixels[labels2][ kj[:npx] ] # your labels
    ect = np.empty( npx, 'i') # ect = counts of overlaps
    tj = np.empty( npx, 'i') # tj = temporary for sorting
    n1 = frame1.meta[labels1][ "nlabel" ]
    n2 = frame2.meta[labels2][ "nlabel" ]
    tmp = np.empty( max(n1, n2)+1, 'i') # for histogram
    nedge = cImageD11.compress_duplicates( row, col, ect, tj, tmp )
    # overwrites row/col in place : ignore the zero label (hope it is not there)
    # labels are 1-based in the frames; shift to 0-based matrix indices
    crow = row[:nedge]-1
    ccol = col[:nedge]-1
    cdata = ect[:nedge]
    cedges = scipy.sparse.coo_matrix( ( cdata, (crow, ccol)), shape=(n1, n2) )
    # really?
    return cedges
def sparse_connected_pixels( frame,
                             label_name="connectedpixels",
                             data_name="intensity",
                             threshold=None ):
    """Label connected pixels above a threshold in a sparse frame.

    frame      : a sparse frame
    label_name : name under which the label array is stored on the frame
    data_name  : name of the pixel array in the frame to threshold
    threshold  : float cut value; when None, taken from the "threshold"
                 metadata saved with the data array
    Returns the number of connected objects found.
    """
    if threshold is None:
        threshold = frame.meta[data_name]["threshold"]
    label_array = np.zeros( frame.nnz, "i" )
    nlabel = cImageD11.sparse_connectedpixels( frame.pixels[data_name],
                                               frame.row, frame.col,
                                               threshold, label_array )
    # Save the labelling on the frame, remembering how many labels exist.
    frame.set_pixels( label_name, label_array, { 'nlabel' : nlabel } )
    return nlabel
def sparse_localmax( frame,
                     label_name="localmax",
                     data_name = "intensity" ):
    """Assign each pixel of a sparse frame to a local maximum peak.

    Stores the resulting label array on the frame under label_name (with
    "nlabel" metadata) and returns the number of peaks found.
    """
    npixel = frame.nnz
    label_array = np.zeros( npixel, "i" )
    max_values = np.zeros( npixel, np.float32 )  # workspace for the C code
    max_indices = np.zeros( npixel, 'i' )        # workspace for the C code
    nlabel = cImageD11.sparse_localmaxlabel( frame.pixels[data_name],
                                             frame.row, frame.col,
                                             max_values, max_indices,
                                             label_array )
    frame.set_pixels( label_name, label_array, { "nlabel" : nlabel } )
    return nlabel
| gpl-2.0 |
eleonrk/SickRage | lib/hachoir_parser/file_system/iso9660.py | 85 | 4954 | """
ISO 9660 (cdrom) file system parser.
Documents:
- Standard ECMA-119 (december 1987)
http://www.nondot.org/sabre/os/files/FileSystems/iso9660.pdf
Author: Victor Stinner
Creation: 11 july 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt32, UInt64, Enum,
NullBytes, RawBytes, String)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class PrimaryVolumeDescriptor(FieldSet):
    """Content of an ISO 9660 Primary Volume Descriptor.

    Covers the 2041 bytes that follow the 7-byte descriptor header
    (type, "CD001" signature, version) parsed by Volume.

    NOTE(review): ECMA-119 stores numeric fields in both byte orders
    (32-bit little-endian immediately followed by 32-bit big-endian);
    the UInt64 fields below appear to read both copies as one value and
    the UInt32 fields only one copy -- confirm against the spec before
    relying on the decoded numbers.
    """
    static_size = 2041*8
    def createFields(self):
        yield NullBytes(self, "unused[]", 1)
        yield String(self, "system_id", 32, "System identifier", strip=" ")
        yield String(self, "volume_id", 32, "Volume identifier", strip=" ")
        yield NullBytes(self, "unused[]", 8)
        yield UInt64(self, "space_size", "Volume space size")
        yield NullBytes(self, "unused[]", 32)
        yield UInt32(self, "set_size", "Volume set size")
        yield UInt32(self, "seq_num", "Sequence number")
        yield UInt32(self, "block_size", "Block size")
        yield UInt64(self, "path_table_size", "Path table size")
        yield UInt32(self, "occu_lpath", "Location of Occurrence of Type L Path Table")
        yield UInt32(self, "opt_lpath", "Location of Optional of Type L Path Table")
        yield UInt32(self, "occu_mpath", "Location of Occurrence of Type M Path Table")
        yield UInt32(self, "opt_mpath", "Location of Optional of Type M Path Table")
        yield RawBytes(self, "root", 34, "Directory Record for Root Directory")
        yield String(self, "vol_set_id", 128, "Volume set identifier", strip=" ")
        yield String(self, "publisher", 128, "Publisher identifier", strip=" ")
        yield String(self, "data_preparer", 128, "Data preparer identifier", strip=" ")
        yield String(self, "application", 128, "Application identifier", strip=" ")
        yield String(self, "copyright", 37, "Copyright file identifier", strip=" ")
        yield String(self, "abstract", 37, "Abstract file identifier", strip=" ")
        yield String(self, "biographic", 37, "Biographic file identifier", strip=" ")
        yield String(self, "creation_ts", 17, "Creation date and time", strip=" ")
        yield String(self, "modification_ts", 17, "Modification date and time", strip=" ")
        yield String(self, "expiration_ts", 17, "Expiration date and time", strip=" ")
        yield String(self, "effective_ts", 17, "Effective date and time", strip=" ")
        yield UInt8(self, "struct_ver", "Structure version")
        yield NullBytes(self, "unused[]", 1)
        yield String(self, "app_use", 512, "Application use", strip=" \0")
        yield NullBytes(self, "unused[]", 653)
class BootRecord(FieldSet):
    """Content of a Boot Record descriptor (2041 bytes): boot system and
    boot identifiers followed by boot-system-specific data."""
    static_size = 2041*8
    def createFields(self):
        yield String(self, "sys_id", 31, "Boot system identifier", strip="\0")
        yield String(self, "boot_id", 31, "Boot identifier", strip="\0")
        yield RawBytes(self, "system_use", 1979, "Boot system use")
class Terminator(FieldSet):
    """Content of the Volume Descriptor Set Terminator: 2041 zero bytes."""
    static_size = 2041*8
    def createFields(self):
        yield NullBytes(self, "null", 2041)
class Volume(FieldSet):
    """One 2048-byte ISO 9660 volume descriptor.

    Layout: type (1 byte), "CD001" signature (5 bytes), version (1 byte),
    then 2041 bytes of content parsed by a type-specific handler when one
    is registered in content_handler, or kept as raw bytes otherwise.
    """
    endian = BIG_ENDIAN
    TERMINATOR = 255
    type_name = {
        0: "Boot Record",
        1: "Primary Volume Descriptor",
        2: "Supplementary Volume Descriptor",
        3: "Volume Partition Descriptor",
        TERMINATOR: "Volume Descriptor Set Terminator",
    }
    static_size = 2048 * 8
    content_handler = {
        0: BootRecord,
        1: PrimaryVolumeDescriptor,
        TERMINATOR: Terminator,
    }
    def createFields(self):
        yield Enum(UInt8(self, "type", "Volume descriptor type"), self.type_name)
        # Fix: the standard is ISO 9660 -- the old texts said "ISO 9960".
        yield RawBytes(self, "signature", 5, "ISO 9660 signature (CD001)")
        if self["signature"].value != "CD001":
            raise ParserError("Invalid ISO 9660 volume signature")
        yield UInt8(self, "version", "Volume descriptor version")
        # Dispatch on the descriptor type; 2048-7 = content bytes left
        # after the 7-byte header parsed above.
        cls = self.content_handler.get(self["type"].value, None)
        if cls:
            yield cls(self, "content")
        else:
            yield RawBytes(self, "raw_content", 2048-7)
class ISO9660(Parser):
    """ISO 9660 parser: skips the 32 KiB system area, then reads volume
    descriptors until the set terminator."""
    endian = LITTLE_ENDIAN
    MAGIC = "\x01CD001"   # descriptor type 1 followed by the "CD001" signature
    NULL_BYTES = 0x8000   # 32768-byte system area before the first descriptor
    PARSER_TAGS = {
        "id": "iso9660",
        "category": "file_system",
        "description": "ISO 9660 file system",
        "min_size": (NULL_BYTES + 6)*8,
        "magic": ((MAGIC, NULL_BYTES*8),),
    }
    def validate(self):
        # readBytes() takes a bit address: check the magic at byte 0x8000.
        if self.stream.readBytes(self.NULL_BYTES*8, len(self.MAGIC)) != self.MAGIC:
            return "Invalid signature"
        return True
    def createFields(self):
        yield self.seekByte(self.NULL_BYTES, null=True)
        # Read descriptors until the Volume Descriptor Set Terminator.
        while True:
            volume = Volume(self, "volume[]")
            yield volume
            if volume["type"].value == Volume.TERMINATOR:
                break
        # Anything after the terminator is kept as unparsed padding.
        if self.current_size < self._size:
            yield self.seekBit(self._size, "end")
| gpl-3.0 |
uppalk1/RackHD | test/tests/api/v1_1/nodes_tests.py | 13 | 16534 | from config.api1_1_config import *
from config.amqp import *
from modules.logger import Log
from modules.amqp import AMQPWorker
from modules.worker import WorkerThread, WorkerTasks
from on_http_api1_1 import NodesApi as Nodes
from on_http_api1_1 import WorkflowApi as Workflows
from on_http_api1_1 import rest
from datetime import datetime
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import SkipTest
from proboscis import test
from json import loads
from time import sleep
LOG = Log(__name__)
@test(groups=['nodes.tests'])
class NodesTests(object):
    """Integration tests for the RackHD v1.1 /nodes API.

    Waits for Graph.SKU.Discovery to finish (via AMQP graph-finished
    events), then exercises node CRUD, OBM settings, DHCP whitelisting,
    catalogs and workflow endpoints. Test ordering is enforced with
    proboscis depends_on_groups.
    """
    def __init__(self):
        # HTTP client shared by all tests; last_response holds each reply.
        self.__client = config.api_client
        self.__task = None                  # AMQP worker thread for discovery
        self.__discovery_duration = None    # wall-clock start of the wait
        self.__discovered = 0               # discovery graphs seen finishing
        # Fake nodes (one per node type) created/deleted by the CRUD tests.
        self.__test_nodes = [
            {
                'autoDiscover': 'false',
                'name': 'test_switch_node',
                'type': 'switch',
                'snmpSettings': {
                    'host': '1.1.1.1',
                    'community': 'rackhd'
                }
            },
            {
                'autoDiscover': 'false',
                'name': 'test_mgmt_node',
                'type': 'mgmt',
                'snmpSettings': {
                    'host': '1.1.1.1',
                    'community': 'rackhd'
                }
            },
            {
                'autoDiscover': 'false',
                'name': 'test_pdu_node',
                'type': 'pdu',
                'snmpSettings': {
                    'host': '1.1.1.2',
                    'community': 'rackhd'
                }
            },
            {
                'autoDiscover': 'false',
                'name': 'test_enclosure_node',
                'type': 'enclosure'
            },
            {
                'autoDiscover': 'false',
                'name': 'test_compute_node',
                'type': 'compute'
            }
        ]
    # Decode the JSON body of the most recent API response.
    def __get_data(self):
        return loads(self.__client.last_response.data)
    # Return the active workflow status for a node ('pending', 'running',
    # ...) or the HTTP status code when there is no active workflow body.
    def __get_workflow_status(self, id):
        Nodes().nodes_identifier_workflows_active_get(id)
        status = self.__client.last_response.status
        if status == 200:
            data = self.__get_data()
            status = data.get('_status')
        assert_is_not_none(status)
        return status
    # Post a workflow to a node (unless one is already active) and poll
    # until it starts. Returns the remaining timeout; 0 means it never
    # reached pending/running within ~20 seconds.
    def __post_workflow(self, id, graph_name, data):
        status = self.__get_workflow_status(id)
        if status != 'pending' and status != 'running':
            Nodes().nodes_identifier_workflows_post(id,graph_name,body=data)
        timeout = 20
        while status != 'pending' and status != 'running' and timeout != 0:
            LOG.warning('Workflow status for Node {0} (status={1},timeout={2})'.format(id,status,timeout))
            status = self.__get_workflow_status(id)
            sleep(1)
            timeout -= 1
        return timeout
    # Count the compute-type nodes currently known to RackHD.
    # NOTE(review): the local name 'type' shadows the builtin.
    def check_compute_count(self):
        Nodes().nodes_get()
        nodes = self.__get_data()
        count = 0
        for n in nodes:
            type = n.get('type')
            if type == 'compute':
                count += 1
        return count
    @test(groups=['nodes.discovery.test'])
    def test_nodes_discovery(self):
        """ Testing Graph.Discovery completion """
        # Skip the wait entirely if nodes have already been discovered.
        count = defaults.get('RACKHD_NODE_COUNT', '')
        if (count.isdigit() and self.check_compute_count() == int(count)) or self.check_compute_count():
            LOG.warning('Nodes already discovered!')
            return
        self.__discovery_duration = datetime.now()
        LOG.info('Wait start time: {0}'.format(self.__discovery_duration))
        # Listen on the graph-finished AMQP queue until every compute node
        # has completed its discovery graph (see handle_graph_finish).
        self.__task = WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH, \
                                              callbacks=[self.handle_graph_finish]), 'discovery')
        def start(worker,id):
            worker.start()
        tasks = WorkerTasks(tasks=[self.__task], func=start)
        tasks.run()
        tasks.wait_for_completion(timeout_sec=1200)
        assert_false(self.__task.timeout, \
            message='timeout waiting for task {0}'.format(self.__task.id))
    # AMQP callback: match the finished graph to a Graph.SKU.Discovery
    # workflow, log the outcome, and stop the worker once every compute
    # node has reported in.
    def handle_graph_finish(self,body,message):
        routeId = message.delivery_info.get('routing_key').split('graph.finished.')[1]
        Workflows().workflows_get()
        workflows = self.__get_data()
        for w in workflows:
            definition = w.get('definition', {})
            injectableName = definition.get('injectableName')
            if injectableName == 'Graph.SKU.Discovery':
                graphId = w.get('context',{}).get('graphId')
                if graphId == routeId:
                    message.ack()
                    status = body.get('status')
                    if status == 'succeeded' or status == 'failed':
                        options = definition.get('options')
                        nodeid = options.get('defaults',{}).get('nodeId')
                        duration = datetime.now() - self.__discovery_duration
                        msg = {
                            'graph_name': injectableName,
                            'target': nodeid,
                            'status': status,
                            'route_id': routeId,
                            'duration': str(duration)
                        }
                        if status == 'failed':
                            msg['active_task'] = w.get('tasks',{})
                            LOG.error(msg, json=True)
                        else:
                            LOG.info(msg, json=True)
                        self.__discovered += 1
                    break
        # Stop listening once all known compute nodes have finished.
        check = self.check_compute_count()
        if check and check == self.__discovered:
            self.__task.worker.stop()
            self.__task.running = False
            self.__discovered = 0
    @test(groups=['test-nodes'], depends_on_groups=['nodes.discovery.test'])
    def test_nodes(self):
        """ Testing GET:/nodes """
        Nodes().nodes_get()
        nodes = self.__get_data()
        LOG.debug(nodes,json=True)
        assert_not_equal(0, len(nodes), message='Node list was empty!')
    @test(groups=['test-node-id'], depends_on_groups=['test-nodes'])
    def test_node_id(self):
        """ Testing GET:/nodes/:id """
        Nodes().nodes_get()
        nodes = self.__get_data()
        LOG.debug(nodes,json=True)
        codes = []
        for n in nodes:
            LOG.info(n)
            if n.get('type') == 'compute':
                uuid = n.get('id')
                Nodes().nodes_identifier_get(uuid)
                rsp = self.__client.last_response
                codes.append(rsp)
        assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
        for c in codes:
            assert_equal(200, c.status, message=c.reason)
        # An unknown identifier must raise (404 from the API).
        assert_raises(rest.ApiException, Nodes().nodes_identifier_get, 'fooey')
    @test(groups=['create-node'], depends_on_groups=['test-node-id'])
    def test_node_create(self):
        """ Verify POST:/nodes/ """
        for n in self.__test_nodes:
            LOG.info('Creating node (name={0})'.format(n.get('name')))
            Nodes().nodes_post(n)
            rsp = self.__client.last_response
            assert_equal(201, rsp.status, message=rsp.reason)
    @test(groups=['test-node-id-obm'], depends_on_groups=['create-node'])
    def test_node_id_obm(self):
        """ Testing GET:/nodes/:id/obm """
        Nodes().nodes_get()
        nodes = self.__get_data()
        LOG.debug(nodes,json=True)
        codes = []
        for n in nodes:
            if n.get('name') == 'test_compute_node':
                uuid = n.get('id')
                Nodes().nodes_identifier_obm_get(uuid)
                rsp = self.__client.last_response
                LOG.info('OBM setting for node ID {0} is {1}'.format(uuid, rsp.data))
                codes.append(rsp)
        assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
        for c in codes:
            assert_equal(200, c.status, message=c.reason)
        assert_raises(rest.ApiException, Nodes().nodes_identifier_obm_get, 'fooey')
    @test(groups=['patch-node'], depends_on_groups=['test-node-id-obm'])
    def test_node_patch(self):
        """ Verify PATCH:/nodes/:id """
        data = {"name": 'fake_name_test'}
        Nodes().nodes_get()
        nodes = self.__get_data()
        codes = []
        for n in nodes:
            if n.get('name') == 'test_compute_node':
                uuid = n.get('id')
                # Patch the name, verify, then restore the original name.
                Nodes().nodes_identifier_patch(uuid, data)
                rsp = self.__client.last_response
                test_nodes = self.__get_data()
                assert_equal(test_nodes.get('name'), 'fake_name_test', 'Oops patch failed')
                codes.append(rsp)
                LOG.info('Restoring name to "test_compute_node"')
                correct_data = {"name": 'test_compute_node'}
                Nodes().nodes_identifier_patch(uuid, correct_data)
                rsp = self.__client.last_response
                restored_nodes = self.__get_data()
                assert_equal(restored_nodes.get('name'), 'test_compute_node', 'Oops restoring failed')
                codes.append(rsp)
        assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
        for c in codes:
            assert_equal(200, c.status, message=c.reason)
        assert_raises(rest.ApiException, Nodes().nodes_identifier_patch, 'fooey', data)
    @test(groups=['delete-node'], depends_on_groups=['patch-node'])
    def test_node_delete(self):
        """ Testing DELETE:/nodes/:id """
        # Remove every node created by test_node_create (matched by name).
        codes = []
        test_names = []
        Nodes().nodes_get()
        nodes = self.__get_data()
        test_names = [t.get('name') for t in self.__test_nodes]
        for n in nodes:
            name = n.get('name')
            if name in test_names:
                uuid = n.get('id')
                LOG.info('Deleting node {0} (name={1})'.format(uuid, name))
                Nodes().nodes_identifier_delete(uuid)
                codes.append(self.__client.last_response)
        assert_not_equal(0, len(codes), message='Delete node list empty!')
        for c in codes:
            assert_equal(200, c.status, message=c.reason)
        assert_raises(rest.ApiException, Nodes().nodes_identifier_delete, 'fooey')
    @test(groups=['create-whitelist-node'], depends_on_groups=['delete-node'])
    def test_whitelist_node_create(self):
        """ Verify POST:/nodes/:mac/dhcp/whitelist """
        Nodes().nodes_get()
        nodes = self.__get_data()
        macList = []
        for n in nodes:
            type = n.get('type')
            assert_is_not_none(type)
            if type == 'compute':
                idList = n.get('identifiers')
                assert_is_not_none(idList)
                if len(idList) > 0:
                    macList.append(idList[0]) # grab the first mac
        for addr in macList:
            LOG.info('whitelisting MAC address {0}'.format(addr))
            Nodes().nodes_macaddress_dhcp_whitelist_post(addr,body={})
            data = self.__get_data()
            assert_not_equal(0, len(data))
            # Whitelist entries come back dash-separated; normalise for logs.
            addrParsed = data[0].replace("-", ":")
            LOG.info(addrParsed)
            LOG.info(addr)
    @test(groups=['delete-whitelist-node'], depends_on_groups=['create-whitelist-node'])
    def test_whitelist_node_delete(self):
        """ Verify Delete:/nodes/:mac/dhcp/whitelist """
        Nodes().nodes_get()
        nodes = self.__get_data()
        macList = []
        for n in nodes:
            type = n.get('type')
            assert_is_not_none(type)
            if type == 'compute':
                idList = n.get('identifiers')
                assert_is_not_none(idList)
                if len(idList) > 0:
                    macList.append(idList[0]) # grab the first mac
        for addr in macList:
            LOG.info('Deleting macaddress {0}' .format(addr))
            Nodes().nodes_macaddress_dhcp_whitelist_delete(addr)
            rsp = self.__client.last_response
            assert_equal(204, rsp.status, message=rsp.reason)
    @test(groups=['catalog_nodes', 'check-nodes-catalogs.test'], \
        depends_on_groups=['nodes.discovery.test'])
    def test_node_catalogs(self):
        """ Testing GET id:/catalogs """
        resps = []
        Nodes().nodes_get()
        nodes = self.__get_data()
        for n in nodes:
            if n.get('type') == 'compute':
                Nodes().nodes_identifier_catalogs_get( n.get('id'))
                resps.append(self.__get_data())
        for resp in resps:
            assert_not_equal(0, len(resp), message='Node catalog is empty!')
        assert_raises(rest.ApiException, Nodes().nodes_identifier_catalogs_get, 'fooey')
    @test(groups=['catalog_source'], depends_on_groups=['catalog_nodes'])
    def test_node_catalogs_bysource(self):
        """ Testing GET id:/catalogs/source """
        resps = []
        Nodes().nodes_get()
        nodes = self.__get_data()
        for n in nodes:
            if n.get('type') == 'compute':
                Nodes().nodes_identifier_catalogs_source_get( n.get('id'),'bmc')
                resps.append(self.__client.last_response)
        for resp in resps:
            assert_equal(200,resp.status, message=resp.reason)
        assert_raises(rest.ApiException, Nodes().nodes_identifier_catalogs_source_get, 'fooey','bmc')
    @test(groups=['node_workflows'], depends_on_groups=['nodes.discovery.test'])
    def test_node_workflows_get(self):
        """Testing node GET:id/workflows"""
        resps = []
        Nodes().nodes_get()
        nodes = self.__get_data()
        for n in nodes:
            if n.get('type') == 'compute':
                Nodes().nodes_identifier_workflows_get(n.get('id'))
                resps.append(self.__get_data())
        for resp in resps:
            assert_not_equal(0, len(resp), message='No Workflows found for Node')
        assert_raises(rest.ApiException, Nodes().nodes_identifier_workflows_get, 'fooey')
    @test(groups=['node_post_workflows'], depends_on_groups=['node_workflows'])
    def test_node_workflows_post(self):
        """Testing node POST:id/workflows"""
        resps = []
        Nodes().nodes_get()
        nodes = self.__get_data()
        for n in nodes:
            if n.get('type') == 'compute':
                id = n.get('id')
                # timeout > 0 means the posted graph actually started.
                timeout = self.__post_workflow(id,'Graph.Discovery',{})
                if timeout > 0:
                    data = self.__get_data()
                    resps.append({'data': data, 'id':id})
        for resp in resps:
            assert_not_equal(0, len(resp['data']),
                message='No Workflows found for Node {0}'.format(resp['id']))
        assert_raises(rest.ApiException, Nodes().nodes_identifier_workflows_post, 'fooey','Graph.Discovery',body={})
    @test(groups=['node_workflows_active'], depends_on_groups=['node_post_workflows'])
    def test_node_workflows_active(self):
        """Testing node GET:id/workflows/active"""
        # test_node_workflows_post verifies the same functionality
        self.test_node_workflows_post()
        assert_raises(rest.ApiException, Nodes().nodes_identifier_workflows_active_get, 'fooey')
    @test(groups=['node_workflows_del_active'], depends_on_groups=['node_workflows_active'])
    def test_node_workflows_del_active(self):
        """Testing node DELETE:id/workflows/active"""
        Nodes().nodes_get()
        nodes = self.__get_data()
        for n in nodes:
            if n.get('type') == 'compute':
                id = n.get('id')
                timeout = 5
                done = False
                # Start a graph, then delete it while active; 404 means it
                # already finished, so retry up to 'timeout' times.
                while timeout > 0 and done == False:
                    if 0 == self.__post_workflow(id,'Graph.Discovery',{}):
                        fail('Timed out waiting for graph to start!')
                    try:
                        Nodes().nodes_identifier_workflows_active_delete(id)
                        done = True
                    except rest.ApiException as e:
                        if e.status != 404:
                            raise e
                    timeout -= 1
        assert_raises(rest.ApiException, Nodes().nodes_identifier_workflows_active_delete, 'fooey')
| apache-2.0 |
nksheridan/elephantAI | test_Deter_as_Server_and_Play_Audio.py | 1 | 1414 | # DETER DEVICE
# this is test code for putting the deter device into server mode, and getting a message via bluetooth from the detection device, and
# then going ahead and playing scare sounds. You need to determine your MAC address. It is for the server in this case, so the MAC address
# of the deter device. You also need to pair the deter device with the detection device via Bluetooth prior to using this. You can do
# that from the Bluetooth icon in the Raspian GUI.
import socket
import time
import os
import random
HOST_MAC_ADDRESS = 'xxx'  # Bluetooth MAC address of this (deter) device
PORT = 9
BACKLOG = 1
RECV_SIZE = 1024


def receive_message():
    """Accept one Bluetooth RFCOMM connection and return the bytes received.

    Echoes the data back to the detection device. Always closes both the
    client and server sockets (the original leaked them on the success
    path). Returns b'' when nothing was received or the connection failed.
    """
    server = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM,
                           socket.BTPROTO_RFCOMM)
    server.bind((HOST_MAC_ADDRESS, PORT))
    server.listen(BACKLOG)
    print("We are waiting for a message from the detection device to arrive via bluetooth!")
    data = b''
    client = None
    try:
        client, address = server.accept()
        data = client.recv(RECV_SIZE)
        if data:
            print(data)
            client.send(data)  # echo back
    except OSError:
        print("closing the socket")
    finally:
        # Close unconditionally; 'client' stays None if accept() failed.
        if client is not None:
            client.close()
        server.close()
    return data


def play_scare_sounds():
    """Play 10 randomly chosen bee sounds through aplay."""
    print("play scare sounds now")
    time.sleep(3)
    # BUG FIX: the third entry was missing its quotes (a SyntaxError).
    scare_sounds = ['aplay bees1.wav', 'aplay bees2.wav', 'aplay bees3.wav']
    for _ in range(10):
        to_play = random.choice(scare_sounds)
        print(to_play)
        os.system(to_play)


def main():
    """Wait for one message; if it is b'yes_audio', play the scare sounds."""
    data = receive_message()
    message = str(data)  # str() of bytes yields e.g. "b'yes_audio'"
    print(message)
    if message == "b'yes_audio'":
        play_scare_sounds()
    print("Finished scare. Now can message detection device, and await another message from it")


if __name__ == '__main__':
    main()
tyler-cromwell/Acid | client.py | 1 | 2439 | #!/usr/bin/python3
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
The MIT License (MIT)
Copyright (c) 2016 Tyler Cromwell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import getopt
import readline
import socket
import sys
"""
Readline settings
"""
readline.parse_and_bind('tab: complete')
"""
Connection settings
"""
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_ip = '10.0.0.20'
client_port = 8888
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:p:', ['ipaddress=', 'port='])
""" Process command line arguments """
for o, a in opts:
if o == '-i' or o == '--ipaddress':
client_ip = a
elif o == '-p' or o == '--port':
client_port = int(a)
""" One-time send """
if len(sys.argv) > 1:
message = ''
for i in range(1, len(sys.argv)):
message += sys.argv[i]
if i < (len(sys.argv)-1):
message += ' '
client.sendto(message.encode('utf-8'), (client_ip, client_port))
""" Loop for message """
while len(sys.argv) >= 1:
user_input = input('UDP> ')
if user_input == 'quit' or user_input == 'exit':
break
client.sendto(user_input.encode('utf-8'), (client_ip, client_port))
except EOFError:
print()
except KeyboardInterrupt:
print()
| mit |
pothosware/gnuradio | gnuradio-runtime/python/gnuradio/gr/qa_hier_block2.py | 7 | 2901 | #
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr_unittest
from gnuradio.gr.hier_block2 import _multiple_endpoints, _optional_endpoints
class test_hier_block2(gr_unittest.TestCase):
    """Unit tests for the endpoint-resolution decorators used by
    hier_block2 (``_multiple_endpoints`` and ``_optional_endpoints``)."""
    def setUp(self):
        # Records the (src, src_port, dst, dst_port) tuples each call makes.
        self.call_log = []
        # Minimal stand-in for a GR block: to_basic_block() returns itself.
        self.Block = type("Block", (), {"to_basic_block": lambda bl: bl})
    def test_f(self, *args):
        """test doc"""
        self.call_log.append(args)
    # Wrap test_f so calls through each decorator land in call_log.
    multi = _multiple_endpoints(test_f)
    opt = _optional_endpoints(test_f)
    def test_000(self):
        # The decorator must preserve the wrapped function's metadata.
        self.assertEqual(self.multi.__doc__, "test doc")
        self.assertEqual(self.multi.__name__, "test_f")
    def test_001(self):
        # A single block is passed through unchanged.
        b = self.Block()
        self.multi(b)
        self.assertEqual((b,), self.call_log[0])
    def test_002(self):
        # Two bare blocks default to port 0 on both sides.
        b1, b2 = self.Block(), self.Block()
        self.multi(b1, b2)
        self.assertEqual([(b1, 0, b2, 0)], self.call_log)
    def test_003(self):
        # (block, port) tuples carry explicit port numbers.
        b1, b2 = self.Block(), self.Block()
        self.multi((b1, 1), (b2, 2))
        self.assertEqual([(b1, 1, b2, 2)], self.call_log)
    def test_004(self):
        # A chain of endpoints expands to pairwise connections.
        b1, b2, b3, b4 = [self.Block()] * 4
        self.multi(b1, (b2, 5), b3, (b4, 0))
        expected = [
            (b1, 0, b2, 5),
            (b2, 5, b3, 0),
            (b3, 0, b4, 0),
        ]
        self.assertEqual(expected, self.call_log)
    def test_005(self):
        # A lone (block, port) tuple is rejected: nothing to connect to.
        with self.assertRaises(ValueError) as c:
            self.multi((self.Block(), 5))
        self.assertIsInstance(c.exception, ValueError)
    def test_006(self):
        # Tuples with more than two elements are malformed endpoints.
        with self.assertRaises(ValueError) as c:
            self.multi(self.Block(), (self.Block(), 5, 5))
        self.assertIsInstance(c.exception, ValueError)
    def test_007(self):
        # Optional endpoints accept string port names as flat arguments.
        b1, b2 = self.Block(), self.Block()
        self.opt(b1, "in", b2, "out")
        self.assertEqual([(b1, "in", b2, "out")], self.call_log)
    def test_008(self):
        # ...and as (block, name) tuples.
        # NOTE(review): 'f' is bound but unused here.
        f, b1, b2 = self.multi, self.Block(), self.Block()
        self.opt((b1, "in"), (b2, "out"))
        self.assertEqual([(b1, "in", b2, "out")], self.call_log)
# Allow running this QA file directly; results are written as XML.
if __name__ == '__main__':
    gr_unittest.run(test_hier_block2, "test_hier_block2.xml")
| gpl-3.0 |
glaubitz/fs-uae-debian | launcher/OpenGL/GL/KHR/debug.py | 9 | 9706 | '''OpenGL extension KHR.debug
This module customises the behaviour of the
OpenGL.raw.GL.KHR.debug to provide a more
Python-friendly API
Overview (from the spec)
This extension allows the GL to notify applications when various events
occur that may be useful during application development, debugging and
profiling.
These events are represented in the form of enumerable messages with a
human-readable string representation. Examples of debug events include
incorrect use of the GL, warnings of undefined behavior, and performance
warnings.
A message is uniquely identified by a source, a type and an
implementation-dependent ID within the source and type pair.
A message's source identifies the origin of the message and can either
describe components of the GL, the window system, third-party external
sources such as external debuggers, or even the application itself.
The type of the message roughly identifies the nature of the event that
caused the message. Examples include errors, performance warnings,
warnings about undefined behavior or notifications identifying that the
application is within a specific section of the application code.
A message's ID for a given source and type further distinguishes messages
within namespaces. For example, an error caused by a negative parameter
value or an invalid internal texture format are both errors generated by
the API, but would likely have different message IDs.
Each message is also assigned to a severity level that denotes roughly how
"important" that message is in comparison to other messages across all
sources and types. For example, notification of a GL error would likely
have a higher severity than a performance warning due to redundant state
changes.
Furthermore, every message contains an implementation-dependent string
representation that provides a useful description of the event.
Messages are communicated to the application through an application-
defined callback function that is called by the GL implementation on each
debug message. The motivation for the callback routine is to free
application developers from actively having to query whether a GL error,
or any other debuggable event has happened after each call to a GL
function. With a callback, developers can keep their code free of debug
checks, set breakpoints in the callback function, and only have to react
to messages as they occur. In situations where using a callback is not
possible, a message log is also provided that stores only copies of recent
messages until they are actively queried.
To control the volume of debug output, messages can be disabled either
individually by ID, or entire sets of messages can be turned off based on
combination of source and type, through the entire application code or
only section of the code encapsulated in debug groups. A debug group may
also be used to annotate the command stream using descriptive texts.
This extension also defines debug markers, a mechanism for the OpenGL
application to annotate the command stream with markers for discrete
events.
When profiling or debugging an OpenGL application with a built-in or an
external debugger or profiler, it is difficult to relate the commands
within the command stream to the elements of the scene or parts of the
program code to which they correspond. Debug markers and debug groups help
obviate this by allowing applications to specify this link. For example, a
debug marker can be used to identify the beginning of a frame in the
command stream and a debug group can encapsulate a specific command stream
to identify a rendering pass. Debug groups also allow control of the debug
outputs volume per section of an application code providing an effective
way to handle the massive amount of debug outputs that drivers can
generate.
Some existing implementations of ARB_debug_output only expose the
ARB_debug_output extension string if the context was created with the
debug flag {GLX|WGL}_CONTEXT_DEBUG_BIT_ARB as specified in
{GLX|WGL}_ARB_create_context. The behavior is not obvious when the
functionality is brought into the OpenGL core specification because the
extension string and function entry points must always exist.
This extension modifies the existing ARB_debug_output extension to allow
implementations to always have an empty message log. The specific messages
written to the message log or callback routines are already implementation
defined, so this specification simply makes it explicit that it's fine for
there to be zero messages generated, even when a GL error occurs, which is
useful if the context is non-debug.
Debug output can be enabled and disabled by changing the DEBUG_OUTPUT
state. It is implementation defined how much debug output is generated if
the context was created without the CONTEXT_DEBUG_BIT set. This is a new
query bit added to the existing GL_CONTEXT_FLAGS state to specify whether
the context was created with debug enabled.
Finally, this extension defines a mechanism for OpenGL applications to
label their objects (textures, buffers, shaders, etc.) with a descriptive
string.
When profiling or debugging an OpenGL application within an external or
built-in (debug output API) debugger or profiler, it is difficult to
identify objects from their object names (integers).
Even when the object itself is viewed it can be problematic to
differentiate between similar objects. Attaching a descriptive string, a
label, to an object obviates this difficulty.
The intended purpose of this extension is purely to improve the user
experience within OpenGL development tools and application built-in
profilers and debuggers. This extension typically improves OpenGL
programmers efficiency by allowing them to instantly detect issues and the
reason for these issues giving him more time to focus on adding new
features to an OpenGL application.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/KHR/debug.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.KHR.debug import *
from OpenGL.raw.GL.KHR.debug import _EXTENSION_NAME
def glInitDebugKHR():
    '''Report whether the running GL implementation offers KHR_debug.

    Queries the PyOpenGL extension registry for ``_EXTENSION_NAME`` and
    returns True when the extension is available, False otherwise.
    '''
    from OpenGL import extensions
    is_available = extensions.hasGLExtension(_EXTENSION_NAME)
    return is_available
# --- Autogenerated array-size wrapper configuration ------------------------
# Each wrapper marks pointer parameters whose required length depends on a
# separate count/length argument and therefore cannot be validated
# automatically (size None = accept any length).

# INPUT glDebugMessageControl.ids size not checked against count
glDebugMessageControl=wrapper.wrapper(glDebugMessageControl).setInputArraySize(
    'ids', None
)
# INPUT glDebugMessageInsert.buf size not checked against 'buf,length'
glDebugMessageInsert=wrapper.wrapper(glDebugMessageInsert).setInputArraySize(
    'buf', None
)
# INPUT glGetDebugMessageLog.lengths size not checked against count
# INPUT glGetDebugMessageLog.ids size not checked against count
# INPUT glGetDebugMessageLog.severities size not checked against count
# INPUT glGetDebugMessageLog.sources size not checked against count
# INPUT glGetDebugMessageLog.messageLog size not checked against bufSize
# INPUT glGetDebugMessageLog.types size not checked against count
glGetDebugMessageLog=wrapper.wrapper(glGetDebugMessageLog).setInputArraySize(
    'lengths', None
).setInputArraySize(
    'ids', None
).setInputArraySize(
    'severities', None
).setInputArraySize(
    'sources', None
).setInputArraySize(
    'messageLog', None
).setInputArraySize(
    'types', None
)
# INPUT glPushDebugGroup.message size not checked against 'message,length'
glPushDebugGroup=wrapper.wrapper(glPushDebugGroup).setInputArraySize(
    'message', None
)
# INPUT glObjectLabel.label size not checked against 'label,length'
glObjectLabel=wrapper.wrapper(glObjectLabel).setInputArraySize(
    'label', None
)
# INPUT glGetObjectLabel.label size not checked against bufSize
glGetObjectLabel=wrapper.wrapper(glGetObjectLabel).setInputArraySize(
    'length', 1
).setInputArraySize(
    'label', None
)
# INPUT glObjectPtrLabel.label size not checked against 'label,length'
glObjectPtrLabel=wrapper.wrapper(glObjectPtrLabel).setInputArraySize(
    'label', None
)
# INPUT glGetObjectPtrLabel.label size not checked against bufSize
glGetObjectPtrLabel=wrapper.wrapper(glGetObjectPtrLabel).setInputArraySize(
    'length', 1
).setInputArraySize(
    'label', None
)
# glGetPointerv writes a single pointer into 'params' (caller may pass one in).
glGetPointerv=wrapper.wrapper(glGetPointerv).setOutput(
    'params',size=(1,),orPassIn=True
)
# KHR-suffixed aliases of the entry points above (OpenGL ES flavour).
# INPUT glGetDebugMessageLogKHR.lengths size not checked against count
# INPUT glGetDebugMessageLogKHR.ids size not checked against count
# INPUT glGetDebugMessageLogKHR.severities size not checked against count
# INPUT glGetDebugMessageLogKHR.sources size not checked against count
# INPUT glGetDebugMessageLogKHR.messageLog size not checked against bufSize
# INPUT glGetDebugMessageLogKHR.types size not checked against count
glGetDebugMessageLogKHR=wrapper.wrapper(glGetDebugMessageLogKHR).setInputArraySize(
    'lengths', None
).setInputArraySize(
    'ids', None
).setInputArraySize(
    'severities', None
).setInputArraySize(
    'sources', None
).setInputArraySize(
    'messageLog', None
).setInputArraySize(
    'types', None
)
# INPUT glGetObjectLabelKHR.label size not checked against bufSize
glGetObjectLabelKHR=wrapper.wrapper(glGetObjectLabelKHR).setInputArraySize(
    'label', None
)
# INPUT glGetObjectPtrLabelKHR.label size not checked against bufSize
glGetObjectPtrLabelKHR=wrapper.wrapper(glGetObjectPtrLabelKHR).setInputArraySize(
    'length', 1
).setInputArraySize(
    'label', None
)
### END AUTOGENERATED SECTION | gpl-2.0 |
xenserver/auto-cert-kit | autocertkit/status.py | 1 | 3793 | #!/usr/bin/python
# Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Module for checking the status of the kit. This will be of most interest
when the kit has rebooted in order to change it's backend, allowing automated clients
to keep track of progress."""
from test_report import *
import utils
import models
import os
# Manifest file produced by a kit run; parsed by main() to report progress.
TEST_FILE = "test_run.conf"
# Expected runlevel for a fully booted multi-user system.
DEFAULT_RUN_LEVEL = 3
# NOTE(review): this module-level flag appears unused -- main() shadows it
# with a local variable; confirm before removing.
running = False
def get_process_strings():
    """Return the 'ps aux' output lines that mention the ACK CLI or runner."""
    listing = subprocess.Popen(
        ['ps', 'aux'], stdout=subprocess.PIPE).communicate()[0]
    return [line for line in listing.split('\n')
            if 'ack_cli.py' in line or 'test_runner.py' in line]
def check_for_process():
    """Return True if an ACK-related process other than this one is running.

    Returns None (falsy) when no other matching process is found.
    """
    my_pid = str(os.getpid())
    # Build a filtered list instead of calling list.remove() inside a
    # ``for line in process_strings`` loop: mutating a list while iterating
    # it skips the element following each removal.
    others = [line for line in get_process_strings() if my_pid not in line]
    if others:
        return True
def get_run_level():
    """Return the current system runlevel as an int (queried via /sbin/runlevel)."""
    out = subprocess.Popen(
        ['/sbin/runlevel'], stdout=subprocess.PIPE).communicate()[0]
    # Output looks like "N 3": previous runlevel, then current.
    previous, current = out.split()
    return int(current)
def main():
    """Print a one-line "<code>:<message>" status describing kit progress.

    Codes: 0=finished, 1=error/not running, 2=running, 3=rebooting,
    4=no manifest yet, 5=manifest unreadable. Exit status 1 signals an
    error state to automated callers.
    """
    running = False
    uptime_seconds = utils.os_uptime()
    # Check for manifest file
    if not os.path.exists(TEST_FILE):
        print "4:Manifest file has not been created. Have run the kit? (Has an error occured?)"
        sys.exit(0)
    # Check for the python process
    if check_for_process():
        running = True
    # Check the XML file to find out how many tests have been run
    # NOTE(review): bare except hides the real parse error; narrow if possible.
    try:
        ack_run = models.parse_xml(TEST_FILE)
    except:
        print "5:An error has occured reading. %s" % TEST_FILE
        sys.exit(1)
    # p/f/s/w/r = passed, failed, skipped, waiting, running test counts.
    p, f, s, w, r = ack_run.get_status()
    if w+r == 0:
        print "0:Finished (Passed:%d, Failed:%d, Skipped:%d)" % (p, f, s)
    elif not running and uptime_seconds <= 600 and r > 0:
        # Recently booted with tests still marked running: mid-reboot.
        print "3:Server rebooting... (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % (
            p, f, s, w, r)
    elif not running and uptime_seconds > 600:
        print "1:Process not running. An error has occurred. (Passed:%d, Failed:%d, Skipped: %d, Waiting:%d, Running:%d)" % (
            p, f, s, w, r)
        sys.exit(1)
    else:
        perc = float(p + f + s) / float(w + r + p + f + s) * 100
        print "2:Running - %d%% Complete (Passed:%d, Failed:%d, Skipped:%d, Waiting:%d, Running:%d)" % (
            perc, p, f, s, w, r)
if __name__ == "__main__":
main()
| bsd-2-clause |
mrquim/repository.mrquim | repo/plugin.video.poseidon/resources/lib/modules/dom_parser.py | 35 | 5326 | """
Based on Parsedom for XBMC plugins
Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from collections import namedtuple
# Lightweight result record: parsed tag attributes dict plus inner content.
DomMatch = namedtuple('DOMMatch', ['attrs', 'content'])
# Compiled-regex type, used to detect regex attribute values in parse_dom.
re_type = type(re.compile(''))
def __get_dom_content(html, name, match):
    """Return the inner content of the element opened by *match* in *html*.

    *match* is the full opening tag text; *name* is a fallback tag name used
    when the tag cannot be re-derived from *match*. Handles nested same-name
    tags by advancing the closing position past inner openings.
    """
    if match.endswith('/>'): return ''  # self-closing tag: no content

    # override tag name with tag from match if possible
    tag = re.match('<([^\s/>]+)', match)
    if tag: name = tag.group(1)

    start_str = '<%s' % name
    end_str = "</%s" % name

    # start/end tags without matching case cause issues
    start = html.find(match)
    end = html.find(end_str, start)
    pos = html.find(start_str, start + 1)

    # For each nested opening tag before the current end, push the end
    # position out to the next closing tag.
    while pos < end and pos != -1:  # Ignore too early </endstr> return
        tend = html.find(end_str, end + len(end_str))
        if tend != -1:
            end = tend
        pos = html.find(start_str, pos + 1)

    if start == -1 and end == -1:
        result = ''
    elif start > -1 and end > -1:
        result = html[start + len(match):end]
    elif end > -1:
        # No opening tag found: take everything up to the close.
        result = html[:end]
    elif start > -1:
        # No closing tag found: take everything after the open.
        result = html[start + len(match):]
    else:
        result = ''

    return result
def __get_dom_elements(item, name, attrs):
    """Return opening-tag strings of elements named *name* matching *attrs*.

    *attrs* maps attribute name -> expected value; a value may be a string,
    a list of strings (class-style set match), or a compiled regex. With
    multiple attrs, only tags matching all of them are returned (Python 2:
    uses dict.iteritems / basestring).
    """
    if not attrs:
        pattern = '(<%s(?:\s[^>]*>|/?>))' % name
        this_list = re.findall(pattern, item, re.M | re.S | re.I)
    else:
        last_list = None
        for key, value in attrs.iteritems():
            value_is_regex = isinstance(value, re_type)
            value_is_str = isinstance(value, basestring)
            # First try quoted attribute values: key="..." or key='...'
            pattern = '''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key)
            re_list = re.findall(pattern, item, re.M | re.S | re.I)
            if value_is_regex:
                this_list = [r[0] for r in re_list if re.match(value, r[2])]
            else:
                # Set comparison supports space-separated multi-value
                # attributes (e.g. class="a b").
                temp_value = [value] if value_is_str else value
                this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))]
            if not this_list:
                # Fall back to unquoted attribute values, but only when the
                # expected value contains no space (unquoted values cannot).
                has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value)
                if not has_space:
                    pattern = '''(<{tag}[^>]*\s{key}=((?:[^\s>]|/>)*)[^>]*>)'''.format(tag=name, key=key)
                    re_list = re.findall(pattern, item, re.M | re.S | re.I)
                    if value_is_regex:
                        this_list = [r[0] for r in re_list if re.match(value, r[1])]
                    else:
                        this_list = [r[0] for r in re_list if value == r[1]]
            # Intersect with matches from previously processed attributes.
            if last_list is None:
                last_list = this_list
            else:
                last_list = [item for item in this_list if item in last_list]
            this_list = last_list
    return this_list
def __get_attribs(element):
    """Parse an opening-tag string into a dict of lower-cased attr -> value.

    Handles both quoted (key="v" / key='v') and unquoted (key=v) values;
    attributes without a usable value are skipped.
    """
    attribs = {}
    matches = re.finditer('''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element)
    for found in matches:
        groups = found.groupdict()
        quoted = groups.get('value1')
        bare = groups.get('value2')
        value = quoted if quoted is not None else bare
        if value is None:
            continue
        key = groups['key'].lower().strip()
        attribs[key] = value
    return attribs
def parse_dom(html, name='', attrs=None, req=False, exclude_comments=False):
    """Return DomMatch results for elements named *name* in *html*.

    *html* may be a (unicode) string, a DomMatch, or a list of either;
    *attrs* filters by attribute values (see __get_dom_elements); *req*
    is an attribute name (or list of names) that must be present on a
    match. Returns '' on invalid arguments, else a list of DomMatch.
    (Python 2: relies on unicode / str.decode.)
    """
    if attrs is None: attrs = {}
    name = name.strip()
    if isinstance(html, unicode) or isinstance(html, DomMatch):
        html = [html]
    elif isinstance(html, str):
        try:
            html = [html.decode("utf-8")]  # Replace with chardet thingy
        except:
            try:
                html = [html.decode("utf-8", "replace")]
            except:
                html = [html]
    elif not isinstance(html, list):
        return ''

    if not name:
        return ''

    if not isinstance(attrs, dict):
        return ''

    if req:
        if not isinstance(req, list):
            req = [req]
        # Required attribute names are compared case-insensitively.
        req = set([key.lower() for key in req])

    all_results = []
    for item in html:
        if isinstance(item, DomMatch):
            item = item.content

        if exclude_comments:
            item = re.sub(re.compile('<!--.*?-->', re.DOTALL), '', item)

        results = []
        for element in __get_dom_elements(item, name, attrs):
            attribs = __get_attribs(element)
            if req and not req <= set(attribs.keys()): continue
            temp = __get_dom_content(item, name, element).strip()
            results.append(DomMatch(attribs, temp))
            # Advance past this element so repeated content is not re-matched.
            item = item[item.find(temp, item.find(element)):]
        all_results += results

    return all_results
| gpl-2.0 |
ykaneko/neutron | neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py | 4 | 10000 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Oleg Bondarev (obondarev@mirantis.com)
import contextlib
import mock
from oslo.config import cfg as config
from neutron.services.loadbalancer.drivers.haproxy import cfg
from neutron.tests import base
class TestHaproxyCfg(base.BaseTestCase):
    """Unit tests for the haproxy loadbalancer config rendering helpers."""

    def test_save_config(self):
        # Patch every section builder so save_config's only observable work
        # is joining the sections and writing the result file.
        with contextlib.nested(
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_global'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_defaults'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_frontend'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_backend'),
            mock.patch('neutron.agent.linux.utils.replace_file')
        ) as (b_g, b_d, b_f, b_b, replace):
            test_config = ['globals', 'defaults', 'frontend', 'backend']
            b_g.return_value = [test_config[0]]
            b_d.return_value = [test_config[1]]
            b_f.return_value = [test_config[2]]
            b_b.return_value = [test_config[3]]

            cfg.save_config('test_path', mock.Mock())
            replace.assert_called_once_with('test_path',
                                            '\n'.join(test_config))

    def test_build_global(self):
        if not hasattr(config.CONF, 'user_group'):
            config.CONF.register_opt(config.StrOpt('user_group'))
        config.CONF.set_override('user_group', 'test_group')
        expected_opts = ['global',
                         '\tdaemon',
                         '\tuser nobody',
                         '\tgroup test_group',
                         '\tlog /dev/log local0',
                         '\tlog /dev/log local1 notice',
                         '\tstats socket test_path mode 0666 level user']
        opts = cfg._build_global(mock.Mock(), 'test_path')
        self.assertEqual(expected_opts, list(opts))
        config.CONF.reset()

    def test_build_defaults(self):
        expected_opts = ['defaults',
                         '\tlog global',
                         '\tretries 3',
                         '\toption redispatch',
                         '\ttimeout connect 5000',
                         '\ttimeout client 50000',
                         '\ttimeout server 50000']
        opts = cfg._build_defaults(mock.Mock())
        self.assertEqual(expected_opts, list(opts))
        config.CONF.reset()

    def test_build_frontend(self):
        test_config = {'vip': {'id': 'vip_id',
                               'protocol': 'HTTP',
                               'port': {'fixed_ips': [
                                   {'ip_address': '10.0.0.2'}]
                               },
                               'protocol_port': 80,
                               'connection_limit': 2000,
                               },
                       'pool': {'id': 'pool_id'}}
        expected_opts = ['frontend vip_id',
                         '\toption tcplog',
                         '\tbind 10.0.0.2:80',
                         '\tmode http',
                         '\tdefault_backend pool_id',
                         '\tmaxconn 2000',
                         '\toption forwardfor']
        opts = cfg._build_frontend(test_config)
        self.assertEqual(expected_opts, list(opts))

        # A connection_limit of -1 means "unlimited": no maxconn line.
        test_config['vip']['connection_limit'] = -1
        expected_opts.remove('\tmaxconn 2000')
        opts = cfg._build_frontend(test_config)
        self.assertEqual(expected_opts, list(opts))

    def test_build_backend(self):
        test_config = {'pool': {'id': 'pool_id',
                                'protocol': 'HTTP',
                                'lb_method': 'ROUND_ROBIN'},
                       'members': [{'status': 'ACTIVE',
                                    'admin_state_up': True,
                                    'id': 'member1_id',
                                    'address': '10.0.0.3',
                                    'protocol_port': 80,
                                    'weight': 1}],
                       'healthmonitors': [{'status': 'ACTIVE',
                                           'admin_state_up': True,
                                           'delay': 3,
                                           'max_retries': 4,
                                           'timeout': 2,
                                           'type': 'TCP'}],
                       'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        expected_opts = ['backend pool_id',
                         '\tmode http',
                         '\tbalance roundrobin',
                         '\toption forwardfor',
                         '\ttimeout check 2s',
                         '\tcookie SRV insert indirect nocache',
                         '\tserver member1_id 10.0.0.3:80 weight 1 '
                         'check inter 3s fall 4 cookie 0']
        opts = cfg._build_backend(test_config)
        self.assertEqual(expected_opts, list(opts))

    def test_get_server_health_option(self):
        test_config = {'healthmonitors': [{'status': 'ERROR',
                                           'admin_state_up': False,
                                           'delay': 3,
                                           'max_retries': 4,
                                           'timeout': 2,
                                           'type': 'TCP',
                                           'http_method': 'GET',
                                           'url_path': '/',
                                           'expected_codes': '200'}]}
        # Monitors in ERROR state or administratively down yield no options.
        self.assertEqual(('', []), cfg._get_server_health_option(test_config))

        test_config['healthmonitors'][0]['status'] = 'ACTIVE'
        self.assertEqual(('', []), cfg._get_server_health_option(test_config))

        test_config['healthmonitors'][0]['admin_state_up'] = True
        expected = (' check inter 3s fall 4', ['timeout check 2s'])
        self.assertEqual(expected, cfg._get_server_health_option(test_config))

        test_config['healthmonitors'][0]['type'] = 'HTTPS'
        expected = (' check inter 3s fall 4',
                    ['timeout check 2s',
                     'option httpchk GET /',
                     'http-check expect rstatus 200',
                     'option ssl-hello-chk'])
        self.assertEqual(expected, cfg._get_server_health_option(test_config))

    def test_has_http_cookie_persistence(self):
        # NOTE(review): the local name 'config' shadows the module-level
        # 'from oslo.config import cfg as config' import within these tests.
        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        self.assertTrue(cfg._has_http_cookie_persistence(config))

        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
        self.assertFalse(cfg._has_http_cookie_persistence(config))

        config = {'vip': {'session_persistence': {}}}
        self.assertFalse(cfg._has_http_cookie_persistence(config))

    def test_get_session_persistence(self):
        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['stick-table type ip size 10k', 'stick on src'])

        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['cookie SRV insert indirect nocache'])

        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE',
                                                  'cookie_name': 'test'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['appsession test len 56 timeout 3h'])

        # APP_COOKIE without a cookie_name and unknown types yield nothing.
        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}}
        self.assertEqual(cfg._get_session_persistence(config), [])

        config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}}
        self.assertEqual(cfg._get_session_persistence(config), [])

    def test_expand_expected_codes(self):
        exp_codes = ''
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set([]))

        exp_codes = '200'
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200']))

        exp_codes = '200, 201'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201']))

        exp_codes = '200, 201,202'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202']))

        exp_codes = '200-202'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202']))

        exp_codes = '200-202, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '205']))

        exp_codes = '200, 201-203'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '203']))

        exp_codes = '200, 201-203, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '203', '205']))

        # An inverted range (201-200) contributes no codes.
        exp_codes = '201-200, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['205']))
| apache-2.0 |
Plain-Devices/android_kernel_lge_msm8974 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-thread transient state while a FUTEX_WAIT is in flight.
thread_thislock = {}   # tid -> futex address currently being waited on
thread_blocktime = {}  # tid -> timestamp (ns) when the wait began

lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
# Removed a redundant duplicate 'process_names = {}' assignment that
# appeared immediately above these definitions.
process_names = {}  # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record the start of a FUTEX_WAIT so its duration can be measured."""
    # Mask off option flags (e.g. PRIVATE/CLOCK_REALTIME) to get the command.
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return  # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """Accumulate elapsed block time when a FUTEX_WAIT returns."""
    # 'in' instead of the deprecated dict.has_key() (removed in Python 3,
    # equivalent in Python 2).
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        # Clear transient state; the exit without a matching enter is ignored.
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Announce how to stop the measurement (the parenthesized print form
    behaves identically on Python 2 and 3)."""
    print("Press control+C to stop and show the summary")
def trace_end():
    """Dump per-(thread, lock) contention statistics accumulated in lock_waits."""
    # NOTE(review): the locals 'min'/'max' shadow the builtins; harmless
    # here but worth renaming.
    for (tid, lock) in lock_waits:
        min, max, avg, count = lock_waits[tid, lock]
        print "%s[%d] lock %x contended %d times, %d avg ns" % \
              (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
ApsOps/zulip | zerver/migrations/0002_django_1_8.py | 125 | 2229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.auth.models
class Migration(migrations.Migration):
    """Django 1.8 compatibility migration (auto-generated; do not edit by
    hand -- field alterations here mostly widen EmailField max_length to 254
    and adjust auto_now/M2M definitions)."""

    dependencies = [
        ('zerver', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='userprofile',
            managers=[
                (b'objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.AlterField(
            model_name='appledevicetoken',
            name='last_updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='mituser',
            name='email',
            field=models.EmailField(unique=True, max_length=254),
        ),
        migrations.AlterField(
            model_name='preregistrationuser',
            name='email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='preregistrationuser',
            name='streams',
            field=models.ManyToManyField(to='zerver.Stream'),
        ),
        migrations.AlterField(
            model_name='pushdevicetoken',
            name='last_updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='referral',
            name='email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='email',
            field=models.EmailField(unique=True, max_length=254, db_index=True),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='groups',
            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='last_login',
            field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
        ),
    ]
| apache-2.0 |
4pr0n/rip | sites/site_instagrin.py | 2 | 2550 | #!/usr/bin/python
from basesite import basesite
from time import sleep
"""
Downloads instagram albums
"""
class instagram(basesite):
    """Ripper for Instagram albums, scraped via the instagr.in mirror."""

    """ Parse/strip URL to acceptable format """
    def sanitize_url(self, url):
        if 'instagram.com/' in url:
            # Legit
            pass
        elif 'web.stagram.com/n/' in url:
            # Convert to instagram
            user = url[url.find('.com/n/')+len('.com/n/'):]
            if '/' in user: user = user[:user.find('/')]
            url = 'http://instagram.com/%s' % user
        else:
            raise Exception('')
        # Rewrite to the instagr.in mirror and strip query/fragment/slashes.
        url = url.replace('instagram.com/', 'instagr.in/u/')
        if '?' in url: url = url[:url.find('?')]
        if '#' in url: url = url[:url.find('#')]
        while url.endswith('/'): url = url[:-1]
        return url

    """ Discover directory path based on URL """
    def get_dir(self, url):
        user = url[url.rfind('/')+1:]
        return 'instagram_%s' % user

    def download(self):
        """Scrape the user's photo pages, downloading every image/video
        found, paging via instagr.in's load-more endpoint."""
        self.init_dir()
        url = self.url
        index = 0
        r = self.web.get(url)
        if not '"pod-title">Photos' in r:
            self.wait_for_threads()
            raise Exception('could not find total photos at %s' % url)
        # Parse the displayed photo count; "1.2k"-style counts are
        # approximated, unparseable counts fall back to '?'.
        chunk = self.web.between(r, '"pod-title">Photos</div>', 'Followers')[0]
        value = self.web.between(chunk, 'value">', '<')[0].replace(',', '')
        if 'k' in value:
            value = value[:value.find('k')]
            total = int(float(value) * 1000.0)
        elif value.isdigit():
            total = int(self.web.between(chunk, 'value">', '<')[0])
        else:
            total = '?'
        while True:
            if not '<div class="image">' in r:
                self.log('could not find image at %s' % url)
                break
            links = self.web.between(r, '<div class="image">', '</div>')
            for link in links:
                index += 1
                media_url = self.web.between(link, 'src="', '"')[0]
                # Request higher-quality variants of videos and photos.
                media_url = media_url.replace('_102.mp4', '_101.mp4')
                media_url = media_url.replace('_6.jpg', '_7.jpg')
                self.download_image(media_url, index, total=total)
                sleep(0.2)
                if self.hit_image_limit(): break
            if self.hit_image_limit(): break
            if not '<div class="next_url">' in r: break
            next_url = self.web.between(r, '<div class="next_url">', '</div>')[0]
            if next_url.strip() == '' or total != '?' and index >= total: break
            d = {
                'next_url' : next_url,
                'request' : next_url
            }
            # NOTE(review): this headers dict is built but never passed to
            # the request below -- confirm whether it is needed.
            headers = {
                'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
                'X-Requested-With' : 'XMLHttpReqeust',
                'Referer' : self.url,
                'Accept' : '*/*',
                'Accept-Language' : 'en-US,en;q=0.5',
                'DNT' : '1'
            }
            url = 'http://instagr.in/action/load-more'
            r = self.web.oldpost(url, postdict=d)
            sleep(1)
        self.wait_for_threads()
| gpl-2.0 |
ehashman/oh-mainline | vendor/packages/Django/tests/regressiontests/views/tests/shortcuts.py | 44 | 2884 | from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
    TEMPLATE_CONTEXT_PROCESSORS=('django.core.context_processors.static',),
    STATIC_URL='/path/to/static/media/',
)
class ShortcutTests(TestCase):
    """Tests for the render/render_to_response shortcuts (content, content
    type, status code and current_app handling)."""

    urls = 'regressiontests.views.generic_urls'

    def test_render_to_response(self):
        response = self.client.get('/shortcuts/render_to_response/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')

    def test_render_to_response_with_request_context(self):
        # With a RequestContext the static context processor runs, so
        # STATIC_URL appears in the rendered output.
        response = self.client.get('/shortcuts/render_to_response/request_context/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../path/to/static/media/\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')

    def test_render_to_response_with_content_type(self):
        response = self.client.get('/shortcuts/render_to_response/content_type/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render(self):
        response = self.client.get('/shortcuts/render/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../path/to/static/media/\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        self.assertEqual(response.context.current_app, None)

    def test_render_with_base_context(self):
        response = self.client.get('/shortcuts/render/base_context/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')

    def test_render_with_content_type(self):
        response = self.client.get('/shortcuts/render/content_type/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../path/to/static/media/\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render_with_status(self):
        response = self.client.get('/shortcuts/render/status/')
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.content, b'FOO.BAR../path/to/static/media/\n')

    def test_render_with_current_app(self):
        response = self.client.get('/shortcuts/render/current_app/')
        self.assertEqual(response.context.current_app, "foobar_app")

    def test_render_with_current_app_conflict(self):
        # Passing current_app when the context already has one must raise.
        self.assertRaises(ValueError, self.client.get, '/shortcuts/render/current_app_conflict/')
| agpl-3.0 |
thiagodasilva/swift | test/unit/common/test_constraints.py | 7 | 27217 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import tempfile
import time
from six.moves import range
from test import safe_repr
from test.unit import MockTrue
from swift.common.swob import HTTPBadRequest, Request, HTTPException
from swift.common.http import HTTP_REQUEST_ENTITY_TOO_LARGE, \
HTTP_BAD_REQUEST, HTTP_LENGTH_REQUIRED, HTTP_NOT_IMPLEMENTED
from swift.common import constraints, utils
class TestConstraints(unittest.TestCase):
    def assertIn(self, member, container, msg=None):
        """Copied from 2.7"""
        # Backport for Python 2.6 test environments; delegates failure
        # message formatting to unittest's _formatMessage helper.
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))
def test_check_metadata_empty(self):
headers = {}
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'), None)
def test_check_metadata_good(self):
headers = {'X-Object-Meta-Name': 'Value'}
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'), None)
def test_check_metadata_empty_name(self):
headers = {'X-Object-Meta-': 'Value'}
self.assertTrue(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'), HTTPBadRequest)
def test_check_metadata_name_length(self):
name = 'a' * constraints.MAX_META_NAME_LENGTH
headers = {'X-Object-Meta-%s' % name: 'v'}
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'), None)
name = 'a' * (constraints.MAX_META_NAME_LENGTH + 1)
headers = {'X-Object-Meta-%s' % name: 'v'}
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
self.assertIn(
('X-Object-Meta-%s' % name).lower(),
constraints.check_metadata(Request.blank(
'/', headers=headers), 'object').body.lower())
def test_check_metadata_value_length(self):
value = 'a' * constraints.MAX_META_VALUE_LENGTH
headers = {'X-Object-Meta-Name': value}
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'), None)
value = 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)
headers = {'X-Object-Meta-Name': value}
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
self.assertIn(
'x-object-meta-name',
constraints.check_metadata(Request.blank(
'/', headers=headers),
'object').body.lower())
self.assertIn(
str(constraints.MAX_META_VALUE_LENGTH),
constraints.check_metadata(Request.blank(
'/', headers=headers),
'object').body)
def test_check_metadata_count(self):
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'), None)
headers['X-Object-Meta-Too-Many'] = 'v'
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
def test_check_metadata_size(self):
headers = {}
size = 0
chunk = constraints.MAX_META_NAME_LENGTH + \
constraints.MAX_META_VALUE_LENGTH
x = 0
while size + chunk < constraints.MAX_META_OVERALL_SIZE:
headers['X-Object-Meta-%04d%s' %
(x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
'v' * constraints.MAX_META_VALUE_LENGTH
size += chunk
x += 1
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'), None)
# add two more headers in case adding just one falls exactly on the
# limit (eg one header adds 1024 and the limit is 2048)
headers['X-Object-Meta-%04d%s' %
(x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
'v' * constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d%s' %
(x + 1, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
'v' * constraints.MAX_META_VALUE_LENGTH
self.assertEqual(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
def test_check_object_creation_content_length(self):
headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
'Content-Type': 'text/plain'}
self.assertEqual(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name'), None)
headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'text/plain'}
self.assertEqual(constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name').status_int,
HTTP_REQUEST_ENTITY_TOO_LARGE)
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain'}
self.assertEqual(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name'), None)
headers = {'Transfer-Encoding': 'gzip',
'Content-Type': 'text/plain'}
self.assertEqual(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name').status_int,
HTTP_BAD_REQUEST)
headers = {'Content-Type': 'text/plain'}
self.assertEqual(constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name').status_int,
HTTP_LENGTH_REQUIRED)
headers = {'Content-Length': 'abc',
'Content-Type': 'text/plain'}
self.assertEqual(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name').status_int,
HTTP_BAD_REQUEST)
headers = {'Transfer-Encoding': 'gzip,chunked',
'Content-Type': 'text/plain'}
self.assertEqual(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name').status_int,
HTTP_NOT_IMPLEMENTED)
def test_check_object_creation_copy(self):
    """X-Copy-From requires an empty (or chunked) body and an explicit
    content length."""
    def validate(headers):
        return constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')

    # A zero-length copy source is fine.
    self.assertEqual(validate({'Content-Length': '0',
                               'X-Copy-From': 'c/o2',
                               'Content-Type': 'text/plain'}), None)
    # A non-empty body alongside X-Copy-From is a bad request.
    self.assertEqual(validate({'Content-Length': '1',
                               'X-Copy-From': 'c/o2',
                               'Content-Type': 'text/plain'}).status_int,
                     HTTP_BAD_REQUEST)
    # Chunked transfer encoding satisfies the length requirement.
    self.assertEqual(validate({'Transfer-Encoding': 'chunked',
                               'X-Copy-From': 'c/o2',
                               'Content-Type': 'text/plain'}), None)
    # a content-length header is always required
    self.assertEqual(validate({'X-Copy-From': 'c/o2',
                               'Content-Type': 'text/plain'}).status_int,
                     HTTP_LENGTH_REQUIRED)
def test_check_object_creation_name_length(self):
    """Object names up to MAX_OBJECT_NAME_LENGTH pass; longer ones fail."""
    headers = {'Transfer-Encoding': 'chunked',
               'Content-Type': 'text/plain'}

    def validate(name):
        return constraints.check_object_creation(
            Request.blank('/', headers=headers), name)

    self.assertEqual(validate('o' * constraints.MAX_OBJECT_NAME_LENGTH), None)
    self.assertEqual(
        validate('o' * (constraints.MAX_OBJECT_NAME_LENGTH + 1)).status_int,
        HTTP_BAD_REQUEST)
def test_check_object_creation_content_type(self):
    """A Content-Type header is mandatory for object creation."""
    # With a Content-Type the request validates cleanly.
    resp = constraints.check_object_creation(
        Request.blank('/', headers={'Transfer-Encoding': 'chunked',
                                    'Content-Type': 'text/plain'}),
        'object_name')
    self.assertEqual(resp, None)
    # Without one, the request is rejected.
    resp = constraints.check_object_creation(
        Request.blank('/', headers={'Transfer-Encoding': 'chunked'}),
        'object_name')
    self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
def test_check_object_creation_bad_content_type(self):
    """A Content-Type value that is not valid text is rejected with 400
    and the error body names the offending header."""
    req = Request.blank('/', headers={'Transfer-Encoding': 'chunked',
                                      'Content-Type': '\xff\xff'})
    resp = constraints.check_object_creation(req, 'object_name')
    self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
    self.assertTrue('Content-Type' in resp.body)
def test_check_object_creation_bad_delete_headers(self):
    """Invalid X-Delete-After / X-Delete-At values are rejected with 400
    and a descriptive message."""
    # Non-numeric X-Delete-After.
    resp = constraints.check_object_creation(
        Request.blank('/', headers={'Transfer-Encoding': 'chunked',
                                    'Content-Type': 'text/plain',
                                    'X-Delete-After': 'abc'}),
        'object_name')
    self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
    self.assertTrue('Non-integer X-Delete-After' in resp.body)
    # X-Delete-At pointing into the past.
    past = str(int(time.time() - 60))
    resp = constraints.check_object_creation(
        Request.blank('/', headers={'Transfer-Encoding': 'chunked',
                                    'Content-Type': 'text/plain',
                                    'X-Delete-At': past}),
        'object_name')
    self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
    self.assertTrue('X-Delete-At in past' in resp.body)
def test_check_delete_headers(self):
    """check_delete_headers() accepts valid expiry headers and raises
    HTTPException (400) with a descriptive body for invalid ones."""
    def expect_bad_request(headers, fragment):
        # The helper must raise; anything else is a test failure.
        try:
            constraints.check_delete_headers(
                Request.blank('/', headers=headers))
        except HTTPException as e:
            self.assertEqual(e.status_int, HTTP_BAD_REQUEST)
            self.assertTrue(fragment in e.body)
        else:
            self.fail("Should have failed with HTTPBadRequest")

    # A valid X-Delete-After is converted into an x-delete-at header.
    resp = constraints.check_delete_headers(
        Request.blank('/', headers={'X-Delete-After': '60'}))
    self.assertTrue(isinstance(resp, Request))
    self.assertTrue('x-delete-at' in resp.headers)

    expect_bad_request({'X-Delete-After': 'abc'},
                       'Non-integer X-Delete-After')
    expect_bad_request({'X-Delete-After': '60.1'},
                       'Non-integer X-Delete-After')
    expect_bad_request({'X-Delete-After': '-1'},
                       'X-Delete-After in past')

    # A valid future X-Delete-At is passed through unchanged.
    t = str(int(time.time() + 100))
    resp = constraints.check_delete_headers(
        Request.blank('/', headers={'X-Delete-At': t}))
    self.assertTrue(isinstance(resp, Request))
    self.assertTrue('x-delete-at' in resp.headers)
    self.assertEqual(resp.headers.get('X-Delete-At'), t)

    expect_bad_request({'X-Delete-At': 'abc'},
                       'Non-integer X-Delete-At')
    expect_bad_request({'X-Delete-At': str(int(time.time() + 100)) + '.1'},
                       'Non-integer X-Delete-At')
    expect_bad_request({'X-Delete-At': str(int(time.time()))},
                       'X-Delete-At in past')
    expect_bad_request({'X-Delete-At': str(int(time.time() - 1))},
                       'X-Delete-At in past')
def test_check_delete_headers_sets_delete_at(self):
    """check_delete_headers() normalizes expiry headers onto X-Delete-At."""
    t = time.time() + 1000

    # An explicit X-Delete-At is passed through untouched.
    req = Request.blank('/', headers={'Content-Length': '0',
                                      'Content-Type': 'text/plain',
                                      'X-Delete-At': str(int(t))})
    constraints.check_delete_headers(req)
    self.assertTrue('X-Delete-At' in req.headers)
    self.assertEqual(req.headers['X-Delete-At'], str(int(t)))

    # X-Delete-After is converted to now + delta.
    expected = str(int(t) + 42)
    req = Request.blank('/', headers={'Content-Length': '0',
                                      'Content-Type': 'text/plain',
                                      'X-Delete-After': '42'})
    with mock.patch('time.time', lambda: t):
        constraints.check_delete_headers(req)
    self.assertTrue('X-Delete-At' in req.headers)
    self.assertEqual(req.headers['X-Delete-At'], expected)

    # When both are given, X-Delete-After takes precedence, whether the
    # explicit X-Delete-At is earlier or later than the computed value.
    for delete_at in (str(int(t) + 40), str(int(t) + 44)):
        req = Request.blank('/', headers={'Content-Length': '0',
                                          'Content-Type': 'text/plain',
                                          'X-Delete-After': '42',
                                          'X-Delete-At': delete_at})
        with mock.patch('time.time', lambda: t):
            constraints.check_delete_headers(req)
        self.assertTrue('X-Delete-At' in req.headers)
        self.assertEqual(req.headers['X-Delete-At'], expected)
def test_check_dir(self):
    # A nonexistent path is rejected.
    self.assertFalse(constraints.check_dir('', ''))
    # When os.path.isdir reports True, check_dir() passes.
    with mock.patch("os.path.isdir", MockTrue()):
        self.assertTrue(constraints.check_dir('/srv', 'foo/bar'))
def test_check_mount(self):
    """check_mount() accepts only safe device names on mounted paths."""
    self.assertFalse(constraints.check_mount('', ''))
    with mock.patch("swift.common.utils.ismount", MockTrue()):
        # Simple names, hyphenated names and UUIDs are acceptable.
        for device in ('1', 'foo-bar',
                       '003ed03c-242a-4b2f-bee9-395f801d1699'):
            self.assertTrue(constraints.check_mount('/srv', device))
        # Whitespace and path/query metacharacters are not.
        for device in ('foo bar', 'foo/bar', 'foo?bar'):
            self.assertFalse(constraints.check_mount('/srv', device))
def test_check_float(self):
    # An empty string cannot be parsed as a float.
    self.assertFalse(constraints.check_float(''))
    # '0' is a parseable float value.
    self.assertTrue(constraints.check_float('0'))
def test_valid_timestamp(self):
    """valid_timestamp() raises without a parseable X-Timestamp header and
    returns the parsed Timestamp for both internal and normal forms."""
    self.assertRaises(HTTPException,
                      constraints.valid_timestamp,
                      Request.blank('/'))
    self.assertRaises(HTTPException,
                      constraints.valid_timestamp,
                      Request.blank('/', headers={'X-Timestamp': 'asdf'}))
    timestamp = utils.Timestamp(time.time())
    # Both string representations of the timestamp parse back equal.
    for value in (timestamp.internal, timestamp.normal):
        req = Request.blank('/', headers={'X-Timestamp': value})
        self.assertEqual(timestamp, constraints.valid_timestamp(req))
def test_check_utf8(self):
    """check_utf8() rejects None, empty strings, invalid byte sequences and
    NUL-bearing values; it accepts ASCII, unicode and valid UTF-8 bytes."""
    unicode_sample = u'\uc77c\uc601'
    valid_utf8_str = unicode_sample.encode('utf-8')
    invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
    unicode_with_null = u'abc\u0000def'
    utf8_with_null = unicode_with_null.encode('utf-8')

    rejected = (None, '', invalid_utf8_str, unicode_with_null,
                utf8_with_null)
    accepted = ('this is ascii and utf-8, too', unicode_sample,
                valid_utf8_str)
    for argument in rejected:
        self.assertFalse(constraints.check_utf8(argument))
    for argument in accepted:
        self.assertTrue(constraints.check_utf8(argument))
def test_check_utf8_non_canonical(self):
    # Surrogate-pair byte sequences (CESU-8 style) are not valid UTF-8
    # and must be rejected.
    self.assertFalse(constraints.check_utf8('\xed\xa0\xbc\xed\xbc\xb8'))
    self.assertFalse(constraints.check_utf8('\xed\xa0\xbd\xed\xb9\x88'))
def test_check_utf8_lone_surrogates(self):
    # Encoded lone surrogate halves are invalid UTF-8.
    self.assertFalse(constraints.check_utf8('\xed\xa0\xbc'))
    self.assertFalse(constraints.check_utf8('\xed\xb9\x88'))
def test_validate_bad_meta(self):
    """An oversize metadata header yields a 400 whose body names the
    offending header."""
    req = Request.blank(
        '/v/a/c/o',
        headers={'x-object-meta-hello':
                 'ab' * constraints.MAX_HEADER_SIZE})
    resp = constraints.check_metadata(req, 'object')
    self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
    self.assertIn('x-object-meta-hello', resp.body.lower())
def test_validate_constraints(self):
    """Sanity-check the relationships between the constraint limits."""
    c = constraints
    # Both per-item limits must fit under the overall and header caps.
    for limit in (c.MAX_META_NAME_LENGTH, c.MAX_META_VALUE_LENGTH):
        self.assertTrue(c.MAX_META_OVERALL_SIZE > limit)
        self.assertTrue(c.MAX_HEADER_SIZE > limit)
def test_validate_copy_from(self):
    """check_copy_from_header() splits container/object, allowing slashes
    in the object name and an optional leading slash."""
    cases = (('c/o2', 'c', 'o2'),
             ('c/subdir/o2', 'c', 'subdir/o2'),
             ('/c/o2', 'c', 'o2'))
    for header, container, obj in cases:
        req = Request.blank('/v/a/c/o',
                            headers={'x-copy-from': header})
        src_cont, src_obj = constraints.check_copy_from_header(req)
        self.assertEqual(src_cont, container)
        self.assertEqual(src_obj, obj)
def test_validate_bad_copy_from(self):
    # A source lacking a container/object separator cannot be parsed
    # and must raise.
    req = Request.blank(
        '/v/a/c/o',
        headers={'x-copy-from': 'bad_object'})
    self.assertRaises(HTTPException,
                      constraints.check_copy_from_header, req)
def test_validate_destination(self):
    """check_destination_header() splits container/object like the
    copy-from parser, accepting slashes in the object name and an
    optional leading slash."""
    cases = (('c/o2', 'c', 'o2'),
             ('c/subdir/o2', 'c', 'subdir/o2'),
             ('/c/o2', 'c', 'o2'))
    for header, container, obj in cases:
        req = Request.blank('/v/a/c/o',
                            headers={'destination': header})
        src_cont, src_obj = constraints.check_destination_header(req)
        self.assertEqual(src_cont, container)
        self.assertEqual(src_obj, obj)
def test_validate_bad_destination(self):
    # A destination lacking a container/object separator must raise.
    req = Request.blank(
        '/v/a/c/o',
        headers={'destination': 'bad_object'})
    self.assertRaises(HTTPException,
                      constraints.check_destination_header, req)
def test_check_account_format(self):
    """Account names containing slashes, or empty names, raise."""
    for account in ('account/with/slashes', ''):
        req = Request.blank(
            '/v/a/c/o',
            headers={'X-Copy-From-Account': account})
        self.assertRaises(HTTPException,
                          constraints.check_account_format,
                          req, req.headers['X-Copy-From-Account'])
def test_check_container_format(self):
    """Container names containing slashes, or empty names, are rejected
    with a message explaining the container-name restriction."""
    for versions_location in ('container/with/slashes', ''):
        req = Request.blank(
            '/v/a/c/o', headers={
                'X-Versions-Location': versions_location})
        try:
            constraints.check_container_format(
                req, req.headers['X-Versions-Location'])
        except HTTPException as e:
            self.assertTrue(e.body.startswith('Container name cannot'))
        else:
            self.fail('check_container_format did not raise error for %r' %
                      req.headers['X-Versions-Location'])
class TestConstraintsConfig(unittest.TestCase):
    """Tests for the DEFAULT / OVERRIDE / EFFECTIVE constraint maps and
    their reloading from the [swift-constraints] section of swift.conf."""

    def test_default_constraints(self):
        for key in constraints.DEFAULT_CONSTRAINTS:
            # if there is local over-rides in swift.conf we just continue on
            if key in constraints.OVERRIDE_CONSTRAINTS:
                continue
            # module level attrs (that aren't in OVERRIDE) should have the
            # same value as the DEFAULT map
            module_level_value = getattr(constraints, key.upper())
            self.assertEqual(constraints.DEFAULT_CONSTRAINTS[key],
                             module_level_value)

    def test_effective_constraints(self):
        for key in constraints.DEFAULT_CONSTRAINTS:
            # module level attrs should always mirror the same value as the
            # EFFECTIVE map
            module_level_value = getattr(constraints, key.upper())
            self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS[key],
                             module_level_value)
            # if there are local over-rides in swift.conf those should be
            # reflected in the EFFECTIVE, otherwise we expect the DEFAULTs
            self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS[key],
                             constraints.OVERRIDE_CONSTRAINTS.get(
                                 key, constraints.DEFAULT_CONSTRAINTS[key]))

    def test_override_constraints(self):
        # Write a temporary swift.conf overriding every constraint to 1,
        # reload, and verify all three views agree; always reload the real
        # configuration afterwards so other tests are unaffected.
        try:
            with tempfile.NamedTemporaryFile() as f:
                f.write('[swift-constraints]\n')
                # set everything to 1
                for key in constraints.DEFAULT_CONSTRAINTS:
                    f.write('%s = 1\n' % key)
                f.flush()
                with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
                    constraints.reload_constraints()
                for key in constraints.DEFAULT_CONSTRAINTS:
                    # module level attrs should all be 1
                    module_level_value = getattr(constraints, key.upper())
                    self.assertEqual(module_level_value, 1)
                    # all keys should be in OVERRIDE
                    self.assertEqual(constraints.OVERRIDE_CONSTRAINTS[key],
                                     module_level_value)
                    # module level attrs should always mirror the same value as
                    # the EFFECTIVE map
                    self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS[key],
                                     module_level_value)
        finally:
            constraints.reload_constraints()

    def test_reload_reset(self):
        # After the override file disappears, a reload must fall back to
        # the defaults and clear the OVERRIDE map.
        try:
            with tempfile.NamedTemporaryFile() as f:
                f.write('[swift-constraints]\n')
                # set everything to 1
                for key in constraints.DEFAULT_CONSTRAINTS:
                    f.write('%s = 1\n' % key)
                f.flush()
                with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
                    constraints.reload_constraints()
                self.assertTrue(constraints.SWIFT_CONSTRAINTS_LOADED)
                self.assertEqual(sorted(constraints.DEFAULT_CONSTRAINTS.keys()),
                                 sorted(constraints.OVERRIDE_CONSTRAINTS.keys()))
            # file is now deleted...
            with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
                constraints.reload_constraints()
            # no constraints have been loaded from non-existent swift.conf
            self.assertFalse(constraints.SWIFT_CONSTRAINTS_LOADED)
            # no constraints are in OVERRIDE
            self.assertEqual([], constraints.OVERRIDE_CONSTRAINTS.keys())
            # the EFFECTIVE constraints mirror DEFAULT
            self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS,
                             constraints.DEFAULT_CONSTRAINTS)
        finally:
            constraints.reload_constraints()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
metwit/django-fulmine | fulmine/forms.py | 1 | 2406 | from django import forms
from django.core.exceptions import ValidationError
from fulmine.models import parse_scope
class SeparatedValuesField(forms.CharField):
    """CharField whose cleaned value is a list, produced by splitting the
    raw input on a configurable separator (a single space by default)."""

    def __init__(self, *args, **kwargs):
        # Pop our extra option before CharField sees the kwargs.
        self.separator = kwargs.pop('separator', ' ')
        super(SeparatedValuesField, self).__init__(*args, **kwargs)

    def clean(self, value):
        # Empty input cleans to an empty list rather than None or ''.
        return value.split(self.separator) if value else []
class AuthorizationForm(forms.Form):
    """Validates the parameters of an authorization request (presumably an
    OAuth2-style endpoint, given response_type of 'code'/'token' — confirm
    against the views using this form)."""
    response_type = forms.ChoiceField(
        choices=[('code', 'code'), ('token', 'token')])
    client_id = forms.CharField()
    redirect_uri = forms.CharField(required=False)
    scope = SeparatedValuesField(required=False)
    state = forms.CharField(required=False)

    def clean_scope(self):
        # Convert the space-separated scope list into the parsed-scope
        # representation used by fulmine.models.
        scope = self.cleaned_data['scope']
        return parse_scope(scope)
def clean_scope(form):
    """Module-level clean_scope installed as a method on generated token
    forms; parses the already-split scope list via parse_scope()."""
    return parse_scope(form.cleaned_data['scope'])
def make_token_form(grant_type, required_fields=None, optional_fields=None,
                    django_fields=None):
    """Dynamically build a django Form subclass for a token request.

    :param grant_type: string used to name the generated class
        ('<grant_type>TokenForm').
    :param required_fields: field names that must be present.
    :param optional_fields: field names that may be present.
    :param django_fields: mapping of field name to an explicit django form
        field instance, overriding the generated ones.
    :return: a new ``forms.Form`` subclass with a ``clean_scope`` method.
    """
    # Normalize to fresh containers so no caller-visible state is shared
    # between calls (avoids the mutable-default-argument pitfall).
    required_fields = list(required_fields or [])
    optional_fields = list(optional_fields or [])
    django_fields = dict(django_fields or {})

    def build_field(field_name, required):
        # 'scope' gets the list-splitting field; everything else is text.
        if field_name == 'scope':
            return SeparatedValuesField(required=required)
        return forms.CharField(required=required)

    class_dict = dict()
    for field_name in optional_fields:
        class_dict[field_name] = build_field(field_name, required=False)
    for field_name in required_fields:
        class_dict[field_name] = build_field(field_name, required=True)
    # items() instead of py2-only iteritems() keeps this portable.
    for field_name, field in django_fields.items():
        class_dict[field_name] = field
    class_dict['clean_scope'] = clean_scope
    cls = type('%sTokenForm' % grant_type,
               (forms.Form, ),
               class_dict
               )
    return cls
# Form for the authorization_code grant: exchanging a code for tokens.
AuthorizationCodeTokenForm = make_token_form('authorization_code',
    required_fields=[
        'code',
    ],
    optional_fields=[
        'redirect_uri',
        'client_id',
        'scope',
    ]
)

# Form for the resource-owner password grant.
PasswordTokenForm = make_token_form('password',
    required_fields=[
        'username',
        'password',
        'scope',
    ]
)

# Form for the client_credentials grant.
ClientCredentialsTokenForm = make_token_form('client_credentials',
    required_fields=['scope'],
)

# Form for refreshing an access token; scope is optional.
RefreshTokenTokenForm = make_token_form('refresh_token',
    required_fields=['refresh_token'],
    optional_fields=['scope']
)
| bsd-3-clause |
roadmapper/ansible | lib/ansible/playbook/role_include.py | 37 | 6983 | #
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os.path import basename
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.block import Block
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.utils.display import Display
# Public API of this module.
__all__ = ['IncludeRole']

# Module-level Display instance for warning/debug output.
display = Display()
class IncludeRole(TaskInclude):

    """
    A Role include is derived from a regular role to handle the special
    circumstances related to the `- include_role: ...`
    """

    BASE = ('name', 'role')  # directly assigned
    FROM_ARGS = ('tasks_from', 'vars_from', 'defaults_from', 'handlers_from')  # used to populate from dict in role
    OTHER_ARGS = ('apply', 'public', 'allow_duplicates')  # assigned to matching property
    VALID_ARGS = tuple(frozenset(BASE + FROM_ARGS + OTHER_ARGS))  # all valid args

    # =================================================================================
    # ATTRIBUTES

    # private as this is a 'module options' vs a task property
    _allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
    _public = FieldAttribute(isa='bool', default=False, private=True)

    def __init__(self, block=None, role=None, task_include=None):

        super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)

        # Mapping of 'tasks'/'vars'/'defaults'/'handlers' to the file name
        # requested via the corresponding *_from option.
        self._from_files = {}
        self._parent_role = role
        self._role_name = None
        self._role_path = None

    def get_name(self):
        ''' return the name of the task '''
        return self.name or "%s : %s" % (self.action, self._role_name)

    def get_block_list(self, play=None, variable_manager=None, loader=None):
        """Load and compile the included role, returning its task blocks
        and handler blocks, both parented to this include."""

        # only need play passed in when dynamic
        if play is None:
            myplay = self._parent._play
        else:
            myplay = play

        ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader, collection_list=self.collections)
        ri.vars.update(self.vars)

        # build role
        actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files,
                                from_include=True)
        actual_role._metadata.allow_duplicates = self.allow_duplicates

        # Statically imported (or explicitly public) roles expose their
        # vars/defaults to the whole play.
        if self.statically_loaded or self.public:
            myplay.roles.append(actual_role)

        # save this for later use
        self._role_path = actual_role._role_path

        # compile role with parent roles as dependencies to ensure they inherit
        # variables
        if not self._parent_role:
            dep_chain = []
        else:
            dep_chain = list(self._parent_role._parents)
            dep_chain.append(self._parent_role)

        p_block = self.build_parent_block()

        # collections value is not inherited; override with the value we calculated during role setup
        p_block.collections = actual_role.collections

        blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
        for b in blocks:
            b._parent = p_block
            # HACK: parent inheritance doesn't seem to have a way to handle this intermediate override until squashed/finalized
            b.collections = actual_role.collections

        # updated available handlers in play
        handlers = actual_role.get_handler_blocks(play=myplay)
        for h in handlers:
            h._parent = p_block
        myplay.handlers = myplay.handlers + handlers
        return blocks, handlers

    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        """Parse an `include_role`/`import_role` task entry into an
        IncludeRole object, validating its options.

        :raises AnsibleParserError: on a missing role name or options that
            are unknown or invalid for the action.
        """

        ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)

        # Validate options
        my_arg_names = frozenset(ir.args.keys())

        # name is needed, or use role as alias
        ir._role_name = ir.args.get('name', ir.args.get('role'))
        if ir._role_name is None:
            raise AnsibleParserError("'name' is a required field for %s." % ir.action, obj=data)

        # 'public' only makes sense for dynamic includes.
        if 'public' in ir.args and ir.action != 'include_role':
            raise AnsibleParserError('Invalid options for %s: public' % ir.action, obj=data)

        # validate bad args, otherwise we silently ignore
        bad_opts = my_arg_names.difference(IncludeRole.VALID_ARGS)
        if bad_opts:
            raise AnsibleParserError('Invalid options for %s: %s' % (ir.action, ','.join(list(bad_opts))), obj=data)

        # build options for role includes
        for key in my_arg_names.intersection(IncludeRole.FROM_ARGS):
            from_key = key.replace('_from', '')
            # basename() guards against path traversal out of the role dir.
            ir._from_files[from_key] = basename(ir.args.get(key))

        apply_attrs = ir.args.get('apply', {})
        if apply_attrs and ir.action != 'include_role':
            raise AnsibleParserError('Invalid options for %s: apply' % ir.action, obj=data)
        elif not isinstance(apply_attrs, dict):
            raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)

        # manual list as otherwise the options would set other task parameters we don't want.
        for option in my_arg_names.intersection(IncludeRole.OTHER_ARGS):
            setattr(ir, option, ir.args.get(option))

        return ir

    def copy(self, exclude_parent=False, exclude_tasks=False):
        """Copy this include, carrying over the role-include-specific state
        on top of the base TaskInclude copy."""

        new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
        new_me.statically_loaded = self.statically_loaded
        new_me._from_files = self._from_files.copy()
        new_me._parent_role = self._parent_role
        new_me._role_name = self._role_name
        new_me._role_path = self._role_path

        return new_me

    def get_include_params(self):
        """Return include params, augmented with the parent role's params
        and ancestry bookkeeping variables when nested inside a role."""
        v = super(IncludeRole, self).get_include_params()
        if self._parent_role:
            v.update(self._parent_role.get_role_params())
            # Most-immediate ancestor goes first in both lists.
            v.setdefault('ansible_parent_role_names', []).insert(0, self._parent_role.get_name())
            v.setdefault('ansible_parent_role_paths', []).insert(0, self._parent_role._role_path)
        return v
| gpl-3.0 |
tomviner/pyfakefs | fake_filesystem_unittest.py | 2 | 9078 | # Copyright 2014 Altera Corporation. All Rights Reserved.
# Copyright 2015 John McGehee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base class for unit tests using the :py:class:`pyfakefs` module.
This class searches `sys.modules` for modules that import the `os`, `glob`,
`shutil`, and `tempfile` modules.
The `setUp()` method binds these modules to the corresponding fake
modules from `pyfakefs`. Further, the built in functions `file()` and
`open()` are bound to fake functions.
The `tearDownPyfakefs()` method returns the module bindings to their original
state.
It is expected that `setUp()` be invoked at the beginning of the derived
class' `setUp()` method, and `tearDownPyfakefs()` be invoked at the end of the
derived class' `tearDown()` method.
During the test, everything uses the fake file system and modules. This means
that even in your test, you can use familiar functions like `open()` and
`os.makedirs()` to manipulate the fake file system.
This also means existing unit tests that use the real file system can be
retrofitted to use `pyfakefs` by simply changing their base class from
`:py:class`unittest.TestCase` to
`:py:class`pyfakefs.fake_filesystem_unittest.TestCase`.
"""
import sys
import unittest
import doctest
import inspect
import fake_filesystem
import fake_filesystem_glob
import fake_filesystem_shutil
import fake_tempfile
if sys.version_info < (3,):
import __builtin__ as builtins
else:
import builtins
import mox3.stubout
def load_doctests(loader, tests, ignore, module):
    """Load the doctest tests for the specified module into unittest,
    patched so that the doctests run against the fake file system."""
    patcher = Patcher()
    suite = doctest.DocTestSuite(module,
                                 globs=patcher.replaceGlobs(vars(module)),
                                 setUp=patcher.setUp,
                                 tearDown=patcher.tearDown)
    tests.addTests(suite)
    return tests
class TestCase(unittest.TestCase):
    """Test case base class that patches the file-system modules with the
    pyfakefs fakes for the duration of each test."""

    def __init__(self, methodName='runTest'):
        # Each test case owns a private Patcher doing the stubbing.
        super(TestCase, self).__init__(methodName)
        self._stubber = Patcher()

    @property
    def fs(self):
        # The FakeFilesystem instance backing all fake modules.
        return self._stubber.fs

    @property
    def patches(self):
        # NOTE(review): Patcher defines no 'patches' attribute in this
        # file — verify this property against the Patcher implementation.
        return self._stubber.patches

    def setUpPyfakefs(self):
        '''Bind the file-related modules to the :py:class:`pyfakefs` fake file
        system instead of the real file system. Also bind the fake `file()` and
        `open()` functions.

        Invoke this at the beginning of the `setUp()` method in your unit test
        class.
        '''
        self._stubber.setUp()
        # Guarantees unpatching even if the test errors out.
        self.addCleanup(self._stubber.tearDown)

    def tearDownPyfakefs(self):
        ''':meth:`pyfakefs.fake_filesystem_unittest.setUpPyfakefs` registers the
        tear down procedure using :py:meth:`unittest.TestCase.addCleanup`. Thus this
        method is deprecated, and remains just for backward compatibility.
        '''
        pass
class Patcher(object):
    '''
    Instantiate a stub creator to bind and un-bind the file-related modules to
    the :py:mod:`pyfakefs` fake modules.
    '''

    SKIPMODULES = set([None, fake_filesystem, fake_filesystem_glob,
                       fake_filesystem_shutil, fake_tempfile, sys])
    '''Stub nothing that is imported within these modules.
    `sys` is included to prevent `sys.path` from being stubbed with the fake
    `os.path`.
    '''
    assert None in SKIPMODULES, "sys.modules contains 'None' values; must skip them."

    # To add py.test support per issue https://github.com/jmcgeheeiv/pyfakefs/issues/43,
    # it appears that adding 'py', 'pytest', '_pytest' to SKIPNAMES will help
    SKIPNAMES = set(['os', 'glob', 'path', 'shutil', 'tempfile'])

    def __init__(self):
        # Attributes set by _findModules()
        self._osModules = None
        self._globModules = None
        self._pathModules = None
        self._shutilModules = None
        self._tempfileModules = None
        self._findModules()
        assert None not in vars(self).values(), \
            "_findModules() missed the initialization of an instance variable"

        # Attributes set by _refresh()
        self._stubs = None
        self.fs = None
        self.fake_os = None
        self.fake_glob = None
        self.fake_path = None
        self.fake_shutil = None
        self.fake_tempfile_ = None
        self.fake_open = None
        # _isStale is set by tearDown(), reset by _refresh()
        self._isStale = True
        self._refresh()
        assert None not in vars(self).values(), \
            "_refresh() missed the initialization of an instance variable"
        assert self._isStale == False, "_refresh() did not reset _isStale"

    def _findModules(self):
        '''Find and cache all modules that import file system modules.
        Later, `setUp()` will stub these with the fake file system
        modules.
        '''
        self._osModules = set()
        self._globModules = set()
        self._pathModules = set()
        self._shutilModules = set()
        self._tempfileModules = set()
        # Scan every loaded module, skipping pyfakefs itself and the
        # stdlib names that must not be stubbed.
        for name, module in set(sys.modules.items()):
            if (module in self.SKIPMODULES or
                (not inspect.ismodule(module)) or
                name.split('.')[0] in self.SKIPNAMES):
                continue
            # A module that bound 'os' (etc.) at import time must have
            # that binding replaced individually.
            if 'os' in module.__dict__:
                self._osModules.add(module)
            if 'glob' in module.__dict__:
                self._globModules.add(module)
            if 'path' in module.__dict__:
                self._pathModules.add(module)
            if 'shutil' in module.__dict__:
                self._shutilModules.add(module)
            if 'tempfile' in module.__dict__:
                self._tempfileModules.add(module)

    def _refresh(self):
        '''Renew the fake file system and set the _isStale flag to `False`.'''
        if self._stubs is not None:
            self._stubs.SmartUnsetAll()
        self._stubs = mox3.stubout.StubOutForTesting()

        # A fresh fake filesystem and one fake module per real module.
        self.fs = fake_filesystem.FakeFilesystem()
        self.fake_os = fake_filesystem.FakeOsModule(self.fs)
        self.fake_glob = fake_filesystem_glob.FakeGlobModule(self.fs)
        self.fake_path = self.fake_os.path
        self.fake_shutil = fake_filesystem_shutil.FakeShutilModule(self.fs)
        self.fake_tempfile_ = fake_tempfile.FakeTempfileModule(self.fs)
        self.fake_open = fake_filesystem.FakeFileOpen(self.fs)

        self._isStale = False

    def setUp(self, doctester=None):
        '''Bind the file-related modules to the :py:mod:`pyfakefs` fake
        modules real ones.  Also bind the fake `file()` and `open()` functions.
        '''
        if self._isStale:
            self._refresh()

        if doctester is not None:
            doctester.globs = self.replaceGlobs(doctester.globs)

        if sys.version_info < (3,):
            # file() was eliminated in Python3
            self._stubs.SmartSet(builtins, 'file', self.fake_open)
        self._stubs.SmartSet(builtins, 'open', self.fake_open)

        # Replace the cached module references found by _findModules().
        for module in self._osModules:
            self._stubs.SmartSet(module, 'os', self.fake_os)
        for module in self._globModules:
            self._stubs.SmartSet(module, 'glob', self.fake_glob)
        for module in self._pathModules:
            self._stubs.SmartSet(module, 'path', self.fake_path)
        for module in self._shutilModules:
            self._stubs.SmartSet(module, 'shutil', self.fake_shutil)
        for module in self._tempfileModules:
            self._stubs.SmartSet(module, 'tempfile', self.fake_tempfile_)

    def replaceGlobs(self, globs_):
        '''Return a copy of the given globals dict with any file-related
        module entries replaced by their fake counterparts.'''
        globs = globs_.copy()
        if self._isStale:
            self._refresh()
        if 'os' in globs:
            globs['os'] = fake_filesystem.FakeOsModule(self.fs)
        if 'glob' in globs:
            globs['glob'] = fake_filesystem_glob.FakeGlobModule(self.fs)
        if 'path' in globs:
            # Reuse the fake os already placed in globs, if any, so that
            # 'path' and 'os.path' stay consistent.
            fake_os = globs['os'] if 'os' in globs \
                else fake_filesystem.FakeOsModule(self.fs)
            globs['path'] = fake_os.path
        if 'shutil' in globs:
            globs['shutil'] = fake_filesystem_shutil.FakeShutilModule(self.fs)
        if 'tempfile' in globs:
            globs['tempfile'] = fake_tempfile.FakeTempfileModule(self.fs)
        return globs

    def tearDown(self, doctester=None):
        '''Clear the fake filesystem bindings created by `setUp()`.'''
        self._isStale = True
        self._stubs.SmartUnsetAll()
| apache-2.0 |
hamx0r/ibrest | app/ib/opt/message.py | 8 | 4798 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Defines message types for the Receiver class.
#
# This module inspects the EWrapper class to build a set of Message
# types. In creating the types, it also builds a registry of them
# that the Receiver class then uses to determine message types.
##
import sys
from ast import NodeVisitor, parse
from inspect import getsourcefile
from re import match
from ib.ext.AnyWrapper import AnyWrapper
from ib.ext.EWrapper import EWrapper
from ib.ext.EClientSocket import EClientSocket
from ib.lib import toTypeName
class SignatureAccumulator(NodeVisitor):
    """AST visitor that records a (name, argument-names) pair for every
    function defined in the source files of the given classes.  The first
    (implicit) argument of each function is dropped."""

    def __init__(self, classes):
        NodeVisitor.__init__(self)
        self.signatures = []
        # Parse the defining source file of every class and walk it.
        for filename in (getsourcefile(cls) for cls in classes):
            self.visit(parse(open(filename).read()))

    def visit_FunctionDef(self, node):
        # ast argument objects are named differently on Python 2 vs 3.
        if sys.version_info[0] < 3:
            arg_names = [arg.id for arg in node.args.args]
        else:
            arg_names = [arg.arg for arg in node.args.args]
        # Record the function name without its implicit first parameter.
        self.signatures.append((node.name, arg_names[1:]))
class EClientSocketAccumulator(SignatureAccumulator):
    """ Accumulates EClientSocket request/cancel/place method signatures. """
    def getSignatures(self):
        # Yield only the request-style calls (req*/cancel*/place*).
        for signature in self.signatures:
            if match('(?i)req|cancel|place', signature[0]):
                yield signature
class EWrapperAccumulator(SignatureAccumulator):
    """ Accumulates EWrapper callback signatures, excluding the error
    handlers (those are modeled separately by the Error message type).
    """
    def getSignatures(self):
        return ((name, args) for name, args in self.signatures
                if match('(?!((?i)error.*))', name))
##
# Dictionary that associates wrapper method names to the message class
# that should be instantiated for delivery during that method call.
# Keys are method names (strings); values are tuples of generated
# Message subclasses (see buildMessageRegistry below).
registry = {}
def messageTypeNames():
    """ Builds set of message type names.

    @return set of all message type names as strings
    """
    return set(typ.typeName
               for types in registry.values()
               for typ in types)
class Message(object):
    """ Base class for Message types.

    Subclasses declare their fields via ``__slots__`` and a ``typeName``
    attribute; instances behave like light-weight records.
    """
    __slots__ = ()

    def __init__(self, **kwds):
        """ Constructor.

        @param **kwds keywords and values for instance
        """
        # Consume one keyword per slot; anything left over is a
        # programming error.
        for slot in self.__slots__:
            setattr(self, slot, kwds.pop(slot, None))
        assert not kwds

    def __len__(self):
        """ x.__len__() <==> len(x)
        """
        return len(self.keys())

    def __str__(self):
        """ x.__str__() <==> str(x)
        """
        pairs = ['%s=%s' % pair for pair in self.items()]
        if pairs:
            return '<%s %s>' % (self.typeName, str.join(', ', pairs))
        return '<%s>' % (self.typeName, )

    def items(self):
        """ List of message (slot, slot value) pairs, as 2-tuples.

        @return list of 2-tuples, each slot (name, value)
        """
        return zip(self.keys(), self.values())

    def values(self):
        """ List of instance slot values.

        @return list of each slot value
        """
        return [getattr(self, slot, None) for slot in self.keys()]

    def keys(self):
        """ List of instance slots.

        @return list of each slot.
        """
        return self.__slots__
class Error(Message):
    """ Specialized message type for the wrapper error callbacks.

    The error family of method calls can't be built programmatically,
    so we define one here.
    """
    __slots__ = ('id', 'errorCode', 'errorMsg')
def buildMessageRegistry(seq, suffixes=('', ), bases=(Message, )):
    """ Construct message types and add to the module registry.

    @param seq pairs of method (name, arguments)
    @param suffixes iterable of type name suffixes; one type is built per
           suffix for every method name
    @param bases sequence of base classes for message types
    @return None
    """
    # NB: the default for ``suffixes`` was a mutable list; a tuple is safer
    # (never accidentally shared/mutated) and iterates identically.
    for name, args in sorted(seq):
        for suffix in suffixes:
            typename = toTypeName(name) + suffix
            typens = {'__slots__': args, '__assoc__': name, 'typeName': name}
            msgtype = type(typename, bases, typens)
            # Append the new type to this name's tuple of types.
            registry[name] = registry.get(name, ()) + (msgtype, )
# Harvest method signatures from the wrapper and client socket sources.
eWrapperAccum = EWrapperAccumulator((AnyWrapper, EWrapper))
eClientAccum = EClientSocketAccumulator((EClientSocket, ))

wrapperMethods = list(eWrapperAccum.getSignatures())
clientSocketMethods = list(eClientAccum.getSignatures())
# Error callbacks cannot be generated from source; use the Error type's slots.
errorMethods = [('error', Error.__slots__), ]

# One message type per wrapper callback; Pre/Post pairs for each client
# socket request method; and the hand-written error type.
buildMessageRegistry(wrapperMethods)
buildMessageRegistry(clientSocketMethods, suffixes=('Pre', 'Post'))
buildMessageRegistry(errorMethods)
def initModule():
    """ Export every generated Message type into this module's namespace. """
    globals().update(
        (messagetype.typeName, messagetype)
        for messagetypes in registry.values()
        for messagetype in messagetypes)
# Populate the module namespace with the generated types at import time.
# NOTE(review): the NameError guard presumably covers partial/re-import
# scenarios -- confirm before relying on it.
try:
    initModule()
except (NameError, ):
    pass
else:
    del(initModule)

# Remove helper names that were only needed to build the registry.
del(AnyWrapper)
del(EWrapper)
del(EClientSocket)
del(eWrapperAccum)
del(eClientAccum)
| mit |
UCL-INGI/INGInious | inginious/agent/docker_agent/_timeout_watcher.py | 2 | 4597 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
"""
Abstractions that provide zeromq-agents that talk with cgroups.
"""
import asyncio
import logging
from inginious.common.asyncio_utils import AsyncIteratorWrapper
class TimeoutWatcher(object):
    """ Looks for container timeouts.

    Soft (CPU-time) timeouts are detected by polling the docker stats
    stream; hard (wall-clock) timeouts are scheduled on the event loop.
    """
    def __init__(self, docker_interface):
        """ docker_interface is an ASYNC interface to docker """
        self._logger = logging.getLogger("inginious.agent.docker")
        self._loop = asyncio.get_event_loop()
        # Containers that were killed because they timed out.
        self._container_had_error = set()
        # Containers currently being watched.
        self._watching = set()
        self._docker_interface = docker_interface
        # Tasks polling docker stats, kept so clean() can cancel them.
        self._running_asyncio_tasks = set()

    async def clean(self):
        """ Close all the running tasks watching for a container timeout. All references to
        containers are removed: any attempt to was_killed after a call to clean() will return None.
        """
        for task in self._running_asyncio_tasks:
            task.cancel()
        self._container_had_error = set()
        self._watching = set()
        self._running_asyncio_tasks = set()

    async def was_killed(self, container_id):
        """
        This method has to be called *once, and only once* for each container registered in `register_container`.
        :param container_id: the container id to check
        :return: a string containing "timeout" if the container was killed. None if it was not (std format for container errors)
        """
        if container_id in self._watching:
            self._watching.remove(container_id)
        if container_id in self._container_had_error:
            self._container_had_error.remove(container_id)
            return "timeout"
        return None

    async def register_container(self, container_id, timeout, hard_timeout):
        """ Start watching a container.

        :param timeout: soft limit, in seconds of CPU time
        :param hard_timeout: hard limit, in seconds of wall-clock time
        """
        self._watching.add(container_id)
        task = self._loop.create_task(self._handle_container_timeout(container_id, timeout))
        self._running_asyncio_tasks.add(task)
        task.add_done_callback(self._remove_safe_task)
        self._loop.call_later(hard_timeout, asyncio.ensure_future,
                              self._handle_container_hard_timeout(container_id, hard_timeout))

    async def _handle_container_timeout(self, container_id, timeout):
        """
        Check timeout with docker stats
        :param container_id:
        :param timeout: in seconds (cpu time)
        """
        try:
            docker_stats = await self._docker_interface.get_stats(container_id)
            source = AsyncIteratorWrapper(docker_stats)
            nano_timeout = timeout * (10 ** 9)
            async for upd in source:
                if upd is None:
                    # The stats stream ended unexpectedly: be safe and kill,
                    # and stop polling (a None update has no stats to read).
                    await self._kill_it_with_fire(container_id)
                    return
                self._logger.debug("%i", upd['cpu_stats']['cpu_usage']['total_usage'])
                if upd['cpu_stats']['cpu_usage']['total_usage'] > nano_timeout:
                    self._logger.info("Killing container %s as it used %i CPU seconds (max was %i)",
                                      container_id,
                                      int(upd['cpu_stats']['cpu_usage']['total_usage'] / (10 ** 9)),
                                      timeout)
                    await self._kill_it_with_fire(container_id)
                    return
        except asyncio.CancelledError:
            # Normal shutdown path: clean() cancels the polling tasks.
            pass
        except Exception:
            # Narrowed from a bare except: do not swallow SystemExit et al.
            self._logger.exception("Exception in _handle_container_timeout")

    async def _handle_container_hard_timeout(self, container_id, hard_timeout):
        """
        Kills a container (should be called with loop.call_later(hard_timeout, ...)) and displays a message on the log
        :param container_id:
        :param hard_timeout:
        :return:
        """
        if container_id in self._watching:
            self._logger.info("Killing container %s as it used its %i wall time seconds",
                              container_id, hard_timeout)
            await self._kill_it_with_fire(container_id)

    async def _kill_it_with_fire(self, container_id):
        """
        Kill a container, with fire.
        """
        if container_id in self._watching:
            self._watching.remove(container_id)
            self._container_had_error.add(container_id)
            try:
                await self._docker_interface.kill_container(container_id)
            except Exception:
                pass  # best-effort: the container may already be gone

    def _remove_safe_task(self, task):
        """ Remove a task from _running_asyncio_tasks """
        # discard() is a no-op if clean() already cleared the set.
        self._running_asyncio_tasks.discard(task)
maftieu/CouchPotatoServer | libs/tornado/tcpserver.py | 18 | 10244 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded TCP server."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import socket
import ssl
from tornado.log import app_log
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
from tornado import process
class TCPServer(object):
    r"""A non-blocking, single-threaded TCP server.

    To use `TCPServer`, define a subclass which overrides the `handle_stream`
    method.

    To make this server serve SSL traffic, send the ssl_options dictionary
    argument with the arguments required for the `ssl.wrap_socket` method,
    including "certfile" and "keyfile"::

       TCPServer(ssl_options={
           "certfile": os.path.join(data_dir, "mydomain.crt"),
           "keyfile": os.path.join(data_dir, "mydomain.key"),
       })

    `TCPServer` initialization follows one of three patterns:

    1. `listen`: simple single-process::

            server = TCPServer()
            server.listen(8888)
            IOLoop.instance().start()

    2. `bind`/`start`: simple multi-process::

            server = TCPServer()
            server.bind(8888)
            server.start(0)  # Forks multiple sub-processes
            IOLoop.instance().start()

       When using this interface, an `.IOLoop` must *not* be passed
       to the `TCPServer` constructor.  `start` will always start
       the server on the default singleton `.IOLoop`.

    3. `add_sockets`: advanced multi-process::

            sockets = bind_sockets(8888)
            tornado.process.fork_processes(0)
            server = TCPServer()
            server.add_sockets(sockets)
            IOLoop.instance().start()

       The `add_sockets` interface is more complicated, but it can be
       used with `tornado.process.fork_processes` to give you more
       flexibility in when the fork happens.  `add_sockets` can
       also be used in single-process servers if you want to create
       your listening sockets in some way other than
       `~tornado.netutil.bind_sockets`.

    .. versionadded:: 3.1
       The ``max_buffer_size`` argument.
    """
    def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None):
        self.io_loop = io_loop
        self.ssl_options = ssl_options
        self._sockets = {}  # fd -> socket object
        self._pending_sockets = []
        self._started = False
        self.max_buffer_size = max_buffer_size

        # Verify the SSL options. Otherwise we don't get errors until clients
        # connect. This doesn't verify that the keys are legitimate, but
        # the SSL module doesn't do that until there is a connected socket
        # which seems like too much work
        if self.ssl_options is not None and isinstance(self.ssl_options, dict):
            # Only certfile is required: it can contain both keys
            if 'certfile' not in self.ssl_options:
                raise KeyError('missing key "certfile" in ssl_options')

            if not os.path.exists(self.ssl_options['certfile']):
                raise ValueError('certfile "%s" does not exist' %
                                 self.ssl_options['certfile'])
            if ('keyfile' in self.ssl_options and
                    not os.path.exists(self.ssl_options['keyfile'])):
                raise ValueError('keyfile "%s" does not exist' %
                                 self.ssl_options['keyfile'])

    def listen(self, port, address=""):
        """Starts accepting connections on the given port.

        This method may be called more than once to listen on multiple ports.
        `listen` takes effect immediately; it is not necessary to call
        `TCPServer.start` afterwards.  It is, however, necessary to start
        the `.IOLoop`.
        """
        sockets = bind_sockets(port, address=address)
        self.add_sockets(sockets)

    def add_sockets(self, sockets):
        """Makes this server start accepting connections on the given sockets.

        The ``sockets`` parameter is a list of socket objects such as
        those returned by `~tornado.netutil.bind_sockets`.
        `add_sockets` is typically used in combination with that
        method and `tornado.process.fork_processes` to provide greater
        control over the initialization of a multi-process server.
        """
        if self.io_loop is None:
            self.io_loop = IOLoop.current()

        for sock in sockets:
            self._sockets[sock.fileno()] = sock
            # Each listening socket dispatches accepted connections to
            # _handle_connection on this server's IOLoop.
            add_accept_handler(sock, self._handle_connection,
                               io_loop=self.io_loop)

    def add_socket(self, socket):
        """Singular version of `add_sockets`.  Takes a single socket object."""
        self.add_sockets([socket])

    def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128):
        """Binds this server to the given port on the given address.

        To start the server, call `start`. If you want to run this server
        in a single process, you can call `listen` as a shortcut to the
        sequence of `bind` and `start` calls.

        Address may be either an IP address or hostname.  If it's a hostname,
        the server will listen on all IP addresses associated with the
        name.  Address may be an empty string or None to listen on all
        available interfaces.  Family may be set to either `socket.AF_INET`
        or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
        both will be used if available.

        The ``backlog`` argument has the same meaning as for
        `socket.listen <socket.socket.listen>`.

        This method may be called multiple times prior to `start` to listen
        on multiple ports or interfaces.
        """
        sockets = bind_sockets(port, address=address, family=family,
                               backlog=backlog)
        if self._started:
            self.add_sockets(sockets)
        else:
            # start() has not run yet; defer registration until it does.
            self._pending_sockets.extend(sockets)

    def start(self, num_processes=1):
        """Starts this server in the `.IOLoop`.

        By default, we run the server in this process and do not fork any
        additional child process.

        If num_processes is ``None`` or <= 0, we detect the number of cores
        available on this machine and fork that number of child
        processes. If num_processes is given and > 1, we fork that
        specific number of sub-processes.

        Since we use processes and not threads, there is no shared memory
        between any server code.

        Note that multiple processes are not compatible with the autoreload
        module (or the ``autoreload=True`` option to `tornado.web.Application`
        which defaults to True when ``debug=True``).
        When using multiple processes, no IOLoops can be created or
        referenced until after the call to ``TCPServer.start(n)``.
        """
        assert not self._started
        self._started = True
        if num_processes != 1:
            process.fork_processes(num_processes)
        sockets = self._pending_sockets
        self._pending_sockets = []
        self.add_sockets(sockets)

    def stop(self):
        """Stops listening for new connections.

        Requests currently in progress may still continue after the
        server is stopped.
        """
        for fd, sock in self._sockets.items():
            self.io_loop.remove_handler(fd)
            sock.close()

    def handle_stream(self, stream, address):
        """Override to handle a new `.IOStream` from an incoming connection."""
        raise NotImplementedError()

    def _handle_connection(self, connection, address):
        # Accept callback: wrap the raw socket (optionally in SSL) and hand
        # the resulting stream to handle_stream().
        if self.ssl_options is not None:
            assert ssl, "Python 2.6+ and OpenSSL required for SSL"
            try:
                connection = ssl_wrap_socket(connection,
                                             self.ssl_options,
                                             server_side=True,
                                             do_handshake_on_connect=False)
            except ssl.SSLError as err:
                if err.args[0] == ssl.SSL_ERROR_EOF:
                    return connection.close()
                else:
                    raise
            except socket.error as err:
                # If the connection is closed immediately after it is created
                # (as in a port scan), we can get one of several errors.
                # wrap_socket makes an internal call to getpeername,
                # which may return either EINVAL (Mac OS X) or ENOTCONN
                # (Linux).  If it returns ENOTCONN, this error is
                # silently swallowed by the ssl module, so we need to
                # catch another error later on (AttributeError in
                # SSLIOStream._do_ssl_handshake).
                # To test this behavior, try nmap with the -sT flag.
                # https://github.com/facebook/tornado/pull/750
                if err.args[0] in (errno.ECONNABORTED, errno.EINVAL):
                    return connection.close()
                else:
                    raise
        try:
            if self.ssl_options is not None:
                stream = SSLIOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size)
            else:
                stream = IOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size)
            self.handle_stream(stream, address)
        except Exception:
            # User callback errors must not take down the accept loop.
            app_log.error("Error in connection callback", exc_info=True)
| gpl-3.0 |
mk01/xbmc | lib/gtest/test/gtest_catch_exceptions_test.py | 403 | 9422 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.

FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')

# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')

TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output

# SEH support is probed by looking for the SEH test in the binary's test list.
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output

EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.

if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # Each lifecycle phase must report the SEH exception with code 0x2a.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exception thrown affect the remainder of the test work flow in the
    expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  # NOTE(review): this test method is only defined when the destructor test
  # actually appears in the binary's output -- presumably some configurations
  # skip it; confirm against the C++ side.
  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):

    def testCatchesCxxExceptionsInFixtureDestructor(self):
      self.assert_('C++ exception with description '
                   '"Standard C++ exception" thrown '
                   'in the test fixture\'s destructor'
                   in EX_BINARY_OUTPUT)
      self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                   'called as expected.'
                   in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FITLER_OUT_SEH_TESTS_FLAG]).output

    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
# Standard entry point: delegate to the gtest test-utility runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| gpl-2.0 |
gitcoinco/web | app/marketing/management/commands/no_applicants_email.py | 1 | 1922 | '''
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from dashboard.models import Bounty
from marketing.mails import no_applicant_reminder
class Command(BaseCommand):
    help = 'sends reminder emails to funders whose bounties have 0 applications'

    def handle(self, *args, **options):
        """Remind funders of 3- and 7-day-old open mainnet bounties that
        still have no applicants."""
        if settings.DEBUG:
            print("not active in non prod environments")
            return

        def days_ago(days):
            # Timestamp `days` * 24 hours in the past.
            return timezone.now() - timezone.timedelta(hours=24 * days)

        # Bounties created 3-4 days ago or 7-8 days ago.
        three_day_window = Q(created_on__range=[days_ago(4), days_ago(3)])
        seven_day_window = Q(created_on__range=[days_ago(8), days_ago(7)])

        bounties = Bounty.objects.current().filter(
            three_day_window | seven_day_window,
            idx_status='open',
            network='mainnet'
        )
        for bounty in bounties:
            if bounty.no_of_applicants == 0:
                no_applicant_reminder(bounty.bounty_owner_email, bounty)
| agpl-3.0 |
kwikteam/klusta | klusta/kwik/tests/test_model.py | 1 | 12258 | # -*- coding: utf-8 -*-
"""Tests of Kwik file opening routines."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal as ae
from pytest import raises
from ...utils import captured_logging
from ..creator import create_kwik
from ..mea import MEA, staggered_positions
from ..mock import create_mock_kwik
from ..model import (KwikModel,
_list_channel_groups,
_list_channels,
_list_recordings,
_list_clusterings,
_concatenate_spikes,
)
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------

# Dimensions of the mock dataset shared by all tests below.
_N_CLUSTERS = 10
_N_SPIKES = 100
_N_CHANNELS = 28
_N_FETS = 2
_N_SAMPLES_TRACES = 10000
def test_kwik_utility(tempdir):
    """Check the low-level Kwik listing helpers on a mock file."""
    # Create the test HDF5 file in the temporary directory.
    path = create_mock_kwik(tempdir,
                            n_clusters=_N_CLUSTERS,
                            n_spikes=_N_SPIKES,
                            n_channels=_N_CHANNELS,
                            n_features_per_channel=_N_FETS,
                            n_samples_traces=_N_SAMPLES_TRACES)

    model = KwikModel(path)
    model._kwik.open()
    h5file = model._kwik.h5py_file
    assert _list_channel_groups(h5file) == [1]
    assert _list_recordings(h5file) == [0, 1]
    assert _list_clusterings(h5file, 1) == ['main', 'original']
    assert _list_channels(h5file, 1) == list(range(_N_CHANNELS))
def test_concatenate_spikes():
    """_concatenate_spikes() shifts per-recording samples by the offsets."""
    rec_spikes = [2, 3, 5, 0, 11, 1]
    recordings = [0, 0, 0, 1, 1, 2]
    offsets = [0, 7, 100]
    ae(_concatenate_spikes(rec_spikes, recordings, offsets),
       [2, 3, 5, 7, 18, 101])
def test_kwik_empty(tempdir):
    """An empty Kwik file exposes probe metadata but no spikes or clusters."""
    channels = [0, 3, 1]
    graph = [[0, 3], [1, 0]]
    probe = {'channel_groups': {
        0: {'channels': channels,
            'graph': graph,
            'geometry': {0: (10, 10)},
            }}}
    sample_rate = 20000

    kwik_path = op.join(tempdir, 'test.kwik')
    create_kwik(kwik_path=kwik_path, probe=probe, sample_rate=sample_rate)

    model = KwikModel(kwik_path)
    # channels is sorted; channel_order preserves the probe's declared order.
    ae(model.channels, sorted(channels))
    ae(model.channel_order, channels)

    assert model.sample_rate == sample_rate
    assert model.n_channels == 3
    assert model.spike_samples is None
    assert model.n_spikes == 0
    assert model.n_clusters == 0
    # describe() must not fail on an empty dataset.
    model.describe()
def test_kwik_open_full(tempdir):
    """End-to-end check of KwikModel on a fully-populated mock dataset."""
    # Create the test HDF5 file in the temporary directory.
    filename = create_mock_kwik(tempdir,
                                n_clusters=_N_CLUSTERS,
                                n_spikes=_N_SPIKES,
                                n_channels=_N_CHANNELS,
                                n_features_per_channel=_N_FETS,
                                n_samples_traces=_N_SAMPLES_TRACES)

    # A path is mandatory.
    with raises(ValueError):
        KwikModel()

    # NOTE: n_channels - 2 because we use a special channel order.
    nc = _N_CHANNELS - 2

    # Test implicit open() method.
    kwik = KwikModel(filename)
    kwik.describe()

    # Accessing the metadata must not raise.
    kwik.metadata
    ae(kwik.channels, np.arange(_N_CHANNELS))
    assert kwik.n_channels == _N_CHANNELS
    assert kwik.n_spikes == _N_SPIKES
    ae(kwik.channel_order, np.arange(1, _N_CHANNELS - 1)[::-1])

    assert kwik.spike_samples.shape == (_N_SPIKES,)
    assert kwik.spike_samples.dtype == np.int64

    # Make sure the spike samples are increasing, even with multiple
    # recordings.
    # WARNING: need to cast to int64, otherwise negative values will
    # overflow and be positive, making the test pass while the
    # spike samples are *not* increasing!
    assert np.all(np.diff(kwik.spike_samples.astype(np.int64)) >= 0)

    assert kwik.spike_times.shape == (_N_SPIKES,)
    assert kwik.spike_times.dtype == np.float64

    assert kwik.spike_recordings.shape == (_N_SPIKES,)
    assert kwik.spike_recordings.dtype == np.uint16

    assert kwik.spike_clusters.shape == (_N_SPIKES,)
    assert kwik.spike_clusters.min() in (0, 1, 2)
    assert kwik.spike_clusters.max() in(_N_CLUSTERS - 2, _N_CLUSTERS - 1)

    assert kwik.all_features.shape == (_N_SPIKES, nc, _N_FETS)
    kwik.all_features[0, ...]

    assert kwik.all_masks.shape == (_N_SPIKES, nc)

    assert kwik.all_traces.shape == (_N_SAMPLES_TRACES, _N_CHANNELS)

    # Waveform access supports positive/negative scalar and list indexing.
    assert kwik.all_waveforms[0].shape == (1, 40, nc)
    assert kwik.all_waveforms[-1].shape == (1, 40, nc)
    assert kwik.all_waveforms[-10].shape == (1, 40, nc)
    assert kwik.all_waveforms[10].shape == (1, 40, nc)
    assert kwik.all_waveforms[[10, 20]].shape == (2, 40, nc)
    with raises(IndexError):
        kwik.all_waveforms[_N_SPIKES + 10]

    # Invalid clustering/channel-group assignments must raise.
    with raises(ValueError):
        kwik.clustering = 'foo'
    with raises(ValueError):
        kwik.channel_group = 42
    assert kwik.n_recordings == 2

    # Test probe.
    assert isinstance(kwik.probe, MEA)
    assert kwik.probe.positions.shape == (nc, 2)
    ae(kwik.probe.positions, staggered_positions(_N_CHANNELS)[1:-1][::-1])

    kwik.close()
def test_kwik_open_no_kwx(tempdir):
    """Opening a dataset whose .kwx file is absent must still work."""
    kwik_path = create_mock_kwik(tempdir,
                                 n_clusters=_N_CLUSTERS,
                                 n_spikes=_N_SPIKES,
                                 n_channels=_N_CHANNELS,
                                 n_features_per_channel=_N_FETS,
                                 n_samples_traces=_N_SAMPLES_TRACES,
                                 with_kwx=False)

    # Implicit open() on construction; close immediately afterwards.
    model = KwikModel(kwik_path)
    model.close()
def test_kwik_open_no_kwd(tempdir):
    """Waveform access must not log errors when the .kwd file is absent."""
    kwik_path = create_mock_kwik(tempdir,
                                 n_clusters=_N_CLUSTERS,
                                 n_spikes=_N_SPIKES,
                                 n_channels=_N_CHANNELS,
                                 n_features_per_channel=_N_FETS,
                                 n_samples_traces=_N_SAMPLES_TRACES,
                                 with_kwd=False)

    model = KwikModel(kwik_path)
    with captured_logging() as buf:
        model.all_waveforms[:]
    # Ensure that there is no error message.
    assert not buf.getvalue().strip()
    model.close()
def test_kwik_save(tempdir):
    """Saving spike clusters, cluster groups and metadata must round-trip."""
    # Create the test HDF5 file in the temporary directory.
    filename = create_mock_kwik(tempdir,
                                n_clusters=_N_CLUSTERS,
                                n_spikes=_N_SPIKES,
                                n_channels=_N_CHANNELS,
                                n_features_per_channel=_N_FETS,
                                n_samples_traces=_N_SAMPLES_TRACES)

    kwik = KwikModel(filename)

    cluster_groups = kwik.cluster_groups
    sc_0 = kwik.spike_clusters.copy()
    sc_1 = sc_0.copy()
    new_cluster = _N_CLUSTERS + 10
    # Reassign the second half of the spikes to a brand-new cluster.
    sc_1[_N_SPIKES // 2:] = new_cluster
    ae(kwik.spike_clusters, sc_0)

    kwik.add_cluster_group(4, 'new')
    cluster_groups[new_cluster] = 'new'
    # Before save(), the new cluster still has the default group.
    assert kwik.cluster_metadata[new_cluster] == 'unsorted'

    kwik.save(sc_1, cluster_groups, {'test': (1, 2.)})
    ae(kwik.spike_clusters, sc_1)
    assert kwik.cluster_metadata[new_cluster] == 'new'
    kwik.close()

    # Reopen the file and check that everything was persisted.
    kwik = KwikModel(filename)
    ae(kwik.spike_clusters, sc_1)
    assert kwik.cluster_metadata[new_cluster] == 'new'
    ae(kwik.clustering_metadata['test'], [1, 2])
def test_kwik_clusterings(tempdir):
    """Switching the active clustering updates the cluster information."""
    # Create the test HDF5 file in the temporary directory.
    filename = create_mock_kwik(tempdir,
                                n_clusters=_N_CLUSTERS,
                                n_spikes=_N_SPIKES,
                                n_channels=_N_CHANNELS,
                                n_features_per_channel=_N_FETS,
                                n_samples_traces=_N_SAMPLES_TRACES)

    kwik = KwikModel(filename)
    assert kwik.clusterings == ['main', 'original']

    # The default clustering is 'main'.
    assert kwik.n_spikes == _N_SPIKES
    assert kwik.n_clusters == _N_CLUSTERS
    ae(kwik.cluster_ids, np.arange(_N_CLUSTERS))

    # Change clustering.
    kwik.clustering = 'original'
    n_clu = kwik.n_clusters
    assert kwik.n_spikes == _N_SPIKES
    # Some clusters may be empty with a small number of spikes like here
    assert _N_CLUSTERS * 2 - 4 <= n_clu <= _N_CLUSTERS * 2
    assert len(kwik.cluster_ids) == n_clu
def test_kwik_manage_clusterings(tempdir):
    """Exercise renaming, copying, deleting and adding clusterings."""
    # Create the test HDF5 file in the temporary directory.
    filename = create_mock_kwik(tempdir,
                                n_clusters=_N_CLUSTERS,
                                n_spikes=_N_SPIKES,
                                n_channels=_N_CHANNELS,
                                n_features_per_channel=_N_FETS,
                                n_samples_traces=_N_SAMPLES_TRACES)
    kwik = KwikModel(filename)
    spike_clusters = kwik.spike_clusters
    assert kwik.clusterings == ['main', 'original']
    # Test renaming.
    kwik.clustering = 'original'
    # Renaming must fail for: an unknown source, a name clash with an
    # existing clustering, and the currently-active clustering.
    with raises(ValueError):
        kwik.rename_clustering('a', 'b')
    with raises(ValueError):
        kwik.rename_clustering('original', 'b')
    with raises(ValueError):
        kwik.rename_clustering('main', 'original')
    kwik.clustering = 'main'
    kwik.rename_clustering('original', 'original_2')
    assert kwik.clusterings == ['main', 'original_2']
    # The old name is gone after the rename.
    with raises(ValueError):
        kwik.clustering = 'original'
    kwik.clustering = 'original_2'
    n_clu = kwik.n_clusters
    assert len(kwik.cluster_ids) == n_clu
    # Test copy.
    with raises(ValueError):
        kwik.copy_clustering('a', 'b')
    with raises(ValueError):
        kwik.copy_clustering('original', 'b')
    with raises(ValueError):
        kwik.copy_clustering('main', 'original_2')
    # You cannot move the current clustering, but you can copy it.
    with raises(ValueError):
        kwik.rename_clustering('original_2', 'original_2_copy')
    kwik.copy_clustering('original_2', 'original_2_copy')
    kwik.delete_clustering('original_2_copy')
    kwik.clustering = 'main'
    kwik.copy_clustering('original_2', 'original')
    assert kwik.clusterings == ['main', 'original', 'original_2']
    # A copy must contain exactly the same cluster ids as its source.
    kwik.clustering = 'original'
    ci = kwik.cluster_ids
    kwik.clustering = 'original_2'
    ae(kwik.cluster_ids, ci)
    # Test delete.
    with raises(ValueError):
        kwik.delete_clustering('a')
    kwik.delete_clustering('original')
    kwik.clustering = 'main'
    kwik.delete_clustering('original_2')
    assert kwik.clusterings == ['main', 'original']
    # Test add.
    sc = np.ones(_N_SPIKES, dtype=np.int32)
    sc[1] = sc[-2] = 3
    kwik.add_clustering('new', sc)
    # Adding a clustering must not disturb the currently-active one.
    ae(kwik.spike_clusters, spike_clusters)
    kwik.clustering = 'new'
    ae(kwik.spike_clusters, sc)
    assert kwik.n_clusters == 2
    ae(kwik.cluster_ids, [1, 3])
def test_kwik_manage_cluster_groups(tempdir):
    """Check adding, renaming and deleting cluster groups."""
    # Build a mock HDF5 dataset inside the temporary directory.
    kwik_path = create_mock_kwik(tempdir,
                                 n_clusters=_N_CLUSTERS,
                                 n_spikes=_N_SPIKES,
                                 n_channels=_N_CHANNELS,
                                 n_features_per_channel=_N_FETS,
                                 n_samples_traces=_N_SAMPLES_TRACES)
    kwik = KwikModel(kwik_path)
    # These operations are invalid on the mock file and must raise.
    with raises(ValueError):
        kwik.delete_cluster_group(2)
    with raises(ValueError):
        kwik.add_cluster_group(1, 'new')
    with raises(ValueError):
        kwik.rename_cluster_group(1, 'renamed')
    # Group id 4 supports the full add / rename / delete lifecycle.
    kwik.add_cluster_group(4, 'new')
    kwik.rename_cluster_group(4, 'renamed')
    kwik.delete_cluster_group(4)
    # Deleting the same group a second time must fail.
    with raises(ValueError):
        kwik.delete_cluster_group(4)
| bsd-3-clause |
MikeLing/treeherder | treeherder/log_parser/parsers.py | 1 | 19774 | import json
import logging
import re
from HTMLParser import HTMLParser
import jsonschema
from django.conf import settings
from treeherder.etl.buildbot import RESULT_DICT
logger = logging.getLogger(__name__)
class ParserBase(object):
    """
    Common behaviour shared by every log parser.

    Subclasses must implement ``parse_line`` and may override
    ``finish_parse`` and ``get_artifact``.
    """

    def __init__(self, name):
        """Remember the parser name and initialise an empty artifact."""
        self.name = name
        self.clear()

    def clear(self):
        """Reset this parser so it can be reused for another run."""
        self.artifact, self.complete = [], False

    def parse_line(self, line, lineno):
        """Process one log line; subclasses must provide this."""
        raise NotImplementedError  # pragma no cover

    def finish_parse(self, last_lineno_seen):
        """Hook for clean-up/summary work once parsing has finished."""
        pass

    def get_artifact(self):
        """Return the collected artifact (unmodified by default)."""
        return self.artifact
class StepParser(ParserBase):
    """
    Parse out individual job steps within a log.

    Step format:
        "steps": [
        {
            "errors": [],
            "name": "set props: master", # the name of the process on start line
            "started": "2013-06-05 12:39:57.838527",
            "started_linenumber": 8,
            "finished_linenumber": 10,
            "finished": "2013-06-05 12:39:57.839226",
            "result": 0
        },
        ...
    ]
    """
    # Matches the half-dozen 'key: value' header lines printed at the start of each
    # Buildbot job log. The list of keys are taken from:
    # https://hg.mozilla.org/build/buildbotcustom/file/644c3860300a/bin/log_uploader.py#l126
    RE_HEADER_LINE = re.compile(r'(?:builder|slave|starttime|results|buildid|builduid|revision): .*')
    # Step marker lines, eg:
    # ========= Started foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.353866) =========
    # ========= Finished foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.354301) =========
    RE_STEP_MARKER = re.compile(r'={9} (?P<marker_type>Started|Finished) (?P<name>.*?) '
                                r'\(results: (?P<result_code>\d+), elapsed: .*?\) '
                                r'\(at (?P<timestamp>.*?)\)')
    # Simple three-state machine driven by the marker lines above.
    STATES = {
        # The initial state until we record the first step.
        "awaiting_first_step": 0,
        # We've started a step, but not yet seen the end of it.
        "step_in_progress": 1,
        # We've seen the end of the previous step.
        "step_finished": 2,
    }
    # date format in a step started/finished header
    DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f'

    def __init__(self):
        """Setup the artifact to hold the header lines."""
        super(StepParser, self).__init__("step_data")
        # Index of the current step within self.steps (-1 = none yet).
        self.stepnum = -1
        self.artifact = {
            "steps": [],
            "errors_truncated": False
        }
        # Error lines found within the current step are collected here.
        self.sub_parser = ErrorParser()
        self.state = self.STATES['awaiting_first_step']

    def parse_line(self, line, lineno):
        """Parse a single line of the log.

        We have to handle both buildbot style logs as well as Taskcluster logs. The latter
        attempt to emulate the buildbot logs, but don't accurately do so, partly due
        to the way logs are generated in Taskcluster (ie: on the workers themselves).

        Buildbot logs:

            builder: ...
            slave: ...
            starttime: ...
            results: ...
            buildid: ...
            builduid: ...
            revision: ...

            ======= <step START marker> =======
            <step log output>
            ======= <step FINISH marker> =======

            ======= <step START marker> =======
            <step log output>
            ======= <step FINISH marker> =======

        Taskcluster logs (a worst-case example):

            <log output outside a step>
            ======= <step START marker> =======
            <step log output>
            ======= <step FINISH marker> =======
            <log output outside a step>
            ======= <step START marker> =======
            <step log output with no following finish marker>

        As can be seen above, Taskcluster logs can have (a) log output that falls between
        step markers, and (b) content at the end of the log, that is not followed by a
        final finish step marker. We handle this by creating generic placeholder steps to
        hold the log output that is not enclosed by step markers, and then by cleaning up
        the final step in finish_parse() once all lines have been parsed.
        """
        if not line.strip():
            # Skip whitespace-only lines, since they will never contain an error line,
            # so are not of interest. This also avoids creating spurious unnamed steps
            # (which occurs when we find content outside of step markers) for the
            # newlines that separate the steps in Buildbot logs.
            return

        if self.state == self.STATES['awaiting_first_step'] and self.RE_HEADER_LINE.match(line):
            # The "key: value" job metadata header lines that appear at the top of
            # Buildbot logs would result in the creation of an unnamed step at the
            # start of the job, unless we skip them. (Which is not desired, since
            # the lines are metadata and not test/build output.)
            return

        step_marker_match = self.RE_STEP_MARKER.match(line)

        if not step_marker_match:
            # This is a normal log line, rather than a step marker. (The common case.)
            if self.state != self.STATES['step_in_progress']:
                # We don't have an in-progress step, so need to start one, even though this
                # isn't a "step started" marker line. We therefore create a new generic step,
                # since we have no way of finding out the step metadata. This case occurs
                # for the Taskcluster logs where content can fall between step markers.
                self.start_step(lineno)
            # Parse the line for errors, which if found, will be associated with the current step.
            self.sub_parser.parse_line(line, lineno)
            return

        # This is either a "step started" or "step finished" marker line, eg:
        # ========= Started foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.353866) =========
        # ========= Finished foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.354301) =========
        if step_marker_match.group('marker_type') == 'Started':
            if self.state == self.STATES['step_in_progress']:
                # We're partway through a step (ie: haven't seen a "step finished" marker line),
                # but have now reached the "step started" marker for the next step. Before we
                # can start the new step, we have to clean up the previous one - albeit using
                # generic step metadata, since there was no "step finished" marker. This occurs
                # in Taskcluster's logs when content falls between the step marker lines.
                self.end_step(lineno)
            # Start a new step using the extracted step metadata.
            self.start_step(lineno,
                            name=step_marker_match.group('name'),
                            timestamp=step_marker_match.group('timestamp'))
            return

        # This is a "step finished" marker line.
        if self.state != self.STATES['step_in_progress']:
            # We're not in the middle of a step, so can't finish one. Just ignore the marker line.
            return

        # Close out the current step using the extracted step metadata.
        self.end_step(lineno,
                      timestamp=step_marker_match.group('timestamp'),
                      result_code=int(step_marker_match.group('result_code')))

    def start_step(self, lineno, name="Unnamed step", timestamp=None):
        """Create a new step and update the state to reflect we're now in the middle of a step."""
        self.state = self.STATES['step_in_progress']
        self.stepnum += 1
        self.steps.append({
            "name": name,
            "started": timestamp,
            "started_linenumber": lineno,
            "errors": [],
        })

    def end_step(self, lineno, timestamp=None, result_code=None):
        """Fill in the current step's summary and update the state to show the current step has ended."""
        self.state = self.STATES['step_finished']
        step_errors = self.sub_parser.get_artifact()
        step_error_count = len(step_errors)
        # Cap the number of errors recorded per step, and flag the truncation
        # in the artifact so consumers know the list is incomplete.
        if step_error_count > settings.PARSER_MAX_STEP_ERROR_LINES:
            step_errors = step_errors[:settings.PARSER_MAX_STEP_ERROR_LINES]
            self.artifact["errors_truncated"] = True
        self.current_step.update({
            "finished": timestamp,
            "finished_linenumber": lineno,
            # Whilst the result code is present on both the start and end buildbot-style step
            # markers, for Taskcluster logs the start marker line lies about the result, since
            # the log output is unbuffered, so Taskcluster does not know the real result at
            # that point. As such, we only set the result when ending a step.
            "result": RESULT_DICT.get(result_code, "unknown"),
            "errors": step_errors
        })
        # reset the sub_parser for the next step
        self.sub_parser.clear()

    def finish_parse(self, last_lineno_seen):
        """Clean-up/summary tasks run at the end of parsing."""
        if self.state == self.STATES['step_in_progress']:
            # We've reached the end of the log without seeing the final "step finish"
            # marker, which would normally have triggered updating the step. As such we
            # must manually close out the current step, so things like result, finish
            # time are set for it. This ensures that the error summary for Taskcluster
            # infra failures actually lists the error that occurs at the
            # end of the log.
            self.end_step(last_lineno_seen)

    @property
    def steps(self):
        """Return the list of steps in the artifact"""
        return self.artifact["steps"]

    @property
    def current_step(self):
        """Return the current step in the artifact"""
        return self.steps[self.stepnum]
class TinderboxPrintParser(ParserBase):
    """Extract "job details" artifacts from TinderboxPrint log lines."""
    RE_TINDERBOXPRINT = re.compile(r'.*TinderboxPrint: ?(?P<line>.*)$')
    # "<a href=...>value</a>: uploaded" style artifact-upload notices.
    RE_UPLOADED_TO = re.compile(
        r"<a href=['\"](?P<url>http(s)?://.*)['\"]>(?P<value>.+)</a>: uploaded"
    )
    # Optional "title: " prefix followed by an HTML anchor.
    RE_LINK_HTML = re.compile(
        (r"((?P<title>[A-Za-z/\.0-9\-_ ]+): )?"
         r"<a .*href=['\"](?P<url>http(s)?://.+)['\"].*>(?P<value>.+)</a>")
    )
    # Optional "title: " prefix followed by a bare URL.
    RE_LINK_TEXT = re.compile(
        r"((?P<title>[A-Za-z/\.0-9\-_ ]+): )?(?P<url>http(s)?://.*)"
    )
    # Tried in order; the first regex that matches the line wins.
    TINDERBOX_REGEXP_TUPLE = (
        {
            're': RE_UPLOADED_TO,
            'base_dict': {
                "content_type": "link",
                "title": "artifact uploaded"
            },
            'duplicates_fields': {}
        },
        {
            're': RE_LINK_HTML,
            'base_dict': {
                "content_type": "link"
            },
            'duplicates_fields': {}
        },
        {
            're': RE_LINK_TEXT,
            'base_dict': {
                "content_type": "link"
            },
            'duplicates_fields': {'value': 'url'}
        }
    )

    def __init__(self):
        """Setup the artifact to hold the job details."""
        super(TinderboxPrintParser, self).__init__("job_details")

    def parse_line(self, line, lineno):
        """Parse a single line of the log"""
        match = self.RE_TINDERBOXPRINT.match(line) if line else None
        if match:
            line = match.group('line')

            for regexp_item in self.TINDERBOX_REGEXP_TUPLE:
                match = regexp_item['re'].match(line)
                if match:
                    artifact = match.groupdict()
                    # handle duplicate fields
                    for to_field, from_field in regexp_item['duplicates_fields'].items():
                        # if to_field not present or None copy from from_field
                        if to_field not in artifact or artifact[to_field] is None:
                            artifact[to_field] = artifact[from_field]
                    artifact.update(regexp_item['base_dict'])
                    self.artifact.append(artifact)
                    return

            # default case: consider it html content
            # try to detect title/value splitting on <br/>
            artifact = {"content_type": "raw_html", }
            if "<br/>" in line:
                title, value = line.split("<br/>", 1)
                artifact["title"] = title
                artifact["value"] = value
            # or similar long lines if they contain a url
            elif "href" in line and "title" in line:
                def parse_url_line(line_data):
                    # Fall back to a real HTML parser to pull out the anchor's
                    # href/title attributes and its text content.
                    class TpLineParser(HTMLParser):
                        def handle_starttag(self, tag, attrs):
                            d = dict(attrs)
                            artifact["url"] = d['href']
                            artifact["title"] = d['title']

                        def handle_data(self, data):
                            artifact["value"] = data

                    p = TpLineParser()
                    p.feed(line_data)
                    p.close()

                # strip ^M returns on windows lines otherwise
                # handle_data will yield no data 'value'
                parse_url_line(line.replace('\r', ''))
            else:
                artifact["value"] = line
            self.artifact.append(artifact)
class ErrorParser(ParserBase):
    """A generic error detection sub-parser"""
    # Substrings whose presence anywhere in a (trimmed) line marks it as an error.
    IN_SEARCH_TERMS = (
        "TEST-UNEXPECTED-",
        "fatal error",
        "FATAL ERROR",
        "REFTEST ERROR",
        "PROCESS-CRASH",
        "Assertion failure:",
        "Assertion failed:",
        "###!!! ABORT:",
        "E/GeckoLinker",
        "SUMMARY: AddressSanitizer",
        "SUMMARY: LeakSanitizer",
        "Automation Error:",
        "command timed out:",
        "wget: unable ",
        "TEST-VALGRIND-ERROR",
        "[ FAILED ] ",
    )
    # Patterns anchored to the start of the line.
    RE_ERR_MATCH = re.compile((
        r"^error: TEST FAILED"
        r"|^g?make(?:\[\d+\])?: \*\*\*"
        r"|^Remote Device Error:"
        r"|^[A-Za-z.]+Error: "
        r"|^[A-Za-z.]*Exception: "
        r"|^remoteFailed:"
        r"|^rm: cannot "
        r"|^abort:"
        r"|^Output exceeded \d+ bytes"
        r"|^The web-page 'stop build' button was pressed"
        r"|.*\.js: line \d+, col \d+, Error -"
        r"|^\[taskcluster\] Error:"
        r"|^\[[\w._-]+:(?:error|exception)\]"
    ))
    # Patterns that may match anywhere in the line.
    RE_ERR_SEARCH = re.compile((
        r" error\(\d*\):"
        r"|:\d+: error:"
        r"| error R?C\d*:"
        r"|ERROR [45]\d\d:"
        r"|mozmake\.(?:exe|EXE)(?:\[\d+\])?: \*\*\*"
    ))
    # Exclusion patterns: lines matching these are never reported as errors,
    # even if they also match one of the error patterns above.
    RE_EXCLUDE_1_SEARCH = re.compile(r"TEST-(?:INFO|PASS) ")
    RE_EXCLUDE_2_SEARCH = re.compile(
        r"I[ /](Gecko|Robocop|TestRunner).*TEST-UNEXPECTED-"
        r"|^TimeoutException: "
        r"|^ImportError: No module named pygtk$"
    )
    # Mozharness-style "HH:MM:SS LEVEL - " error lines.
    RE_ERR_1_MATCH = re.compile(r"^\d+:\d+:\d+ +(?:ERROR|CRITICAL|FATAL) - ")
    # Looks for a leading value inside square brackets containing a "YYYY-"
    # year pattern but isn't a TaskCluster error indicator (like
    # ``taskcluster:error``.
    #
    # This matches the following:
    #     [task 2016-08-18T17:50:56.955523Z]
    #     [2016- task]
    #
    # But not:
    #     [taskcluster:error]
    #     [taskcluster:something 2016-]
    RE_TASKCLUSTER_NORMAL_PREFIX = re.compile(r"^\[(?!taskcluster:)[^\]]*20\d{2}-[^\]]+\]\s")
    # "HH:MM:SS LEVEL - " prefix stripped before matching, so mozharness
    # wrapping does not hide the underlying error text.
    RE_MOZHARNESS_PREFIX = re.compile(r"^\d+:\d+:\d+ +(?:DEBUG|INFO|WARNING) - +")

    def __init__(self):
        """A simple error detection sub-parser"""
        super(ErrorParser, self).__init__("errors")
        # Becomes True once a TaskCluster-internal log line has been seen.
        self.is_taskcluster = False

    def add(self, line, lineno):
        """Record *line* (with its line number) in the errors artifact."""
        self.artifact.append({
            "linenumber": lineno,
            "line": line.rstrip()
        })

    def parse_line(self, line, lineno):
        """Check a single line for an error. Keeps track of the linenumber"""
        # TaskCluster logs are a bit wonky.
        #
        # TaskCluster logs begin with output coming from TaskCluster itself,
        # before it has transitioned control of the task to the configured
        # process. These "internal" logs look like the following:
        #
        #   [taskcluster 2016-09-09 17:41:43.544Z] Worker Group: us-west-2b
        #
        # If an error occurs during this "setup" phase, TaskCluster may emit
        # lines beginning with ``[taskcluster:error]``.
        #
        # Once control has transitioned from TaskCluster to the configured
        # task process, lines can be whatever the configured process emits.
        # The popular ``run-task`` wrapper prefixes output to emulate
        # TaskCluster's "internal" logs. e.g.
        #
        #   [vcs 2016-09-09T17:45:02.842230Z] adding changesets
        #
        # This prefixing can confuse error parsing. So, we strip it.
        #
        # Because regular expression matching and string manipulation can be
        # expensive when performed on every line, we only strip the TaskCluster
        # log prefix if we know we're in a TaskCluster log.

        # First line of TaskCluster logs almost certainly has this.
        if line.startswith('[taskcluster '):
            self.is_taskcluster = True

        # For performance reasons, only do this if we have identified as
        # a TC task.
        if self.is_taskcluster:
            line = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", line)

        if self.is_error_line(line):
            self.add(line, lineno)

    def is_error_line(self, line):
        """Return True if *line* should be reported as an error line."""
        if self.RE_EXCLUDE_1_SEARCH.search(line):
            return False

        if self.RE_ERR_1_MATCH.match(line):
            return True

        # Remove mozharness prefixes prior to matching
        trimline = re.sub(self.RE_MOZHARNESS_PREFIX, "", line).rstrip()

        if self.RE_EXCLUDE_2_SEARCH.search(trimline):
            return False

        return bool(any(term for term in self.IN_SEARCH_TERMS if term in trimline) or
                    self.RE_ERR_MATCH.match(trimline) or self.RE_ERR_SEARCH.search(trimline))
class PerformanceParser(ParserBase):
    """A sub-parser that extracts PERFHERDER_DATA JSON blobs from a log."""
    # Using $ in the regex as an end of line bounds causes the
    # regex to fail on windows logs. This is likely due to the
    # ^M character representation of the windows end of line.
    RE_PERFORMANCE = re.compile(r'.*?PERFHERDER_DATA:\s+({.*})')
    # Load the validation schema once, at class-definition time. A context
    # manager is used so the file handle is closed deterministically instead
    # of being leaked (the previous `json.load(open(...))` relied on GC).
    with open('schemas/performance-artifact.json') as _schema_file:
        PERF_SCHEMA = json.load(_schema_file)
    del _schema_file

    def __init__(self):
        super(PerformanceParser, self).__init__("performance_data")

    def parse_line(self, line, lineno):
        """Validate and collect any Perfherder datum found on this line."""
        match = self.RE_PERFORMANCE.match(line)
        if not match:
            return
        try:
            # Note: named `datum` rather than `dict` so the builtin is not shadowed.
            datum = json.loads(match.group(1))
            jsonschema.validate(datum, self.PERF_SCHEMA)
            self.artifact.append(datum)
        except ValueError:
            logger.warning("Unable to parse Perfherder data from line: %s",
                           line)
        except jsonschema.ValidationError as e:
            logger.warning("Perfherder line '%s' does not comply with "
                           "json schema: %s", line, e.message)
        # Don't mark the parser as complete, in case there are multiple performance artifacts.
| mpl-2.0 |
karlwithak/nowradio | nowradio/stationInfoUpdater.py | 1 | 1896 | import requests
import ourUtils
from dbManager import Queries, get_connection
# This program goes through the list of stations in the db and updates information such as
# current listeners, max listeners, peak listeners, status(up or not)
def worker(id_url_list, connection):
    """Poll each station's Shoutcast ``7.html`` status page and update the db.

    id_url_list -- sequence of (station_id, ip) pairs to check.
    connection  -- open DB connection; a cursor is created and closed here,
                   while committing is left to the caller.

    Stations that cannot be reached, or that return an unusable response,
    are marked as down.
    """
    cur = connection.cursor()
    for station_id, ip in id_url_list:
        url = "http://" + ip + '/7.html'
        try:
            response = requests.get(url, headers=ourUtils.request_header, timeout=2)
        except requests.ConnectionError:
            print("connection error: " + url)
            cur.execute(Queries.set_station_down, (station_id,))
        except requests.Timeout:
            print("timeout error : " + url)
            cur.execute(Queries.set_station_down, (station_id,))
        except Exception:
            print("unknown error : " + url)
            cur.execute(Queries.set_station_down, (station_id,))
        else:
            # A usable 7.html body is a single short comma-separated line:
            # listeners,status,peak,max,unique,bitrate,songtitle
            if response.status_code in (200, 304) \
                    and response.text.count(",") >= 6 \
                    and len(response.text) < 2048:
                info = response.text.split(",")
                data = {
                    # BUG FIX: info[1] is the string "0" or "1"; bool() on a
                    # non-empty string is always True, so every reachable
                    # station used to be recorded as up. Compare explicitly.
                    'is_up': info[1] == "1",
                    'peak': info[2],
                    'max': info[3],
                    'active': info[4],
                    'id': station_id
                }
                cur.execute(Queries.update_station_by_id, data)
            else:
                print("bad response: " + url)
                cur.execute(Queries.set_station_down, (station_id,))
    cur.close()
def main():
    """Fetch every known station ip and refresh each station's status."""
    connection = get_connection()
    if connection is None:
        exit("could not connect to db")
    # Fan the (id, ip) rows out across worker threads, then persist.
    stations = ourUtils.db_quick_query(connection, Queries.get_all_ips)
    ourUtils.multi_thread_runner(stations, worker, connection)
    connection.commit()
    connection.close()
if __name__ == '__main__':
main()
| mit |
chouseknecht/ansible | test/units/modules/storage/netapp/test_netapp_e_volume.py | 21 | 59265 | # coding=utf-8
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from unittest import mock
except ImportError:
import mock
from ansible.module_utils.netapp import NetAppESeriesModule
from ansible.modules.storage.netapp.netapp_e_volume import NetAppESeriesVolume
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
class NetAppESeriesVolumeTest(ModuleTestCase):
REQUIRED_PARAMS = {"api_username": "username",
"api_password": "password",
"api_url": "http://localhost/devmgr/v2",
"ssid": "1",
"validate_certs": "no"}
THIN_VOLUME_RESPONSE = [{"capacity": "1288490188800",
"volumeRef": "3A000000600A098000A4B28D000010475C405428",
"status": "optimal",
"protectionType": "type1Protection",
"maxVirtualCapacity": "281474976710656",
"initialProvisionedCapacity": "4294967296",
"currentProvisionedCapacity": "4294967296",
"provisionedCapacityQuota": "1305670057984",
"growthAlertThreshold": 85,
"expansionPolicy": "automatic",
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000001000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "volume"}],
"dataAssurance": True,
"segmentSize": 131072,
"diskPool": True,
"listOfMappings": [],
"mapped": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 0},
"name": "thin_volume",
"id": "3A000000600A098000A4B28D000010475C405428"}]
VOLUME_GET_RESPONSE = [{"offline": False,
"raidLevel": "raid6",
"capacity": "214748364800",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F095C2F7F31",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Clare"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "214748364800",
"name": "Matthew",
"id": "02000000600A098000A4B9D100000F095C2F7F31"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B28D00000FBE5C2F7F26",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Samantha"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Samantha",
"id": "02000000600A098000A4B28D00000FBE5C2F7F26"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F0B5C2F7F40",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"volumeGroupRef": "04000000600A098000A4B9D100000F085C2F7F26",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Micah"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Micah",
"id": "02000000600A098000A4B9D100000F0B5C2F7F40"}]
STORAGE_POOL_GET_RESPONSE = [{"offline": False,
"raidLevel": "raidDiskPool",
"volumeGroupRef": "04000000600A",
"securityType": "capable",
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "diskPool",
"diskPoolData": {"reconstructionReservedDriveCount": 1,
"reconstructionReservedAmt": "296889614336",
"reconstructionReservedDriveCountCurrent": 1,
"poolUtilizationWarningThreshold": 0,
"poolUtilizationCriticalThreshold": 85,
"poolUtilizationState": "utilizationOptimal",
"unusableCapacity": "0",
"degradedReconstructPriority": "high",
"criticalReconstructPriority": "highest",
"backgroundOperationPriority": "low",
"allocGranularity": "4294967296"}},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "863288426496",
"totalRaidedSpace": "2276332666880",
"raidStatus": "optimal",
"freeSpace": "1413044240384",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": True,
"id": "04000000600A098000A4B9D100000F085C2F7F26",
"name": "employee_data_storage_pool"},
{"offline": False,
"raidLevel": "raid1",
"volumeGroupRef": "04000000600A098000A4B28D00000FBD5C2F7F19",
"state": "complete",
"securityType": "capable",
"drawerLossProtection": False,
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "unknown", "diskPoolData": None},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "322122547200",
"totalRaidedSpace": "598926258176",
"raidStatus": "optimal",
"freeSpace": "276803710976",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": False,
"id": "04000000600A098000A4B28D00000FBD5C2F7F19",
"name": "database_storage_pool"}]
GET_LONG_LIVED_OPERATION_RESPONSE = [
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]}]
WORKLOAD_GET_RESPONSE = [{"id": "4200000001000000000000000000000000000000", "name": "general_workload_1",
"workloadAttributes": [{"key": "profileId", "value": "Other_1"}]},
{"id": "4200000002000000000000000000000000000000", "name": "employee_data",
"workloadAttributes": [{"key": "use", "value": "EmployeeData"},
{"key": "location", "value": "ICT"},
{"key": "private", "value": "public"},
{"key": "profileId", "value": "ansible_workload_1"}]},
{"id": "4200000003000000000000000000000000000000", "name": "customer_database",
"workloadAttributes": [{"key": "use", "value": "customer_information"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_2"}]},
{"id": "4200000004000000000000000000000000000000", "name": "product_database",
"workloadAttributes": [{"key": "use", "value": "production_information"},
{"key": "security", "value": "private"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_4"}]}]
REQUEST_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.NetAppESeriesVolume.request"
GET_VOLUME_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.NetAppESeriesVolume.get_volume"
SLEEP_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.sleep"
def _set_args(self, args=None):
module_args = self.REQUIRED_PARAMS.copy()
if args is not None:
module_args.update(args)
set_module_args(module_args)
    def test_module_arguments_pass(self):
        """Ensure valid arguments successfully create a class instance."""
        arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
                     "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
                     "thin_volume_growth_alert_threshold": 10},
                    {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
                     "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1024,
                     "thin_volume_growth_alert_threshold": 99},
                    {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
                     "thin_provision": True, "thin_volume_repo_size": 64},
                    {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "kb",
                     "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 67108864}]

        # validate size normalization
        for arg_set in arg_sets:
            self._set_args(arg_set)
            volume_object = NetAppESeriesVolume()
            self.assertEqual(volume_object.size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
            self.assertEqual(volume_object.thin_volume_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["thin_volume_repo_size"]))
            self.assertEqual(volume_object.thin_volume_expansion_policy, "automatic")
            # When no max repo size is given, it defaults to the volume size.
            if "thin_volume_max_repo_size" not in arg_set.keys():
                self.assertEqual(volume_object.thin_volume_max_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
            else:
                self.assertEqual(volume_object.thin_volume_max_repo_size_b,
                                 volume_object.convert_to_aligned_bytes(arg_set["thin_volume_max_repo_size"]))

        # validate metadata form
        self._set_args(
            {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10, "workload_name": "workload1",
             "metadata": {"availability": "public", "security": "low"}})
        volume_object = NetAppESeriesVolume()
        for entry in volume_object.metadata:
            self.assertTrue(entry in [{'value': 'low', 'key': 'security'}, {'value': 'public', 'key': 'availability'}])
def test_module_arguments_fail(self):
"""Ensure invalid arguments values do not create a class instance."""
arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 260},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 10},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 9},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 100}]
for arg_set in arg_sets:
with self.assertRaises(AnsibleFailJson):
self._set_args(arg_set)
print(arg_set)
volume_object = NetAppESeriesVolume()
def test_get_volume_pass(self):
"""Evaluate the get_volume method."""
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(),
[entry for entry in self.VOLUME_GET_RESPONSE if entry["name"] == "Matthew"][0])
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "NotAVolume", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(), {})
def test_get_volume_fail(self):
"""Evaluate the get_volume exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thick volumes."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thin volumes."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.VOLUME_GET_RESPONSE), Exception()]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
def tests_wait_for_volume_availability_pass(self):
"""Ensure wait_for_volume_availability completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.GET_VOLUME_FUNC, side_effect=[False, False, True]):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_availability_fail(self):
"""Ensure wait_for_volume_availability throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.get_volume = lambda: False
with self.assertRaisesRegexp(AnsibleFailJson, "Timed out waiting for the volume"):
with mock.patch(self.SLEEP_FUNC, return_value=None):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_action_pass(self):
"""Ensure wait_for_volume_action completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315D494C6F",
"storageVolumeRef": "02000000600A098000A4B9D1000037315DXXXXXX"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
def tests_wait_for_volume_action_fail(self):
"""Ensure wait_for_volume_action throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get volume expansion progress."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.wait_for_volume_action()
with self.assertRaisesRegexp(AnsibleFailJson, "Expansion action failed to complete."):
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0])):
volume_object.wait_for_volume_action(timeout=300)
def test_get_storage_pool_pass(self):
"""Evaluate the get_storage_pool method."""
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.STORAGE_POOL_GET_RESPONSE)):
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool",
"size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), [entry for entry in self.STORAGE_POOL_GET_RESPONSE if
entry["name"] == "employee_data_storage_pool"][0])
self._set_args(
{"state": "present", "name": "NewVolume", "storage_pool_name": "NotAStoragePool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), {})
def test_get_storage_pool_fail(self):
"""Evaluate the get_storage_pool exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of storage pools."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_storage_pool()
def test_check_storage_pool_sufficiency_pass(self):
"""Ensure passing logic."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
if entry["name"] == "employee_data_storage_pool"][0]
volume_object.check_storage_pool_sufficiency()
    def test_check_storage_pool_sufficiency_fail(self):
        """Validate exceptions are thrown for insufficient storage pool resources."""
        self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
                        "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
                        "thin_volume_growth_alert_threshold": 10})
        volume_object = NetAppESeriesVolume()
        # No pool_detail has been set, so the requested pool is reported missing.
        with self.assertRaisesRegexp(AnsibleFailJson, "Requested storage pool"):
            volume_object.check_storage_pool_sufficiency()
        # Thin provisioning on a non-disk-pool (raid group) pool must be rejected.
        with self.assertRaisesRegexp(AnsibleFailJson,
                                     "Thin provisioned volumes can only be created on raid disk pools."):
            volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
                                         if entry["name"] == "database_storage_pool"][0]
            volume_object.volume_detail = {}
            volume_object.check_storage_pool_sufficiency()
        # Data assurance requested but the pool is not DA-capable; the second, DA-capable
        # pool_detail inside the same block would pass and is never reached once the first
        # check raises.
        with self.assertRaisesRegexp(AnsibleFailJson, "requires the storage pool to be DA-compatible."):
            volume_object.pool_detail = {"diskPool": True,
                                         "protectionInformationCapabilities": {"protectionType": "type0Protection",
                                                                               "protectionInformationCapable": False}}
            volume_object.volume_detail = {}
            volume_object.data_assurance_enabled = True
            volume_object.check_storage_pool_sufficiency()
            volume_object.pool_detail = {"diskPool": True,
                                         "protectionInformationCapabilities": {"protectionType": "type2Protection",
                                                                               "protectionInformationCapable": True}}
            volume_object.check_storage_pool_sufficiency()
        self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
                        "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        # Thick volume request larger than the pool's remaining free space.
        with self.assertRaisesRegexp(AnsibleFailJson,
                                     "Not enough storage pool free space available for the volume's needs."):
            volume_object.pool_detail = {"freeSpace": 10, "diskPool": True,
                                         "protectionInformationCapabilities": {"protectionType": "type2Protection",
                                                                               "protectionInformationCapable": True}}
            volume_object.volume_detail = {"totalSizeInBytes": 100}
            volume_object.data_assurance_enabled = True
            volume_object.size_b = 1
            volume_object.check_storage_pool_sufficiency()
    def test_update_workload_tags_pass(self):
        """Validate updating workload tags."""
        # Each entry pairs module arguments with the expected change flag returned by
        # update_workload_tags: False when the workload/metadata already matches the
        # canned WORKLOAD_GET_RESPONSE, True when a tag must be created or updated.
        test_sets = [[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100}, False],
                     [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                       "workload_name": "employee_data"}, False],
                     [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                       "workload_name": "customer_database",
                       "metadata": {"use": "customer_information", "location": "global"}}, False],
                     [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                       "workload_name": "customer_database",
                       "metadata": {"use": "customer_information"}}, True],
                     [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                       "workload_name": "customer_database",
                       "metadata": {"use": "customer_information", "location": "local"}}, True],
                     [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                       "workload_name": "customer_database",
                       "metadata": {"use": "customer_information", "location": "global", "importance": "no"}}, True],
                     [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                       "workload_name": "newWorkload",
                       "metadata": {"for_testing": "yes"}}, True],
                     [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                       "workload_name": "newWorkload"}, True]]
        for test in test_sets:
            self._set_args(test[0])
            volume_object = NetAppESeriesVolume()
            # First response lists the existing workload tags; the second answers any
            # tag-creation request with a new id.
            with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), (200, {"id": 1})]):
                self.assertEqual(volume_object.update_workload_tags(), test[1])
def test_update_workload_tags_fail(self):
"""Validate updating workload tags fails appropriately."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data"})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage array workload tags."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data", "metadata": {"key": "not-use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data2", "metadata": {"key": "use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
    def test_get_volume_property_changes_pass(self):
        """Verify correct dictionary is returned"""
        # no property changes
        # Thick volume whose existing cache settings already match the request.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
             "read_cache_enable": True, "write_cache_enable": True,
             "read_ahead_enable": True, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"metadata": [],
                                       "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
                                                         "readAheadMultiplier": 1}, "flashCached": True,
                                       "segmentSize": str(128 * 1024)}
        self.assertEqual(volume_object.get_volume_property_changes(), dict())
        # Thin volume whose growth alert threshold and expansion policy already match.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
             "read_cache_enable": True, "write_cache_enable": True,
             "read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
             "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"metadata": [],
                                       "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
                                                         "readAheadMultiplier": 1},
                                       "flashCached": True, "growthAlertThreshold": "90",
                                       "expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
        self.assertEqual(volume_object.get_volume_property_changes(), dict())
        # property changes
        # Existing readCacheEnable is False while True is requested.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
             "read_cache_enable": True, "write_cache_enable": True,
             "read_ahead_enable": True, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"metadata": [],
                                       "cacheSettings": {"cwob": False, "readCacheEnable": False, "writeCacheEnable": True,
                                                         "readAheadMultiplier": 1}, "flashCached": True,
                                       "segmentSize": str(128 * 1024)}
        self.assertEqual(volume_object.get_volume_property_changes(),
                         {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
                          'flashCache': True})
        # Existing writeCacheEnable is False while True is requested.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
             "read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": False,
             "read_ahead_enable": True, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"metadata": [],
                                       "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": False,
                                                         "readAheadMultiplier": 1}, "flashCached": True,
                                       "segmentSize": str(128 * 1024)}
        self.assertEqual(volume_object.get_volume_property_changes(),
                         {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
                          'flashCache': True})
        # cache_without_batteries requested and the volume is not yet flash cached.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
             "read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
             "read_ahead_enable": True, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"metadata": [],
                                       "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
                                                         "readAheadMultiplier": 1}, "flashCached": False,
                                       "segmentSize": str(128 * 1024)}
        self.assertEqual(volume_object.get_volume_property_changes(),
                         {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True, "cacheWithoutBatteries": True},
                          'flashCache': True})
        # read_ahead_enable disabled while the existing readAheadMultiplier is nonzero.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
             "read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
             "read_ahead_enable": False, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"metadata": [],
                                       "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
                                                         "readAheadMultiplier": 1}, "flashCached": False,
                                       "segmentSize": str(128 * 1024)}
        self.assertEqual(volume_object.get_volume_property_changes(), {"metaTags": [],
                                                                       'cacheSettings': {'readCacheEnable': True,
                                                                                         'writeCacheEnable': True,
                                                                                         'readAheadEnable': False,
                                                                                         "cacheWithoutBatteries": True},
                                                                       'flashCache': True})
        # Thin volume whose growth alert threshold differs (95 existing vs 90 requested).
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
             "read_cache_enable": True, "write_cache_enable": True,
             "read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
             "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"metadata": [],
                                       "cacheSettings": {"cwob": True, "readCacheEnable": True, "writeCacheEnable": True,
                                                         "readAheadMultiplier": 1},
                                       "flashCached": True, "growthAlertThreshold": "95",
                                       "expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
        self.assertEqual(volume_object.get_volume_property_changes(),
                         {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
                          'growthAlertThreshold': 90, 'flashCache': True})
def test_get_volume_property_changes_fail(self):
"""Verify correct exception is thrown"""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True, "readAheadMultiplier": 1},
"flashCached": True, "segmentSize": str(512 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "Existing volume segment size is"):
volume_object.get_volume_property_changes()
    def test_get_expand_volume_changes_pass(self):
        """Verify expansion changes."""
        # thick volumes
        # A 50 GiB thick volume expanded to 100 GiB yields an expansionSize request.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": False}
        self.assertEqual(volume_object.get_expand_volume_changes(),
                         {"sizeUnit": "bytes", "expansionSize": 100 * 1024 * 1024 * 1024})
        # thin volumes
        # Automatic policy, virtual size grows from 50 GiB to 100 GiB.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
             "thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
             "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": True,
                                       "expansionPolicy": "automatic",
                                       "provisionedCapacityQuota": str(1000 * 1024 * 1024 * 1024)}
        self.assertEqual(volume_object.get_expand_volume_changes(),
                         {"sizeUnit": "bytes", "newVirtualSize": 100 * 1024 * 1024 * 1024})
        # Automatic policy, virtual size matches but the repository quota must grow.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
             "thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
             "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
                                       "expansionPolicy": "automatic",
                                       "provisionedCapacityQuota": str(500 * 1024 * 1024 * 1024)}
        self.assertEqual(volume_object.get_expand_volume_changes(),
                         {"sizeUnit": "bytes", "newRepositorySize": 1000 * 1024 * 1024 * 1024})
        # Manual policy, repository grows from 500 GiB to the requested 504 GiB.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
             "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 504, "thin_volume_max_repo_size": 1000,
             "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
                                       "expansionPolicy": "manual",
                                       "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
        self.assertEqual(volume_object.get_expand_volume_changes(),
                         {"sizeUnit": "bytes", "newRepositorySize": 504 * 1024 * 1024 * 1024})
        # Manual policy, repository grows from 500 GiB to the requested 756 GiB.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
             "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 756, "thin_volume_max_repo_size": 1000,
             "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
                                       "expansionPolicy": "manual",
                                       "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
        self.assertEqual(volume_object.get_expand_volume_changes(),
                         {"sizeUnit": "bytes", "newRepositorySize": 756 * 1024 * 1024 * 1024})
    def test_get_expand_volume_changes_fail(self):
        """Verify exceptions are thrown."""
        # Shrinking a volume (1000 GiB existing vs 100 GiB requested) is rejected.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(1000 * 1024 * 1024 * 1024)}
        with self.assertRaisesRegexp(AnsibleFailJson, "Reducing the size of volumes is not permitted."):
            volume_object.get_expand_volume_changes()
        # Manual-policy repository increase below the allowed increment (502 GiB requested
        # from a 500 GiB repository) is rejected.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
             "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 502, "thin_volume_max_repo_size": 1000,
             "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
                                       "expansionPolicy": "manual",
                                       "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
        with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
            volume_object.get_expand_volume_changes()
        # Manual-policy repository increase above the allowed increment (760 GiB requested)
        # is likewise rejected.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
             "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
             "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
                                       "expansionPolicy": "manual",
                                       "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
        with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
            volume_object.get_expand_volume_changes()
def test_create_volume_pass(self):
"""Verify volume creation."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.create_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.create_volume()
def test_create_volume_fail(self):
"""Verify exceptions thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.create_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.create_volume()
    def test_update_volume_properties_pass(self):
        """verify property update."""
        # Thick volume with pending property changes: a request is issued and True returned.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.pool_detail = {"id": "12345"}
        volume_object.wait_for_volume_availability = lambda: None
        volume_object.get_volume = lambda: {"id": "12345'"}
        volume_object.get_volume_property_changes = lambda: {
            'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
            'flashCached': True}
        volume_object.workload_id = "4200000001000000000000000000000000000000"
        with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
            self.assertTrue(volume_object.update_volume_properties())
        # Thin volume with the same pending changes: also returns True.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
             "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
             "thin_volume_growth_alert_threshold": 90})
        volume_object = NetAppESeriesVolume()
        volume_object.pool_detail = {"id": "12345"}
        volume_object.wait_for_volume_availability = lambda: None
        volume_object.get_volume = lambda: {"id": "12345'"}
        volume_object.get_volume_property_changes = lambda: {
            'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
            'flashCached': True}
        volume_object.workload_id = "4200000001000000000000000000000000000000"
        with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
            self.assertTrue(volume_object.update_volume_properties())
        # No pending property changes: no request is needed and False is returned.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        volume_object.pool_detail = {"metadata": [{"key": "workloadId", "value": "12345"}]}
        volume_object.wait_for_volume_availability = lambda: None
        volume_object.get_volume = lambda: {"id": "12345'"}
        volume_object.get_volume_property_changes = lambda: {}
        volume_object.workload_id = "4200000001000000000000000000000000000000"
        self.assertFalse(volume_object.update_volume_properties())
def test_update_volume_properties_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update volume properties."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self.assertTrue(volume_object.update_volume_properties())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update thin volume properties."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self.assertTrue(volume_object.update_volume_properties())
def test_expand_volume_pass(self):
"""Verify volume expansion."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.expand_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.expand_volume()
def test_expand_volume_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": False}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.expand_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.expand_volume()
def test_delete_volume_pass(self):
"""Verify volume deletion."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.delete_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.delete_volume()
    def test_delete_volume_fail(self):
        """Verify delete_volume raises AnsibleFailJson when the API request errors.

        Exercises the distinct failure messages for thick ("Failed to delete
        volume.") and thin ("Failed to delete thin volume.") volumes.
        """
        # Thick volume failure path.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
        volume_object = NetAppESeriesVolume()
        with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete volume."):
            with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
                volume_object.delete_volume()
        # Thin volume failure path.
        self._set_args(
            {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
        volume_object = NetAppESeriesVolume()
        with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete thin volume."):
            with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
                volume_object.delete_volume()
| gpl-3.0 |
wfxiang08/django185 | django/core/mail/backends/smtp.py | 477 | 5239 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from django.core.mail.utils import DNS_NAME
class EmailBackend(BaseEmailBackend):
    """
    A wrapper that manages the SMTP network connection.

    Connection parameters fall back to the corresponding EMAIL_* Django
    settings when not given explicitly.  The backend is reentrant-safe via
    an RLock around send_messages().
    """
    def __init__(self, host=None, port=None, username=None, password=None,
                 use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
                 ssl_keyfile=None, ssl_certfile=None,
                 **kwargs):
        # Explicit arguments win; ``None`` (not merely falsy) means
        # "use the setting" for username/password/TLS/SSL/timeout so that
        # empty strings and False remain valid explicit values.
        super(EmailBackend, self).__init__(fail_silently=fail_silently)
        self.host = host or settings.EMAIL_HOST
        self.port = port or settings.EMAIL_PORT
        self.username = settings.EMAIL_HOST_USER if username is None else username
        self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
        self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
        self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
        self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout
        self.ssl_keyfile = settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile
        self.ssl_certfile = settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile
        # STARTTLS (use_tls) upgrades a plain connection; use_ssl opens an
        # SSL socket from the start -- requesting both is contradictory.
        if self.use_ssl and self.use_tls:
            raise ValueError(
                "EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
                "one of those settings to True.")
        self.connection = None
        self._lock = threading.RLock()
    def open(self):
        """
        Ensures we have a connection to the email server. Returns whether or
        not a new connection was required (True or False).
        """
        if self.connection:
            # Nothing to do if the connection is already open.
            return False
        connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
        # If local_hostname is not specified, socket.getfqdn() gets used.
        # For performance, we use the cached FQDN for local_hostname.
        connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
        if self.timeout is not None:
            connection_params['timeout'] = self.timeout
        if self.use_ssl:
            connection_params.update({
                'keyfile': self.ssl_keyfile,
                'certfile': self.ssl_certfile,
            })
        try:
            self.connection = connection_class(self.host, self.port, **connection_params)
            # TLS/SSL are mutually exclusive, so only attempt TLS over
            # non-secure connections.
            if not self.use_ssl and self.use_tls:
                self.connection.ehlo()
                self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile)
                self.connection.ehlo()
            if self.username and self.password:
                self.connection.login(self.username, self.password)
            return True
        except smtplib.SMTPException:
            # With fail_silently the caller gets None (no new connection)
            # and self.connection stays unset.
            if not self.fail_silently:
                raise
    def close(self):
        """Closes the connection to the email server."""
        if self.connection is None:
            return
        try:
            try:
                self.connection.quit()
            except (ssl.SSLError, smtplib.SMTPServerDisconnected):
                # This happens when calling quit() on a TLS connection
                # sometimes, or when the connection was already disconnected
                # by the server.
                self.connection.close()
            except smtplib.SMTPException:
                if self.fail_silently:
                    return
                raise
        finally:
            # Always drop the reference so a later open() reconnects.
            self.connection = None
    def send_messages(self, email_messages):
        """
        Sends one or more EmailMessage objects and returns the number of email
        messages sent.
        """
        if not email_messages:
            return
        with self._lock:
            new_conn_created = self.open()
            if not self.connection:
                # We failed silently on open().
                # Trying to send would be pointless.
                return
            num_sent = 0
            for message in email_messages:
                sent = self._send(message)
                if sent:
                    num_sent += 1
            if new_conn_created:
                # Only close connections we opened ourselves; a caller who
                # opened the connection explicitly keeps ownership of it.
                self.close()
        return num_sent
    def _send(self, email_message):
        """A helper method that does the actual sending."""
        if not email_message.recipients():
            return False
        from_email = sanitize_address(email_message.from_email, email_message.encoding)
        recipients = [sanitize_address(addr, email_message.encoding)
                      for addr in email_message.recipients()]
        message = email_message.message()
        try:
            # SMTP requires CRLF line endings on the wire (RFC 5321).
            self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n'))
        except smtplib.SMTPException:
            if not self.fail_silently:
                raise
            return False
        return True
| bsd-3-clause |
oihane/server-tools | auth_dynamic_groups/model/res_groups.py | 23 | 2540 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
from openerp.tools.safe_eval import safe_eval
from openerp import _
class res_groups(models.Model):
    """Extend res.groups with dynamic membership evaluated at login time.

    A group flagged ``is_dynamic`` carries a python expression
    (``dynamic_group_condition``) that is evaluated with the logging-in
    user available as ``user``; the result decides membership.
    """
    _inherit = 'res.groups'
    # Marks this group as dynamically populated.
    is_dynamic = fields.Boolean('Dynamic')
    # Python expression evaluated per user; see help text below.
    dynamic_group_condition = fields.Text(
        'Condition', help='The condition to be met for a user to be a '
        'member of this group. It is evaluated as python code at login '
        'time, you get `user` passed as a browse record')
    @api.multi
    def eval_dynamic_group_condition(self, uid=None):
        """Return True if the user satisfies ALL conditions in this recordset.

        :param uid: optional user id; defaults to the current env user.
        """
        user = self.env['res.users'].browse([uid]) if uid else self.env.user
        # safe_eval restricts the sandbox: only `user` (sudo'ed so the
        # condition can read fields regardless of the user's own rights)
        # plus a few builtins are exposed.  Missing condition -> False.
        result = all(
            self.mapped(
                lambda this: safe_eval(
                    this.dynamic_group_condition or 'False',
                    {
                        'user': user.sudo(),
                        'any': any,
                        'all': all,
                        'filter': filter,
                    })))
        return result
    @api.multi
    @api.constrains('dynamic_group_condition')
    def _check_dynamic_group_condition(self):
        """Constraint: reject conditions that do not evaluate cleanly."""
        try:
            # Evaluate once against the current user just to smoke-test
            # the expression; only dynamic groups are checked.
            self.filtered('is_dynamic').eval_dynamic_group_condition()
        except (NameError, SyntaxError, TypeError):
            raise exceptions.ValidationError(
                _('The condition doesn\'t evaluate correctly!'))
    @api.multi
    def action_evaluate(self):
        """Re-evaluate dynamic group membership for every user in the DB."""
        res_users = self.env['res.users']
        for user in res_users.search([]):
            res_users.update_dynamic_groups(user.id, self.env.cr.dbname)
| agpl-3.0 |
ArcherSys/ArcherSys | Lib/site-packages/pyasn1/type/tag.py | 162 | 4499 | # ASN.1 types tags
from operator import getitem
from pyasn1 import error
# ASN.1 tag class bits (high two bits of the BER identifier octet, X.690).
tagClassUniversal = 0x00
tagClassApplication = 0x40
tagClassContext = 0x80
tagClassPrivate = 0xC0
# Encoding form bit: primitive ("simple") vs. constructed.
tagFormatSimple = 0x00
tagFormatConstructed = 0x20
# Tagging categories used by the higher-level type machinery.
tagCategoryImplicit = 0x01
tagCategoryExplicit = 0x02
tagCategoryUntagged = 0x04
class Tag:
    """An immutable ASN.1 tag: a (tagClass, tagFormat, tagId) triple.

    Equality, ordering and hashing are based solely on the public ``uniq``
    pair ``(tagClass, tagId)`` -- the format bit does not participate in
    comparisons.
    """
    def __init__(self, tagClass, tagFormat, tagId):
        if tagId < 0:
            raise error.PyAsn1Error(
                'Negative tag ID (%s) not allowed' % (tagId,)
                )
        self.__tag = (tagClass, tagFormat, tagId)
        self.uniq = (tagClass, tagId)
        self.__hashedUniqTag = hash(self.uniq)
    def __str__(self):
        return '[%s:%s:%s]' % self.__tag
    def __repr__(self):
        return '%s(tagClass=%s, tagFormat=%s, tagId=%s)' % (
            (self.__class__.__name__,) + self.__tag
            )
    # These is really a hotspot -- expose public "uniq" attribute to save on
    # function calls
    def __eq__(self, other): return self.uniq == other.uniq
    def __ne__(self, other): return self.uniq != other.uniq
    def __lt__(self, other): return self.uniq < other.uniq
    def __le__(self, other): return self.uniq <= other.uniq
    def __gt__(self, other): return self.uniq > other.uniq
    def __ge__(self, other): return self.uniq >= other.uniq
    def __hash__(self): return self.__hashedUniqTag
    def __getitem__(self, idx): return self.__tag[idx]
    def __and__(self, otherTag):
        """Return a new Tag whose components are AND-ed pairwise with otherTag's."""
        (tagClass, tagFormat, tagId) = otherTag
        # Bug fix: AND each tuple component with its counterpart, mirroring
        # __or__ below.  Previously the whole tuple was AND-ed with each
        # integer (``self.__tag & tagClass``), which raises TypeError.
        return self.__class__(
            self.__tag[0] & tagClass,
            self.__tag[1] & tagFormat,
            self.__tag[2] & tagId
            )
    def __or__(self, otherTag):
        (tagClass, tagFormat, tagId) = otherTag
        return self.__class__(
            self.__tag[0]|tagClass,
            self.__tag[1]|tagFormat,
            self.__tag[2]|tagId
            )
    def asTuple(self): return self.__tag  # __getitem__() is slow
class TagSet:
    """An ordered collection of Tag objects applied to an ASN.1 type.

    ``baseTag`` is the type's own tag; ``superTags`` is the ordered chain of
    tags produced by implicit/explicit tagging.  Like Tag, comparisons and
    hashing are driven by a flattened ``uniq`` tuple of the super tags.
    """
    def __init__(self, baseTag=(), *superTags):
        self.__baseTag = baseTag
        self.__superTags = superTags
        # Cache the hash and the flattened uniq tuple up front -- these
        # are consulted on every comparison.
        self.__hashedSuperTags = hash(superTags)
        _uniq = ()
        for t in superTags:
            _uniq = _uniq + t.uniq
        self.uniq = _uniq
        self.__lenOfSuperTags = len(superTags)
    def __str__(self):
        return self.__superTags and '+'.join([str(x) for x in self.__superTags]) or '[untagged]'
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            '(), ' + ', '.join([repr(x) for x in self.__superTags])
            )
    def __add__(self, superTag):
        # Append a tag, keeping the same base tag.
        return self.__class__(
            self.__baseTag, *self.__superTags + (superTag,)
            )
    def __radd__(self, superTag):
        # Prepend a tag, keeping the same base tag.
        return self.__class__(
            self.__baseTag, *(superTag,) + self.__superTags
            )
    def tagExplicitly(self, superTag):
        """Return a new TagSet with *superTag* appended (EXPLICIT tagging).

        The added tag is forced to constructed form, as explicit tagging
        always wraps the inner encoding.
        """
        tagClass, tagFormat, tagId = superTag
        if tagClass == tagClassUniversal:
            raise error.PyAsn1Error(
                'Can\'t tag with UNIVERSAL-class tag'
                )
        if tagFormat != tagFormatConstructed:
            superTag = Tag(tagClass, tagFormatConstructed, tagId)
        return self + superTag
    def tagImplicitly(self, superTag):
        """Return a new TagSet with the outermost tag replaced (IMPLICIT tagging).

        The replacement keeps the format of the tag it supersedes.
        """
        tagClass, tagFormat, tagId = superTag
        if self.__superTags:
            superTag = Tag(tagClass, self.__superTags[-1][1], tagId)
        return self[:-1] + superTag
    def getBaseTag(self): return self.__baseTag
    def __getitem__(self, idx):
        # Slicing yields a new TagSet (same base tag); an int yields a Tag.
        if isinstance(idx, slice):
            return self.__class__(
                self.__baseTag, *getitem(self.__superTags, idx)
                )
        return self.__superTags[idx]
    def __eq__(self, other): return self.uniq == other.uniq
    def __ne__(self, other): return self.uniq != other.uniq
    def __lt__(self, other): return self.uniq < other.uniq
    def __le__(self, other): return self.uniq <= other.uniq
    def __gt__(self, other): return self.uniq > other.uniq
    def __ge__(self, other): return self.uniq >= other.uniq
    def __hash__(self): return self.__hashedSuperTags
    def __len__(self): return self.__lenOfSuperTags
    def isSuperTagSetOf(self, tagSet):
        """Return a true value if *tagSet* ends with all of our super tags.

        Compared right-to-left; returns None (falsy) on mismatch, 1 on match.
        """
        if len(tagSet) < self.__lenOfSuperTags:
            return
        idx = self.__lenOfSuperTags - 1
        while idx >= 0:
            if self.__superTags[idx] != tagSet[idx]:
                return
            idx = idx - 1
        return 1
def initTagSet(tag): return TagSet(tag, tag)
| mit |
dplepage/logbook | tests/test_logbook.py | 1 | 59981 | # -*- coding: utf-8 -*-
from .utils import (
LogbookTestCase,
activate_via_push_pop,
activate_via_with_statement,
capturing_stderr_context,
get_total_delta_seconds,
make_fake_mail_handler,
missing,
require_module,
require_py3,
)
from contextlib import closing, contextmanager
from datetime import datetime, timedelta
from random import randrange
import logbook
from logbook.helpers import StringIO, xrange, iteritems, zip, u
import os
import pickle
import re
import shutil
import socket
import sys
import tempfile
import time
import json
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
# Path of this test module's *source* file.  Several tests match traceback
# and location strings against it, so strip a trailing 'c' when __file__
# points at the compiled .pyc.
__file_without_pyc__ = __file__
if __file_without_pyc__.endswith(".pyc"):
    __file_without_pyc__ = __file_without_pyc__[:-1]
# Alphabet used to generate distinct log lines in rotation tests.
LETTERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
class _BasicAPITestCase(LogbookTestCase):
    """Core logbook API tests, parameterized over the handler activation
    strategy (``with``-statement vs. push/pop) via the concrete subclasses
    below that set ``self.thread_activation_strategy``."""
    def test_basic_logging(self):
        # A warning must be captured and formatted with the default template.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            self.log.warn('This is a warning. Nice hah?')
        self.assert_(handler.has_warning('This is a warning. Nice hah?'))
        self.assertEqual(handler.formatted_records, [
            '[WARNING] testlogger: This is a warning. Nice hah?'
        ])
    def test_extradict(self):
        # record.extra behaves like a defaultdict of empty strings.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            self.log.warn('Test warning')
        record = handler.records[0]
        record.extra['existing'] = 'foo'
        self.assertEqual(record.extra['nonexisting'], '')
        self.assertEqual(record.extra['existing'], 'foo')
        self.assertEqual(repr(record.extra),
                         'ExtraDict({\'existing\': \'foo\'})')
    def test_custom_logger(self):
        # Logger subclasses may inject extras via process_record().
        client_ip = '127.0.0.1'
        class CustomLogger(logbook.Logger):
            def process_record(self, record):
                record.extra['ip'] = client_ip
        custom_log = CustomLogger('awesome logger')
        fmt = '[{record.level_name}] {record.channel}: ' \
              '{record.message} [{record.extra[ip]}]'
        handler = logbook.TestHandler(format_string=fmt)
        self.assertEqual(handler.format_string, fmt)
        with self.thread_activation_strategy(handler):
            custom_log.warn('Too many sounds')
            self.log.warn('"Music" playing')
        self.assertEqual(handler.formatted_records, [
            '[WARNING] awesome logger: Too many sounds [127.0.0.1]',
            '[WARNING] testlogger: "Music" playing []'
        ])
    def test_handler_exception(self):
        # Exceptions raised inside a handler end up on stderr, not lost.
        class ErroringHandler(logbook.TestHandler):
            def emit(self, record):
                raise RuntimeError('something bad happened')
        with capturing_stderr_context() as stderr:
            with self.thread_activation_strategy(ErroringHandler()) as handler:
                self.log.warn('I warn you.')
        self.assert_('something bad happened' in stderr.getvalue())
        self.assert_('I warn you' not in stderr.getvalue())
    def test_formatting_exception(self):
        # A bad format spec in the message must produce a rich TypeError
        # that names the message, args, kwargs and source location.
        def make_record():
            return logbook.LogRecord('Test Logger', logbook.WARNING,
                                     'Hello {foo:invalid}',
                                     kwargs={'foo': 42},
                                     frame=sys._getframe())
        record = make_record()
        with self.assertRaises(TypeError) as caught:
            record.message
        errormsg = str(caught.exception)
        self.assertRegexpMatches(errormsg,
            "Could not format message with provided arguments: Invalid (?:format specifier)|(?:conversion specification)|(?:format spec)")
        self.assertIn("msg='Hello {foo:invalid}'", errormsg)
        self.assertIn('args=()', errormsg)
        self.assertIn("kwargs={'foo': 42}", errormsg)
        self.assertRegexpMatches(
            errormsg,
            r'Happened in file .*%s, line \d+' % __file_without_pyc__)
    def test_exception_catching(self):
        # logger.exception() logs at ERROR with traceback attached.
        logger = logbook.Logger('Test')
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            self.assertFalse(handler.has_error())
            try:
                1 / 0
            except Exception:
                logger.exception()
            try:
                1 / 0
            except Exception:
                logger.exception('Awesome')
            self.assert_(handler.has_error('Uncaught exception occurred'))
            self.assert_(handler.has_error('Awesome'))
        self.assertIsNotNone(handler.records[0].exc_info)
        self.assertIn('1 / 0', handler.records[0].formatted_exception)
    def test_exc_info_tuple(self):
        self._test_exc_info(as_tuple=True)
    def test_exc_info_true(self):
        self._test_exc_info(as_tuple=False)
    def _test_exc_info(self, as_tuple):
        # exc_info may be passed either as the 3-tuple or as True.
        logger = logbook.Logger("Test")
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            try:
                1 / 0
            except Exception:
                exc_info = sys.exc_info()
                logger.info("Exception caught", exc_info=exc_info if as_tuple else True)
        self.assertIsNotNone(handler.records[0].exc_info)
        self.assertEquals(handler.records[0].exc_info, exc_info)
    def test_exporting(self):
        # to_dict()/from_dict() must round-trip all public record state.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            try:
                1 / 0
            except Exception:
                self.log.exception()
            record = handler.records[0]
        exported = record.to_dict()
        record.close()
        imported = logbook.LogRecord.from_dict(exported)
        for key, value in iteritems(record.__dict__):
            if key[0] == '_':
                continue
            self.assertEqual(value, getattr(imported, key))
    def test_pickle(self):
        # Records must survive pickling at every supported protocol.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            try:
                1 / 0
            except Exception:
                self.log.exception()
            record = handler.records[0]
        record.pull_information()
        record.close()
        for p in xrange(pickle.HIGHEST_PROTOCOL):
            exported = pickle.dumps(record, p)
            imported = pickle.loads(exported)
            for key, value in iteritems(record.__dict__):
                if key[0] == '_':
                    continue
                imported_value = getattr(imported, key)
                if isinstance(value, ZeroDivisionError):
                    # in Python 3.2, ZeroDivisionError(x) != ZeroDivisionError(x)
                    self.assert_(type(value) is type(imported_value))
                    self.assertEqual(value.args, imported_value.args)
                else:
                    self.assertEqual(value, imported_value)
    def test_timedate_format(self):
        """
        tests the logbook.set_datetime_format() function
        """
        FORMAT_STRING = '{record.time:%H:%M:%S} {record.message}'
        handler = logbook.TestHandler(format_string=FORMAT_STRING)
        handler.push_thread()
        logbook.set_datetime_format('utc')
        try:
            self.log.warn('This is a warning.')
            time_utc = handler.records[0].time
            logbook.set_datetime_format('local')
            self.log.warn('This is a warning.')
            time_local = handler.records[1].time
        finally:
            handler.pop_thread()
            # put back the default time factory
            logbook.set_datetime_format('utc')
        # get the expected difference between local and utc time
        t1 = datetime.now()
        t2 = datetime.utcnow()
        tz_minutes_diff = get_total_delta_seconds(t1 - t2)/60.0
        if abs(tz_minutes_diff) < 1:
            self.skipTest("Cannot test utc/localtime differences if they vary by less than one minute...")
        # get the difference between LogRecord local and utc times
        logbook_minutes_diff = get_total_delta_seconds(time_local - time_utc)/60.0
        self.assertGreater(abs(logbook_minutes_diff), 1, "Localtime does not differ from UTC by more than 1 minute (Local: %s, UTC: %s)" % (time_local, time_utc))
        ratio = logbook_minutes_diff / tz_minutes_diff
        self.assertGreater(ratio, 0.99)
        self.assertLess(ratio, 1.01)
class BasicAPITestCase_Regular(_BasicAPITestCase):
    """Run the basic API tests activating handlers via ``with`` statements."""
    def setUp(self):
        super(BasicAPITestCase_Regular, self).setUp()
        self.thread_activation_strategy = activate_via_with_statement
class BasicAPITestCase_Contextmgr(_BasicAPITestCase):
    """Run the basic API tests activating handlers via push_thread/pop_thread."""
    def setUp(self):
        super(BasicAPITestCase_Contextmgr, self).setUp()
        self.thread_activation_strategy = activate_via_push_pop
class _HandlerTestCase(LogbookTestCase):
    def setUp(self):
        # Each test gets a fresh temp directory and a log path inside it.
        super(_HandlerTestCase, self).setUp()
        self.dirname = tempfile.mkdtemp()
        self.filename = os.path.join(self.dirname, 'log.tmp')
    def tearDown(self):
        shutil.rmtree(self.dirname)
        super(_HandlerTestCase, self).tearDown()
    def test_file_handler(self):
        # FileHandler writes formatted records to the configured file.
        handler = logbook.FileHandler(self.filename,
            format_string='{record.level_name}:{record.channel}:'
            '{record.message}',)
        with self.thread_activation_strategy(handler):
            self.log.warn('warning message')
        handler.close()
        with open(self.filename) as f:
            self.assertEqual(f.readline(),
                             'WARNING:testlogger:warning message\n')
    def test_file_handler_unicode(self):
        # Non-ASCII messages must not trigger fallback output on stderr.
        with capturing_stderr_context() as captured:
            with self.thread_activation_strategy(logbook.FileHandler(self.filename)) as h:
                self.log.info(u('\u0431'))
        self.assertFalse(captured.getvalue())
    def test_file_handler_delay(self):
        # With delay=True the file must not exist until the first record.
        handler = logbook.FileHandler(self.filename,
            format_string='{record.level_name}:{record.channel}:'
            '{record.message}', delay=True)
        self.assertFalse(os.path.isfile(self.filename))
        with self.thread_activation_strategy(handler):
            self.log.warn('warning message')
        handler.close()
        with open(self.filename) as f:
            self.assertEqual(f.readline(),
                             'WARNING:testlogger:warning message\n')
    def test_monitoring_file_handler(self):
        # MonitoringFileHandler must reopen the file if it is moved away.
        if os.name == "nt":
            self.skipTest("unsupported on windows due to different IO (also unneeded)")
        handler = logbook.MonitoringFileHandler(self.filename,
            format_string='{record.level_name}:{record.channel}:'
            '{record.message}', delay=True)
        with self.thread_activation_strategy(handler):
            self.log.warn('warning message')
            os.rename(self.filename, self.filename + '.old')
            self.log.warn('another warning message')
        handler.close()
        with open(self.filename) as f:
            self.assertEqual(f.read().strip(),
                             'WARNING:testlogger:another warning message')
    def test_custom_formatter(self):
        # A callable assigned to handler.formatter overrides the template.
        def custom_format(record, handler):
            return record.level_name + ':' + record.message
        handler = logbook.FileHandler(self.filename)
        with self.thread_activation_strategy(handler):
            handler.formatter = custom_format
            self.log.warn('Custom formatters are awesome')
        with open(self.filename) as f:
            self.assertEqual(f.readline(),
                             'WARNING:Custom formatters are awesome\n')
    def test_rotating_file_handler(self):
        # Size-based rotation keeps backup_count rollover files and the
        # newest records stay in the primary file.
        basename = os.path.join(self.dirname, 'rot.log')
        handler = logbook.RotatingFileHandler(basename, max_size=2048,
                                              backup_count=3,
                                              )
        handler.format_string = '{record.message}'
        with self.thread_activation_strategy(handler):
            for c, x in zip(LETTERS, xrange(32)):
                self.log.warn(c * 256)
        files = [x for x in os.listdir(self.dirname)
                 if x.startswith('rot.log')]
        files.sort()
        self.assertEqual(files, ['rot.log', 'rot.log.1', 'rot.log.2',
                                 'rot.log.3'])
        with open(basename) as f:
            self.assertEqual(f.readline().rstrip(), 'C' * 256)
            self.assertEqual(f.readline().rstrip(), 'D' * 256)
            self.assertEqual(f.readline().rstrip(), 'E' * 256)
            self.assertEqual(f.readline().rstrip(), 'F' * 256)
    def test_timed_rotating_file_handler(self):
        # Date-based rotation: records with fabricated timestamps spread
        # over four days must end up in per-day files, keeping only the
        # newest backup_count (3) of them.
        basename = os.path.join(self.dirname, 'trot.log')
        handler = logbook.TimedRotatingFileHandler(basename, backup_count=3)
        handler.format_string = '[{record.time:%H:%M}] {record.message}'
        def fake_record(message, year, month, day, hour=0,
                        minute=0, second=0):
            # Build a record with an explicit timestamp instead of "now".
            lr = logbook.LogRecord('Test Logger', logbook.WARNING,
                                   message)
            lr.time = datetime(year, month, day, hour, minute, second)
            return lr
        with self.thread_activation_strategy(handler):
            for x in xrange(10):
                handler.handle(fake_record('First One', 2010, 1, 5, x + 1))
            for x in xrange(20):
                handler.handle(fake_record('Second One', 2010, 1, 6, x + 1))
            for x in xrange(10):
                handler.handle(fake_record('Third One', 2010, 1, 7, x + 1))
            for x in xrange(20):
                handler.handle(fake_record('Last One', 2010, 1, 8, x + 1))
        files = sorted(
            x for x in os.listdir(self.dirname) if x.startswith('trot')
        )
        self.assertEqual(files, ['trot-2010-01-06.log', 'trot-2010-01-07.log',
                                 'trot-2010-01-08.log'])
        with open(os.path.join(self.dirname, 'trot-2010-01-08.log')) as f:
            self.assertEqual(f.readline().rstrip(), '[01:00] Last One')
            self.assertEqual(f.readline().rstrip(), '[02:00] Last One')
        with open(os.path.join(self.dirname, 'trot-2010-01-07.log')) as f:
            self.assertEqual(f.readline().rstrip(), '[01:00] Third One')
            self.assertEqual(f.readline().rstrip(), '[02:00] Third One')
    def test_mail_handler(self):
        # Only ERROR-level records are mailed; the mail body must carry the
        # traceback, location metadata and the utf-8 encoded subject.
        subject = u('\xf8nicode')
        handler = make_fake_mail_handler(subject=subject)
        with capturing_stderr_context() as fallback:
            with self.thread_activation_strategy(handler):
                self.log.warn('This is not mailed')
                try:
                    1 / 0
                except Exception:
                    self.log.exception(u('Viva la Espa\xf1a'))
            if not handler.mails:
                # if sending the mail failed, the reason should be on stderr
                self.fail(fallback.getvalue())
            self.assertEqual(len(handler.mails), 1)
            sender, receivers, mail = handler.mails[0]
            mail = mail.replace("\r", "")
            self.assertEqual(sender, handler.from_addr)
            self.assert_('=?utf-8?q?=C3=B8nicode?=' in mail)
            self.assertRegexpMatches(mail, 'Message type:\s+ERROR')
            self.assertRegexpMatches(mail, 'Location:.*%s' % __file_without_pyc__)
            self.assertRegexpMatches(mail, 'Module:\s+%s' % __name__)
            self.assertRegexpMatches(mail, 'Function:\s+test_mail_handler')
            body = u('Message:\n\nViva la Espa\xf1a')
            if sys.version_info < (3, 0):
                body = body.encode('utf-8')
            self.assertIn(body, mail)
            self.assertIn('\n\nTraceback (most', mail)
            self.assertIn('1 / 0', mail)
        self.assertIn('This is not mailed', fallback.getvalue())
    def test_mail_handler_record_limits(self):
        # Repeated records within record_delta are coalesced into
        # suppression-count mails rather than one mail per record.
        suppression_test = re.compile('This message occurred additional \d+ '
                                      'time\(s\) and was suppressed').search
        handler = make_fake_mail_handler(record_limit=1,
                                         record_delta=timedelta(seconds=0.5))
        with self.thread_activation_strategy(handler):
            later = datetime.utcnow() + timedelta(seconds=1.1)
            while datetime.utcnow() < later:
                self.log.error('Over and over...')
            # first mail that is always delivered + 0.5 seconds * 2
            # and 0.1 seconds of room for rounding errors makes 3 mails
            self.assertEqual(len(handler.mails), 3)
            # first mail is always delivered
            self.assert_(not suppression_test(handler.mails[0][2]))
            # the next two have a supression count
            self.assert_(suppression_test(handler.mails[1][2]))
            self.assert_(suppression_test(handler.mails[2][2]))
    def test_mail_handler_batching(self):
        # FingersCrossedHandler buffers records and flushes them to the
        # mail handler once an ERROR triggers; reset=True starts a new batch.
        mail_handler = make_fake_mail_handler()
        handler = logbook.FingersCrossedHandler(mail_handler, reset=True)
        with self.thread_activation_strategy(handler):
            self.log.warn('Testing')
            self.log.debug('Even more')
            self.log.error('And this triggers it')
            self.log.info('Aha')
            self.log.error('And this triggers it again!')
        self.assertEqual(len(mail_handler.mails), 2)
        mail = mail_handler.mails[0][2]
        pieces = mail.split('Log records that led up to this one:')
        self.assertEqual(len(pieces), 2)
        body, rest = pieces
        rest = rest.replace("\r", "")
        self.assertRegexpMatches(body, 'Message type:\s+ERROR')
        self.assertRegexpMatches(body, 'Module:\s+%s' % __name__)
        self.assertRegexpMatches(body, 'Function:\s+test_mail_handler_batching')
        related = rest.strip().split('\n\n')
        self.assertEqual(len(related), 2)
        self.assertRegexpMatches(related[0], 'Message type:\s+WARNING')
        self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG')
        self.assertIn('And this triggers it again', mail_handler.mails[1][2])
    def test_group_handler_mail_combo(self):
        # GroupHandler defers everything until it is popped, then delivers
        # a single mail with the remaining records listed as "same group".
        mail_handler = make_fake_mail_handler(level=logbook.DEBUG)
        handler = logbook.GroupHandler(mail_handler)
        with self.thread_activation_strategy(handler):
            self.log.error('The other way round')
            self.log.warn('Testing')
            self.log.debug('Even more')
            self.assertEqual(mail_handler.mails, [])
        self.assertEqual(len(mail_handler.mails), 1)
        mail = mail_handler.mails[0][2]
        pieces = mail.split('Other log records in the same group:')
        self.assertEqual(len(pieces), 2)
        body, rest = pieces
        rest = rest.replace("\r", "")
        self.assertRegexpMatches(body, 'Message type:\s+ERROR')
        self.assertRegexpMatches(body, 'Module:\s+'+__name__)
        self.assertRegexpMatches(body, 'Function:\s+test_group_handler_mail_combo')
        related = rest.strip().split('\n\n')
        self.assertEqual(len(related), 2)
        self.assertRegexpMatches(related[0], 'Message type:\s+WARNING')
        self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG')
    def test_syslog_handler(self):
        # SyslogHandler datagrams are received verbatim over both INET and
        # (where available) UNIX sockets, with/without an app name prefix.
        to_test = [
            (socket.AF_INET, ('127.0.0.1', 0)),
        ]
        if hasattr(socket, 'AF_UNIX'):
            to_test.append((socket.AF_UNIX, self.filename))
        for sock_family, address in to_test:
            with closing(socket.socket(sock_family, socket.SOCK_DGRAM)) as inc:
                inc.bind(address)
                inc.settimeout(1)
                for app_name in [None, 'Testing']:
                    handler = logbook.SyslogHandler(app_name, inc.getsockname())
                    with self.thread_activation_strategy(handler):
                        self.log.warn('Syslog is weird')
                    try:
                        rv = inc.recvfrom(1024)[0]
                    except socket.error:
                        self.fail('got timeout on socket')
                    # <12> = facility user (1)*8 + severity warning (4).
                    self.assertEqual(rv, (
                        u('<12>%stestlogger: Syslog is weird\x00') %
                        (app_name and app_name + u(':') or u(''))).encode('utf-8'))
    def test_handler_processors(self):
        # A Processor can inject request context into record.extra which the
        # handler's format string then interpolates.
        handler = make_fake_mail_handler(format_string='''\
Subject: Application Error for {record.extra[path]} [{record.extra[method]}]
Message type: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
Time: {record.time:%Y-%m-%d %H:%M:%S}
Remote IP: {record.extra[ip]}
Request: {record.extra[path]} [{record.extra[method]}]
Message:
{record.message}
''')
        class Request(object):
            remote_addr = '127.0.0.1'
            method = 'GET'
            path = '/index.html'
        def handle_request(request):
            def inject_extra(record):
                record.extra['ip'] = request.remote_addr
                record.extra['method'] = request.method
                record.extra['path'] = request.path
            processor = logbook.Processor(inject_extra)
            with self.thread_activation_strategy(processor):
                handler.push_thread()
                try:
                    try:
                        1 / 0
                    except Exception:
                        self.log.exception('Exception happened during request')
                finally:
                    handler.pop_thread()
        handle_request(Request())
        self.assertEqual(len(handler.mails), 1)
        mail = handler.mails[0][2]
        self.assertIn('Subject: Application Error '
                      'for /index.html [GET]', mail)
        self.assertIn('1 / 0', mail)
    def test_regex_matching(self):
        # has_warning() accepts compiled regexes (searched, not anchored)
        # but plain strings are matched exactly, not as patterns.
        test_handler = logbook.TestHandler()
        with self.thread_activation_strategy(test_handler):
            self.log.warn('Hello World!')
        self.assert_(test_handler.has_warning(re.compile('^Hello')))
        self.assert_(not test_handler.has_warning(re.compile('world$')))
        self.assert_(not test_handler.has_warning('^Hello World'))
    def test_custom_handling_test(self):
        # A handler's handle() may decline records; declined records bubble
        # to the default stderr handler.
        class MyTestHandler(logbook.TestHandler):
            def handle(self, record):
                if record.extra.get('flag') != 'testing':
                    return False
                return logbook.TestHandler.handle(self, record)
        class MyLogger(logbook.Logger):
            def process_record(self, record):
                logbook.Logger.process_record(self, record)
                record.extra['flag'] = 'testing'
        log = MyLogger()
        handler = MyTestHandler()
        with capturing_stderr_context() as captured:
            with self.thread_activation_strategy(handler):
                log.warn('From my logger')
                self.log.warn('From another logger')
            self.assert_(handler.has_warning('From my logger'))
        self.assertIn('From another logger', captured.getvalue())
    def test_custom_handling_tester(self):
        # should_handle() gates record capture dynamically.
        flag = True
        class MyTestHandler(logbook.TestHandler):
            def should_handle(self, record):
                return flag
        null_handler = logbook.NullHandler()
        with self.thread_activation_strategy(null_handler):
            test_handler = MyTestHandler()
            with self.thread_activation_strategy(test_handler):
                self.log.warn('1')
                flag = False
                self.log.warn('2')
                self.assert_(test_handler.has_warning('1'))
                self.assert_(not test_handler.has_warning('2'))
    def test_null_handler(self):
        # NullHandler swallows records below the inner handler's level.
        with capturing_stderr_context() as captured:
            with self.thread_activation_strategy(logbook.NullHandler()) as null_handler:
                with self.thread_activation_strategy(logbook.TestHandler(level='ERROR')) as handler:
                    self.log.error('An error')
                    self.log.warn('A warning')
            self.assertEqual(captured.getvalue(), '')
            self.assertFalse(handler.has_warning('A warning'))
            self.assert_(handler.has_error('An error'))
    def test_test_handler_cache(self):
        # formatted_records is cached and only invalidated by new records.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            self.log.warn('First line')
            self.assertEqual(len(handler.formatted_records),1)
            cache = handler.formatted_records # store cache, to make sure it is identifiable
            self.assertEqual(len(handler.formatted_records),1)
            self.assert_(cache is handler.formatted_records) # Make sure cache is not invalidated without changes to record
            self.log.warn('Second line invalidates cache')
        self.assertEqual(len(handler.formatted_records),2)
        self.assertFalse(cache is handler.formatted_records) # Make sure cache is invalidated when records change
    def test_blackhole_setting(self):
        # A non-bubbling NullHandler must short-circuit record creation
        # entirely (heavy_init is never called).
        null_handler = logbook.NullHandler()
        heavy_init = logbook.LogRecord.heavy_init
        with self.thread_activation_strategy(null_handler):
            def new_heavy_init(self):
                raise RuntimeError('should not be triggered')
            logbook.LogRecord.heavy_init = new_heavy_init
            try:
                with self.thread_activation_strategy(null_handler):
                    logbook.warn('Awesome')
            finally:
                logbook.LogRecord.heavy_init = heavy_init
        null_handler.bubble = True
        with capturing_stderr_context() as captured:
            # NOTE: 'blockhole' is a typo in the message text, but harmless
            # here -- only non-emptiness of stderr is asserted.
            logbook.warning('Not a blockhole')
            self.assertNotEqual(captured.getvalue(), '')
    def test_calling_frame(self):
        # The record must capture the frame of the logging call site.
        handler = logbook.TestHandler()
        with self.thread_activation_strategy(handler):
            logbook.warn('test')
        self.assertEqual(handler.records[0].calling_frame, sys._getframe())
    def test_nested_setups(self):
        # NestedSetup activates/deactivates a whole stack of handlers as one
        # unit, on a thread or application basis.
        with capturing_stderr_context() as captured:
            logger = logbook.Logger('App')
            test_handler = logbook.TestHandler(level='WARNING')
            mail_handler = make_fake_mail_handler(bubble=True)
            handlers = logbook.NestedSetup([
                logbook.NullHandler(),
                test_handler,
                mail_handler
            ])
            with self.thread_activation_strategy(handlers):
                logger.warn('This is a warning')
                logger.error('This is also a mail')
                try:
                    1 / 0
                except Exception:
                    logger.exception()
            logger.warn('And here we go straight back to stderr')
            self.assert_(test_handler.has_warning('This is a warning'))
            self.assert_(test_handler.has_error('This is also a mail'))
            self.assertEqual(len(mail_handler.mails), 2)
            self.assertIn('This is also a mail', mail_handler.mails[0][2])
            self.assertIn('1 / 0',mail_handler.mails[1][2])
            self.assertIn('And here we go straight back to stderr',
                          captured.getvalue())
            with self.thread_activation_strategy(handlers):
                logger.warn('threadbound warning')
            handlers.push_application()
            try:
                logger.warn('applicationbound warning')
            finally:
                handlers.pop_application()
    def test_dispatcher(self):
        # Records remember the logger that emitted them.
        logger = logbook.Logger('App')
        with self.thread_activation_strategy(logbook.TestHandler()) as test_handler:
            logger.warn('Logbook is too awesome for stdlib')
        self.assertEqual(test_handler.records[0].dispatcher, logger)
    def test_filtering(self):
        # A handler filter rejects records; rejected ones bubble outward.
        logger1 = logbook.Logger('Logger1')
        logger2 = logbook.Logger('Logger2')
        handler = logbook.TestHandler()
        outer_handler = logbook.TestHandler()
        def only_1(record, handler):
            return record.dispatcher is logger1
        handler.filter = only_1
        with self.thread_activation_strategy(outer_handler):
            with self.thread_activation_strategy(handler):
                logger1.warn('foo')
                logger2.warn('bar')
        self.assert_(handler.has_warning('foo', channel='Logger1'))
        self.assertFalse(handler.has_warning('bar', channel='Logger2'))
        self.assertFalse(outer_handler.has_warning('foo', channel='Logger1'))
        self.assert_(outer_handler.has_warning('bar', channel='Logger2'))
    def test_null_handler_filtering(self):
        # A filtered NullHandler selectively swallows records; the rest
        # bubble to the outer handler.
        logger1 = logbook.Logger("1")
        logger2 = logbook.Logger("2")
        outer = logbook.TestHandler()
        inner = logbook.NullHandler()
        inner.filter = lambda record, handler: record.dispatcher is logger1
        with self.thread_activation_strategy(outer):
            with self.thread_activation_strategy(inner):
                logger1.warn("1")
                logger2.warn("2")
        self.assertTrue(outer.has_warning("2", channel="2"))
        self.assertFalse(outer.has_warning("1", channel="1"))
    def test_different_context_pushing(self):
        # Each nested handler sees exactly the records at its own level:
        # non-bubbling handlers stop propagation after handling.
        h1 = logbook.TestHandler(level=logbook.DEBUG)
        h2 = logbook.TestHandler(level=logbook.INFO)
        h3 = logbook.TestHandler(level=logbook.WARNING)
        logger = logbook.Logger('Testing')
        with self.thread_activation_strategy(h1):
            with self.thread_activation_strategy(h2):
                with self.thread_activation_strategy(h3):
                    logger.warn('Wuuu')
                    logger.info('still awesome')
                    logger.debug('puzzled')
        self.assert_(h1.has_debug('puzzled'))
        self.assert_(h2.has_info('still awesome'))
        self.assert_(h3.has_warning('Wuuu'))
        for handler in h1, h2, h3:
            self.assertEquals(len(handler.records), 1)
def test_global_functions(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
logbook.debug('a debug message')
logbook.info('an info message')
logbook.warn('warning part 1')
logbook.warning('warning part 2')
logbook.notice('notice')
logbook.error('an error')
logbook.critical('pretty critical')
logbook.log(logbook.CRITICAL, 'critical too')
self.assert_(handler.has_debug('a debug message'))
self.assert_(handler.has_info('an info message'))
self.assert_(handler.has_warning('warning part 1'))
self.assert_(handler.has_warning('warning part 2'))
self.assert_(handler.has_notice('notice'))
self.assert_(handler.has_error('an error'))
self.assert_(handler.has_critical('pretty critical'))
self.assert_(handler.has_critical('critical too'))
self.assertEqual(handler.records[0].channel, 'Generic')
self.assertIsNone(handler.records[0].dispatcher)
def test_fingerscrossed(self):
handler = logbook.FingersCrossedHandler(logbook.default_handler,
logbook.WARNING)
# if no warning occurs, the infos are not logged
with self.thread_activation_strategy(handler):
with capturing_stderr_context() as captured:
self.log.info('some info')
self.assertEqual(captured.getvalue(), '')
self.assert_(not handler.triggered)
# but if it does, all log messages are output
with self.thread_activation_strategy(handler):
with capturing_stderr_context() as captured:
self.log.info('some info')
self.log.warning('something happened')
self.log.info('something else happened')
logs = captured.getvalue()
self.assert_('some info' in logs)
self.assert_('something happened' in logs)
self.assert_('something else happened' in logs)
self.assert_(handler.triggered)
def test_fingerscrossed_factory(self):
handlers = []
def handler_factory(record, fch):
handler = logbook.TestHandler()
handlers.append(handler)
return handler
def make_fch():
return logbook.FingersCrossedHandler(handler_factory,
logbook.WARNING)
fch = make_fch()
with self.thread_activation_strategy(fch):
self.log.info('some info')
self.assertEqual(len(handlers), 0)
self.log.warning('a warning')
self.assertEqual(len(handlers), 1)
self.log.error('an error')
self.assertEqual(len(handlers), 1)
self.assert_(handlers[0].has_infos)
self.assert_(handlers[0].has_warnings)
self.assert_(handlers[0].has_errors)
self.assert_(not handlers[0].has_notices)
self.assert_(not handlers[0].has_criticals)
self.assert_(not handlers[0].has_debugs)
fch = make_fch()
with self.thread_activation_strategy(fch):
self.log.info('some info')
self.log.warning('a warning')
self.assertEqual(len(handlers), 2)
def test_fingerscrossed_buffer_size(self):
logger = logbook.Logger('Test')
test_handler = logbook.TestHandler()
handler = logbook.FingersCrossedHandler(test_handler, buffer_size=3)
with self.thread_activation_strategy(handler):
logger.info('Never gonna give you up')
logger.warn('Aha!')
logger.warn('Moar!')
logger.error('Pure hate!')
self.assertEqual(test_handler.formatted_records, [
'[WARNING] Test: Aha!',
'[WARNING] Test: Moar!',
'[ERROR] Test: Pure hate!'
])
class HandlerTestCase_Regular(_HandlerTestCase):
def setUp(self):
super(HandlerTestCase_Regular, self).setUp()
self.thread_activation_strategy = activate_via_push_pop
class HandlerTestCase_Contextmgr(_HandlerTestCase):
def setUp(self):
super(HandlerTestCase_Contextmgr, self).setUp()
self.thread_activation_strategy = activate_via_with_statement
class AttributeTestCase(LogbookTestCase):
def test_level_properties(self):
self.assertEqual(self.log.level, logbook.NOTSET)
self.assertEqual(self.log.level_name, 'NOTSET')
self.log.level_name = 'WARNING'
self.assertEqual(self.log.level, logbook.WARNING)
self.log.level = logbook.ERROR
self.assertEqual(self.log.level_name, 'ERROR')
def test_reflected_properties(self):
group = logbook.LoggerGroup()
group.add_logger(self.log)
self.assertEqual(self.log.group, group)
group.level = logbook.ERROR
self.assertEqual(self.log.level, logbook.ERROR)
self.assertEqual(self.log.level_name, 'ERROR')
group.level = logbook.WARNING
self.assertEqual(self.log.level, logbook.WARNING)
self.assertEqual(self.log.level_name, 'WARNING')
self.log.level = logbook.CRITICAL
group.level = logbook.DEBUG
self.assertEqual(self.log.level, logbook.CRITICAL)
self.assertEqual(self.log.level_name, 'CRITICAL')
group.remove_logger(self.log)
self.assertEqual(self.log.group, None)
class LevelLookupTest(LogbookTestCase):
def test_level_lookup_failures(self):
with self.assertRaises(LookupError):
logbook.get_level_name(37)
with self.assertRaises(LookupError):
logbook.lookup_level('FOO')
class FlagsTestCase(LogbookTestCase):
def test_error_flag(self):
with capturing_stderr_context() as captured:
with logbook.Flags(errors='print'):
with logbook.Flags(errors='silent'):
self.log.warn('Foo {42}', 'aha')
self.assertEqual(captured.getvalue(), '')
with logbook.Flags(errors='silent'):
with logbook.Flags(errors='print'):
self.log.warn('Foo {42}', 'aha')
self.assertNotEqual(captured.getvalue(), '')
with self.assertRaises(Exception) as caught:
with logbook.Flags(errors='raise'):
self.log.warn('Foo {42}', 'aha')
self.assertIn('Could not format message with provided '
'arguments', str(caught.exception))
def test_disable_introspection(self):
with logbook.Flags(introspection=False):
with logbook.TestHandler() as h:
self.log.warn('Testing')
self.assertIsNone(h.records[0].frame)
self.assertIsNone(h.records[0].calling_frame)
self.assertIsNone(h.records[0].module)
class LoggerGroupTestCase(LogbookTestCase):
def test_groups(self):
def inject_extra(record):
record.extra['foo'] = 'bar'
group = logbook.LoggerGroup(processor=inject_extra)
group.level = logbook.ERROR
group.add_logger(self.log)
with logbook.TestHandler() as handler:
self.log.warn('A warning')
self.log.error('An error')
self.assertFalse(handler.has_warning('A warning'))
self.assertTrue(handler.has_error('An error'))
self.assertEqual(handler.records[0].extra['foo'], 'bar')
class DefaultConfigurationTestCase(LogbookTestCase):
def test_default_handlers(self):
with capturing_stderr_context() as stream:
self.log.warn('Aha!')
captured = stream.getvalue()
self.assertIn('WARNING: testlogger: Aha!', captured)
class LoggingCompatTestCase(LogbookTestCase):
def test_basic_compat_with_level_setting(self):
self._test_basic_compat(True)
def test_basic_compat_without_level_setting(self):
self._test_basic_compat(False)
def _test_basic_compat(self, set_root_logger_level):
import logging
from logbook.compat import redirected_logging
# mimic the default logging setting
self.addCleanup(logging.root.setLevel, logging.root.level)
logging.root.setLevel(logging.WARNING)
name = 'test_logbook-%d' % randrange(1 << 32)
logger = logging.getLogger(name)
with logbook.TestHandler(bubble=True) as handler:
with capturing_stderr_context() as captured:
with redirected_logging(set_root_logger_level):
logger.debug('This is from the old system')
logger.info('This is from the old system')
logger.warn('This is from the old system')
logger.error('This is from the old system')
logger.critical('This is from the old system')
self.assertIn(('WARNING: %s: This is from the old system' % name),
captured.getvalue())
if set_root_logger_level:
self.assertEquals(handler.records[0].level, logbook.DEBUG)
else:
self.assertEquals(handler.records[0].level, logbook.WARNING)
def test_redirect_logbook(self):
import logging
from logbook.compat import LoggingHandler
out = StringIO()
logger = logging.getLogger()
old_handlers = logger.handlers[:]
handler = logging.StreamHandler(out)
handler.setFormatter(logging.Formatter(
'%(name)s:%(levelname)s:%(message)s'))
logger.handlers[:] = [handler]
try:
with logbook.compat.LoggingHandler() as logging_handler:
self.log.warn("This goes to logging")
pieces = out.getvalue().strip().split(':')
self.assertEqual(pieces, [
'testlogger',
'WARNING',
'This goes to logging'
])
finally:
logger.handlers[:] = old_handlers
class WarningsCompatTestCase(LogbookTestCase):
def test_warning_redirections(self):
from logbook.compat import redirected_warnings
with logbook.TestHandler() as handler:
redirector = redirected_warnings()
redirector.start()
try:
from warnings import warn
warn(RuntimeWarning('Testing'))
finally:
redirector.end()
self.assertEqual(len(handler.records), 1)
self.assertEqual('[WARNING] RuntimeWarning: Testing',
handler.formatted_records[0])
self.assertIn(__file_without_pyc__, handler.records[0].filename)
class MoreTestCase(LogbookTestCase):
@contextmanager
def _get_temporary_file_context(self):
fn = tempfile.mktemp()
try:
yield fn
finally:
try:
os.remove(fn)
except OSError:
pass
@require_module('jinja2')
def test_jinja_formatter(self):
from logbook.more import JinjaFormatter
fmter = JinjaFormatter('{{ record.channel }}/{{ record.level_name }}')
handler = logbook.TestHandler()
handler.formatter = fmter
with handler:
self.log.info('info')
self.assertIn('testlogger/INFO', handler.formatted_records)
@missing('jinja2')
def test_missing_jinja2(self):
from logbook.more import JinjaFormatter
# check the RuntimeError is raised
with self.assertRaises(RuntimeError):
JinjaFormatter('dummy')
def test_colorizing_support(self):
from logbook.more import ColorizedStderrHandler
class TestColorizingHandler(ColorizedStderrHandler):
def should_colorize(self, record):
return True
stream = StringIO()
with TestColorizingHandler(format_string='{record.message}') as handler:
self.log.error('An error')
self.log.warn('A warning')
self.log.debug('A debug message')
lines = handler.stream.getvalue().rstrip('\n').splitlines()
self.assertEqual(lines, [
'\x1b[31;01mAn error',
'\x1b[39;49;00m\x1b[33;01mA warning',
'\x1b[39;49;00m\x1b[37mA debug message',
'\x1b[39;49;00m'
])
def test_tagged(self):
from logbook.more import TaggingLogger, TaggingHandler
stream = StringIO()
second_handler = logbook.StreamHandler(stream)
logger = TaggingLogger('name', ['cmd'])
handler = TaggingHandler(dict(
info=logbook.default_handler,
cmd=second_handler,
both=[logbook.default_handler, second_handler],
))
handler.bubble = False
with handler:
with capturing_stderr_context() as captured:
logger.log('info', 'info message')
logger.log('both', 'all message')
logger.cmd('cmd message')
stderr = captured.getvalue()
self.assertIn('info message', stderr)
self.assertIn('all message', stderr)
self.assertNotIn('cmd message', stderr)
stringio = stream.getvalue()
self.assertNotIn('info message', stringio)
self.assertIn('all message', stringio)
self.assertIn('cmd message', stringio)
def test_external_application_handler(self):
from logbook.more import ExternalApplicationHandler as Handler
with self._get_temporary_file_context() as fn:
handler = Handler([sys.executable, '-c', r'''if 1:
f = open(%(tempfile)s, 'w')
try:
f.write('{record.message}\n')
finally:
f.close()
''' % {'tempfile': repr(fn)}])
with handler:
self.log.error('this is a really bad idea')
with open(fn, 'r') as rf:
contents = rf.read().strip()
self.assertEqual(contents, 'this is a really bad idea')
def test_external_application_handler_stdin(self):
from logbook.more import ExternalApplicationHandler as Handler
with self._get_temporary_file_context() as fn:
handler = Handler([sys.executable, '-c', r'''if 1:
import sys
f = open(%(tempfile)s, 'w')
try:
f.write(sys.stdin.read())
finally:
f.close()
''' % {'tempfile': repr(fn)}], '{record.message}\n')
with handler:
self.log.error('this is a really bad idea')
with open(fn, 'r') as rf:
contents = rf.read().strip()
self.assertEqual(contents, 'this is a really bad idea')
def test_exception_handler(self):
from logbook.more import ExceptionHandler
with ExceptionHandler(ValueError) as exception_handler:
with self.assertRaises(ValueError) as caught:
self.log.info('here i am')
self.assertIn('INFO: testlogger: here i am', caught.exception.args[0])
def test_exception_handler_specific_level(self):
from logbook.more import ExceptionHandler
with logbook.TestHandler() as test_handler:
with self.assertRaises(ValueError) as caught:
with ExceptionHandler(ValueError, level='WARNING') as exception_handler:
self.log.info('this is irrelevant')
self.log.warn('here i am')
self.assertIn('WARNING: testlogger: here i am', caught.exception.args[0])
self.assertIn('this is irrelevant', test_handler.records[0].message)
def test_dedup_handler(self):
from logbook.more import DedupHandler
with logbook.TestHandler() as test_handler:
with DedupHandler():
self.log.info('foo')
self.log.info('bar')
self.log.info('foo')
self.assertEqual(2, len(test_handler.records))
self.assertIn('message repeated 2 times: foo', test_handler.records[0].message)
self.assertIn('message repeated 1 times: bar', test_handler.records[1].message)
class QueuesTestCase(LogbookTestCase):
def _get_zeromq(self, multi=False):
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
# Get an unused port
tempsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempsock.bind(('localhost', 0))
host, unused_port = tempsock.getsockname()
tempsock.close()
# Retrieve the ZeroMQ handler and subscriber
uri = 'tcp://%s:%d' % (host, unused_port)
if multi:
handler = [ZeroMQHandler(uri, multi=True) for _ in range(3)]
else:
handler = ZeroMQHandler(uri)
subscriber = ZeroMQSubscriber(uri, multi=multi)
# Enough time to start
time.sleep(0.1)
return handler, subscriber
@require_module('zmq')
def test_zeromq_handler(self):
tests = [
u('Logging something'),
u('Something with umlauts äöü'),
u('Something else for good measure'),
]
handler, subscriber = self._get_zeromq()
for test in tests:
with handler:
self.log.warn(test)
record = subscriber.recv()
self.assertEqual(record.message, test)
self.assertEqual(record.channel, self.log.name)
@require_module('zmq')
def test_multi_zeromq_handler(self):
tests = [
u('Logging something'),
u('Something with umlauts äöü'),
u('Something else for good measure'),
]
handlers, subscriber = self._get_zeromq(multi=True)
for handler in handlers:
for test in tests:
with handler:
self.log.warn(test)
record = subscriber.recv()
self.assertEqual(record.message, test)
self.assertEqual(record.channel, self.log.name)
@require_module('zmq')
def test_zeromq_background_thread(self):
handler, subscriber = self._get_zeromq()
test_handler = logbook.TestHandler()
controller = subscriber.dispatch_in_background(test_handler)
with handler:
self.log.warn('This is a warning')
self.log.error('This is an error')
# stop the controller. This will also stop the loop and join the
# background process. Before that we give it a fraction of a second
# to get all results
time.sleep(0.2)
controller.stop()
self.assertTrue(test_handler.has_warning('This is a warning'))
self.assertTrue(test_handler.has_error('This is an error'))
@missing('zmq')
def test_missing_zeromq(self):
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
with self.assertRaises(RuntimeError):
ZeroMQHandler('tcp://127.0.0.1:42000')
with self.assertRaises(RuntimeError):
ZeroMQSubscriber('tcp://127.0.0.1:42000')
@require_module('multiprocessing')
def test_multi_processing_handler(self):
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingHandler, \
MultiProcessingSubscriber
queue = Queue(-1)
test_handler = logbook.TestHandler()
subscriber = MultiProcessingSubscriber(queue)
def send_back():
handler = MultiProcessingHandler(queue)
handler.push_thread()
try:
logbook.warn('Hello World')
finally:
handler.pop_thread()
p = Process(target=send_back)
p.start()
p.join()
with test_handler:
subscriber.dispatch_once()
self.assert_(test_handler.has_warning('Hello World'))
def test_threaded_wrapper_handler(self):
from logbook.queues import ThreadedWrapperHandler
test_handler = logbook.TestHandler()
with ThreadedWrapperHandler(test_handler) as handler:
self.log.warn('Just testing')
self.log.error('More testing')
# give it some time to sync up
handler.close()
self.assertTrue(not handler.controller.running)
self.assertTrue(test_handler.has_warning('Just testing'))
self.assertTrue(test_handler.has_error('More testing'))
@require_module('execnet')
def test_execnet_handler(self):
def run_on_remote(channel):
import logbook
from logbook.queues import ExecnetChannelHandler
handler = ExecnetChannelHandler(channel)
log = logbook.Logger("Execnet")
handler.push_application()
log.info('Execnet works')
import execnet
gw = execnet.makegateway()
channel = gw.remote_exec(run_on_remote)
from logbook.queues import ExecnetChannelSubscriber
subscriber = ExecnetChannelSubscriber(channel)
record = subscriber.recv()
self.assertEqual(record.msg, 'Execnet works')
gw.exit()
@require_module('multiprocessing')
def test_subscriber_group(self):
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingHandler, \
MultiProcessingSubscriber, SubscriberGroup
a_queue = Queue(-1)
b_queue = Queue(-1)
test_handler = logbook.TestHandler()
subscriber = SubscriberGroup([
MultiProcessingSubscriber(a_queue),
MultiProcessingSubscriber(b_queue)
])
def make_send_back(message, queue):
def send_back():
with MultiProcessingHandler(queue):
logbook.warn(message)
return send_back
for _ in range(10):
p1 = Process(target=make_send_back('foo', a_queue))
p2 = Process(target=make_send_back('bar', b_queue))
p1.start()
p2.start()
p1.join()
p2.join()
messages = [subscriber.recv().message for i in (1, 2)]
self.assertEqual(sorted(messages), ['bar', 'foo'])
@require_module('redis')
def test_redis_handler(self):
import redis
from logbook.queues import RedisHandler
KEY = 'redis'
FIELDS = ['message', 'host']
r = redis.Redis(decode_responses=True)
redis_handler = RedisHandler(level=logbook.INFO, bubble=True)
#We don't want output for the tests, so we can wrap everything in a NullHandler
null_handler = logbook.NullHandler()
#Check default values
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
#Are all the fields in the record?
[self.assertTrue(message.find(field)) for field in FIELDS]
self.assertEqual(key, KEY)
self.assertTrue(message.find(LETTERS))
#Change the key of the handler and check on redis
KEY = 'test_another_key'
redis_handler.key = KEY
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
self.assertEqual(key, KEY)
#Check that extra fields are added if specified when creating the handler
FIELDS.append('type')
extra_fields = {'type': 'test'}
del(redis_handler)
redis_handler = RedisHandler(key=KEY, level=logbook.INFO,
extra_fields=extra_fields, bubble=True)
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
[self.assertTrue(message.find(field)) for field in FIELDS]
self.assertTrue(message.find('test'))
#And finally, check that fields are correctly added if appended to the
#log message
FIELDS.append('more_info')
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS, more_info='This works')
key, message = r.blpop(KEY)
[self.assertTrue(message.find(field)) for field in FIELDS]
self.assertTrue(message.find('This works'))
class TicketingTestCase(LogbookTestCase):
@require_module('sqlalchemy')
def test_basic_ticketing(self):
from logbook.ticketing import TicketingHandler
with TicketingHandler('sqlite:///') as handler:
for x in xrange(5):
self.log.warn('A warning')
self.log.info('An error')
if x < 2:
try:
1 / 0
except Exception:
self.log.exception()
self.assertEqual(handler.db.count_tickets(), 3)
tickets = handler.db.get_tickets()
self.assertEqual(len(tickets), 3)
self.assertEqual(tickets[0].level, logbook.INFO)
self.assertEqual(tickets[1].level, logbook.WARNING)
self.assertEqual(tickets[2].level, logbook.ERROR)
self.assertEqual(tickets[0].occurrence_count, 5)
self.assertEqual(tickets[1].occurrence_count, 5)
self.assertEqual(tickets[2].occurrence_count, 2)
self.assertEqual(tickets[0].last_occurrence.level, logbook.INFO)
tickets[0].solve()
self.assert_(tickets[0].solved)
tickets[0].delete()
ticket = handler.db.get_ticket(tickets[1].ticket_id)
self.assertEqual(ticket, tickets[1])
occurrences = handler.db.get_occurrences(tickets[2].ticket_id,
order_by='time')
self.assertEqual(len(occurrences), 2)
record = occurrences[0]
self.assertIn(__file_without_pyc__, record.filename)
# avoid 2to3 destroying our assertion
self.assertEqual(getattr(record, 'func_name'), 'test_basic_ticketing')
self.assertEqual(record.level, logbook.ERROR)
self.assertEqual(record.thread, get_ident())
self.assertEqual(record.process, os.getpid())
self.assertEqual(record.channel, 'testlogger')
self.assertIn('1 / 0', record.formatted_exception)
class HelperTestCase(LogbookTestCase):
def test_jsonhelper(self):
from logbook.helpers import to_safe_json
class Bogus(object):
def __str__(self):
return 'bogus'
rv = to_safe_json([
None,
'foo',
u('jäger'),
1,
datetime(2000, 1, 1),
{'jäger1': 1, u('jäger2'): 2, Bogus(): 3, 'invalid': object()},
object() # invalid
])
self.assertEqual(
rv, [None, u('foo'), u('jäger'), 1, '2000-01-01T00:00:00Z',
{u('jäger1'): 1, u('jäger2'): 2, u('bogus'): 3,
u('invalid'): None}, None])
def test_datehelpers(self):
from logbook.helpers import format_iso8601, parse_iso8601
now = datetime.now()
rv = format_iso8601()
self.assertEqual(rv[:4], str(now.year))
self.assertRaises(ValueError, parse_iso8601, 'foo')
v = parse_iso8601('2000-01-01T00:00:00.12Z')
self.assertEqual(v.microsecond, 120000)
v = parse_iso8601('2000-01-01T12:00:00+01:00')
self.assertEqual(v.hour, 11)
v = parse_iso8601('2000-01-01T12:00:00-01:00')
self.assertEqual(v.hour, 13)
class UnicodeTestCase(LogbookTestCase):
# in Py3 we can just assume a more uniform unicode environment
@require_py3
def test_default_format_unicode(self):
with capturing_stderr_context() as stream:
self.log.warn('\u2603')
self.assertIn('WARNING: testlogger: \u2603', stream.getvalue())
@require_py3
def test_default_format_encoded(self):
with capturing_stderr_context() as stream:
# it's a string but it's in the right encoding so don't barf
self.log.warn('\u2603')
self.assertIn('WARNING: testlogger: \u2603', stream.getvalue())
@require_py3
def test_default_format_bad_encoding(self):
with capturing_stderr_context() as stream:
# it's a string, is wrong, but just dump it in the logger,
# don't try to decode/encode it
self.log.warn('Русский'.encode('koi8-r'))
self.assertIn("WARNING: testlogger: b'\\xf2\\xd5\\xd3\\xd3\\xcb\\xc9\\xca'", stream.getvalue())
@require_py3
def test_custom_unicode_format_unicode(self):
format_string = ('[{record.level_name}] '
'{record.channel}: {record.message}')
with capturing_stderr_context() as stream:
with logbook.StderrHandler(format_string=format_string):
self.log.warn("\u2603")
self.assertIn('[WARNING] testlogger: \u2603', stream.getvalue())
@require_py3
def test_custom_string_format_unicode(self):
format_string = ('[{record.level_name}] '
'{record.channel}: {record.message}')
with capturing_stderr_context() as stream:
with logbook.StderrHandler(format_string=format_string):
self.log.warn('\u2603')
self.assertIn('[WARNING] testlogger: \u2603', stream.getvalue())
@require_py3
def test_unicode_message_encoded_params(self):
with capturing_stderr_context() as stream:
self.log.warn("\u2603 {0}", "\u2603".encode('utf8'))
self.assertIn("WARNING: testlogger: \u2603 b'\\xe2\\x98\\x83'", stream.getvalue())
@require_py3
def test_encoded_message_unicode_params(self):
with capturing_stderr_context() as stream:
self.log.warn('\u2603 {0}'.encode('utf8'), '\u2603')
self.assertIn('WARNING: testlogger: \u2603 \u2603', stream.getvalue())
| bsd-3-clause |
ctxis/canape | CANAPE.Scripting/Lib/distutils/sysconfig.py | 41 | 21356 | """Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
__revision__ = "$Id$"
import os
import re
import string
import sys
from distutils.errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
project_base = os.path.dirname(os.path.abspath(sys.executable))
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(project_base, "Modules", fn)):
return True
return False
python_build = _python_build()
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
buildir = os.path.dirname(sys.executable)
if plat_specific:
# python.h is located in the buildir
inc_dir = buildir
else:
# the source dir is relative to the buildir
srcdir = os.path.abspath(os.path.join(buildir,
get_config_var('srcdir')))
# Include is located in the srcdir
inc_dir = os.path.join(srcdir, "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + get_python_version())
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if get_python_version() < "2.2":
return prefix
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
if 'CC' in os.environ:
cc = os.environ['CC']
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc)
compiler.shared_lib_extension = so_ext
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(project_base, "PC")
else:
inc_dir = project_base
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(os.path.dirname(sys.executable), "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned.  If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    if g is None:
        g = {}
    # "#define NAME value" lines; integer values are converted to int.
    define_pat = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    # "/* #undef NAME */" lines are recorded with a value of 0.
    undef_pat = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    while True:
        line = fp.readline()
        if not line:
            return g
        match = define_pat.match(line)
        if match:
            name, value = match.group(1, 2)
            try:
                value = int(value)
            except ValueError:
                pass
            g[name] = value
            continue
        match = undef_pat.match(line)
        if match:
            g[match.group(1)] = 0
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    from distutils.text_file import TextFile
    # TextFile strips comments, skips blank lines and joins backslash
    # continuation lines for us.
    fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
    if g is None:
        g = {}
    done = {}      # fully resolved variables
    notdone = {}   # values still containing $(...) / ${...} references
    # First pass: collect raw assignments, separating the resolved from
    # the ones that need interpolation.
    while 1:
        line = fp.readline()
        if line is None: # eof
            break
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')
            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v
    # do variable interpolation here
    # Repeatedly substitute one reference per variable per round until
    # nothing is left in `notdone`.
    # NOTE(review): iterating notdone.keys() while deleting entries is only
    # safe on Python 2, where keys() returns a list -- confirm before any
    # Python 3 port.
    while notdone:
        for name in notdone.keys():
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                else:
                    # Unknown variable: expand to the empty string.
                    done[n] = item = ""
                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        # More references remain; keep it pending.
                        notdone[name] = value
                    else:
                        try: value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        del notdone[name]
            else:
                # bogus variable reference; just drop it since we can't deal
                del notdone[name]
    fp.close()
    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()
    # save the results in the global dictionary
    g.update(done)
    return g
def expand_makefile_vars(s, vars):
    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
    'string' according to 'vars' (a dictionary mapping variable names to
    values). Variables not present in 'vars' are silently expanded to the
    empty string. The variable values in 'vars' should not contain further
    variable expansions; if 'vars' is the output of 'parse_makefile()',
    you're fine. Returns a variable-expanded version of 's'.
    """
    # This algorithm does multiple expansion, so if vars['foo'] contains
    # "${bar}", it will expand ${foo} to ${bar}, and then expand
    # ${bar}... and so forth. This is fine as long as 'vars' comes from
    # 'parse_makefile()', which takes care of such expansions eagerly,
    # according to make's variable expansion semantics.
    while 1:
        m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
        if m:
            (beg, end) = m.span()
            # Bug fix: vars.get(name) used to be called without a default,
            # so a missing variable yielded None and the concatenation
            # raised TypeError instead of the documented empty-string
            # expansion.  str() also copes with the integer values that
            # parse_makefile() stores (it does the same via str(done[n])).
            s = s[0:beg] + str(vars.get(m.group(1), '')) + s[end:]
        else:
            break
    return s
# Cache of configuration variables; filled lazily by the _init_<os>()
# helpers on the first call to get_config_vars().
_config_vars = None
def _init_posix():
    """Initialize the module as appropriate for POSIX systems."""
    # NOTE(review): this function uses Python 2-only idioms
    # ("except IOError, msg", file(), list-returning map() comparison);
    # keep them as-is for this Python 2 tree.
    g = {}
    # load the installed Makefile:
    try:
        filename = get_makefile_filename()
        parse_makefile(filename, g)
    except IOError, msg:
        my_msg = "invalid Python installation: unable to open %s" % filename
        if hasattr(msg, "strerror"):
            my_msg = my_msg + " (%s)" % msg.strerror
        raise DistutilsPlatformError(my_msg)
    # load the installed pyconfig.h:
    try:
        filename = get_config_h_filename()
        parse_config_h(file(filename), g)
    except IOError, msg:
        my_msg = "invalid Python installation: unable to open %s" % filename
        if hasattr(msg, "strerror"):
            my_msg = my_msg + " (%s)" % msg.strerror
        raise DistutilsPlatformError(my_msg)
    # On MacOSX we need to check the setting of the environment variable
    # MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so
    # it needs to be compatible.
    # If it isn't set we set it to the configure-time value
    if sys.platform == 'darwin' and 'MACOSX_DEPLOYMENT_TARGET' in g:
        cfg_target = g['MACOSX_DEPLOYMENT_TARGET']
        cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '')
        if cur_target == '':
            # Propagate the configure-time target to the environment.
            cur_target = cfg_target
            os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target)
        elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')):
            # A deployment target older than the configure-time one
            # cannot produce compatible binaries; refuse to continue.
            my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'
                % (cur_target, cfg_target))
            raise DistutilsPlatformError(my_msg)
    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if python_build:
        g['LDSHARED'] = g['BLDSHARED']
    elif get_python_version() < '2.1':
        # The following two branches are for 1.5.2 compatibility.
        if sys.platform == 'aix4': # what about AIX 3.x ?
            # Linker script is in the config directory, not in Modules as the
            # Makefile says.
            python_lib = get_python_lib(standard_lib=1)
            ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
            python_exp = os.path.join(python_lib, 'config', 'python.exp')
            g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
        elif sys.platform == 'beos':
            # Linker script is in the config directory. In the Makefile it is
            # relative to the srcdir, which after installation no longer makes
            # sense.
            python_lib = get_python_lib(standard_lib=1)
            linkerscript_path = string.split(g['LDSHARED'])[0]
            linkerscript_name = os.path.basename(linkerscript_path)
            linkerscript = os.path.join(python_lib, 'config',
                                        linkerscript_name)
            # XXX this isn't the right place to do this: adding the Python
            # library to the link, if needed, should be in the "build_ext"
            # command. (It's also needed for non-MS compilers on Windows, and
            # it's taken care of for them by the 'build_ext.get_libraries()'
            # method.)
            g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
                (linkerscript, PREFIX, get_python_version()))
    global _config_vars
    _config_vars = g
def _init_nt():
    """Initialize the module as appropriate for NT"""
    g = {
        # basic install directories
        'LIBDEST': get_python_lib(plat_specific=0, standard_lib=1),
        'BINLIBDEST': get_python_lib(plat_specific=1, standard_lib=1),
        # XXX hmmm.. a normal install puts include files here
        'INCLUDEPY': get_python_inc(plat_specific=0),
        'SO': '.pyd',
        'EXE': ".exe",
        'VERSION': get_python_version().replace(".", ""),
        'BINDIR': os.path.dirname(os.path.abspath(sys.executable)),
    }
    global _config_vars
    _config_vars = g
def _init_os2():
    """Initialize the module as appropriate for OS/2"""
    g = {
        # basic install directories
        'LIBDEST': get_python_lib(plat_specific=0, standard_lib=1),
        'BINLIBDEST': get_python_lib(plat_specific=1, standard_lib=1),
        # XXX hmmm.. a normal install puts include files here
        'INCLUDEPY': get_python_inc(plat_specific=0),
        'SO': '.pyd',
        'EXE': ".exe",
    }
    global _config_vars
    _config_vars = g
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform. Generally this includes
    everything needed to build extensions and install both pure modules and
    extensions. On Unix, this means every variable defined in Python's
    installed Makefile; on Windows and Mac OS it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.

    NOTE: the re.sub()/re.search() patterns below were previously plain
    string literals containing "\\s", "\\w", "\\S" -- invalid string escape
    sequences that newer Pythons warn about.  They are now raw strings;
    the compiled patterns are byte-for-byte identical, so behaviour is
    unchanged.
    """
    global _config_vars
    if _config_vars is None:
        # Dispatch to the platform-specific initializer (_init_posix,
        # _init_nt, _init_os2, ...) based on os.name.
        func = globals().get("_init_" + os.name)
        if func:
            func()
        else:
            _config_vars = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # Distutils.
        _config_vars['prefix'] = PREFIX
        _config_vars['exec_prefix'] = EXEC_PREFIX
        if sys.platform == 'darwin':
            kernel_version = os.uname()[2] # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])
            if major_version < 8:
                # On Mac OS X before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
                        # a number of derived variables. These need to be
                        # patched up as well.
                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _config_vars[key]
                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
                    _config_vars[key] = flags
            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
                            # a number of derived variables. These need to be
                            # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _config_vars[key]
                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _config_vars[key] = flags
                # If we're on OSX 10.5 or later and the user tries to
                # compiles an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major usecase for this is users using a Python.org
                # binary installer on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                m = re.search(r'-isysroot\s+(\S+)', _config_vars['CFLAGS'])
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
                                # a number of derived variables. These need to be
                                # patched up as well.
                                'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _config_vars[key]
                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
                            _config_vars[key] = flags
    if args:
        vals = []
        for name in args:
            vals.append(_config_vars.get(name))
        return vals
    else:
        return _config_vars
def get_config_var(name):
    """Look up one configuration variable.

    Convenience wrapper: equivalent to get_config_vars().get(name),
    so unknown variables yield None.
    """
    all_vars = get_config_vars()
    return all_vars.get(name)
| gpl-3.0 |
groschovskiy/personfinder | app/pytz/zoneinfo/GB_minus_Eire.py | 9 | 9525 | '''tzinfo timezone information for GB_minus_Eire.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class GB_minus_Eire(DstTzInfo):
    '''GB_minus_Eire timezone definition. See datetime.tzinfo for details'''
    # NOTE: this module is machine-generated from the Olson tz database;
    # do not edit the tables by hand.
    zone = 'GB_minus_Eire'
    # UTC instants at which the zone's offset/name changes.  Entry k of
    # _transition_info below describes the period starting at entry k here.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1916,5,21,2,0,0),
d(1916,10,1,2,0,0),
d(1917,4,8,2,0,0),
d(1917,9,17,2,0,0),
d(1918,3,24,2,0,0),
d(1918,9,30,2,0,0),
d(1919,3,30,2,0,0),
d(1919,9,29,2,0,0),
d(1920,3,28,2,0,0),
d(1920,10,25,2,0,0),
d(1921,4,3,2,0,0),
d(1921,10,3,2,0,0),
d(1922,3,26,2,0,0),
d(1922,10,8,2,0,0),
d(1923,4,22,2,0,0),
d(1923,9,16,2,0,0),
d(1924,4,13,2,0,0),
d(1924,9,21,2,0,0),
d(1925,4,19,2,0,0),
d(1925,10,4,2,0,0),
d(1926,4,18,2,0,0),
d(1926,10,3,2,0,0),
d(1927,4,10,2,0,0),
d(1927,10,2,2,0,0),
d(1928,4,22,2,0,0),
d(1928,10,7,2,0,0),
d(1929,4,21,2,0,0),
d(1929,10,6,2,0,0),
d(1930,4,13,2,0,0),
d(1930,10,5,2,0,0),
d(1931,4,19,2,0,0),
d(1931,10,4,2,0,0),
d(1932,4,17,2,0,0),
d(1932,10,2,2,0,0),
d(1933,4,9,2,0,0),
d(1933,10,8,2,0,0),
d(1934,4,22,2,0,0),
d(1934,10,7,2,0,0),
d(1935,4,14,2,0,0),
d(1935,10,6,2,0,0),
d(1936,4,19,2,0,0),
d(1936,10,4,2,0,0),
d(1937,4,18,2,0,0),
d(1937,10,3,2,0,0),
d(1938,4,10,2,0,0),
d(1938,10,2,2,0,0),
d(1939,4,16,2,0,0),
d(1939,11,19,2,0,0),
d(1940,2,25,2,0,0),
d(1941,5,4,1,0,0),
d(1941,8,10,1,0,0),
d(1942,4,5,1,0,0),
d(1942,8,9,1,0,0),
d(1943,4,4,1,0,0),
d(1943,8,15,1,0,0),
d(1944,4,2,1,0,0),
d(1944,9,17,1,0,0),
d(1945,4,2,1,0,0),
d(1945,7,15,1,0,0),
d(1945,10,7,2,0,0),
d(1946,4,14,2,0,0),
d(1946,10,6,2,0,0),
d(1947,3,16,2,0,0),
d(1947,4,13,1,0,0),
d(1947,8,10,1,0,0),
d(1947,11,2,2,0,0),
d(1948,3,14,2,0,0),
d(1948,10,31,2,0,0),
d(1949,4,3,2,0,0),
d(1949,10,30,2,0,0),
d(1950,4,16,2,0,0),
d(1950,10,22,2,0,0),
d(1951,4,15,2,0,0),
d(1951,10,21,2,0,0),
d(1952,4,20,2,0,0),
d(1952,10,26,2,0,0),
d(1953,4,19,2,0,0),
d(1953,10,4,2,0,0),
d(1954,4,11,2,0,0),
d(1954,10,3,2,0,0),
d(1955,4,17,2,0,0),
d(1955,10,2,2,0,0),
d(1956,4,22,2,0,0),
d(1956,10,7,2,0,0),
d(1957,4,14,2,0,0),
d(1957,10,6,2,0,0),
d(1958,4,20,2,0,0),
d(1958,10,5,2,0,0),
d(1959,4,19,2,0,0),
d(1959,10,4,2,0,0),
d(1960,4,10,2,0,0),
d(1960,10,2,2,0,0),
d(1961,3,26,2,0,0),
d(1961,10,29,2,0,0),
d(1962,3,25,2,0,0),
d(1962,10,28,2,0,0),
d(1963,3,31,2,0,0),
d(1963,10,27,2,0,0),
d(1964,3,22,2,0,0),
d(1964,10,25,2,0,0),
d(1965,3,21,2,0,0),
d(1965,10,24,2,0,0),
d(1966,3,20,2,0,0),
d(1966,10,23,2,0,0),
d(1967,3,19,2,0,0),
d(1967,10,29,2,0,0),
d(1968,2,18,2,0,0),
d(1968,10,26,23,0,0),
d(1971,10,31,2,0,0),
d(1972,3,19,2,0,0),
d(1972,10,29,2,0,0),
d(1973,3,18,2,0,0),
d(1973,10,28,2,0,0),
d(1974,3,17,2,0,0),
d(1974,10,27,2,0,0),
d(1975,3,16,2,0,0),
d(1975,10,26,2,0,0),
d(1976,3,21,2,0,0),
d(1976,10,24,2,0,0),
d(1977,3,20,2,0,0),
d(1977,10,23,2,0,0),
d(1978,3,19,2,0,0),
d(1978,10,29,2,0,0),
d(1979,3,18,2,0,0),
d(1979,10,28,2,0,0),
d(1980,3,16,2,0,0),
d(1980,10,26,2,0,0),
d(1981,3,29,1,0,0),
d(1981,10,25,1,0,0),
d(1982,3,28,1,0,0),
d(1982,10,24,1,0,0),
d(1983,3,27,1,0,0),
d(1983,10,23,1,0,0),
d(1984,3,25,1,0,0),
d(1984,10,28,1,0,0),
d(1985,3,31,1,0,0),
d(1985,10,27,1,0,0),
d(1986,3,30,1,0,0),
d(1986,10,26,1,0,0),
d(1987,3,29,1,0,0),
d(1987,10,25,1,0,0),
d(1988,3,27,1,0,0),
d(1988,10,23,1,0,0),
d(1989,3,26,1,0,0),
d(1989,10,29,1,0,0),
d(1990,3,25,1,0,0),
d(1990,10,28,1,0,0),
d(1991,3,31,1,0,0),
d(1991,10,27,1,0,0),
d(1992,3,29,1,0,0),
d(1992,10,25,1,0,0),
d(1993,3,28,1,0,0),
d(1993,10,24,1,0,0),
d(1994,3,27,1,0,0),
d(1994,10,23,1,0,0),
d(1995,3,26,1,0,0),
d(1995,10,22,1,0,0),
d(1996,1,1,0,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
    ]
    # (utcoffset seconds, dst seconds, tzname) for each period; must stay
    # exactly parallel (same length, same order) to _utc_transition_times.
    _transition_info = [
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(3600,0,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
    ]
# The module-level name deliberately shadows the class with a singleton
# instance: consumers import the ready-to-use tzinfo object.
GB_minus_Eire = GB_minus_Eire()
| apache-2.0 |
berrange/nova | nova/scheduler/driver.py | 7 | 5070 | # Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
import sys
from oslo.config import cfg
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.i18n import _, _LW
from nova import notifications
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import rpc
from nova import servicegroup
LOG = logging.getLogger(__name__)
# Option controlling which HostManager implementation scheduler drivers
# instantiate (see Scheduler.__init__ in this module).
scheduler_driver_opts = [
    cfg.StrOpt('scheduler_host_manager',
               default='nova.scheduler.host_manager.HostManager',
               help='The scheduler host manager class to use'),
]
CONF = cfg.CONF
# Register at import time so CONF.scheduler_host_manager is available as
# soon as this module is loaded.
CONF.register_opts(scheduler_driver_opts)
def handle_schedule_error(context, ex, instance_uuid, request_spec):
    """On run_instance failure, update instance state and
    send notifications.
    """
    # NoValidHost is an expected scheduling outcome; anything else is an
    # unexpected failure and gets a full traceback in the log.
    if isinstance(ex, exception.NoValidHost):
        LOG.warning(_LW("NoValidHost exception with message: \'%s\'"),
                    ex.format_message().strip(),
                    instance_uuid=instance_uuid)
    else:
        LOG.exception(_("Exception during scheduler.run_instance"))

    state = vm_states.ERROR.upper()
    LOG.warning(_LW('Setting instance to %s state.'), state,
                instance_uuid=instance_uuid)

    # Persist the error state, emit the usual instance-update notification
    # and record a fault for the instance.
    (old_ref, new_ref) = db.instance_update_and_get_original(
        context, instance_uuid,
        {'vm_state': vm_states.ERROR, 'task_state': None})
    notifications.send_update(context, old_ref, new_ref,
                              service="scheduler")
    compute_utils.add_instance_fault_from_exc(
        context, new_ref, ex, sys.exc_info())

    # Finally, notify listeners that scheduling failed.
    payload = {
        'request_spec': request_spec,
        'instance_properties': request_spec.get('instance_properties', {}),
        'instance_id': instance_uuid,
        'state': vm_states.ERROR,
        'method': 'run_instance',
        'reason': ex,
    }
    rpc.get_notifier('scheduler').error(
        context, 'scheduler.run_instance', payload)
def instance_update_db(context, instance_uuid, extra_values=None):
    """Clear the host and node - set the scheduled_at field of an Instance.

    :returns: An Instance with the updated fields set properly.
    """
    values = {'host': None,
              'node': None,
              'scheduled_at': timeutils.utcnow()}
    # Merge any caller-supplied overrides on top of the defaults.
    values.update(extra_values or {})
    return db.instance_update(context, instance_uuid, values)
class Scheduler(object):
    """The base class that all Scheduler classes should inherit from."""

    def __init__(self):
        # Host manager class is configurable (scheduler_host_manager opt).
        self.host_manager = importutils.import_object(
            CONF.scheduler_host_manager)
        self.servicegroup_api = servicegroup.API()

    def run_periodic_tasks(self, context):
        """Manager calls this so drivers can perform periodic tasks."""
        pass

    def hosts_up(self, context, topic):
        """Return the list of hosts that have a running service for topic."""
        is_up = self.servicegroup_api.service_is_up
        return [svc['host']
                for svc in db.service_get_all_by_topic(context, topic)
                if is_up(svc)]

    # NOTE(alaski): Remove this method when the scheduler rpc interface is
    # bumped to 4.x as it is no longer used.
    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, legacy_bdm_in_spec):
        """Must override schedule_run_instance method for scheduler to work."""
        raise NotImplementedError(
            _("Driver must implement schedule_run_instance"))

    def select_destinations(self, context, request_spec, filter_properties):
        """Must override select_destinations method.

        :return: A list of dicts with 'host', 'nodename' and 'limits' as keys
            that satisfies the request_spec and filter_properties.
        """
        raise NotImplementedError(
            _("Driver must implement select_destinations"))
| apache-2.0 |
HyperBaton/ansible | lib/ansible/modules/network/fortios/fortios_system_alias.py | 7 | 9082 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_alias
short_description: Configure alias command in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and alias category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_alias:
description:
- Configure alias command.
default: null
type: dict
suboptions:
command:
description:
- Command list to execute.
type: str
name:
description:
- Alias command name.
required: true
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure alias command.
fortios_system_alias:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_alias:
command: "<your_own_value>"
name: "default_name_4"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a session on the FortiGate described in *data*.

    Only used in legacy (fortiosapi) mode; *fos* is a FortiOSAPI handle.
    """
    fos.debug('on')
    # HTTPS is on unless the caller explicitly disabled it.
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_system_alias_data(json):
    """Reduce *json* to the attributes the system/alias endpoint accepts,
    dropping keys that are absent or set to None."""
    option_list = ['command', 'name']
    return dict((attribute, json[attribute])
                for attribute in option_list
                if attribute in json and json[attribute] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from foo_bar to foo-bar (the FortiOS
    API spelling).  Lists are converted in place; dicts are rebuilt;
    everything else is returned untouched."""
    if isinstance(data, list):
        for index, element in enumerate(data):
            data[index] = underscore_to_hyphen(element)
        return data
    if isinstance(data, dict):
        return dict((key.replace('_', '-'), underscore_to_hyphen(value))
                    for key, value in data.items())
    return data
def system_alias(data, fos):
    """Create/update (state=present) or delete (state=absent) a
    'system alias' object on the device and return the API response."""
    vdom = data['vdom']
    state = data['state']
    # Strip unset options and convert key spelling before sending.
    filtered_data = underscore_to_hyphen(
        filter_system_alias_data(data['system_alias']))
    if state == "present":
        return fos.set('system',
                       'alias',
                       data=filtered_data,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('system',
                          'alias',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """True when the API reported success; a DELETE that came back with
    HTTP 404 also counts (the object was already gone)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Dispatch the request and return (is_error, changed, response).

    NOTE(review): if data['system_alias'] is falsy, `resp` is never bound
    and this raises UnboundLocalError -- preserved from the original;
    the argument spec normally guarantees the key is a populated dict.
    """
    if data['system_alias']:
        resp = system_alias(data, fos)
    failed = not is_successful_status(resp)
    changed = resp['status'] == "success"
    return failed, changed, resp
def main():
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_alias": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "command": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection Ansible opened.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: open/close a fortiosapi session ourselves.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" is the historical (odd) message used
        # by generated FortiOS modules; kept verbatim for compatibility.
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/lib-tk/Tix.py | 50 | 74119 | # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id: Tix.py 63487 2008-05-20 07:13:37Z georg.brandl $
#
# Tix.py -- Tix widget wrappers.
#
# For Tix, see http://tix.sourceforge.net
#
# - Sudhir Shenoy (sshenoy@gol.com), Dec. 1995.
# based on an idea of Jean-Marc Lugrin (lugrin@ms.com)
#
# NOTE: In order to minimize changes to Tkinter.py, some of the code here
# (TixWidget.__init__) has been taken from Tkinter (Widget.__init__)
# and will break if there are major changes in Tkinter.
#
# The Tix widgets are represented by a class hierarchy in python with proper
# inheritance of base classes.
#
# As a result after creating a 'w = StdButtonBox', I can write
# w.ok['text'] = 'Who Cares'
# or w.ok['bg'] = w['bg']
# or even w.ok.invoke()
# etc.
#
# Compare the demo tixwidgets.py to the original Tcl program and you will
# appreciate the advantages.
#
from Tkinter import *
from Tkinter import _flatten, _cnfmerge, _default_root
# WARNING - TkVersion is a limited precision floating point number
if TkVersion < 3.999:
    raise ImportError, "This version of Tix.py requires Tk 4.0 or higher"

import _tkinter # If this fails your Python may not be configured for Tk

# Some more constants (for consistency with Tkinter)
# Values used for Tix display-item types and widget options.
WINDOW = 'window'
TEXT = 'text'
STATUS = 'status'
IMMEDIATE = 'immediate'
IMAGE = 'image'
IMAGETEXT = 'imagetext'
BALLOON = 'balloon'
AUTO = 'auto'
ACROSSTOP = 'acrosstop'

# Some constants used by Tkinter dooneevent()
# These mirror Tcl's TCL_* event-mask flags.
TCL_DONT_WAIT = 1 << 1
TCL_WINDOW_EVENTS = 1 << 2
TCL_FILE_EVENTS = 1 << 3
TCL_TIMER_EVENTS = 1 << 4
TCL_IDLE_EVENTS = 1 << 5
TCL_ALL_EVENTS = 0
# BEWARE - this is implemented by copying some code from the Widget class
# in Tkinter (to override Widget initialization) and is therefore
# liable to break.
import Tkinter, os
# Could probably add this to Tkinter.Misc
class tixCommand:
    """The tix commands provide access to miscellaneous elements
    of Tix's internal state and the Tix application context.
    Most of the information manipulated by these commands pertains
    to the application as a whole, or to a screen or
    display, rather than to a particular window.

    This is a mixin class, assumed to be mixed to Tkinter.Tk
    that supports the self.tk.call method.
    """

    def tix_addbitmapdir(self, directory):
        """Tix maintains a list of directories under which
        the tix_getimage and tix_getbitmap commands will
        search for image files. The standard bitmap directory
        is $TIX_LIBRARY/bitmaps. The addbitmapdir command
        adds directory into this list. By using this
        command, the image files of an applications can
        also be located using the tix_getimage or tix_getbitmap
        command.
        """
        return self.tk.call('tix', 'addbitmapdir', directory)

    def tix_cget(self, option):
        """Returns the current value of the configuration
        option given by option. Option may be any of the
        options described in the CONFIGURATION OPTIONS section.
        """
        return self.tk.call('tix', 'cget', option)

    def tix_configure(self, cnf=None, **kw):
        """Query or modify the configuration options of the Tix application
        context. If no option is specified, returns a dictionary all of the
        available options.  If option is specified with no value, then the
        command returns a list describing the one named option (this list
        will be identical to the corresponding sublist of the value
        returned if no option is specified).  If one or more option-value
        pairs are specified, then the command modifies the given option(s)
        to have the given value(s); in this case the command returns an
        empty string. Option may be any of the configuration options.
        """
        # Copied from Tkinter.py
        if kw:
            cnf = _cnfmerge((cnf, kw))
        elif cnf:
            cnf = _cnfmerge(cnf)
        if cnf is None:
            # Query everything: build an option-name -> option-tuple dict.
            cnf = {}
            for x in self.tk.split(self.tk.call('tix', 'configure')):
                cnf[x[0][1:]] = (x[0][1:],) + x[1:]
            return cnf
        # StringType comes from Tkinter's star re-export of the types module.
        if isinstance(cnf, StringType):
            x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf))
            return (x[0][1:],) + x[1:]
        return self.tk.call(('tix', 'configure') + self._options(cnf))

    def tix_filedialog(self, dlgclass=None):
        """Returns the file selection dialog that may be shared among
        different calls from this application.  This command will create a
        file selection dialog widget when it is called the first time. This
        dialog will be returned by all subsequent calls to tix_filedialog.
        An optional dlgclass parameter can be passed to specified what type
        of file selection dialog widget is desired. Possible options are
        tix FileSelectDialog or tixExFileSelectDialog.
        """
        if dlgclass is not None:
            return self.tk.call('tix', 'filedialog', dlgclass)
        else:
            return self.tk.call('tix', 'filedialog')

    def tix_getbitmap(self, name):
        """Locates a bitmap file of the name name.xpm or name in one of the
        bitmap directories (see the tix_addbitmapdir command above).  By
        using tix_getbitmap, you can avoid hard coding the pathnames of the
        bitmap files in your application. When successful, it returns the
        complete pathname of the bitmap file, prefixed with the character
        '@'.  The returned value can be used to configure the -bitmap
        option of the TK and Tix widgets.
        """
        return self.tk.call('tix', 'getbitmap', name)

    def tix_getimage(self, name):
        """Locates an image file of the name name.xpm, name.xbm or name.ppm
        in one of the bitmap directories (see the addbitmapdir command
        above). If more than one file with the same name (but different
        extensions) exist, then the image type is chosen according to the
        depth of the X display: xbm images are chosen on monochrome
        displays and color images are chosen on color displays. By using
        tix_ getimage, you can advoid hard coding the pathnames of the
        image files in your application. When successful, this command
        returns the name of the newly created image, which can be used to
        configure the -image option of the Tk and Tix widgets.
        """
        return self.tk.call('tix', 'getimage', name)

    def tix_option_get(self, name):
        """Gets the options manitained by the Tix
        scheme mechanism. Available options include:

            active_bg       active_fg       bg
            bold_font       dark1_bg        dark1_fg
            dark2_bg        dark2_fg        disabled_fg
            fg              fixed_font      font
            inactive_bg     inactive_fg     input1_bg
            input2_bg       italic_font     light1_bg
            light1_fg       light2_bg       light2_fg
            menu_font       output1_bg      output2_bg
            select_bg       select_fg       selector
        """
        # could use self.tk.globalgetvar('tixOption', name)
        return self.tk.call('tix', 'option', 'get', name)

    def tix_resetoptions(self, newScheme, newFontSet, newScmPrio=None):
        """Resets the scheme and fontset of the Tix application to
        newScheme and newFontSet, respectively.  This affects only those
        widgets created after this call. Therefore, it is best to call the
        resetoptions command before the creation of any widgets in a Tix
        application.

        The optional parameter newScmPrio can be given to reset the
        priority level of the Tk options set by the Tix schemes.

        Because of the way Tk handles the X option database, after Tix has
        been has imported and inited, it is not possible to reset the color
        schemes and font sets using the tix config command.  Instead, the
        tix_resetoptions command must be used.
        """
        if newScmPrio is not None:
            return self.tk.call('tix', 'resetoptions', newScheme, newFontSet, newScmPrio)
        else:
            return self.tk.call('tix', 'resetoptions', newScheme, newFontSet)
class Tk(Tkinter.Tk, tixCommand):
    """Toplevel widget of Tix which represents mostly the main window
    of an application. It has an associated Tcl interpreter."""
    def __init__(self, screenName=None, baseName=None, className='Tix'):
        """Create the root window and load the Tix Tcl package into it."""
        Tkinter.Tk.__init__(self, screenName, baseName, className)
        tixlib = os.environ.get('TIX_LIBRARY')
        self.tk.eval('global auto_path; lappend auto_path [file dir [info nameof]]')
        if tixlib is not None:
            # Make an explicitly-configured Tix installation findable by Tcl.
            self.tk.eval('global auto_path; lappend auto_path {%s}' % tixlib)
            self.tk.eval('global tcl_pkgPath; lappend tcl_pkgPath {%s}' % tixlib)
        # Load Tix - this should work dynamically or statically
        # If it's static, tcl/tix8.1/pkgIndex.tcl should have
        #               'load {} Tix'
        # If it's dynamic under Unix, tcl/tix8.1/pkgIndex.tcl should have
        #       'load libtix8.1.8.3.so Tix'
        self.tk.eval('package require Tix')

    def destroy(self):
        # For safety, remove the delete_window binding before destroy
        self.protocol("WM_DELETE_WINDOW", "")
        Tkinter.Tk.destroy(self)
# The Tix 'tixForm' geometry manager
class Form:
    """The Tix Form geometry manager

    Widgets can be arranged by specifying attachments to other widgets.
    See Tix documentation for complete details"""

    def config(self, cnf={}, **kw):
        # Manage this widget with tixForm using the given attachment options.
        self.tk.call('tixForm', self._w, *self._options(cnf, kw))

    form = config

    def __setitem__(self, key, value):
        # Allow 'widget[option] = value' to set a single tixForm option.
        Form.form(self, {key: value})

    def check(self):
        # Delegate to tixForm's 'check' subcommand (see Tix docs).
        return self.tk.call('tixForm', 'check', self._w)

    def forget(self):
        # Remove this widget from tixForm management.
        self.tk.call('tixForm', 'forget', self._w)

    def grid(self, xsize=0, ysize=0):
        # With no sizes: query the current grid and return it as an int tuple.
        # With sizes: set the form's grid.
        if (not xsize) and (not ysize):
            x = self.tk.call('tixForm', 'grid', self._w)
            y = self.tk.splitlist(x)
            z = ()
            for x in y:
                z = z + (self.tk.getint(x),)
            return z
        return self.tk.call('tixForm', 'grid', self._w, xsize, ysize)

    def info(self, option=None):
        # Query tixForm settings; 'option' may be given with or without '-'.
        if not option:
            return self.tk.call('tixForm', 'info', self._w)
        if option[0] != '-':
            option = '-' + option
        return self.tk.call('tixForm', 'info', self._w, option)

    def slaves(self):
        # Map the Tcl window paths managed by this form back to widget objects.
        return map(self._nametowidget,
                   self.tk.splitlist(
                       self.tk.call(
                       'tixForm', 'slaves', self._w)))


# Make every Tkinter widget grow a 'form' geometry method by splicing
# Form into Widget's base classes (monkey-patch).
Tkinter.Widget.__bases__ = Tkinter.Widget.__bases__ + (Form,)
class TixWidget(Tkinter.Widget):
    """A TixWidget class is used to package all (or most) Tix widgets.

    Widget initialization is extended in two ways:
       1) It is possible to give a list of options which must be part of
       the creation command (so called Tix 'static' options). These cannot be
       given as a 'config' command later.
       2) It is possible to give the name of an existing TK widget. These are
       child widgets created automatically by a Tix mega-widget. The Tk call
       to create these widgets is therefore bypassed in TixWidget.__init__

    Both options are for use by subclasses only.
    """
    def __init__ (self, master=None, widgetName=None,
                static_options=None, cnf={}, kw={}):
        # Merge keywords and dictionary arguments
        if kw:
            cnf = _cnfmerge((cnf, kw))
        else:
            cnf = _cnfmerge(cnf)

        # Move static options into extra. static_options must be
        # a list of keywords (or None).
        extra=()

        # 'options' is always a static option
        if static_options:
            static_options.append('options')
        else:
            static_options = ['options']

        # Iterate over a copy since we delete keys from cnf while looping.
        for k,v in cnf.items()[:]:
            if k in static_options:
                extra = extra + ('-' + k, v)
                del cnf[k]

        self.widgetName = widgetName
        Widget._setup(self, master, cnf)

        # If widgetName is None, this is a dummy creation call where the
        # corresponding Tk widget has already been created by Tix
        if widgetName:
            self.tk.call(widgetName, self._w, *extra)

        # Non-static options - to be done via a 'config' command
        if cnf:
            Widget.config(self, cnf)

        # Dictionary to hold subwidget names for easier access. We can't
        # use the children list because the public Tix names may not be the
        # same as the pathname component
        self.subwidget_list = {}

    # We set up an attribute access function so that it is possible to
    # do w.ok['text'] = 'Hello' rather than w.subwidget('ok')['text'] = 'Hello'
    # when w is a StdButtonBox.
    # We can even do w.ok.invoke() because w.ok is subclassed from the
    # Button class if you go through the proper constructors
    def __getattr__(self, name):
        if self.subwidget_list.has_key(name):
            return self.subwidget_list[name]
        raise AttributeError, name

    def set_silent(self, value):
        """Set a variable without calling its action routine"""
        self.tk.call('tixSetSilent', self._w, value)

    def subwidget(self, name):
        """Return the named subwidget (which must have been created by
        the sub-class)."""
        n = self._subwidget_name(name)
        if not n:
            raise TclError, "Subwidget " + name + " not child of " + self._name
        # Remove header of name and leading dot
        n = n[len(self._w)+1:]
        return self._nametowidget(n)

    def subwidgets_all(self):
        """Return all subwidgets."""
        names = self._subwidget_names()
        if not names:
            return []
        retlist = []
        for name in names:
            name = name[len(self._w)+1:]
            try:
                retlist.append(self._nametowidget(name))
            except:
                # some of the widgets are unknown e.g. border in LabelFrame
                pass
        return retlist

    def _subwidget_name(self,name):
        """Get a subwidget name (returns a String, not a Widget !)"""
        try:
            return self.tk.call(self._w, 'subwidget', name)
        except TclError:
            return None

    def _subwidget_names(self):
        """Return the name of all subwidgets."""
        try:
            x = self.tk.call(self._w, 'subwidgets', '-all')
            return self.tk.split(x)
        except TclError:
            return None

    def config_all(self, option, value):
        """Set configuration options for all subwidgets (and self)."""
        if option == '':
            return
        elif not isinstance(option, StringType):
            option = repr(option)
        if not isinstance(value, StringType):
            value = repr(value)
        names = self._subwidget_names()
        for name in names:
            self.tk.call(name, 'configure', '-' + option, value)

    # These are missing from Tkinter
    def image_create(self, imgtype, cnf={}, master=None, **kw):
        if not master:
            # Live attribute lookup so a root created after import is found.
            master = Tkinter._default_root
            if not master:
                raise RuntimeError, 'Too early to create image'
        if kw and cnf: cnf = _cnfmerge((cnf, kw))
        elif kw: cnf = kw
        options = ()
        for k, v in cnf.items():
            if callable(v):
                v = self._register(v)
            options = options + ('-'+k, v)
        return master.tk.call(('image', 'create', imgtype,) + options)

    def image_delete(self, imgname):
        try:
            self.tk.call('image', 'delete', imgname)
        except TclError:
            # May happen if the root was destroyed
            pass
# Subwidgets are child widgets created automatically by mega-widgets.
# In python, we have to create these subwidgets manually to mirror their
# existence in Tk/Tix.
class TixSubWidget(TixWidget):
    """Subwidget class.

    This is used to mirror child widgets automatically created
    by Tix/Tk as part of a mega-widget in Python (which is not informed
    of this)"""

    def __init__(self, master, name,
                 destroy_physically=1, check_intermediate=1):
        if check_intermediate:
            path = master._subwidget_name(name)
            try:
                path = path[len(master._w)+1:]
                plist = path.split('.')
            except:
                plist = []

        if not check_intermediate:
            # immediate descendant
            TixWidget.__init__(self, master, None, None, {'name' : name})
        else:
            # Ensure that the intermediate widgets exist
            parent = master
            for i in range(len(plist) - 1):
                n = '.'.join(plist[:i+1])
                try:
                    w = master._nametowidget(n)
                    parent = w
                except KeyError:
                    # Create the intermediate widget
                    parent = TixSubWidget(parent, plist[i],
                                          destroy_physically=0,
                                          check_intermediate=0)
            # The Tk widget name is in plist, not in name
            if plist:
                name = plist[-1]
            TixWidget.__init__(self, parent, None, None, {'name' : name})
        # When 0, destroy() unregisters the widget but leaves the Tk window
        # alone (it belongs to the enclosing mega-widget).
        self.destroy_physically = destroy_physically

    def destroy(self):
        # For some widgets e.g., a NoteBook, when we call destructors,
        # we must be careful not to destroy the frame widget since this
        # also destroys the parent NoteBook thus leading to an exception
        # in Tkinter when it finally calls Tcl to destroy the NoteBook
        for c in self.children.values(): c.destroy()
        if self.master.children.has_key(self._name):
            del self.master.children[self._name]
        if self.master.subwidget_list.has_key(self._name):
            del self.master.subwidget_list[self._name]
        if self.destroy_physically:
            # This is bypassed only for a few widgets
            self.tk.call('destroy', self._w)
# Useful func. to split Tcl lists and return as a dict. From Tkinter.py
def _lst2dict(lst):
dict = {}
for x in lst:
dict[x[0][1:]] = (x[0][1:],) + x[1:]
return dict
# Useful class to create a display style - later shared by many items.
# Contributed by Steffen Kremser
class DisplayStyle:
"""DisplayStyle - handle configuration options shared by
(multiple) Display Items"""
def __init__(self, itemtype, cnf={}, **kw):
master = _default_root # global from Tkinter
if not master and cnf.has_key('refwindow'): master=cnf['refwindow']
elif not master and kw.has_key('refwindow'): master= kw['refwindow']
elif not master: raise RuntimeError, "Too early to create display style: no root window"
self.tk = master.tk
self.stylename = self.tk.call('tixDisplayStyle', itemtype,
*self._options(cnf,kw) )
def __str__(self):
return self.stylename
def _options(self, cnf, kw):
if kw and cnf:
cnf = _cnfmerge((cnf, kw))
elif kw:
cnf = kw
opts = ()
for k, v in cnf.items():
opts = opts + ('-'+k, v)
return opts
def delete(self):
self.tk.call(self.stylename, 'delete')
def __setitem__(self,key,value):
self.tk.call(self.stylename, 'configure', '-%s'%key, value)
def config(self, cnf={}, **kw):
return _lst2dict(
self.tk.split(
self.tk.call(
self.stylename, 'configure', *self._options(cnf,kw))))
def __getitem__(self,key):
return self.tk.call(self.stylename, 'cget', '-%s'%key)
######################################################
### The Tix Widget classes - in alphabetical order ###
######################################################
class Balloon(TixWidget):
    """Balloon help widget.

    Subwidget       Class
    ---------       -----
    label           Label
    message         Message"""

    # FIXME: It should inherit -superclass tixShell
    def __init__(self, master=None, cnf={}, **kw):
        # static seem to be -installcolormap -initwait -statusbar -cursor
        static = ['options', 'installcolormap', 'initwait', 'statusbar',
                  'cursor']
        TixWidget.__init__(self, master, 'tixBalloon', static, cnf, kw)
        # destroy_physically=0: these belong to the balloon mega-widget.
        self.subwidget_list['label'] = _dummyLabel(self, 'label',
                                                   destroy_physically=0)
        self.subwidget_list['message'] = _dummyLabel(self, 'message',
                                                      destroy_physically=0)

    def bind_widget(self, widget, cnf={}, **kw):
        """Bind balloon widget to another.
        One balloon widget may be bound to several widgets at the same time"""
        self.tk.call(self._w, 'bind', widget._w, *self._options(cnf, kw))

    def unbind_widget(self, widget):
        """Remove the balloon binding from *widget*."""
        self.tk.call(self._w, 'unbind', widget._w)
class ButtonBox(TixWidget):
    """ButtonBox - A container for pushbuttons.
    Subwidgets are the buttons added with the add method.
    """
    def __init__(self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixButtonBox',
                           ['orientation', 'options'], cnf, kw)

    def add(self, name, cnf={}, **kw):
        """Add a button with given name to box."""
        btn = self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
        self.subwidget_list[name] = _dummyButton(self, name)
        return btn

    def invoke(self, name):
        """Invoke the named button, if it was added via add()."""
        if self.subwidget_list.has_key(name):
            self.tk.call(self._w, 'invoke', name)
class ComboBox(TixWidget):
    """ComboBox - an Entry field with a dropdown menu. The user can select a
    choice by either typing in the entry subwdget or selecting from the
    listbox subwidget.

    Subwidget       Class
    ---------       -----
    entry           Entry
    arrow           Button
    slistbox        ScrolledListBox
    tick            Button
    cross           Button : present if created with the fancy option"""

    # FIXME: It should inherit -superclass tixLabelWidget
    def __init__ (self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixComboBox',
                           ['editable', 'dropdown', 'fancy', 'options'],
                           cnf, kw)
        self.subwidget_list['label'] = _dummyLabel(self, 'label')
        self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
        self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
        self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
                                                                'slistbox')
        try:
            self.subwidget_list['tick'] = _dummyButton(self, 'tick')
            self.subwidget_list['cross'] = _dummyButton(self, 'cross')
        except TypeError:
            # unavailable when -fancy not specified
            pass

    # align

    def add_history(self, str):
        self.tk.call(self._w, 'addhistory', str)

    def append_history(self, str):
        self.tk.call(self._w, 'appendhistory', str)

    def insert(self, index, str):
        self.tk.call(self._w, 'insert', index, str)

    def pick(self, index):
        self.tk.call(self._w, 'pick', index)
class Control(TixWidget):
    """Control - An entry field with value change arrows.  The user can
    adjust the value by pressing the two arrow buttons or by entering
    the value directly into the entry. The new value will be checked
    against the user-defined upper and lower limits.

    Subwidget       Class
    ---------       -----
    incr            Button
    decr            Button
    entry           Entry
    label           Label"""

    # FIXME: It should inherit -superclass tixLabelWidget
    def __init__ (self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixControl', ['options'], cnf, kw)
        self.subwidget_list['incr'] = _dummyButton(self, 'incr')
        self.subwidget_list['decr'] = _dummyButton(self, 'decr')
        self.subwidget_list['label'] = _dummyLabel(self, 'label')
        self.subwidget_list['entry'] = _dummyEntry(self, 'entry')

    def decrement(self):
        self.tk.call(self._w, 'decr')

    def increment(self):
        self.tk.call(self._w, 'incr')

    def invoke(self):
        self.tk.call(self._w, 'invoke')

    def update(self):
        self.tk.call(self._w, 'update')
class DirList(TixWidget):
    """DirList - displays a list view of a directory, its previous
    directories and its sub-directories. The user can choose one of
    the directories displayed in the list or change to another directory.

    Subwidget       Class
    ---------       -----
    hlist           HList
    hsb             Scrollbar
    vsb             Scrollbar"""

    # FIXME: It should inherit -superclass tixScrolledHList
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')

    def chdir(self, dir):
        """Display the contents of directory *dir*."""
        self.tk.call(self._w, 'chdir', dir)
class DirTree(TixWidget):
    """DirTree - Directory Listing in a hierarchical view.
    Displays a tree view of a directory, its previous directories and its
    sub-directories. The user can choose one of the directories displayed
    in the list or change to another directory.

    Subwidget       Class
    ---------       -----
    hlist           HList
    hsb             Scrollbar
    vsb             Scrollbar"""

    # FIXME: It should inherit -superclass tixScrolledHList
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixDirTree', ['options'], cnf, kw)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')

    def chdir(self, dir):
        """Display the tree rooted at directory *dir*."""
        self.tk.call(self._w, 'chdir', dir)
class DirSelectBox(TixWidget):
    """DirSelectBox - Motif style directory select box.
    Used for the user to choose a directory.

    Subwidget       Class
    ---------       -----
    dirlist         DirList
    dircbx          ComboBox holding the chosen directory"""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixDirSelectBox', ['options'], cnf, kw)
        self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
        self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
class ExFileSelectBox(TixWidget):
    """ExFileSelectBox - MS Windows style file select box.
    It provides an convenient method for the user to select files.

    Subwidget       Class
    ---------       -----
    cancel       Button
    ok              Button
    hidden       Checkbutton
    types       ComboBox
    dir              ComboBox
    file       ComboBox
    dirlist       ScrolledListBox
    filelist       ScrolledListBox"""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixExFileSelectBox', ['options'], cnf, kw)
        self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
        self.subwidget_list['ok'] = _dummyButton(self, 'ok')
        self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
        self.subwidget_list['types'] = _dummyComboBox(self, 'types')
        self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
        self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
        self.subwidget_list['file'] = _dummyComboBox(self, 'file')
        self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')

    def filter(self):
        """Re-apply the current filter to the file list."""
        self.tk.call(self._w, 'filter')

    def invoke(self):
        """Act as if the user selected the current file."""
        self.tk.call(self._w, 'invoke')
# Should inherit from a Dialog class
class DirSelectDialog(TixWidget):
    """The DirSelectDialog widget presents the directories in the file
    system in a dialog window. The user can use this dialog window to
    navigate through the file system to select the desired directory.

    Subwidgets       Class
    ----------       -----
    dirbox       DirSelectDialog"""

    # FIXME: It should inherit -superclass tixDialogShell
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixDirSelectDialog',
                           ['options'], cnf, kw)
        self.subwidget_list['dirbox'] = _dummyDirSelectBox(self, 'dirbox')
        # cancel and ok buttons are missing

    def popup(self):
        """Display the dialog."""
        self.tk.call(self._w, 'popup')

    def popdown(self):
        """Withdraw the dialog."""
        self.tk.call(self._w, 'popdown')
# Should inherit from a Dialog class
class ExFileSelectDialog(TixWidget):
    """ExFileSelectDialog - MS Windows style file select dialog.
    It provides an convenient method for the user to select files.

    Subwidgets       Class
    ----------       -----
    fsbox       ExFileSelectBox"""

    # FIXME: It should inherit -superclass tixDialogShell
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixExFileSelectDialog',
                           ['options'], cnf, kw)
        self.subwidget_list['fsbox'] = _dummyExFileSelectBox(self, 'fsbox')

    def popup(self):
        """Display the dialog."""
        self.tk.call(self._w, 'popup')

    def popdown(self):
        """Withdraw the dialog."""
        self.tk.call(self._w, 'popdown')
class FileSelectBox(TixWidget):
    """FileSelectBox - Motif style file select box.
    It is generally used for
    the user to choose a file. FileSelectBox stores the files mostly
    recently selected into a ComboBox widget so that they can be quickly
    selected again.

    Subwidget       Class
    ---------       -----
    selection       ComboBox
    filter              ComboBox
    dirlist       ScrolledListBox
    filelist       ScrolledListBox"""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixFileSelectBox', ['options'], cnf, kw)
        self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
        self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
        self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
        self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')

    def apply_filter(self):              # name of subwidget is same as command
        self.tk.call(self._w, 'filter')

    def invoke(self):
        self.tk.call(self._w, 'invoke')
# Should inherit from a Dialog class
class FileSelectDialog(TixWidget):
    """FileSelectDialog - Motif style file select dialog.

    Subwidgets       Class
    ----------       -----
    btns       StdButtonBox
    fsbox       FileSelectBox"""

    # FIXME: It should inherit -superclass tixStdDialogShell
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixFileSelectDialog',
                           ['options'], cnf, kw)
        self.subwidget_list['btns'] = _dummyStdButtonBox(self, 'btns')
        self.subwidget_list['fsbox'] = _dummyFileSelectBox(self, 'fsbox')

    def popup(self):
        """Display the dialog."""
        self.tk.call(self._w, 'popup')

    def popdown(self):
        """Withdraw the dialog."""
        self.tk.call(self._w, 'popdown')
class FileEntry(TixWidget):
    """FileEntry - Entry field with button that invokes a FileSelectDialog.
    The user can type in the filename manually. Alternatively, the user can
    press the button widget that sits next to the entry, which will bring
    up a file selection dialog.

    Subwidgets       Class
    ----------       -----
    button       Button
    entry       Entry"""

    # FIXME: It should inherit -superclass tixLabelWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixFileEntry',
                           ['dialogtype', 'options'], cnf, kw)
        self.subwidget_list['button'] = _dummyButton(self, 'button')
        self.subwidget_list['entry'] = _dummyEntry(self, 'entry')

    def invoke(self):
        self.tk.call(self._w, 'invoke')

    def file_dialog(self):
        # FIXME: return python object
        pass
class HList(TixWidget):
"""HList - Hierarchy display widget can be used to display any data
that have a hierarchical structure, for example, file system directory
trees. The list entries are indented and connected by branch lines
according to their places in the hierachy.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixHList',
['columns', 'options'], cnf, kw)
def add(self, entry, cnf={}, **kw):
return self.tk.call(self._w, 'add', entry, *self._options(cnf, kw))
def add_child(self, parent=None, cnf={}, **kw):
if not parent:
parent = ''
return self.tk.call(
self._w, 'addchild', parent, *self._options(cnf, kw))
def anchor_set(self, entry):
self.tk.call(self._w, 'anchor', 'set', entry)
def anchor_clear(self):
self.tk.call(self._w, 'anchor', 'clear')
def column_width(self, col=0, width=None, chars=None):
if not chars:
return self.tk.call(self._w, 'column', 'width', col, width)
else:
return self.tk.call(self._w, 'column', 'width', col,
'-char', chars)
def delete_all(self):
self.tk.call(self._w, 'delete', 'all')
def delete_entry(self, entry):
self.tk.call(self._w, 'delete', 'entry', entry)
def delete_offsprings(self, entry):
self.tk.call(self._w, 'delete', 'offsprings', entry)
def delete_siblings(self, entry):
self.tk.call(self._w, 'delete', 'siblings', entry)
def dragsite_set(self, index):
self.tk.call(self._w, 'dragsite', 'set', index)
def dragsite_clear(self):
self.tk.call(self._w, 'dragsite', 'clear')
def dropsite_set(self, index):
self.tk.call(self._w, 'dropsite', 'set', index)
def dropsite_clear(self):
self.tk.call(self._w, 'dropsite', 'clear')
def header_create(self, col, cnf={}, **kw):
self.tk.call(self._w, 'header', 'create', col, *self._options(cnf, kw))
def header_configure(self, col, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'header', 'configure', col)))
self.tk.call(self._w, 'header', 'configure', col,
*self._options(cnf, kw))
def header_cget(self, col, opt):
return self.tk.call(self._w, 'header', 'cget', col, opt)
def header_exists(self, col):
return self.tk.call(self._w, 'header', 'exists', col)
def header_delete(self, col):
self.tk.call(self._w, 'header', 'delete', col)
def header_size(self, col):
return self.tk.call(self._w, 'header', 'size', col)
def hide_entry(self, entry):
self.tk.call(self._w, 'hide', 'entry', entry)
def indicator_create(self, entry, cnf={}, **kw):
self.tk.call(
self._w, 'indicator', 'create', entry, *self._options(cnf, kw))
def indicator_configure(self, entry, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'indicator', 'configure', entry)))
self.tk.call(
self._w, 'indicator', 'configure', entry, *self._options(cnf, kw))
def indicator_cget(self, entry, opt):
return self.tk.call(self._w, 'indicator', 'cget', entry, opt)
def indicator_exists(self, entry):
return self.tk.call (self._w, 'indicator', 'exists', entry)
def indicator_delete(self, entry):
self.tk.call(self._w, 'indicator', 'delete', entry)
def indicator_size(self, entry):
return self.tk.call(self._w, 'indicator', 'size', entry)
def info_anchor(self):
return self.tk.call(self._w, 'info', 'anchor')
def info_children(self, entry=None):
c = self.tk.call(self._w, 'info', 'children', entry)
return self.tk.splitlist(c)
def info_data(self, entry):
return self.tk.call(self._w, 'info', 'data', entry)
def info_exists(self, entry):
return self.tk.call(self._w, 'info', 'exists', entry)
def info_hidden(self, entry):
return self.tk.call(self._w, 'info', 'hidden', entry)
def info_next(self, entry):
return self.tk.call(self._w, 'info', 'next', entry)
def info_parent(self, entry):
return self.tk.call(self._w, 'info', 'parent', entry)
def info_prev(self, entry):
return self.tk.call(self._w, 'info', 'prev', entry)
def info_selection(self):
c = self.tk.call(self._w, 'info', 'selection')
return self.tk.splitlist(c)
def item_cget(self, entry, col, opt):
return self.tk.call(self._w, 'item', 'cget', entry, col, opt)
def item_configure(self, entry, col, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'item', 'configure', entry, col)))
self.tk.call(self._w, 'item', 'configure', entry, col,
*self._options(cnf, kw))
def item_create(self, entry, col, cnf={}, **kw):
self.tk.call(
self._w, 'item', 'create', entry, col, *self._options(cnf, kw))
def item_exists(self, entry, col):
return self.tk.call(self._w, 'item', 'exists', entry, col)
def item_delete(self, entry, col):
self.tk.call(self._w, 'item', 'delete', entry, col)
def entrycget(self, entry, opt):
return self.tk.call(self._w, 'entrycget', entry, opt)
    def entryconfigure(self, entry, cnf={}, **kw):
        # NOTE(review): as with item_configure, cnf must be None explicitly
        # to read back the configuration dictionary.
        if cnf is None:
            return _lst2dict(
                self.tk.split(
                    self.tk.call(self._w, 'entryconfigure', entry)))
        self.tk.call(self._w, 'entryconfigure', entry,
                     *self._options(cnf, kw))
    def nearest(self, y):
        # Path of the entry nearest vertical pixel coordinate `y`.
        return self.tk.call(self._w, 'nearest', y)
    def see(self, entry):
        # Scroll so that `entry` becomes visible.
        self.tk.call(self._w, 'see', entry)
    def selection_clear(self, cnf={}, **kw):
        # Clear the selection (options forwarded to the Tcl command).
        self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw))
    def selection_includes(self, entry):
        # True-ish Tcl result if `entry` is in the selection.
        return self.tk.call(self._w, 'selection', 'includes', entry)
    def selection_set(self, first, last=None):
        # Select `first`, or the range first..last when `last` is given.
        self.tk.call(self._w, 'selection', 'set', first, last)
    def show_entry(self, entry):
        # Unhide a previously hidden `entry`.
        return self.tk.call(self._w, 'show', 'entry', entry)
    def xview(self, *args):
        # Forward horizontal-scroll subcommands to the widget.
        self.tk.call(self._w, 'xview', *args)
    def yview(self, *args):
        # Forward vertical-scroll subcommands to the widget.
        self.tk.call(self._w, 'yview', *args)
class InputOnly(TixWidget):
    """InputOnly - Invisible widget. Unix only.

    Subwidgets - None"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixInputOnly', None, cnf, kw)
class LabelEntry(TixWidget):
    """LabelEntry - Entry field with label. Packages an entry widget
    and a label into one mega widget. It can be used to simplify
    the creation of ``entry-form'' type of interface.

    Subwidgets       Class
    ----------       -----
    label            Label
    entry            Entry"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixLabelEntry',
                           ['labelside','options'], cnf, kw)
        self.subwidget_list['label'] = _dummyLabel(self, 'label')
        self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
class LabelFrame(TixWidget):
    """LabelFrame - Labelled Frame container. Packages a frame widget
    and a label into one mega widget. To create widgets inside a
    LabelFrame widget, one creates the new widgets relative to the
    frame subwidget and manage them inside the frame subwidget.

    Subwidgets       Class
    ----------       -----
    label            Label
    frame            Frame"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixLabelFrame',
                           ['labelside','options'], cnf, kw)
        self.subwidget_list['label'] = _dummyLabel(self, 'label')
        self.subwidget_list['frame'] = _dummyFrame(self, 'frame')
class ListNoteBook(TixWidget):
    """A ListNoteBook widget is very similar to the TixNoteBook widget:
    it can be used to display many windows in a limited space using a
    notebook metaphor. The notebook is divided into a stack of pages
    (windows). At one time only one of these pages can be shown.
    The user can navigate through these pages by
    choosing the name of the desired page in the hlist subwidget."""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixListNoteBook', ['options'], cnf, kw)
        # Is this necessary? It's not an exposed subwidget in Tix.
        self.subwidget_list['pane'] = _dummyPanedWindow(self, 'pane',
                                                        destroy_physically=0)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['shlist'] = _dummyScrolledHList(self, 'shlist')

    def add(self, name, cnf={}, **kw):
        # Create a page named `name` and return its proxy subwidget.
        self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
        self.subwidget_list[name] = TixSubWidget(self, name)
        return self.subwidget_list[name]

    def page(self, name):
        # Look up the page widget registered under `name`.
        return self.subwidget(name)

    def pages(self):
        # Can't call subwidgets_all directly because we don't want .nbframe
        names = self.tk.split(self.tk.call(self._w, 'pages'))
        ret = []
        for x in names:
            ret.append(self.subwidget(x))
        return ret

    def raise_page(self, name):              # raise is a python keyword
        self.tk.call(self._w, 'raise', name)
class Meter(TixWidget):
    """The Meter widget can be used to show the progress of a background
    job which may take a long time to execute.
    """

    def __init__(self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixMeter',
                           ['options'], cnf, kw)
class NoteBook(TixWidget):
    """NoteBook - Multi-page container widget (tabbed notebook metaphor).

    Subwidgets       Class
    ----------       -----
    nbframe          NoteBookFrame
    <pages>          page widgets added dynamically with the add method"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self,master,'tixNoteBook', ['options'], cnf, kw)
        self.subwidget_list['nbframe'] = TixSubWidget(self, 'nbframe',
                                                      destroy_physically=0)

    def add(self, name, cnf={}, **kw):
        # Create a page named `name` and return its proxy subwidget.
        self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
        self.subwidget_list[name] = TixSubWidget(self, name)
        return self.subwidget_list[name]

    def delete(self, name):
        # Remove the page both from Tix and from our proxy registry.
        self.tk.call(self._w, 'delete', name)
        self.subwidget_list[name].destroy()
        del self.subwidget_list[name]

    def page(self, name):
        return self.subwidget(name)

    def pages(self):
        # Can't call subwidgets_all directly because we don't want .nbframe
        names = self.tk.split(self.tk.call(self._w, 'pages'))
        ret = []
        for x in names:
            ret.append(self.subwidget(x))
        return ret

    def raise_page(self, name):              # raise is a python keyword
        self.tk.call(self._w, 'raise', name)

    def raised(self):
        # Name of the page currently on top.
        return self.tk.call(self._w, 'raised')
class NoteBookFrame(TixWidget):
    # FIXME: This is dangerous to expose to be called on its own.
    pass
class OptionMenu(TixWidget):
    """OptionMenu - creates a menu button of options.

    Subwidget       Class
    ---------       -----
    menubutton      Menubutton
    menu            Menu"""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixOptionMenu', ['options'], cnf, kw)
        self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
        self.subwidget_list['menu'] = _dummyMenu(self, 'menu')

    def add_command(self, name, cnf={}, **kw):
        # Append a selectable command entry named `name`.
        self.tk.call(self._w, 'add', 'command', name, *self._options(cnf, kw))

    def add_separator(self, name, cnf={}, **kw):
        # Append a separator entry named `name`.
        self.tk.call(self._w, 'add', 'separator', name, *self._options(cnf, kw))

    def delete(self, name):
        self.tk.call(self._w, 'delete', name)

    def disable(self, name):
        self.tk.call(self._w, 'disable', name)

    def enable(self, name):
        self.tk.call(self._w, 'enable', name)
class PanedWindow(TixWidget):
    """PanedWindow - Multi-pane container widget
    allows the user to interactively manipulate the sizes of several
    panes. The panes can be arranged either vertically or horizontally.
    The user changes the sizes of the panes by dragging the resize
    handle between two panes.

    Subwidgets       Class
    ----------       -----
    <panes>          g/p widgets added dynamically with the add method."""

    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixPanedWindow',
                           ['orientation', 'options'], cnf, kw)

    # add delete forget panecget paneconfigure panes setsize
    def add(self, name, cnf={}, **kw):
        # Create a pane named `name` and return its proxy subwidget.
        self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
        self.subwidget_list[name] = TixSubWidget(self, name,
                                                 check_intermediate=0)
        return self.subwidget_list[name]

    def delete(self, name):
        # Remove the pane both from Tix and from our proxy registry.
        self.tk.call(self._w, 'delete', name)
        self.subwidget_list[name].destroy()
        del self.subwidget_list[name]

    def forget(self, name):
        self.tk.call(self._w, 'forget', name)

    def panecget(self, entry, opt):
        return self.tk.call(self._w, 'panecget', entry, opt)

    def paneconfigure(self, entry, cnf={}, **kw):
        # Pass cnf=None explicitly to read back the full configuration.
        if cnf is None:
            return _lst2dict(
                self.tk.split(
                    self.tk.call(self._w, 'paneconfigure', entry)))
        self.tk.call(self._w, 'paneconfigure', entry, *self._options(cnf, kw))

    def panes(self):
        # BUG FIX: the Tcl result is one string of pane names; it must be
        # split into a list, otherwise the loop iterated over characters.
        names = self.tk.splitlist(self.tk.call(self._w, 'panes'))
        return [self.subwidget(x) for x in names]
class PopupMenu(TixWidget):
    """PopupMenu widget can be used as a replacement of the tk_popup command.
    The advantage of the Tix PopupMenu widget is it requires less application
    code to manipulate.

    Subwidgets       Class
    ----------       -----
    menubutton       Menubutton
    menu             Menu"""

    # FIXME: It should inherit -superclass tixShell
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixPopupMenu', ['options'], cnf, kw)
        self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
        self.subwidget_list['menu'] = _dummyMenu(self, 'menu')

    def bind_widget(self, widget):
        # Arm `widget` so that a right-click pops up this menu.
        self.tk.call(self._w, 'bind', widget._w)

    def unbind_widget(self, widget):
        self.tk.call(self._w, 'unbind', widget._w)

    def post_widget(self, widget, x, y):
        # Pop the menu up at (x, y) relative to `widget`.
        self.tk.call(self._w, 'post', widget._w, x, y)
class ResizeHandle(TixWidget):
    """Internal widget to draw resize handles on Scrolled widgets."""

    def __init__(self, master, cnf={}, **kw):
        # There seems to be a Tix bug rejecting the configure method
        # Let's try making the flags -static
        flags = ['options', 'command', 'cursorfg', 'cursorbg',
                 'handlesize', 'hintcolor', 'hintwidth',
                 'x', 'y']
        # In fact, x y height width are configurable
        TixWidget.__init__(self, master, 'tixResizeHandle',
                           flags, cnf, kw)

    def attach_widget(self, widget):
        # Start managing `widget` with this handle.
        self.tk.call(self._w, 'attachwidget', widget._w)

    def detach_widget(self, widget):
        self.tk.call(self._w, 'detachwidget', widget._w)

    def hide(self, widget):
        self.tk.call(self._w, 'hide', widget._w)

    def show(self, widget):
        self.tk.call(self._w, 'show', widget._w)
class ScrolledHList(TixWidget):
    """ScrolledHList - HList with automatic scrollbars."""

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixScrolledHList', ['options'],
                           cnf, kw)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledListBox(TixWidget):
    """ScrolledListBox - Listbox with automatic scrollbars."""

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixScrolledListBox', ['options'], cnf, kw)
        self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledText(TixWidget):
    """ScrolledText - Text with automatic scrollbars."""

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixScrolledText', ['options'], cnf, kw)
        self.subwidget_list['text'] = _dummyText(self, 'text')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledTList(TixWidget):
    """ScrolledTList - TList with automatic scrollbars."""

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixScrolledTList', ['options'],
                           cnf, kw)
        self.subwidget_list['tlist'] = _dummyTList(self, 'tlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledWindow(TixWidget):
    """ScrolledWindow - Window with automatic scrollbars."""

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixScrolledWindow', ['options'], cnf, kw)
        self.subwidget_list['window'] = _dummyFrame(self, 'window')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class Select(TixWidget):
    """Select - Container of button subwidgets. It can be used to provide
    radio-box or check-box style of selection options for the user.

    Subwidgets are buttons added dynamically using the add method."""

    # FIXME: It should inherit -superclass tixLabelWidget
    def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixSelect',
                           ['allowzero', 'radio', 'orientation', 'labelside',
                            'options'],
                           cnf, kw)
        self.subwidget_list['label'] = _dummyLabel(self, 'label')

    def add(self, name, cnf={}, **kw):
        # Create a new option button named `name` and return its proxy.
        self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
        self.subwidget_list[name] = _dummyButton(self, name)
        return self.subwidget_list[name]

    def invoke(self, name):
        self.tk.call(self._w, 'invoke', name)
class Shell(TixWidget):
    """Toplevel window.

    Subwidgets - None"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixShell', ['options', 'title'], cnf, kw)
class DialogShell(TixWidget):
    """Toplevel window, with popup popdown and center methods.
    It tells the window manager that it is a dialog window and should be
    treated specially. The exact treatment depends on the treatment of
    the window manager.

    Subwidgets - None"""

    # FIXME: It should inherit from Shell
    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master,
                           'tixDialogShell',
                           ['options', 'title', 'mapped',
                            'minheight', 'minwidth',
                            'parent', 'transient'], cnf, kw)

    def popdown(self):
        # Withdraw the dialog.
        self.tk.call(self._w, 'popdown')

    def popup(self):
        # Display the dialog.
        self.tk.call(self._w, 'popup')

    def center(self):
        # Center the dialog over its parent (or the screen).
        self.tk.call(self._w, 'center')
class StdButtonBox(TixWidget):
    """StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help)."""

    def __init__(self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixStdButtonBox',
                           ['orientation', 'options'], cnf, kw)
        self.subwidget_list['ok'] = _dummyButton(self, 'ok')
        self.subwidget_list['apply'] = _dummyButton(self, 'apply')
        self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
        self.subwidget_list['help'] = _dummyButton(self, 'help')

    def invoke(self, name):
        # Invoke only buttons we know about.  `in` replaces dict.has_key(),
        # which was removed in Python 3 (identical behavior on Python 2).
        if name in self.subwidget_list:
            self.tk.call(self._w, 'invoke', name)
class TList(TixWidget):
    """TList - Hierarchy display widget which can be
    used to display data in a tabular format. The list entries of a TList
    widget are similar to the entries in the Tk listbox widget. The main
    differences are (1) the TList widget can display the list entries in a
    two dimensional format and (2) you can use graphical images as well as
    multiple colors and fonts for the list entries.

    Subwidgets - None"""

    def __init__ (self,master=None,cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixTList', ['options'], cnf, kw)

    def active_set(self, index):
        # Make the entry at `index` the active one.
        self.tk.call(self._w, 'active', 'set', index)

    def active_clear(self):
        self.tk.call(self._w, 'active', 'clear')

    def anchor_set(self, index):
        # Set the selection anchor to `index`.
        self.tk.call(self._w, 'anchor', 'set', index)

    def anchor_clear(self):
        self.tk.call(self._w, 'anchor', 'clear')

    def delete(self, from_, to=None):
        # Delete entries from_..to (or just from_ when to is None).
        self.tk.call(self._w, 'delete', from_, to)

    def dragsite_set(self, index):
        self.tk.call(self._w, 'dragsite', 'set', index)

    def dragsite_clear(self):
        self.tk.call(self._w, 'dragsite', 'clear')

    def dropsite_set(self, index):
        self.tk.call(self._w, 'dropsite', 'set', index)

    def dropsite_clear(self):
        self.tk.call(self._w, 'dropsite', 'clear')

    def insert(self, index, cnf={}, **kw):
        self.tk.call(self._w, 'insert', index, *self._options(cnf, kw))

    def info_active(self):
        return self.tk.call(self._w, 'info', 'active')

    def info_anchor(self):
        return self.tk.call(self._w, 'info', 'anchor')

    def info_down(self, index):
        return self.tk.call(self._w, 'info', 'down', index)

    def info_left(self, index):
        return self.tk.call(self._w, 'info', 'left', index)

    def info_right(self, index):
        return self.tk.call(self._w, 'info', 'right', index)

    def info_selection(self):
        # Tuple of the currently selected indices.
        c = self.tk.call(self._w, 'info', 'selection')
        return self.tk.splitlist(c)

    def info_size(self):
        return self.tk.call(self._w, 'info', 'size')

    def info_up(self, index):
        return self.tk.call(self._w, 'info', 'up', index)

    def nearest(self, x, y):
        # Index of the entry nearest pixel coordinate (x, y).
        return self.tk.call(self._w, 'nearest', x, y)

    def see(self, index):
        self.tk.call(self._w, 'see', index)

    def selection_clear(self, cnf={}, **kw):
        self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw))

    def selection_includes(self, index):
        return self.tk.call(self._w, 'selection', 'includes', index)

    def selection_set(self, first, last=None):
        self.tk.call(self._w, 'selection', 'set', first, last)

    def xview(self, *args):
        self.tk.call(self._w, 'xview', *args)

    def yview(self, *args):
        self.tk.call(self._w, 'yview', *args)
class Tree(TixWidget):
    """Tree - The tixTree widget can be used to display hierarchical
    data in a tree form. The user can adjust
    the view of the tree by opening or closing parts of the tree."""

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixTree',
                           ['options'], cnf, kw)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')

    def autosetmode(self):
        '''This command calls the setmode method for all the entries in this
        Tree widget: if an entry has no child entries, its mode is set to
        none. Otherwise, if the entry has any hidden child entries, its mode
        is set to open; otherwise its mode is set to close.'''
        self.tk.call(self._w, 'autosetmode')

    def close(self, entrypath):
        '''Close the entry given by entryPath if its mode is close.'''
        self.tk.call(self._w, 'close', entrypath)

    def getmode(self, entrypath):
        '''Returns the current mode of the entry given by entryPath.'''
        return self.tk.call(self._w, 'getmode', entrypath)

    def open(self, entrypath):
        '''Open the entry given by entryPath if its mode is open.'''
        self.tk.call(self._w, 'open', entrypath)

    def setmode(self, entrypath, mode='none'):
        '''This command is used to indicate whether the entry given by
        entryPath has children entries and whether the children are visible.
        mode must be one of open, close or none. If mode is set to open, a
        (+) indicator is drawn next to the entry. If mode is set to close, a
        (-) indicator is drawn next to the entry. If mode is set to none, no
        indicators will be drawn for this entry. The default mode is none.
        The open mode indicates the entry has hidden children and this entry
        can be opened by the user. The close mode indicates that all the
        children of the entry are now visible and the entry can be closed by
        the user.'''
        self.tk.call(self._w, 'setmode', entrypath, mode)
# Could try subclassing Tree for CheckList - would need another arg to init
class CheckList(TixWidget):
    """The CheckList widget
    displays a list of items to be selected by the user. CheckList acts
    similarly to the Tk checkbutton or radiobutton widgets, except it is
    capable of handling many more items than checkbuttons or radiobuttons.
    """

    # FIXME: It should inherit -superclass tixTree
    def __init__(self, master=None, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixCheckList',
                           ['options'], cnf, kw)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')

    def autosetmode(self):
        '''This command calls the setmode method for all the entries in this
        Tree widget: if an entry has no child entries, its mode is set to
        none. Otherwise, if the entry has any hidden child entries, its mode
        is set to open; otherwise its mode is set to close.'''
        self.tk.call(self._w, 'autosetmode')

    def close(self, entrypath):
        '''Close the entry given by entryPath if its mode is close.'''
        self.tk.call(self._w, 'close', entrypath)

    def getmode(self, entrypath):
        '''Returns the current mode of the entry given by entryPath.'''
        return self.tk.call(self._w, 'getmode', entrypath)

    def open(self, entrypath):
        '''Open the entry given by entryPath if its mode is open.'''
        self.tk.call(self._w, 'open', entrypath)

    def getselection(self, mode='on'):
        '''Returns a list of items whose status matches mode. If mode is
        not specified, the list of items in the "on" status will be
        returned. Mode can be on, off, default.'''
        c = self.tk.split(self.tk.call(self._w, 'getselection', mode))
        return self.tk.splitlist(c)

    def getstatus(self, entrypath):
        '''Returns the current status of entryPath.'''
        return self.tk.call(self._w, 'getstatus', entrypath)

    def setstatus(self, entrypath, mode='on'):
        '''Sets the status of entryPath to be mode. A bitmap will be
        displayed next to the entry if its status is on, off or default.'''
        self.tk.call(self._w, 'setstatus', entrypath, mode)
###########################################################################
### The subclassing below is used to instantiate the subwidgets in each ###
### mega widget. This allows us to access their methods directly. ###
###########################################################################
class _dummyButton(Button, TixSubWidget):
    # Proxy exposing Button methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyCheckbutton(Checkbutton, TixSubWidget):
    # Proxy exposing Checkbutton methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyEntry(Entry, TixSubWidget):
    # Proxy exposing Entry methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyFrame(Frame, TixSubWidget):
    # Proxy exposing Frame methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyLabel(Label, TixSubWidget):
    # Proxy exposing Label methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyListbox(Listbox, TixSubWidget):
    # Proxy exposing Listbox methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyMenu(Menu, TixSubWidget):
    # Proxy exposing Menu methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyMenubutton(Menubutton, TixSubWidget):
    # Proxy exposing Menubutton methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrollbar(Scrollbar, TixSubWidget):
    # Proxy exposing Scrollbar methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyText(Text, TixSubWidget):
    # Proxy exposing Text methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrolledListBox(ScrolledListBox, TixSubWidget):
    # Proxy for a ScrolledListBox subwidget; re-registers its own children.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyHList(HList, TixSubWidget):
    # Proxy exposing HList methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrolledHList(ScrolledHList, TixSubWidget):
    # Proxy for a ScrolledHList subwidget; re-registers its own children.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyTList(TList, TixSubWidget):
    # Proxy exposing TList methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyComboBox(ComboBox, TixSubWidget):
    # Proxy for a ComboBox subwidget; re-registers its own children.
    def __init__(self, master, name, destroy_physically=1):
        # NOTE(review): a list is passed where other proxies pass the plain
        # destroy_physically flag -- presumably deliberate ('fancy' marker),
        # but worth confirming against TixSubWidget.__init__.
        TixSubWidget.__init__(self, master, name, ['fancy',destroy_physically])
        self.subwidget_list['label'] = _dummyLabel(self, 'label')
        self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
        self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
        self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
                                                                'slistbox')
        try:
            self.subwidget_list['tick'] = _dummyButton(self, 'tick')
            #cross Button : present if created with the fancy option
            self.subwidget_list['cross'] = _dummyButton(self, 'cross')
        except TypeError:
            # unavailable when -fancy not specified
            pass
class _dummyDirList(DirList, TixSubWidget):
    # Proxy for a DirList subwidget; re-registers its own children.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyDirSelectBox(DirSelectBox, TixSubWidget):
    # Proxy for a DirSelectBox subwidget; re-registers its own children.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
        self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
class _dummyExFileSelectBox(ExFileSelectBox, TixSubWidget):
    # Proxy for an ExFileSelectBox subwidget; re-registers its own children.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
        self.subwidget_list['ok'] = _dummyButton(self, 'ok')
        self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
        self.subwidget_list['types'] = _dummyComboBox(self, 'types')
        self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
        self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
        self.subwidget_list['file'] = _dummyComboBox(self, 'file')
        self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
class _dummyFileSelectBox(FileSelectBox, TixSubWidget):
    # Proxy for a FileSelectBox subwidget; re-registers its own children.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
        self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
        self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
        self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
class _dummyFileComboBox(ComboBox, TixSubWidget):
    # Proxy for the directory combo box inside DirSelect widgets.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['dircbx'] = _dummyComboBox(self, 'dircbx')
class _dummyStdButtonBox(StdButtonBox, TixSubWidget):
    # Proxy for a StdButtonBox subwidget; re-registers its four buttons.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
        self.subwidget_list['ok'] = _dummyButton(self, 'ok')
        self.subwidget_list['apply'] = _dummyButton(self, 'apply')
        self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
        self.subwidget_list['help'] = _dummyButton(self, 'help')
class _dummyNoteBookFrame(NoteBookFrame, TixSubWidget):
    # Proxy for the notebook frame; never destroyed physically by default.
    def __init__(self, master, name, destroy_physically=0):
        TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyPanedWindow(PanedWindow, TixSubWidget):
    # Proxy exposing PanedWindow methods on an existing Tix subwidget.
    def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, destroy_physically)
########################
### Utility Routines ###
########################
#mike Should tixDestroy be exposed as a wrapper? - but not for widgets.
def OptionName(widget):
    '''Returns the qualified path name for the widget. Normally used to set
    default options for subwidgets. See tixwidgets.py'''
    return widget.tk.call('tixOptionName', widget._w)
# Called with a dictionary argument of the form
# {'*.c':'C source files', '*.txt':'Text Files', '*':'All files'}
# returns a string which can be used to configure the fsbox file types
# in an ExFileSelectBox. i.e.,
# '{{*} {* - All files}} {{*.c} {*.c - C source files}} {{*.txt} {*.txt - Text Files}}'
def FileTypeList(dict):
    '''Turn a mapping of glob pattern -> description, e.g.
    {'*.c': 'C source files', '*': 'All files'}, into the Tcl list string
    used to configure ExFileSelectBox file types, e.g.
    "{{*.c} {*.c - C source files}} {{*} {* - All files}} ".

    The parameter name shadows the builtin `dict`; kept for backward
    compatibility with existing keyword callers.'''
    # ''.join avoids the quadratic repeated string concatenation of the
    # original loop; entry order follows the mapping's key order.
    return ''.join('{{%s} {%s - %s}} ' % (pattern, pattern, dict[pattern])
                   for pattern in dict.keys())
# Still to be done:
# tixIconView
class CObjView(TixWidget):
    """This file implements the Canvas Object View widget. This is a base
    class of IconView. It implements automatic placement/adjustment of the
    scrollbars according to the canvas objects inside the canvas subwidget.
    The scrollbars are adjusted so that the canvas is just large enough
    to see all the objects.
    """
    # FIXME: It should inherit -superclass tixScrolledWidget
    pass
class Grid(TixWidget):
    '''The Tix Grid command creates a new window and makes it into a
    tixGrid widget. Additional options, may be specified on the command
    line or in the option database to configure aspects such as its cursor
    and relief.

    A Grid widget displays its contents in a two dimensional grid of cells.
    Each cell may contain one Tix display item, which may be in text,
    graphics or other formats. See the DisplayStyle class for more
    information about Tix display items. Individual cells, or groups of
    cells, can be formatted with a wide range of attributes, such as its
    color, relief and border.

    Subwidgets - None'''
    # valid specific resources as of Tk 8.4
    # editdonecmd, editnotifycmd, floatingcols, floatingrows, formatcmd,
    # highlightbackground, highlightcolor, leftmargin, itemtype, selectmode,
    # selectunit, topmargin,
    def __init__(self, master=None, cnf={}, **kw):
        static = []
        self.cnf = cnf
        TixWidget.__init__(self, master, 'tixGrid', static, cnf, kw)

    # valid options as of Tk 8.4
    # anchor, bdtype, cget, configure, delete, dragsite, dropsite, entrycget,
    # edit, entryconfigure, format, geometryinfo, info, index, move, nearest,
    # selection, set, size, unset, xview, yview

    # def anchor option ?args ...?
    def anchor_get(self):
        "Get the (x,y) coordinate of the current anchor cell"
        return self._getints(self.tk.call(self, 'anchor', 'get'))

    # def bdtype
    # def delete dim from ?to?
    def delete_row(self, from_, to=None):
        """Delete rows between from_ and to inclusive.
        If to is not provided, delete only row at from_"""
        if to is None:
            self.tk.call(self, 'delete', 'row', from_)
        else:
            self.tk.call(self, 'delete', 'row', from_, to)

    def delete_column(self, from_, to=None):
        """Delete columns between from_ and to inclusive.
        If to is not provided, delete only column at from_"""
        if to is None:
            self.tk.call(self, 'delete', 'column', from_)
        else:
            self.tk.call(self, 'delete', 'column', from_, to)

    # def edit apply
    # def edit set x y
    def entrycget(self, x, y, option):
        "Get the option value for cell at (x,y)"
        return self.tk.call(self, 'entrycget', x, y, option)

    def entryconfigure(self, x, y, **kw):
        return self.tk.call(self, 'entryconfigure', x, y,
                            *self._options(None, kw))

    # def format
    # def index
    def info_exists(self, x, y):
        "Return True if display item exists at (x,y)"
        return bool(int(self.tk.call(self, 'info', 'exists', x, y)))

    def info_bbox(self, x, y):
        # This seems to always return '', at least for 'text' displayitems
        return self.tk.call(self, 'info', 'bbox', x, y)

    def nearest(self, x, y):
        "Return coordinate of cell nearest pixel coordinate (x,y)"
        return self._getints(self.tk.call(self, 'nearest', x, y))

    # def selection adjust
    # def selection clear
    # def selection includes
    # def selection set
    # def selection toggle
    # def move dim from to offset
    def set(self, x, y, itemtype=None, **kw):
        # Options saved at construction time (self.cnf) are merged in.
        args = self._options(self.cnf, kw)
        if itemtype is not None:
            args = ('-itemtype', itemtype) + args
        self.tk.call(self, 'set', x, y, *args)

    # def size dim index ?option value ...?
    # def unset x y
    def xview(self):
        return self._getdoubles(self.tk.call(self, 'xview'))

    def xview_moveto(self, fraction):
        self.tk.call(self, 'xview', 'moveto', fraction)

    def xview_scroll(self, count, what="units"):
        "Scroll right (count>0) or left <count> of units|pages"
        self.tk.call(self, 'xview', 'scroll', count, what)

    def yview(self):
        return self._getdoubles(self.tk.call(self, 'yview'))

    def yview_moveto(self, fraction):
        # BUG FIX: was 'ysview' -- a typo that made this method always raise
        # a TclError (there is no such Tcl subcommand).
        self.tk.call(self, 'yview', 'moveto', fraction)

    def yview_scroll(self, count, what="units"):
        "Scroll down (count>0) or up <count> of units|pages"
        self.tk.call(self, 'yview', 'scroll', count, what)
class ScrolledGrid(Grid):
    '''Scrolled Grid widgets'''

    # FIXME: It should inherit -superclass tixScrolledWidget
    def __init__(self, master=None, cnf={}, **kw):
        static= []
        self.cnf= cnf
        TixWidget.__init__(self, master, 'tixScrolledGrid', static, cnf, kw)
| apache-2.0 |
scavallero/mydomus | auth.py | 1 | 3792 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MyDomus - Polling Service
# Copyright (c) 2016 Salvatore Cavallero (salvatoe.cavallero@gmail.com)
# https://github.com/scavallero/mydomus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import json
import hashlib
import logging
import httpapp
import os
#########################################################################
# Module setup
########################################################################
logger = logging.getLogger("Mydomus")
user = {}
def verifyUser(usr, pswd):
    """Return True when *usr* is a known user and *pswd* matches the stored hash."""
    if usr not in user:
        return False
    return user[usr]['password'] == pswd
def verifyToken(token):
    """Return a (found, username) pair for the user owning *token*.

    Scans every user record; when several users happen to share a token
    the last one encountered wins (the loop deliberately does not break,
    matching the original behavior).
    """
    found, owner = False, ""
    for name, record in user.items():
        if 'token' in record and record['token'] == token:
            found, owner = True, name
    return found, owner
def decodeUrlToken(url):
    """Split the trailing token segment off *url* and validate it.

    The final '/'-separated segment is treated as the token; the rest of
    the path is rebuilt (collapsing empty segments, '' becomes '/').
    Returns the rebuilt path when the token is valid, otherwise None.
    """
    segments = url.split('/')
    token = segments[-1]
    path = ''.join('/' + seg for seg in segments[:-1] if seg != '')
    if not path:
        path = '/'
    valid, _owner = verifyToken(token)
    return path if valid else None
def load():
    """Load user.conf (JSON) into the module-level *user* dict.

    After loading, every record is rewritten in place:
      - 'token'    = sha224(username + cleartext password) hex digest
      - 'password' = md5(cleartext password) hex digest
    so the cleartext password is not kept in memory afterwards.
    """
    global user
    logger.info("Start loading user authorization")
    # Resolve user.conf relative to this source file, not the process CWD.
    CWD = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(CWD,"user.conf")) as data_file:
        try:
            user = json.load(data_file)
        except ValueError: # includes simplejson.decoder.JSONDecodeError
            # On decode failure 'user' keeps its previous value (normally {}).
            logger.critical('json decoding failure user.conf')
    for item in user.keys():
        # NOTE(review): string concatenation fed straight into hashlib
        # assumes Python 2 byte strings; under Python 3 this would raise
        # TypeError (bytes required) -- confirm the target interpreter.
        h = hashlib.sha224(item+user[item]['password']).hexdigest()
        p = hashlib.md5(user[item]['password']).hexdigest()
        user[item]['token'] = h
        user[item]['password'] = p
        logger.info('User: %s - %s' % (item,h))
### ADDED API ###
@httpapp.addurl('/verify/')
def url_verify(p, m):
    """HTTP handler for /verify/<user>/<password-hash>.

    Returns a JSON string: on success the user's token, otherwise an
    error with a reason.
    """
    global user
    parts = p.split('/')
    if len(parts) != 4:
        return '{"status":"error","reason":"missing user or password"}'
    name, password = parts[2], parts[3]
    if name not in user.keys():
        return '{"status":"error","reason":"user unknown"}'
    if password != user[name]['password']:
        return '{"status":"error","reason":"wrong password"}'
    return '{"status":"ok","token":"%s"}' % user[name]['token']
@httpapp.addurl('/checktoken/')
def url_checktoken(p, m):
    """HTTP handler for /checktoken/<token>.

    Returns a JSON string naming the token's owner, or an error reason.
    """
    global user
    parts = p.split('/')
    if len(parts) != 3:
        return '{"status":"error","reason":"missing token"}'
    valid, owner = verifyToken(parts[2])
    if valid:
        return '{"status":"ok","user":"%s"}' % owner
    return '{"status":"error","reason":"wrong token"}'
logger.info("User authorization loaded")
| gpl-3.0 |
ondra-novak/chromium.src | chrome/common/extensions/docs/server2/test_data/canned_data.py | 7 | 36855 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from extensions_paths import CHROME_EXTENSIONS
from third_party.json_schema_compiler.json_parse import OrderedDict
from test_file_system import MoveAllTo, MoveTo
# Release-channel name -> Chrome milestone (or the literal 'trunk') used
# by the docserver tests; order matters, hence OrderedDict.
CANNED_CHANNELS = OrderedDict([
  ('trunk', 'trunk'),
  ('dev', 31),
  ('beta', 30),
  ('stable', 29)
])
# Chrome milestone -> branch number string, newest first; keys mirror the
# top-level keys of CANNED_API_FILE_SYSTEM_DATA below.
CANNED_BRANCHES = OrderedDict([
  ('trunk', 'trunk'),
  (31, '1612'),
  (30, '1599'),
  (29, '1547'),
  (28, '1500'),
  (27, '1453'),
  (26, '1410'),
  (25, '1364'),
  (24, '1312'),
  (23, '1271'),
  (22, '1229'),
  (21, '1180'),
  (20, '1132'),
  (19, '1084'),
  (18, '1025'),
  (17, '963'),
  (16, '912'),
  (15, '874'),
  (14, '835'),
  (13, '782'),
  (12, '742'),
  (11, '696'),
  (10, '648'),
  ( 9, '597'),
  ( 8, '552'),
  ( 7, '544'),
  ( 6, '495'),
  ( 5, '396'),
])
# A canned in-memory file system (rooted at CHROME_EXTENSIONS) exercising
# the feature/templates layout that the docserver expects: feature JSON
# under api/, and article/intro/json/private templates under docs/templates/.
CANNED_TEST_FILE_SYSTEM_DATA = MoveTo(CHROME_EXTENSIONS, {
  'api': {
    '_api_features.json': json.dumps({
      'ref_test': { 'dependencies': ['permission:ref_test'] },
      'tester': { 'dependencies': ['permission:tester', 'manifest:tester'] }
    }),
    '_manifest_features.json': '{}',
    '_permission_features.json': '{}'
  },
  'docs': {
    'templates': {
      'articles': {
        'test_article.html':
            '<h1>hi</h1>you<h2>first</h2><h3>inner</h3><h2>second</h2>'
      },
      'intros': {
        'test_intro.html':
            'you<h2>first</h2><h3>inner</h3><h2>second</h2>'
      },
      'json': {
        'api_availabilities.json': json.dumps({
          'trunk_api': {
            'channel': 'trunk'
          },
          'dev_api': {
            'channel': 'dev'
          },
          'beta_api': {
            'channel': 'beta'
          },
          'stable_api': {
            'channel': 'stable',
            'version': 20
          }
        }),
        'intro_tables.json': json.dumps({
          'tester': {
            'Permissions': [
              {
                'class': 'override',
                'text': '"tester"'
              },
              {
                'text': 'is an API for testing things.'
              }
            ],
            'Learn More': [
              {
                'link': 'https://tester.test.com/welcome.html',
                'text': 'Welcome!'
              }
            ]
          }
        }),
        'manifest.json': '{}',
        'permissions.json': '{}'
      },
      'private': {
        'intro_tables': {
          'trunk_message.html': 'available on trunk'
        },
        'table_of_contents.html': '<table-of-contents>',
      }
    }
  }
})
# Fixture content for whats_new.json: change-id -> {type, description,
# version}; serialized with json.dumps into the canned file systems below.
_TEST_WHATS_NEW_JSON = {
  "backgroundpages.to-be-non-persistent": {
    "type": "additionsToExistingApis",
    "description": "backgrounds to be non persistent",
    "version": 22
  },
  "chromeSetting.set-regular-only-scope": {
    "type": "additionsToExistingApis",
    "description": "ChromeSetting.set now has a regular_only scope.",
    "version": 21
  },
  "manifest-v1-deprecated": {
    "type": "manifestChanges",
    "description": "Manifest version 1 was deprecated in Chrome 18",
    "version": 20
  }
}
CANNED_API_FILE_SYSTEM_DATA = MoveAllTo(CHROME_EXTENSIONS, {
'trunk': {
'api': {
'_api_features.json': json.dumps({
'alarm': {
'channel': 'stable'
},
'app.window': {
'channel': 'stable'
},
'browserAction': {
'channel': 'stable'
},
'contextMenus': {
'channel': 'stable'
},
'events': {
'channel': 'stable'
},
'extension': {
'channel': 'stable'
},
'signedInDevices': {
'channel': 'stable'
},
'systemInfo.cpu': {
'channel': 'stable'
},
'systemInfo.stuff': {
'channel': 'dev'
}
}),
'_manifest_features.json': json.dumps({
'history': {
'channel': 'beta'
},
'notifications': {
'channel': 'beta'
},
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'storage': {
'channel': 'beta'
},
'sync': {
'channel': 'trunk'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'bluetooth': {
'channel': 'dev'
},
'bookmarks': {
'channel': 'stable'
},
'cookies': {
'channel': 'dev'
},
'declarativeContent': {
'channel': 'trunk'
},
'declarativeWebRequest': [
{ 'channel': 'beta',
'extension_types': ['extension']
},
{ 'channel': 'stable',
'extension_types': ['extension'],
'whitelist': ['aaa']
},
],
'falseBetaAPI': {
'channel': 'beta'
},
'systemInfo.display': {
'channel': 'stable'
},
'trunkAPI': {
'channel': 'trunk'
}
}),
'alarm.json': json.dumps([{
'namespace': 'alarm',
'description': '<code>alarm</code>'
}]),
'app_window.json': json.dumps([{
'namespace': 'app.window',
'description': '<code>app.window</code>'
}]),
'browser_action.json': json.dumps([{
'namespace': 'browserAction',
'description': '<code>browserAction</code>'
}]),
'bluetooth.idl': '\n'.join(('//Copyleft Schmopyright',
'',
'//An IDL description, oh my!',
'namespace bluetooth {',
' dictionary Socket {',
' long id;',
' };',
'};')),
'context_menus.json': json.dumps([{
'namespace': 'contextMenus',
'description': ''
}]),
'json_stable_api.json': json.dumps([{
'namespace': 'jsonStableAPI',
'description': 'An API with a predetermined availability.'
}]),
'idle.json': json.dumps([{'namespace': 'idle', 'description': ''}]),
'input_ime.json': json.dumps([{
'namespace': 'input.ime',
'description': 'An API that has the potential to cause some trouble.'
}]),
'menus.json': json.dumps([{'namespace': 'menus', 'description': ''}]),
'signed_in_devices.json': json.dumps([{
'namespace': 'signedInDevices',
'description': 'Another API that could cause some trouble.'
}]),
'system_info_stuff.json': json.dumps([{
'namespace': 'systemInfo.stuff',
'description': 'Yet another API that could wreck havoc...'
}]),
'tabs.json': json.dumps([{'namespace': 'tabs', 'description': ''}]),
'windows.json': json.dumps([{'namespace': 'windows', 'description': ''}])
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': json.dumps({
'jsonTrunkAPI': {
'channel': 'trunk'
},
'jsonDevAPI': {
'channel': 'dev'
},
'jsonBetaAPI': {
'channel': 'beta'
},
'jsonStableAPI': {
'channel': 'stable',
'version': 20
}
}),
'intro_tables.json': json.dumps({
'test': [
{
'Permissions': 'probably none'
}
]
}),
'manifest.json': '{}',
'permissions.json': '{}',
'whats_new.json': json.dumps(_TEST_WHATS_NEW_JSON)
},
'public': {
'apps': {
'alarm.html': 'alarm.html',
'app_window.html': 'app_window.html',
'contextMenus.html': 'contextMenus.html',
},
'extensions': {
'alarm.html': 'alarm.html',
'browserAction.html': 'browserAction.html',
'contextMenus.html': 'contextMenus.html',
}
}
}
}
},
'1612': {
'api': {
'_api_features.json': json.dumps({
'alarm': {
'channel': 'stable'
},
'app.window': {
'channel': 'stable'
},
'browserAction': {
'channel': 'stable'
},
'events': {
'channel': 'trunk'
},
'extension': {
'channel': 'stable'
},
'systemInfo.cpu': {
'channel': 'stable'
},
'systemInfo.stuff': {
'channel': 'dev'
}
}),
'_manifest_features.json': json.dumps({
'contextMenus': {
'channel': 'trunk'
},
'notifications': {
'channel': 'beta'
},
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'storage': {
'channel': 'dev'
},
'sync': {
'channel': 'trunk'
},
'system_info_display': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'appsFirst': {
'channel': 'stable',
'extension_types': ['extension', 'platform_app']
},
'bluetooth': {
'channel': 'dev'
},
'bookmarks': {
'channel': 'stable'
},
'cookies': {
'channel': 'dev'
},
'declarativeContent': {
'channel': 'trunk'
},
'declarativeWebRequest': [
{ 'channel': 'beta' },
{ 'channel': 'stable', 'whitelist': ['aaa'] }
],
'downloads': {
'channel': 'beta'
}
}),
'alarm.json': json.dumps([{
'namespace': 'alarm',
'description': '<code>alarm</code>'
}]),
'app_window.json': json.dumps([{
'namespace': 'app.window',
'description': '<code>app.window</code>'
}]),
'browser_action.json': json.dumps([{
'namespace': 'browserAction',
'description': '<code>browserAction</code>'
}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': json.dumps({
'jsonTrunkAPI': {
'channel': 'trunk'
},
'jsonDevAPI': {
'channel': 'dev'
},
'jsonBetaAPI': {
'channel': 'beta'
},
'jsonStableAPI': {
'channel': 'stable',
'version': 20
}
}),
'intro_tables.json': json.dumps({
'test': [
{
'Permissions': 'probably none'
}
]
}),
'manifest.json': '{}',
'permissions.json': '{}',
'whats_new.json': json.dumps(_TEST_WHATS_NEW_JSON)
},
'public': {
'apps': {
'alarm.html': 'alarm.html',
'app_window.html': 'app_window.html',
},
'extensions': {
'alarm.html': 'alarm.html',
'browserAction.html': 'browserAction.html',
}
}
}
}
},
'1599': {
'api': {
'_api_features.json': json.dumps({
'alarm': {
'channel': 'stable'
},
'app.window': {
'channel': 'stable'
},
'browserAction': {
'channel': 'stable'
},
'events': {
'channel': 'trunk'
},
'extension': {
'channel': 'stable'
},
'systemInfo.cpu': {
'channel': 'beta'
},
'systemInfo.stuff': {
'channel': 'dev'
}
}),
'_manifest_features.json': json.dumps({
'contextMenus': {
'channel': 'trunk'
},
'notifications': {
'channel': 'dev'
},
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'storage': {
'channel': 'dev'
},
'sync': {
'channel': 'trunk'
},
'system_info_display': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'appsFirst': {
'channel': 'stable',
'extension_types': ['extension', 'platform_app']
},
'bluetooth': {
'channel': 'dev'
},
'bookmarks': {
'channel': 'stable'
},
'cookies': {
'channel': 'dev'
},
'declarativeContent': {
'channel': 'trunk'
},
'declarativeWebRequest': [
{ 'channel': 'beta' },
{ 'channel': 'stable', 'whitelist': ['aaa'] }
],
'downloads': {
'channel': 'beta'
}
}),
'alarm.json': json.dumps([{
'namespace': 'alarm',
'description': '<code>alarm</code>'
}]),
'app_window.json': json.dumps([{
'namespace': 'app.window',
'description': '<code>app.window</code>'
}]),
'browser_action.json': json.dumps([{
'namespace': 'browserAction',
'description': '<code>browserAction</code>'
}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': json.dumps({
'jsonTrunkAPI': {
'channel': 'trunk'
},
'jsonDevAPI': {
'channel': 'dev'
},
'jsonBetaAPI': {
'channel': 'beta'
},
'jsonStableAPI': {
'channel': 'stable',
'version': 20
}
}),
'intro_tables.json': json.dumps({
'test': [
{
'Permissions': 'probably none'
}
]
}),
'manifest.json': '{}',
'permissions.json': '{}',
'whats_new.json': json.dumps(_TEST_WHATS_NEW_JSON)
},
'public': {
'apps': {
'alarm.html': 'alarm.html',
'app_window.html': 'app_window.html',
},
'extensions': {
'alarm.html': 'alarm.html',
'browserAction.html': 'browserAction.html',
}
}
}
}
},
'1547': {
'api': {
'_api_features.json': json.dumps({
'alarm': {
'channel': 'stable'
},
'app.window': {
'channel': 'stable'
},
'browserAction': {
'channel': 'stable'
},
'events': {
'channel': 'trunk'
},
'extension': {
'channel': 'stable'
},
'systemInfo.stuff': {
'channel': 'dev'
}
}),
'_manifest_features.json': json.dumps({
'contextMenus': {
'channel': 'trunk'
},
'notifications': {
'channel': 'dev'
},
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'storage': {
'channel': 'dev'
},
'sync': {
'channel': 'trunk'
},
'system_info_display': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'appsFirst': {
'channel': 'stable',
'extension_types': ['extension', 'platform_app']
},
'bluetooth': {
'channel': 'dev'
},
'bookmarks': {
'channel': 'stable'
},
'cookies': {
'channel': 'dev'
},
'declarativeContent': {
'channel': 'trunk'
},
'declarativeWebRequest': [
{ 'channel': 'beta' },
{ 'channel': 'stable', 'whitelist': ['aaa'] }
],
'downloads': {
'channel': 'beta'
}
}),
'alarm.json': json.dumps([{
'namespace': 'alarm',
'description': '<code>alarm</code>'
}]),
'app_window.json': json.dumps([{
'namespace': 'app.window',
'description': '<code>app.window</code>'
}]),
'browser_action.json': json.dumps([{
'namespace': 'browserAction',
'description': '<code>browserAction</code>'
}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': json.dumps({
'jsonTrunkAPI': {
'channel': 'trunk'
},
'jsonDevAPI': {
'channel': 'dev'
},
'jsonBetaAPI': {
'channel': 'beta'
},
'jsonStableAPI': {
'channel': 'stable',
'version': 20
}
}),
'intro_tables.json': json.dumps({
'test': [
{
'Permissions': 'probably none'
}
]
}),
'manifest.json': '{}',
'permissions.json': '{}',
'whats_new.json': json.dumps(_TEST_WHATS_NEW_JSON)
},
'public': {
'apps': {
'alarm.html': 'alarm.html',
'app_window.html': 'app_window.html',
},
'extensions': {
'alarm.html': 'alarm.html',
'browserAction.html': 'browserAction.html',
}
}
}
}
},
'1500': {
'api': {
'_api_features.json': json.dumps({
'alarm': {
'channel': 'stable'
},
'app.window': {
'channel': 'stable'
},
'browserAction': {
'channel': 'stable'
},
'events': {
'channel': 'trunk'
},
'extension': {
'channel': 'stable'
},
'systemInfo.stuff': {
'channel': 'dev'
}
}),
'_manifest_features.json': json.dumps({
'contextMenus': {
'channel': 'trunk'
},
'notifications': {
'channel': 'dev'
},
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'storage': {
'channel': 'dev'
},
'sync': {
'channel': 'trunk'
},
'system_info_display': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'appsFirst': {
'channel': 'stable',
'extension_types': ['extension', 'platform_app']
},
'bluetooth': {
'channel': 'dev'
},
'bookmarks': {
'channel': 'stable'
},
'cookies': {
'channel': 'dev'
},
'declarativeContent': {
'channel': 'trunk'
},
'declarativeWebRequest': [
{ 'channel': 'beta' },
{ 'channel': 'stable', 'whitelist': ['aaa'] }
],
'downloads': {
'channel': 'beta'
}
}),
'alarm.json': json.dumps([{
'namespace': 'alarm',
'description': '<code>alarm</code>'
}]),
'app_window.json': json.dumps([{
'namespace': 'app.window',
'description': '<code>app.window</code>'
}]),
'browser_action.json': json.dumps([{
'namespace': 'browserAction',
'description': '<code>browserAction</code>'
}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': json.dumps({
'jsonTrunkAPI': {
'channel': 'trunk'
},
'jsonDevAPI': {
'channel': 'dev'
},
'jsonBetaAPI': {
'channel': 'beta'
},
'jsonStableAPI': {
'channel': 'stable',
'version': 20
}
}),
'intro_tables.json': json.dumps({
'test': [
{
'Permissions': 'probably none'
}
]
}),
'manifest.json': '{}',
'permissions.json': '{}',
'whats_new.json': json.dumps(_TEST_WHATS_NEW_JSON)
},
'public': {
'apps': {
'alarm.html': 'alarm.html',
'app_window.html': 'app_window.html',
},
'extensions': {
'alarm.html': 'alarm.html',
'browserAction.html': 'browserAction.html',
}
}
}
}
},
'1453': {
'api': {
'_api_features.json': json.dumps({
'alarm': {
'channel': 'stable'
},
'app.window': {
'channel': 'stable'
},
'browserAction': {
'channel': 'stable'
},
'events': {
'channel': 'dev'
},
'extension': {
'channel': 'stable'
},
'systemInfo.stuff': {
'channel': 'dev'
}
}),
'_manifest_features.json': json.dumps({
'notifications': {
'channel': 'dev'
},
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'storage': {
'channel': 'dev'
},
'system_info_display': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'appsFirst': {
'channel': 'stable',
'extension_types': ['extension', 'platform_app']
},
'bluetooth': {
'channel': 'dev'
},
'bookmarks': {
'channel': 'stable'
},
'context_menus': {
'channel': 'trunk'
},
'declarativeContent': {
'channel': 'trunk'
},
'declarativeWebRequest': [
{ 'channel': 'beta' },
{ 'channel': 'stable', 'whitelist': ['aaa'] }
],
'downloads': {
'channel': 'dev'
}
}),
'alarm.json': json.dumps([{
'namespace': 'alarm',
'description': '<code>alarm</code>'
}]),
'app_window.json': json.dumps([{
'namespace': 'app.window',
'description': '<code>app.window</code>'
}]),
'browser_action.json': json.dumps([{
'namespace': 'browserAction',
'description': '<code>browserAction</code>'
}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': json.dumps({
'jsonTrunkAPI': {
'channel': 'trunk'
},
'jsonDevAPI': {
'channel': 'dev'
},
'jsonBetaAPI': {
'channel': 'beta'
},
'jsonStableAPI': {
'channel': 'stable',
'version': 20
}
}),
'intro_tables.json': json.dumps({
'test': [
{
'Permissions': 'probably none'
}
]
}),
'manifest.json': '{}',
'permissions.json': '{}',
'whats_new.json': json.dumps(_TEST_WHATS_NEW_JSON)
},
'public': {
'apps': {
'alarm.html': 'alarm.html',
'app_window.html': 'app_window.html',
},
'extensions': {
'alarm.html': 'alarm.html',
'browserAction.html': 'browserAction.html',
}
}
}
}
},
'1410': {
'api': {
'_manifest_features.json': json.dumps({
'alarm': {
'channel': 'stable'
},
'app.window': {
'channel': 'stable'
},
'browserAction': {
'channel': 'stable'
},
'events': {
'channel': 'beta'
},
'notifications': {
'channel': 'dev'
},
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'appsFirst': {
'channel': 'stable',
'extension_types': ['extension', 'platform_app']
},
'bluetooth': {
'channel': 'dev'
},
'bookmarks': {
'channel': 'stable'
},
'context_menus': {
'channel': 'trunk'
},
'declarativeContent': {
'channel': 'trunk'
},
'declarativeWebRequest': [
{ 'channel': 'beta' },
{ 'channel': 'stable', 'whitelist': ['aaa'] }
],
'systemInfo.display': {
'channel': 'stable'
}
}),
'alarm.json': json.dumps([{
'namespace': 'alarm',
'description': '<code>alarm</code>'
}]),
'app_window.json': json.dumps([{
'namespace': 'app.window',
'description': '<code>app.window</code>'
}]),
'browser_action.json': json.dumps([{
'namespace': 'browserAction',
'description': '<code>browserAction</code>'
}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
}
},
'1364': {
'api': {
'_manifest_features.json': json.dumps({
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'appsFirst': {
'channel': 'stable',
'extension_types': ['platform_app']
},
'bookmarks': {
'channel': 'stable'
},
'systemInfo.display': {
'channel': 'stable'
},
'webRequest': {
'channel': 'stable'
}
}),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
}
},
'1312': {
'api': {
'_manifest_features.json': json.dumps({
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'stable'
},
'bookmarks': {
'channel': 'stable'
},
'systemInfo.display': {
'channel': 'stable'
}
}),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
}
},
'1271': {
'api': {
'_manifest_features.json': json.dumps({
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'system_info_display': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'alarms': {
'channel': 'beta'
},
'bookmarks': {
'channel': 'stable'
},
'webRequest': {
'channel': 'stable'
}
}),
'alarms.idl': '//copy\n\n//desc\nnamespace alarms {}',
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'windows.json': json.dumps([{'namespace': 'windows'}])
}
},
'1229': {
'api': {
'_manifest_features.json': json.dumps({
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
},
'web_request': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'bookmarks': {
'channel': 'stable'
},
'systemInfo.display': {
'channel': 'beta'
}
}),
'alarms.idl': '//copy\n\n//desc\nnamespace alarms {}',
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
}
},
'1180': {
'api': {
'_manifest_features.json': json.dumps({
'page_action': {
'channel': 'stable'
},
'runtime': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'bookmarks': {
'channel': 'stable'
},
'webRequest': {
'channel': 'stable'
}
}),
'bookmarks.json': json.dumps([{'namespace': 'bookmarks'}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input_ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
}
},
'1132': {
'api': {
'_manifest_features.json': json.dumps({
'bookmarks': {
'channel': 'trunk'
},
'page_action': {
'channel': 'stable'
}
}),
'_permission_features.json': json.dumps({
'webRequest': {
'channel': 'stable'
}
}),
'bookmarks.json': json.dumps([{'namespace': 'bookmarks'}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input.ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
}
},
'1084': {
'api': {
'_manifest_features.json': json.dumps({
'contents': 'nothing of interest here,really'
}),
'bookmarks.json': json.dumps([{'namespace': 'bookmarks'}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input.ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'pageAction.json': json.dumps([{'namespace': 'pageAction'}]),
'webRequest.json': json.dumps([{'namespace': 'webRequest'}])
}
},
'1025': {
'api': {
'bookmarks.json': json.dumps([{'namespace': 'bookmarks'}]),
'idle.json': json.dumps([{'namespace': 'idle'}]),
'input.ime.json': json.dumps([{'namespace': 'input.ime'}]),
'menus.json': json.dumps([{'namespace': 'menus'}]),
'tabs.json': json.dumps([{'namespace': 'tabs'}]),
'pageAction.json': json.dumps([{'namespace': 'pageAction'}]),
'webRequest.json': json.dumps([{'namespace': 'webRequest'}])
}
},
'963': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
},
{
'namespace': 'webRequest'
}
])
}
},
'912': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
},
{
'namespace': 'experimental.webRequest'
}
])
}
},
'874': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'835': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'782': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'742': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'696': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'648': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'597': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'552': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
},
{
'namespace': 'pageAction'
}
])
}
},
'544': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
}
])
}
},
'495': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'menus'
}
])
}
},
'396': {
'api': {
'extension_api.json': json.dumps([
{
'namespace': 'idle'
},
{
'namespace': 'experimental.menus'
}
])
}
}
})
| bsd-3-clause |
arunkgupta/gramps | gramps/gui/merge/__init__.py | 1 | 1096 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
"""
from mergeperson import *
from mergefamily import *
from mergeevent import *
from mergeplace import *
from mergesource import *
from mergecitation import *
from mergerepository import *
from mergemedia import *
from mergenote import *
| gpl-2.0 |
phelix/bitcoin | contrib/devtools/optimize-pngs.py | 63 | 3184 | #!/usr/bin/env python
'''
Run this scrip every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
    """Return the SHA-256 hex digest of the file's raw bytes."""
    with open(filename, 'rb') as f:
        digest = hashlib.sha256(f.read())
    return digest.hexdigest()
def content_hash(filename):
    """Return the SHA-256 hex digest of the image's decoded RGBA pixels.

    Two files with different compression but identical pixels hash the
    same, which is how the script verifies pngcrush did not alter content.

    Fix: ``Image.tostring()`` was renamed to ``tobytes()`` in Pillow 2.0
    and removed in Pillow 3.0, so the original call crashes on any modern
    Pillow. Prefer ``tobytes()`` and keep ``tostring()`` as a fallback for
    legacy PIL installs.
    """
    i = Image.open(filename)
    i = i.convert('RGBA')
    if hasattr(i, 'tobytes'):
        data = i.tobytes()
    else:
        data = i.tostring()
    return hashlib.sha256(data).hexdigest()
# External executables this script shells out to.
pngcrush = 'pngcrush'
git = 'git'
# Repository folders (relative to the repo root) containing PNGs to crush.
folders = ["src/qt/res/movies", "src/qt/res/icons"]
# Repo root from git; NOTE(review): under Python 3 check_output returns
# bytes and rstrip('\n') would fail -- this script targets Python 2
# (see the print statements below).
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0     # accumulated bytes saved across all files
noHashChange = True    # stays True only if no file's raw bytes changed
outputArray = []       # one metadata dict per processed PNG, for the summary
# Walk every configured folder, crush each PNG in place, and verify that
# the decoded RGBA pixels are identical before and after crushing.
for folder in folders:
    absFolder=os.path.join(basePath, folder)
    for file in os.listdir(absFolder):
        extension =  os.path.splitext(file)[1]
        if extension.lower() == '.png':
            print("optimizing "+file+"..."),
            file_path = os.path.join(absFolder, file)
            # Record size and hashes before crushing.
            fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)};
            fileMetaMap['contentHashPre'] = content_hash(file_path)
            pngCrushOutput = ""
            try:
                # -ow overwrites the file in place; the -rem flags strip
                # color profiles and ancillary/text chunks.
                pngCrushOutput = subprocess.check_output(
                    [pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
                    stderr=subprocess.STDOUT).rstrip('\n')
            except:
                # NOTE(review): bare except assumes the only failure mode is
                # a missing pngcrush binary; a non-zero exit is reported the
                # same way -- confirm that is intended.
                print "pngcrush is not installed, aborting..."
                sys.exit(0)
            #verify
            # pngcrush -n -v parses without rewriting; reject corrupt output.
            if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
                print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
                sys.exit(1)
            fileMetaMap['sha256New'] = file_hash(file_path)
            fileMetaMap['contentHashPost'] = content_hash(file_path)
            # Pixel content must be bit-identical; only the encoding may change.
            if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
                print "Image contents of PNG file "+file+" before and after crushing don't match"
                sys.exit(1)
            fileMetaMap['psize'] = os.path.getsize(file_path)
            outputArray.append(fileMetaMap)
            print("done\n"),

# Report per-file before/after sizes and hashes, plus the totals.
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
    oldHash = fileDict['sha256Old']
    newHash = fileDict['sha256New']
    totalSaveBytes += fileDict['osize'] - fileDict['psize']
    # Checksum is "stable" only if crushing never changed any raw bytes.
    noHashChange = noHashChange and (oldHash == newHash)
    print fileDict['file']+"\n  size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n  old sha256: "+oldHash+"\n  new sha256: "+newHash+"\n"

print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
| mit |
psav/cfme_tests | cfme/networks/floating_ips.py | 2 | 3074 | import attr
from navmazing import NavigateToAttribute
from cfme.common import Taggable
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity, parent_of_type
from cfme.networks.views import FloatingIpDetailsView, FloatingIpView
from cfme.utils import version
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
@attr.s
class FloatingIp(Taggable, BaseEntity):
    """Class representing floating ips"""
    in_version = ('5.8', version.LATEST)
    category = "networks"
    page_name = 'floating_ip'
    string_name = 'FloatingIP'
    refresh_text = "Refresh items and relationships"
    detail_page_suffix = 'floating_ip_detail'
    quad_name = None
    db_types = ["FloatingIP"]

    # The IP address string; the only attr.ib() field, so it is what
    # identifies this entity within its collection.
    address = attr.ib()

    @property
    def status(self):
        """Status text read from the entity's Details page (fresh navigation each call)."""
        view = navigate_to(self, 'Details')
        return view.entities.properties.get_text_of('Status')

    @property
    def provider(self):
        """The NetworkProvider ancestor of this entity, or None if there is none."""
        # Imported locally to avoid a circular import with the provider module.
        from cfme.networks.provider import NetworkProvider
        return parent_of_type(self, NetworkProvider)
@attr.s
class FloatingIpCollection(BaseCollection):
    """ Collection object for NetworkPort object
    Note: Network providers object are not implemented in mgmt
    """
    ENTITY = FloatingIp
    def all(self):
        """Return a list of FloatingIp entities for every IP visible in the UI.

        Surfs through all pages of the Floating IPs list view.
        """
        view = navigate_to(self, 'All')
        all_ips = view.entities.get_all(surf_pages=True)
        list_floating_ip_obj = []
        for ip in all_ips:
            # as for 5.9 floating ip doesn't have name att, will get name as address from data
            list_floating_ip_obj.append(ip.name if ip.name else ip.data['address'])
        return [self.instantiate(address=name) for name in list_floating_ip_obj]
@navigator.register(FloatingIpCollection, 'All')
class All(CFMENavigateStep):
    """Navigation step to the Floating IPs list view."""
    VIEW = FloatingIpView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
    def step(self):
        # Top-level menu navigation: Networks -> Floating IPs.
        self.prerequisite_view.navigation.select('Networks', 'Floating IPs')
    def resetter(self):
        """Reset the view"""
        self.view.browser.refresh()
@navigator.register(FloatingIp, 'Details')
class Details(CFMENavigateStep):
    """Navigation step to a single floating IP's Details page."""
    prerequisite = NavigateToAttribute('parent', 'All')
    VIEW = FloatingIpDetailsView
    def step(self):
        # as for 5.9 floating ip doesn't have name att, will get id for navigation
        if self.obj.appliance.version < '5.9':
            # Pre-5.9: entities still carry a name, so look up by address directly.
            element = self.prerequisite_view.entities.get_entity(
                name=self.obj.address, surf_pages=True)
        else:
            # 5.9+: scan all entities for a matching address, then fetch by id.
            all_items = self.prerequisite_view.entities.get_all(surf_pages=True)
            for entity in all_items:
                if entity.data['address'] == self.obj.address:
                    entity_id = entity.data['id']
                    element = self.prerequisite_view.entities.get_entity(
                        entity_id=entity_id, surf_pages=True)
                    break
        # NOTE(review): if no entity matched above, 'element' is unbound and the
        # click raises NameError, which the broad except converts to ItemNotFound.
        try:
            element.click()
        except Exception:
            raise ItemNotFound('Floating IP not found on the page')
| gpl-2.0 |
trusch/libbson | test/googletest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name = 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Unset: drop the variable if present; a missing variable is a no-op.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  # Configure the three inputs the binary inspects: TERM, GTEST_COLOR and
  # the --gtest_color flag.  None means "leave unset / don't pass the flag".
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  if color_flag is None:
    args = []
  else:
    args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  # Truthy when the child crashed (didn't exit normally) or exited non-zero;
  # the test binary encodes "uses color" in its exit code.
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Verifies color decisions for every combination of TERM, env var and flag."""
  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Dumb/monochrome terminals should not get color by default.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    # Color-capable terminals get color automatically.
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))
  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal's capability.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    # 'yes' forces color even on a dumb terminal.
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))
  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))
  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The command-line flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))
  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    # Anything not recognized as a "yes" alias disables color.
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
  gtest_test_utils.Main()
| mit |
2ndQuadrant/ansible | lib/ansible/modules/remote_management/oneview/oneview_san_manager_facts.py | 120 | 3321 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_san_manager_facts
short_description: Retrieve facts about one or more of the OneView SAN Managers
description:
- Retrieve facts about one or more of the SAN Managers from OneView
version_added: "2.5"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
provider_display_name:
description:
- Provider Display Name.
params:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
- C(start): The first item to return, using 0-based indexing.
- C(count): The number of resources to return.
- C(query): A general query string to narrow the list of resources returned.
- C(sort): The sort order of the returned data set."
extends_documentation_fragment:
- oneview
'''
EXAMPLES = '''
- name: Gather facts about all SAN Managers
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=san_managers
- name: Gather paginated, filtered and sorted facts about SAN Managers
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
params:
start: 0
count: 3
sort: name:ascending
query: isInternal eq false
delegate_to: localhost
- debug: var=san_managers
- name: Gather facts about a SAN Manager by provider display name
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
provider_display_name: Brocade Network Advisor
delegate_to: localhost
- debug: var=san_managers
'''
RETURN = '''
san_managers:
description: Has all the OneView facts about the SAN Managers.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class SanManagerFactsModule(OneViewModuleBase):
    """Ansible facts module that retrieves OneView SAN Managers."""
    # Module-specific arguments merged into the base OneView arg spec.
    argument_spec = dict(
        provider_display_name=dict(type='str'),
        params=dict(type='dict')
    )
    def __init__(self):
        super(SanManagerFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
        self.resource_client = self.oneview_client.san_managers
    def execute_module(self):
        """Return facts: a single SAN Manager looked up by provider display name,
        or all SAN Managers (optionally paginated/filtered via 'params')."""
        if self.module.params.get('provider_display_name'):
            provider_display_name = self.module.params['provider_display_name']
            san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
            # Wrap the single result in a list so the fact shape is uniform;
            # an unmatched name yields an empty list rather than an error.
            if san_manager:
                resources = [san_manager]
            else:
                resources = []
        else:
            resources = self.oneview_client.san_managers.get_all(**self.facts_params)
        return dict(changed=False, ansible_facts=dict(san_managers=resources))
def main():
    """Module entry point."""
    SanManagerFactsModule().run()
if __name__ == '__main__':
    main()
| gpl-3.0 |
mirrax/OpenBazaar | node/multisig.py | 13 | 6390 | import logging
import random
import re
import urllib2
import obelisk
# Create new private key:
#
# $ sx newkey > key1
#
# Show private secret:
#
# $ cat key1 | sx wif-to-secret
#
# Show compressed public key:
#
# $ cat key1 | sx pubkey
#
# You will need 3 keys for buyer, seller and arbitrer
class Multisig(object):
    """An m-of-n Bitcoin multisignature script/address (Python 2 code).

    Builds a raw CHECKMULTISIG redeem script from compressed public keys,
    derives its P2SH address, and can build/sign/broadcast transactions
    spending from it via an obelisk client.
    """
    def __init__(self, client, number_required, pubkeys):
        # number_required is the 'm' in m-of-n; cannot exceed the key count.
        if number_required > len(pubkeys):
            raise Exception("number_required > len(pubkeys)")
        self.client = client
        self.number_required = number_required
        self.pubkeys = pubkeys
        self.log = logging.getLogger(self.__class__.__name__)
    @property
    def script(self):
        """Serialized redeem script: OP_m <pubkeys...> OP_n OP_CHECKMULTISIG."""
        # 0x50 + m encodes the small-number opcode OP_m (OP_1 is 0x51).
        result = chr(80 + self.number_required)
        for pubkey in self.pubkeys:
            # 0x21 (33) is the push length of a compressed public key.
            result += chr(33) + pubkey
        result += chr(80 + len(self.pubkeys))
        # checkmultisig
        result += "\xae"
        return result
    @property
    def address(self):
        """P2SH address (version byte 0x05) of the redeem script."""
        raw_addr = obelisk.hash_160(self.script)
        return obelisk.hash_160_to_bc_address(raw_addr, addrtype=0x05)
    def create_unsigned_transaction(self, destination, finished_cb):
        """Asynchronously build an unsigned tx sweeping this address to *destination*.

        finished_cb is invoked with the obelisk.Transaction on success.
        """
        def fetched(escrow, history):
            # First argument is an error indicator; None means success.
            if escrow is not None:
                self.log.error("Error fetching history: %s", escrow)
                return
            self._fetched(history, destination, finished_cb)
        self.client.fetch_history(self.address, fetched)
    def _fetched(self, history, destination, finished_cb):
        # A history row with row[4] is None has no spend, i.e. is unspent.
        unspent = [row[:4] for row in history if row[4] is None]
        transaction = self._build_actual_tx(unspent, destination)
        finished_cb(transaction)
    @staticmethod
    def _build_actual_tx(unspent, destination):
        # Send all unspent outputs (everything in the address) minus the fee
        transaction = obelisk.Transaction()
        total_amount = 0
        for row in unspent:
            assert len(row) == 4
            outpoint = obelisk.OutPoint()
            outpoint.hash = row[0]
            outpoint.index = row[1]
            value = row[3]
            total_amount += value
            add_input(transaction, outpoint)
        # Constrain fee so we don't get negative amount to send
        fee = min(total_amount, 10000)
        send_amount = total_amount - fee
        add_output(transaction, destination, send_amount)
        return transaction
    def sign_all_inputs(self, transaction, secret):
        """Return hex-encoded DER signatures (one per input) for *secret*'s key."""
        signatures = []
        key = obelisk.EllipticCurveKey()
        key.set_secret(secret)
        for i, _ in enumerate(transaction.inputs):
            sighash = generate_signature_hash(transaction, i, self.script)
            # Add sighash::all to end of signature.
            signature = key.sign(sighash) + "\x01"
            signatures.append(signature.encode('hex'))
        return signatures
    @staticmethod
    def make_request(*args):
        """HTTP GET/POST helper; returns the stripped response body or raises."""
        opener = urllib2.build_opener()
        # Randomized User-agent to avoid naive bot blocking.
        opener.addheaders = [(
            'User-agent',
            'Mozilla/5.0' + str(random.randrange(1000000))
        )]
        try:
            return opener.open(*args).read().strip()
        except Exception as exc:
            # HTTPError objects are file-like; prefer their body as the message.
            try:
                stripped_exc = exc.read().strip()
            except Exception:
                stripped_exc = exc
            raise Exception(stripped_exc)
    @staticmethod
    def eligius_pushtx(transaction):
        """Broadcast a hex-encoded raw transaction via the Eligius push service."""
        print 'FINAL TRANSACTION: %s' % transaction
        request = Multisig.make_request(
            'http://eligius.st/~wizkid057/newstats/pushtxn.php',
            'transaction=' + transaction + '&send=Push'
        )
        # Scrape the quoted status string out of the returned HTML.
        strings = re.findall('string[^"]*"[^"]*"', request)
        for string in strings:
            quote = re.findall('"[^"]*"', string)[0]
            if len(quote) >= 5:
                return quote[1:-1]
    @staticmethod
    def broadcast(transaction):
        """Serialize *transaction* and push it to the network."""
        raw_tx = transaction.serialize().encode("hex")
        Multisig.eligius_pushtx(raw_tx)
        # gateway_broadcast(raw_tx)
        # bci_pushtx(raw_tx)
def add_input(transaction, prevout):
    """Append an input spending *prevout* (an obelisk.OutPoint) to *transaction*."""
    tx_input = obelisk.TxIn()
    tx_input.previous_output.hash = prevout.hash
    tx_input.previous_output.index = prevout.index
    transaction.inputs.append(tx_input)
def add_output(transaction, address, value):
    """Append an output paying *value* satoshis to *address* onto *transaction*."""
    output = obelisk.TxOut()
    output.value = value
    output.script = obelisk.output_script(address)
    transaction.outputs.append(output)
def generate_signature_hash(parent_tx, input_index, script_code):
    """Compute the SIGHASH_ALL digest for one input of *parent_tx*.

    Returns None when input_index is out of range.
    """
    # Work on a copy: all input scripts are blanked except the one being
    # signed, which carries the redeem script, per the Bitcoin sighash rules.
    transaction = obelisk.copy_tx(parent_tx)
    if input_index >= len(transaction.inputs):
        return None
    for tx_input in transaction.inputs:
        tx_input.script = ""
    transaction.inputs[input_index].script = script_code
    # Append the 4-byte little-endian hash type (SIGHASH_ALL = 1) and double-SHA256.
    raw_tx = transaction.serialize() + "\x01\x00\x00\x00"
    return obelisk.Hash(raw_tx)
class Escrow(object):
    """A 2-of-3 buyer/seller/arbiter escrow built on a Multisig address."""
    def __init__(self, client, buyer_pubkey, seller_pubkey, arbit_pubkey):
        pubkeys = (buyer_pubkey, seller_pubkey, arbit_pubkey)
        # Any 2 of the 3 parties can release the funds.
        self.multisig = Multisig(client, 2, pubkeys)
    # 1. BUYER: Deposit funds for seller
    @property
    def deposit_address(self):
        return self.multisig.address
    # 2. BUYER: Send unsigned transaction to seller
    def initiate(self, destination_address, finished_cb):
        self.multisig.create_unsigned_transaction(
            destination_address, finished_cb)
    # ...
    # 3. BUYER: Release funds by sending signature to seller
    def release_funds(self, transaction, secret):
        return self.multisig.sign_all_inputs(transaction, secret)
    # 4. SELLER: Claim your funds by generating a signature.
    def claim_funds(self, transaction, secret, buyer_sigs):
        seller_sigs = self.multisig.sign_all_inputs(transaction, secret)
        return Escrow.complete(transaction, buyer_sigs, seller_sigs,
                               self.multisig.script)
    @staticmethod
    def complete(transaction, buyer_sigs, seller_sigs, script_code):
        """Fill in the scriptSigs: OP_0, both signatures, then the redeem script."""
        for i, _ in enumerate(transaction.inputs):
            sigs = (buyer_sigs[i], seller_sigs[i])
            # Leading OP_0 works around the off-by-one bug in CHECKMULTISIG.
            script = "\x00"
            for sig in sigs:
                script += chr(len(sig)) + sig
            # 0x4c is OP_PUSHDATA1: one length byte follows, then the script.
            script += "\x4c"
            assert len(script_code) < 255
            script += chr(len(script_code)) + script_code
            transaction.inputs[i].script = script
        return transaction
| mit |
Stanford-Online/edx-analytics-data-api | analytics_data_api/v0/views/__init__.py | 1 | 10029 | from itertools import groupby
from django.db import models
from django.db.models import Q
from django.utils import timezone
from rest_framework import generics, serializers
from opaque_keys.edx.keys import CourseKey
from analytics_data_api.v0.exceptions import CourseNotSpecifiedError
from analytics_data_api.v0.views.utils import (
raise_404_if_none,
split_query_argument,
validate_course_id
)
class CourseViewMixin(object):
    """
    Captures the course_id from the url and validates it.
    """
    course_id = None
    def get(self, request, *args, **kwargs):
        # Prefer the URL kwarg; fall back to the ?course_id= query parameter.
        self.course_id = self.kwargs.get('course_id', request.query_params.get('course_id', None))
        if not self.course_id:
            raise CourseNotSpecifiedError()
        # Raises if the id is not a well-formed course key.
        validate_course_id(self.course_id)
        return super(CourseViewMixin, self).get(request, *args, **kwargs)
class PaginatedHeadersMixin(object):
    """
    If the response is paginated, then augment it with this response header:
    * Link: list of next and previous pagination URLs, e.g.
        <next_url>; rel="next", <previous_url>; rel="prev"
    Format follows the github API convention:
    https://developer.github.com/guides/traversing-with-pagination/
    Useful with PaginatedCsvRenderer, so that previous/next links aren't lost when returning CSV data.
    """
    # TODO: When we upgrade to Django REST API v3.1, define a custom DEFAULT_PAGINATION_CLASS
    # instead of using this mechanism:
    # http://www.django-rest-framework.org/api-guide/pagination/#header-based-pagination
    def get(self, request, *args, **kwargs):
        """
        Stores pagination links in a response header.
        """
        # Bug fix: unpack *args/**kwargs when forwarding.  The original passed
        # `args` and `kwargs` as two positional arguments, corrupting the
        # downstream get() signature.
        response = super(PaginatedHeadersMixin, self).get(request, *args, **kwargs)
        link = self.get_paginated_links(response.data)
        if link:
            response['Link'] = link
        return response

    @staticmethod
    def get_paginated_links(data):
        """
        Returns the Link header value built from 'next'/'previous' URLs in data.

        Un-paginated data is returned as a list, not a dict, and yields ''.
        """
        next_url = None
        prev_url = None
        if isinstance(data, dict):
            next_url = data.get('next')
            prev_url = data.get('previous')
        if next_url is not None and prev_url is not None:
            link = '<{next_url}>; rel="next", <{prev_url}>; rel="prev"'
        elif next_url is not None:
            link = '<{next_url}>; rel="next"'
        elif prev_url is not None:
            link = '<{prev_url}>; rel="prev"'
        else:
            link = ''
        return link.format(next_url=next_url, prev_url=prev_url)
class CsvViewMixin(object):
    """
    Augments a text/csv response with this header:
    * Content-Disposition: allows the client to download the response as a file attachment.
    """
    # Default filename slug for CSV download files
    filename_slug = 'report'
    def get_csv_filename(self):
        """
        Returns the filename for the CSV download.
        """
        # Filename pattern: <org>-<course>-<run>--<ISO timestamp>--<slug>.csv
        course_key = CourseKey.from_string(self.course_id)
        course_id = u'-'.join([course_key.org, course_key.course, course_key.run])
        # Drop microseconds so the timestamp in the name stays readable.
        now = timezone.now().replace(microsecond=0)
        return u'{0}--{1}--{2}.csv'.format(course_id, now.isoformat(), self.filename_slug)
    def finalize_response(self, request, response, *args, **kwargs):
        """
        Append Content-Disposition header to CSV requests.
        """
        # Only clients explicitly asking for text/csv get the attachment header.
        if request.META.get('HTTP_ACCEPT') == u'text/csv':
            response['Content-Disposition'] = u'attachment; filename={}'.format(self.get_csv_filename())
        return super(CsvViewMixin, self).finalize_response(request, response, *args, **kwargs)
class APIListView(generics.ListAPIView):
    """
    An abstract view to store common code for views that return a list of data.
    **Example Requests**
        GET /api/v0/some_endpoint/
            Returns full list of serialized models with all default fields.
        GET /api/v0/some_endpoint/?ids={id_1},{id_2}
            Returns list of serialized models with IDs that match an ID in the given
            `ids` query parameter with all default fields.
        GET /api/v0/some_endpoint/?ids={id_1},{id_2}&fields={some_field_1},{some_field_2}
            Returns list of serialized models with IDs that match an ID in the given
            `ids` query parameter with only the fields in the given `fields` query parameter.
        GET /api/v0/some_endpoint/?ids={id_1},{id_2}&exclude={some_field_1},{some_field_2}
            Returns list of serialized models with IDs that match an ID in the given
            `ids` query parameter with all fields except those in the given `exclude` query
            parameter.
        POST /api/v0/some_endpoint/
        {
            "ids": [
                "{id_1}",
                "{id_2}",
                ...
                "{id_200}"
            ],
            "fields": [
                "{some_field_1}",
                "{some_field_2}"
            ]
        }
    **Response Values**
        Since this is an abstract class, this view just returns an empty list.
    **Parameters**
        This view supports filtering the results by a given list of IDs. It also supports
        explicitly specifying the fields to include in each result with `fields` as well of
        the fields to exclude with `exclude`.
        For GET requests, these parameters are passed in the query string.
        For POST requests, these parameters are passed as a JSON dict in the request body.
        ids -- The comma-separated list of identifiers for which results are filtered to.
            For example, 'edX/DemoX/Demo_Course,course-v1:edX+DemoX+Demo_2016'. Default is to
            return all courses.
        fields -- The comma-separated fields to return in the response.
            For example, 'course_id,created'. Default is to return all fields.
        exclude -- The comma-separated fields to exclude in the response.
            For example, 'course_id,created'. Default is to not exclude any fields.
    **Notes**
        * GET is usable when the number of IDs is relatively low
        * POST is required when the number of course IDs would cause the URL to be too long.
        * POST functions the same as GET here. It does not modify any state.
    """
    # Per-request state populated in get()/post() before the queryset is built.
    ids = None
    fields = None
    exclude = None
    # NOTE(review): class-level mutable list shared across subclasses -- it is
    # only read (concatenated) below, so this is safe as long as nobody mutates it.
    always_exclude = []
    model_id_field = 'id'
    ids_param = 'ids'
    def get_serializer(self, *args, **kwargs):
        # Forward the requested field inclusion/exclusion to the serializer.
        kwargs.update({
            'context': self.get_serializer_context(),
            'fields': self.fields,
            'exclude': self.exclude
        })
        return self.get_serializer_class()(*args, **kwargs)
    def get(self, request, *args, **kwargs):
        # Parse comma-separated query-string parameters into lists.
        query_params = self.request.query_params
        self.fields = split_query_argument(query_params.get('fields'))
        exclude = split_query_argument(query_params.get('exclude'))
        self.exclude = self.always_exclude + (exclude if exclude else [])
        self.ids = split_query_argument(query_params.get(self.ids_param))
        self.verify_ids()
        return super(APIListView, self).get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        # self.request.data is a QueryDict. For keys with singleton lists as values,
        # QueryDicts return the singleton element of the list instead of the list itself,
        # which is undesirable. So, we convert to a normal dict.
        request_data_dict = dict(request.data)
        self.fields = request_data_dict.get('fields')
        exclude = request_data_dict.get('exclude')
        self.exclude = self.always_exclude + (exclude if exclude else [])
        self.ids = request_data_dict.get(self.ids_param)
        self.verify_ids()
        return super(APIListView, self).get(request, *args, **kwargs)
    def verify_ids(self):
        """
        Optionally raise an exception if any of the IDs set as self.ids are invalid.
        By default, no verification is done.
        Subclasses can override this if they wish to perform verification.
        """
        pass
    def base_field_dict(self, item_id):
        """Default result with fields pre-populated to default values."""
        field_dict = {
            self.model_id_field: item_id,
        }
        return field_dict
    def update_field_dict_from_model(self, model, base_field_dict=None, field_list=None):
        # When no explicit field list is given, copy every model field.
        field_list = (field_list if field_list else
                      [f.name for f in self.model._meta.get_fields()])  # pylint: disable=protected-access
        field_dict = base_field_dict if base_field_dict else {}
        field_dict.update({field: getattr(model, field) for field in field_list})
        return field_dict
    def postprocess_field_dict(self, field_dict):
        """Applies some business logic to final result without access to any data from the original model."""
        return field_dict
    def group_by_id(self, queryset):
        """Return results aggregated by a distinct ID."""
        # NOTE(review): itertools.groupby only merges adjacent rows, so the
        # queryset is presumably ordered by model_id_field -- verify callers.
        aggregate_field_dict = []
        for item_id, model_group in groupby(queryset, lambda x: (getattr(x, self.model_id_field))):
            field_dict = self.base_field_dict(item_id)
            for model in model_group:
                field_dict = self.update_field_dict_from_model(model, base_field_dict=field_dict)
            field_dict = self.postprocess_field_dict(field_dict)
            aggregate_field_dict.append(field_dict)
        return aggregate_field_dict
    def get_query(self):
        # OR together one Q(id=...) per requested id.  `reduce` here is the
        # Python 2 builtin; this module predates Python 3 support.
        return reduce(lambda q, item_id: q | Q(id=item_id), self.ids, Q())
    @raise_404_if_none
    def get_queryset(self):
        if self.ids:
            queryset = self.model.objects.filter(self.get_query())
        else:
            # No ids filter given: return everything.
            queryset = self.model.objects.all()
        field_dict = self.group_by_id(queryset)
        # Django-rest-framework will serialize this dictionary to a JSON response
        return field_dict
| agpl-3.0 |
radez/tripleo-image-elements | elements/horizon/os-apply-config/etc/horizon/local_settings.py | 2 | 2751 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from openstack_dashboard import exceptions
DEBUG = False
TEMPLATE_DEBUG = DEBUG
COMPRESS_OFFLINE = True
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
STATIC_ROOT = '/var/www/horizon/static'
ALLOWED_HOSTS = ['*']
with open('/etc/horizon/.secret_key_store', 'r') as f:
SECRET_KEY = f.read()
HORIZON_CONFIG = {
'dashboards': ('project', 'admin', 'settings',),
'default_dashboard': 'project',
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
}
# Cache backend selection is templated (mustache): when memcached nodes are
# provided under horizon.caches.memcached the rendered file uses the
# memcached backend with that node list; otherwise it falls back to
# per-process local memory.
CACHES = {
    'default': {
{{#horizon.caches.memcached}}
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': [{{#nodes}}'{{.}}',{{/nodes}}]
{{/horizon.caches.memcached}} # flake8: noqa
{{^horizon.caches.memcached}}
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
{{/horizon.caches.memcached}}
    }
}
# Sessions are stored in the cache only when a shared (memcached) backend
# exists; local memory would lose sessions across processes.
{{#horizon.caches.memcached}}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
{{/horizon.caches.memcached}}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "{{keystone.host}}"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': True,
# NOTE: as of Grizzly this is not yet supported in Nova so enabling this
# setting will not do anything useful
'can_encrypt_volumes': False
}
OPENSTACK_NEUTRON_NETWORK = {
'enable_lb': False
}
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
TIME_ZONE = "UTC"
| apache-2.0 |
Ayub-Khan/edx-platform | openedx/core/djangoapps/content/course_overviews/management/commands/tests/test_generate_course_overview.py | 14 | 3080 | # pylint: disable=missing-docstring
from django.core.management.base import CommandError
from mock import patch
from openedx.core.djangoapps.content.course_overviews.management.commands import generate_course_overview
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestGenerateCourseOverview(ModuleStoreTestCase):
    """
    Tests course overview management command.
    """
    def setUp(self):
        """
        Create courses in modulestore.
        """
        super(TestGenerateCourseOverview, self).setUp()
        # Two freshly-created courses that start out absent from CourseOverview.
        self.course_key_1 = CourseFactory.create().id
        self.course_key_2 = CourseFactory.create().id
        self.command = generate_course_overview.Command()
    def _assert_courses_not_in_overview(self, *courses):
        """
        Assert that courses doesn't exist in the course overviews.
        """
        course_keys = CourseOverview.get_all_course_keys()
        for expected_course_key in courses:
            self.assertNotIn(expected_course_key, course_keys)
    def _assert_courses_in_overview(self, *courses):
        """
        Assert courses exists in course overviews.
        """
        course_keys = CourseOverview.get_all_course_keys()
        for expected_course_key in courses:
            self.assertIn(expected_course_key, course_keys)
    def test_generate_all(self):
        """
        Test that all courses in the modulestore are loaded into course overviews.
        """
        # ensure that the newly created courses aren't in course overviews
        self._assert_courses_not_in_overview(self.course_key_1, self.course_key_2)
        self.command.handle(all=True)
        # CourseOverview will be populated with all courses in the modulestore
        self._assert_courses_in_overview(self.course_key_1, self.course_key_2)
    def test_generate_one(self):
        """
        Test that a specified course is loaded into course overviews.
        """
        self._assert_courses_not_in_overview(self.course_key_1, self.course_key_2)
        # Course keys are opaque objects; the command takes the string form.
        self.command.handle(unicode(self.course_key_1), all=False)
        self._assert_courses_in_overview(self.course_key_1)
        self._assert_courses_not_in_overview(self.course_key_2)
    def test_invalid_key(self):
        """
        Test that CommandError is raised for invalid key.
        """
        with self.assertRaises(CommandError):
            self.command.handle('not/found', all=False)
    @patch('openedx.core.djangoapps.content.course_overviews.models.log')
    def test_not_found_key(self, mock_log):
        """
        Test keys not found are logged.
        """
        # A well-formed but nonexistent key is logged, not raised.
        self.command.handle('fake/course/id', all=False)
        self.assertTrue(mock_log.exception.called)
    def test_no_params(self):
        """
        Test exception raised when no parameters are specified.
        """
        with self.assertRaises(CommandError):
            self.command.handle(all=False)
| agpl-3.0 |
arvinsingla/CouchPotatoServer | libs/apscheduler/jobstores/mongodb_store.py | 132 | 2903 | """
Stores jobs in a MongoDB database.
"""
import logging
from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from bson.binary import Binary
from pymongo.connection import Connection
except ImportError: # pragma: nocover
raise ImportError('MongoDBJobStore requires PyMongo installed')
logger = logging.getLogger(__name__)
class MongoDBJobStore(JobStore):
    """Job store that persists APScheduler jobs in a MongoDB collection.

    Trigger, args and kwargs are pickled and stored as BSON Binary; the
    MongoDB ``_id`` doubles as the job id.
    """
    def __init__(self, database='apscheduler', collection='jobs',
                 connection=None, pickle_protocol=pickle.HIGHEST_PROTOCOL,
                 **connect_args):
        # In-memory cache of Job objects mirroring the collection.
        self.jobs = []
        self.pickle_protocol = pickle_protocol
        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')
        # Reuse a caller-supplied connection, otherwise open a new one with
        # the remaining keyword arguments.
        if connection:
            self.connection = connection
        else:
            self.connection = Connection(**connect_args)
        self.collection = self.connection[database][collection]
    def add_job(self, job):
        """Insert *job* into the collection and cache it; sets job.id."""
        job_dict = job.__getstate__()
        # Non-BSON-serializable members are pickled into Binary fields.
        job_dict['trigger'] = Binary(pickle.dumps(job.trigger,
                                                  self.pickle_protocol))
        job_dict['args'] = Binary(pickle.dumps(job.args,
                                               self.pickle_protocol))
        job_dict['kwargs'] = Binary(pickle.dumps(job.kwargs,
                                                 self.pickle_protocol))
        job.id = self.collection.insert(job_dict)
        self.jobs.append(job)
    def remove_job(self, job):
        """Delete *job* from the collection and the in-memory cache."""
        self.collection.remove(job.id)
        self.jobs.remove(job)
    def load_jobs(self):
        """Rebuild the in-memory job list from the collection.

        Jobs that fail to unpickle are logged and skipped rather than
        aborting the whole load.
        """
        jobs = []
        for job_dict in self.collection.find():
            try:
                job = Job.__new__(Job)
                # Map Mongo's _id back onto the Job's id attribute.
                job_dict['id'] = job_dict.pop('_id')
                job_dict['trigger'] = pickle.loads(job_dict['trigger'])
                job_dict['args'] = pickle.loads(job_dict['args'])
                job_dict['kwargs'] = pickle.loads(job_dict['kwargs'])
                job.__setstate__(job_dict)
                jobs.append(job)
            except Exception:
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs
    def update_job(self, job):
        """Persist the job's next run time and bump its run counter."""
        spec = {'_id': job.id}
        document = {'$set': {'next_run_time': job.next_run_time},
                    '$inc': {'runs': 1}}
        self.collection.update(spec, document)
    def close(self):
        """Disconnect from MongoDB."""
        self.connection.disconnect()
    def __repr__(self):
        connection = self.collection.database.connection
        return '<%s (connection=%s)>' % (self.__class__.__name__, connection)
| gpl-3.0 |
rupran/ansible | lib/ansible/modules/network/omapi_host.py | 19 | 12359 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure DHCPd hosts using OMAPI protocol
(c) 2016, Loic Blot <loic.blot@unix-experience.fr>
Sponsored by Infopro Digital. http://www.infopro-digital.com/
Sponsored by E.T.A.I. http://www.etai.fr/
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: omapi_host
short_description: Setup OMAPI hosts.
description:
- Create, update and remove OMAPI hosts into compatible DHCPd servers.
version_added: "2.3"
requirements:
- pypureomapi
author: "Loic Blot (@nerzhul)"
options:
state:
description:
- Create or remove OMAPI host.
required: true
choices: ['present', 'absent']
name:
description:
- Sets the host lease hostname (mandatory if state=present).
default: None
host:
description:
- Sets OMAPI server host to interact with.
default: localhost
port:
description:
- Sets the OMAPI server port to interact with.
default: 7911
key_name:
description:
- Sets the TSIG key name for authenticating against OMAPI server.
required: true
key:
description:
- Sets the TSIG key content for authenticating against OMAPI server.
required: true
macaddr:
description:
- Sets the lease host MAC address.
required: true
ip:
description:
- Sets the lease host IP address.
required: false
default: None
statements:
description:
- Attach a list of OMAPI DHCP statements with host lease (without ending semicolon).
required: false
default: []
ddns:
description:
- Enable dynamic DNS updates for this host.
required: false
default: false
'''
EXAMPLES = '''
- name: Remove a host using OMAPI
omapi_host:
key_name: "defomapi"
key: "+bFQtBCta6j2vWkjPkNFtgA=="
host: "10.1.1.1"
macaddr: "00:66:ab:dd:11:44"
state: absent
- name: Add a host using OMAPI
omapi_host:
key_name: "defomapi"
key: "+bFQtBCta6j2vWkjPkNFtgA=="
host: "10.98.4.55"
macaddr: "44:dd:ab:dd:11:44"
name: "server01"
ip: "192.168.88.99"
ddns: yes
statements:
- 'filename "pxelinux.0"'
- 'next-server 1.1.1.1'
state: present
'''
RETURN = '''
changed:
description: If module has modified a host
returned: success
type: string
lease:
    description: dictionary containing host information
returned: success
    type: dictionary
contains:
ip-address:
description: IP address, if there is.
returned: success
type: string
sample: '192.168.1.5'
hardware-address:
description: MAC address
returned: success
type: string
sample: '00:11:22:33:44:55'
hardware-type:
description: hardware type, generally '1'
returned: success
type: int
sample: 1
name:
description: hostname
returned: success
type: string
sample: 'mydesktop'
'''
# import module snippets
from ansible.module_utils.basic import AnsibleModule, get_exception, to_bytes
from ansible.module_utils.six import iteritems
import socket
import struct
import binascii
try:
from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
pureomapi_found = True
except ImportError:
pureomapi_found = False
class OmapiHostManager:
    """Manage DHCP host leases on an ISC DHCP server via OMAPI.

    Thin wrapper around a pypureomapi connection; every error path calls
    module.fail_json(), which terminates the Ansible module run, and the
    success paths call module.exit_json().
    """
    def __init__(self, module):
        # module: the AnsibleModule holding the validated parameters.
        self.module = module
        self.omapi = None
        self.connect()
    def connect(self):
        """Open the OMAPI connection using host/port/key_name/key params.

        Fails the module on a malformed base64 key, on OMAPI-level errors
        and on plain socket errors.
        """
        try:
            self.omapi = Omapi(self.module.params['host'], self.module.params['port'], self.module.params['key_name'],
                               self.module.params['key'])
        except binascii.Error:
            self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
        except OmapiError:
            e = get_exception()
            self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
                                      "are valid. Exception was: %s" % e)
        except socket.error:
            e = get_exception()
            self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % e)
    def get_host(self, macaddr):
        """Query the server for a host object by MAC address.

        Returns the OmapiMessage response, or None when the server did
        not answer with an update (i.e. the host does not exist).
        """
        msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
        msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
        # hardware-type 1 == Ethernet.
        msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
        response = self.omapi.query_server(msg)
        if response.opcode != OMAPI_OP_UPDATE:
            return None
        return response
    @staticmethod
    def unpack_facts(obj):
        """Convert an OMAPI object (sequence of (key, value) pairs) into a
        plain dict, decoding MAC/IP addresses into readable strings."""
        result = dict(obj)
        if 'hardware-address' in result:
            result['hardware-address'] = unpack_mac(result['hardware-address'])
        if 'ip-address' in result:
            result['ip-address'] = unpack_ip(result['ip-address'])
        if 'hardware-type' in result:
            # NOTE(review): struct.unpack returns a 1-tuple, so this stores
            # (1,) rather than the bare integer -- confirm callers expect
            # a tuple here.
            result['hardware-type'] = struct.unpack("!I", result['hardware-type'])
        return result
    def setup_host(self):
        """Create the host lease if absent, otherwise update it in place.

        Only the IP address can be changed on an existing lease; a
        hostname change is rejected with fail_json. Exits the module via
        exit_json/fail_json in every branch.
        """
        if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
            self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.")
        msg = None
        host_response = self.get_host(self.module.params['macaddr'])
        # If host was not found using macaddr, add create message
        if host_response is None:
            msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
            msg.message.append(('create', struct.pack('!I', 1)))
            msg.message.append(('exclusive', struct.pack('!I', 1)))
            msg.obj.append(('hardware-address', pack_mac(self.module.params['macaddr'])))
            msg.obj.append(('hardware-type', struct.pack('!I', 1)))
            msg.obj.append(('name', self.module.params['hostname']))
            if self.module.params['ip'] is not None:
                msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
            # Build the dhcpd "statements" string: optional ddns-hostname
            # followed by the user-supplied statements, ';'-terminated.
            stmt_join = ""
            if self.module.params['ddns']:
                stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
            try:
                if len(self.module.params['statements']) > 0:
                    stmt_join += "; ".join(self.module.params['statements'])
                    stmt_join += "; "
            except TypeError:
                e = get_exception()
                self.module.fail_json(msg="Invalid statements found: %s" % e)
            if len(stmt_join) > 0:
                msg.obj.append(('statements', stmt_join))
            try:
                response = self.omapi.query_server(msg)
                if response.opcode != OMAPI_OP_UPDATE:
                    self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
                                              "are valid.")
                self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
            except OmapiError:
                e = get_exception()
                self.module.fail_json(msg="OMAPI error: %s" % e)
        # Forge update message
        else:
            response_obj = self.unpack_facts(host_response.obj)
            fields_to_update = {}
            # NOTE(review): response_obj keys may be bytes on Python 3
            # (see the to_bytes lookup below) while 'name' is looked up as
            # str -- confirm the key types returned by pypureomapi.
            if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
                    unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
                fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
            # Name cannot be changed
            if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
                self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
                                          "Please delete host and add new." %
                                          (response_obj['name'], self.module.params['hostname']))
            """
            # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
            if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
                response_obj['statements'] != self.module.params['statements']:
                with open('/tmp/omapi', 'w') as fb:
                    for (k,v) in iteritems(response_obj):
                        fb.writelines('statements: %s %s\n' % (k, v))
            """
            if len(fields_to_update) == 0:
                # Nothing differs: report no change.
                self.module.exit_json(changed=False, lease=response_obj)
            else:
                msg = OmapiMessage.update(host_response.handle)
                msg.update_object(fields_to_update)
                try:
                    response = self.omapi.query_server(msg)
                    if response.opcode != OMAPI_OP_STATUS:
                        self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
                                                  "are valid.")
                    self.module.exit_json(changed=True)
                except OmapiError:
                    e = get_exception()
                    self.module.fail_json(msg="OMAPI error: %s" % e)
    def remove_host(self):
        """Delete the host lease by MAC; absent host is reported unchanged."""
        try:
            self.omapi.del_host(self.module.params['macaddr'])
            self.module.exit_json(changed=True)
        except OmapiErrorNotFound:
            # Host already absent: success with changed=False.
            self.module.exit_json()
        except OmapiError:
            e = get_exception()
            self.module.fail_json(msg="OMAPI error: %s" % e)
def main():
    """Ansible entry point: validate parameters and apply the requested
    state (present/absent) to the DHCP host lease over OMAPI."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, type='str', choices=['present', 'absent']),
            host=dict(type='str', default="localhost"),
            port=dict(type='int', default=7911),
            # FIX: required parameters must not also declare a default;
            # Ansible's argument-spec validation flags the combination of
            # required=True and default=... as an error. Dropping the
            # redundant default=None keeps behavior identical.
            key_name=dict(required=True, type='str'),
            key=dict(required=True, type='str', no_log=True),
            macaddr=dict(required=True, type='str'),
            hostname=dict(type='str', default=None, aliases=['name']),
            ip=dict(type='str', default=None),
            ddns=dict(type='bool', default=False),
            statements=dict(type='list', default=[])
        ),
        supports_check_mode=False
    )
    if not pureomapi_found:
        module.fail_json(msg="pypureomapi library is required by this module.")
    # Belt-and-braces: required=True rejects missing values, but an empty
    # string would still pass, so check explicitly.
    if module.params['key'] is None or len(module.params["key"]) == 0:
        module.fail_json(msg="'key' parameter cannot be empty.")
    if module.params['key_name'] is None or len(module.params["key_name"]) == 0:
        module.fail_json(msg="'key_name' parameter cannot be empty.")
    host_manager = OmapiHostManager(module)
    try:
        if module.params['state'] == 'present':
            host_manager.setup_host()
        elif module.params['state'] == 'absent':
            host_manager.remove_host()
    except ValueError:
        e = get_exception()
        module.fail_json(msg="OMAPI input value error: %s" % e)
if __name__ == '__main__':
    main()
| gpl-3.0 |
vvoZokk/dnn | dnn_project/generate_protos.py | 1 | 4557 | #!/usr/bin/env python
import os
import argparse
import re
from collections import defaultdict
import sys
# Mapping from recognized C++ field types to protobuf scalar/message types.
KNOWN_TYPES = {
    "double" : "double",
    "int" : "int32",
    "size_t" : "uint32",
    "float" : "float",
    "string" : "string",
    "bool" : "bool",
    "complex<double>" : "double",
    "pair<string, size_t>" : "TStringToUintPair",
    "pair<size_t, size_t>" : "TUintToUintPair",
}

# Matches vector<T> / ActVector<T> and captures the element type T.
VECTOR_RE = re.compile("(?:vector|ActVector)+<(.*)>")

def generateProtos(all_structures, package, dst, imports):
    """Write one .proto file per parsed source file.

    all_structures: mapping of source file name -> list of structure
        descriptions ({'name': ..., 'fields': [(type, name), ...]}) as
        produced by parseSources().
    package: protobuf package name declared in each generated file.
    dst: destination directory for the generated .proto files.
    imports: list of .proto paths imported by every generated file.
    """
    # .items() instead of .iteritems() so the generator also runs on py3.
    for fname, structures in all_structures.items():
        # foo.cpp -> foo.proto, written into the destination directory.
        dst_file = fname.split(".")[0] + ".proto"
        with open(os.path.join(dst, dst_file), 'w') as f_ptr:
            f_ptr.write("package %s;\n" % package)
            f_ptr.write("\n")
            for imp in imports:
                f_ptr.write("import \"{}\";\n".format(imp))
            f_ptr.write("\n")
            for s in structures:
                f_ptr.write("message %s {\n" % s['name'])
                field_num = 1
                for ftype, field_name in s['fields']:
                    if KNOWN_TYPES.get(ftype) is None:
                        # Not a scalar: must be a vector<T>/ActVector<T>.
                        m = VECTOR_RE.match(ftype)
                        if m is None:
                            raise Exception("Can't match {}".format(ftype))
                        f_ptr.write("    repeated %s %s = %s;\n" % (KNOWN_TYPES[m.group(1)], field_name, str(field_num)))
                        if m.group(1).startswith("complex"):
                            # Complex values are stored as two parallel arrays
                            # (real and imaginary), consuming two field tags.
                            f_ptr.write("    repeated %s %s = %s;\n" % (KNOWN_TYPES[m.group(1)], field_name + "_imag", str(field_num + 1)))
                            field_num += 1
                        # BUG FIX: the original only advanced the field number
                        # inside the complex branch, so consecutive plain
                        # vector fields were assigned duplicate protobuf tags.
                        field_num += 1
                    else:
                        f_ptr.write("    required %s %s = %s;\n" % (KNOWN_TYPES[ftype], field_name, str(field_num)))
                        field_num += 1
                f_ptr.write("}\n")
                f_ptr.write("\n")
def parseSources(src):
    """Walk the source tree and collect structures marked @GENERATE_PROTO@.

    Returns a defaultdict mapping a file name to the list of structures
    found in it; each structure is {'name': <class/struct name>,
    'fields': [(type, name), ...]}. Only fields whose type is in
    KNOWN_TYPES (or a vector/ActVector of one) are collected, and only at
    brace depth 1 (the top level of the class body).
    """
    structures = defaultdict(list)
    for root, dirs, files in os.walk(src):
        for f in files:
            af = os.path.join(root, f)
            generate_proto = False
            if af.endswith(".cpp") or af.endswith(".h"):
                for l in open(af):
                    l = l.strip()
                    # Drop trailing // line comments before matching.
                    l = l.split("//")[0]
                    if "@GENERATE_PROTO@" in l:
                        # Marker seen: the next declaration starts a structure.
                        generate_proto = True
                        struct = {}
                        curly_counter = 0
                        continue
                    if generate_proto:
                        # Track brace depth to detect the end of the body.
                        curly_counter += l.count("{")
                        curly_counter -= l.count("}")
                        if len(struct) == 0:
                            # First line after the marker must declare the type.
                            m = re.match("[\W]*(?:class|struct)[\W]+([^ ]+)", l)
                            if not m:
                                raise Exception("Can't parse GENERATE_PROTO class or struct")
                            struct['name'] = m.group(1)
                            struct['fields'] = []
                        else:
                            # Match "TYPE name;" where TYPE is a known scalar
                            # or vector/ActVector of one; names starting with
                            # __ are deliberately skipped ((?!__)).
                            m = re.match(
                                "(%s)[\W]+(?!__)([^ ]*);[\W]*$" % "|".join(
                                    KNOWN_TYPES.keys() + [ "(?:vector|ActVector)+<{}>".format(t) for t in KNOWN_TYPES.keys() ]
                                ),
                                l
                            )
                            if m and curly_counter == 1:
                                struct['fields'].append( (m.group(1), m.group(2)) )
                                continue
                        if len(struct) > 0 and curly_counter == 0:
                            # Closing brace reached: commit the structure.
                            generate_proto = False
                            structures[f].append(struct)
    return structures
if __name__ == "__main__":
    # Command-line entry point: scan the source tree for @GENERATE_PROTO@
    # structures and emit the corresponding .proto files.
    cli = argparse.ArgumentParser()
    cli.add_argument("-s", "--source-path", help="Path to the sources",
                     type=str, required=True)
    cli.add_argument("-d", "--dest-path", help="Path where to store .proto",
                     type=str, required=True)
    cli.add_argument("-p", "--package", help="Package name, default : %(default)s",
                     type=str, required=False, default="Protos")
    cli.add_argument("-i", "--imports", help="Put imports to all messages (separated by ;)",
                     type=str, required=False, default=None)
    opts = cli.parse_args()
    parsed_structures = parseSources(opts.source_path)
    import_list = []
    if opts.imports:
        # Split on ';' and keep only non-empty trimmed entries.
        import_list = [item.strip() for item in opts.imports.split(";") if item.strip()]
    generateProtos(parsed_structures, opts.package, opts.dest_path, import_list)
| mit |
akshaya9/fosswebsite | achievements/migrations/0001_initial.py | 8 | 5349 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-26 12:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the achievements app.

    Creates the Article, Contest, Contribution, Gsoc, Intern and Speaker
    tables. Each record is owned by a user (FK to AUTH_USER_MODEL,
    cascade delete) and every model is ordered by most recent date first.
    Auto-generated by Django 1.11.2 -- do not edit by hand.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True)),
                ('magazine', models.CharField(max_length=200)),
                ('date', models.DateField()),
                ('area', models.CharField(blank=True, max_length=300)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
        migrations.CreateModel(
            name='Contest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contest_id', models.CharField(max_length=200)),
                ('title', models.CharField(max_length=200)),
                ('url', models.URLField(blank=True)),
                ('problems_solved', models.IntegerField()),
                ('ranking', models.BigIntegerField()),
                ('date', models.DateField()),
                ('description', models.TextField(null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
        migrations.CreateModel(
            name='Contribution',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300)),
                ('contribution_id', models.CharField(max_length=200)),
                ('organisation', models.CharField(max_length=200)),
                ('url', models.URLField()),
                ('description', models.TextField(blank=True)),
                ('date', models.DateField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
        migrations.CreateModel(
            name='Gsoc',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('organization', models.CharField(max_length=200)),
                ('title', models.CharField(max_length=250)),
                ('mentors', models.CharField(max_length=300)),
                ('url', models.URLField(max_length=400)),
                ('description', models.TextField(blank=True)),
                ('date', models.DateField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
        migrations.CreateModel(
            name='Intern',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('organisation', models.CharField(max_length=300)),
                ('title', models.CharField(max_length=300)),
                ('location', models.CharField(max_length=200)),
                ('type', models.CharField(choices=[('internship', 'Internship'), ('masters', 'Masters'), ('exchange_student', 'Exchange programme')], max_length=100)),
                ('date', models.DateField()),
                ('description', models.TextField(blank=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
        migrations.CreateModel(
            name='Speaker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('type', models.CharField(choices=[('talk', ' Talk'), ('demo', 'Demo'), ('workshop', 'Workshop'), ('paper', 'Paper Presentation'), ('other', 'Other')], max_length=100)),
                ('conference_name', models.CharField(max_length=200)),
                ('location', models.CharField(max_length=300)),
                ('url', models.URLField(blank=True)),
                ('date', models.DateField()),
                ('description', models.TextField(blank=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
    ]
| mit |
yasserglez/pytiger2c | packages/pytiger2c/dot.py | 1 | 2786 | # -*- coding: utf-8 -*-
"""
Clases utilizadas en la generación de un archivo Graphviz DOT con el
árbol de sintáxis abstracta creado a partir de un programa Tiger.
"""
class DotGenerator(object):
    """Builds a graph description in the Graphviz DOT format.

    Used to emit a DOT representation of the abstract syntax tree of a
    Tiger program: nodes are added with add_node(), connected with
    add_edge(), and the resulting code is emitted with write().
    """

    def __init__(self):
        """Initialize an empty graph with no nodes or edges."""
        self._nodes = []
        self._edges = []
        self._num_nodes = 0

    def add_node(self, label):
        """Add a node to the graph under construction.

        label: display name of the new node.

        Returns the identifier of the new node, which can be passed to
        add_edge() to connect it with other nodes.
        """
        self._num_nodes += 1
        node_id = 'node{number}'.format(number=self._num_nodes)
        self._nodes.append('{name} [label="{label}"];'.format(name=node_id, label=label))
        return node_id

    def add_edge(self, from_node, to_node):
        """Add an undirected edge between two nodes of the graph.

        from_node, to_node: identifiers (as returned by add_node) of the
        two endpoints of the edge.
        """
        self._edges.append(
            '{from_node} -- {to_node};'.format(from_node=from_node, to_node=to_node))

    def write(self, output_fd):
        """Write the Graphviz DOT code for the graph to a file object.

        output_fd: file-like object that receives the generated DOT code.
        """
        pad = ' ' * 4
        output_fd.write('graph AST {\n')
        output_fd.write(pad + 'node [shape=record];\n\n')
        for node_line in self._nodes:
            output_fd.write(pad + node_line + '\n')
        output_fd.write('\n')
        for edge_line in self._edges:
            output_fd.write(pad + edge_line + '\n')
        output_fd.write('}\n')
| mit |
AdaptiveApplications/carnegie | tarc_bus_locator_client/quantities-0.10.1/build/lib/quantities/uncertainquantity.py | 2 | 7392 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
import sys
import numpy as np
from . import markup
from .quantity import Quantity, scale_other_units
from .registry import unit_registry
from .decorators import with_doc
class UncertainQuantity(Quantity):
    """A Quantity that carries an associated uncertainty array.

    The uncertainty is stored as a Quantity of identical shape and
    dimensionality; arithmetic operations propagate it with the standard
    first-order rules assuming uncorrelated errors (sums in quadrature
    for +/-, relative uncertainties in quadrature for *, /).
    """

    # TODO: what is an appropriate value?
    __array_priority__ = 22

    def __new__(cls, data, units='', uncertainty=None, dtype='d', copy=True):
        ret = Quantity.__new__(cls, data, units, dtype, copy)
        # _uncertainty initialized to be dimensionless by __array_finalize__:
        ret._uncertainty._dimensionality = ret._dimensionality
        if uncertainty is not None:
            ret.uncertainty = Quantity(uncertainty, ret._dimensionality)
        elif isinstance(data, UncertainQuantity):
            # BUG FIX: this branch previously compared against
            # ``uncertainty._dimensionality`` although ``uncertainty`` is
            # always None here (see the branch above), so constructing from
            # an UncertainQuantity with copy=False raised AttributeError.
            # Derive the uncertainty from ``data`` instead.
            uncertainty = data.uncertainty
            if copy or ret._dimensionality != uncertainty._dimensionality:
                uncertainty = uncertainty.rescale(ret.units)
            ret.uncertainty = uncertainty
        return ret

    @Quantity.units.setter
    def units(self, units):
        # Keep the uncertainty's units in lock-step with the data's units.
        super(UncertainQuantity, self)._set_units(units)
        self.uncertainty.units = self._dimensionality

    @property
    def _reference(self):
        ret = super(UncertainQuantity, self)._reference.view(UncertainQuantity)
        ret.uncertainty = self.uncertainty._reference
        return ret

    @property
    def simplified(self):
        ret = super(UncertainQuantity, self).simplified.view(UncertainQuantity)
        ret.uncertainty = self.uncertainty.simplified
        return ret

    @property
    def uncertainty(self):
        """The uncertainty as a Quantity with the same shape and units."""
        return self._uncertainty

    @uncertainty.setter
    def uncertainty(self, uncertainty):
        if not isinstance(uncertainty, Quantity):
            uncertainty = Quantity(uncertainty, copy=False)
        try:
            assert self.shape == uncertainty.shape
        except AssertionError:
            raise ValueError('data and uncertainty must have identical shape')
        if uncertainty._dimensionality != self._dimensionality:
            uncertainty = uncertainty.rescale(self._dimensionality)
        self._uncertainty = uncertainty

    @property
    def relative_uncertainty(self):
        """Dimensionless ratio of the uncertainty to the magnitude."""
        return self.uncertainty.magnitude/self.magnitude

    @with_doc(Quantity.rescale, use_header=False)
    def rescale(self, units):
        cls = UncertainQuantity
        ret = super(cls, self).rescale(units).view(cls)
        ret.uncertainty = self.uncertainty.rescale(units)
        return ret

    def __array_finalize__(self, obj):
        Quantity.__array_finalize__(self, obj)
        # Views/slices created by numpy inherit the source's uncertainty,
        # defaulting to a zero (dimension-matched) uncertainty otherwise.
        self._uncertainty = getattr(
            obj,
            'uncertainty',
            Quantity(
                np.zeros(self.shape, self.dtype),
                self._dimensionality,
                copy=False
            )
        )

    @with_doc(Quantity.__add__, use_header=False)
    @scale_other_units
    def __add__(self, other):
        res = super(UncertainQuantity, self).__add__(other)
        # Uncorrelated absolute errors add in quadrature.
        u = (self.uncertainty**2+other.uncertainty**2)**0.5
        return UncertainQuantity(res, uncertainty=u, copy=False)

    @with_doc(Quantity.__radd__, use_header=False)
    @scale_other_units
    def __radd__(self, other):
        return self.__add__(other)

    @with_doc(Quantity.__sub__, use_header=False)
    @scale_other_units
    def __sub__(self, other):
        res = super(UncertainQuantity, self).__sub__(other)
        # Subtraction propagates errors exactly like addition.
        u = (self.uncertainty**2+other.uncertainty**2)**0.5
        return UncertainQuantity(res, uncertainty=u, copy=False)

    @with_doc(Quantity.__rsub__, use_header=False)
    @scale_other_units
    def __rsub__(self, other):
        if not isinstance(other, UncertainQuantity):
            other = UncertainQuantity(other, copy=False)
        return UncertainQuantity.__sub__(other, self)

    @with_doc(Quantity.__mul__, use_header=False)
    def __mul__(self, other):
        res = super(UncertainQuantity, self).__mul__(other)
        try:
            # Both operands carry uncertainty: relative errors in quadrature.
            sru = self.relative_uncertainty
            oru = other.relative_uncertainty
            ru = (sru**2+oru**2)**0.5
            u = res.view(Quantity) * ru
        except AttributeError:
            # Plain scalar/array operand: scale the absolute uncertainty.
            other = np.array(other, copy=False, subok=True)
            u = (self.uncertainty**2*other**2)**0.5
        res._uncertainty = u
        return res

    @with_doc(Quantity.__rmul__, use_header=False)
    def __rmul__(self, other):
        return self.__mul__(other)

    @with_doc(Quantity.__truediv__, use_header=False)
    def __truediv__(self, other):
        res = super(UncertainQuantity, self).__truediv__(other)
        try:
            # Division propagates relative errors like multiplication.
            sru = self.relative_uncertainty
            oru = other.relative_uncertainty
            ru = (sru**2+oru**2)**0.5
            u = res.view(Quantity) * ru
        except AttributeError:
            other = np.array(other, copy=False, subok=True)
            u = (self.uncertainty**2/other**2)**0.5
        res._uncertainty = u
        return res

    @with_doc(Quantity.__rtruediv__, use_header=False)
    def __rtruediv__(self, other):
        # other / self == other * (1/self); d(1/x) = dx / x**2.
        temp = UncertainQuantity(
            1/self.magnitude, self.dimensionality**-1,
            self.relative_uncertainty/self.magnitude, copy=False
        )
        return other * temp

    if sys.version_info[0] < 3:
        # Python 2 uses __div__/__rdiv__ for the / operator.
        __div__ = __truediv__
        __rdiv__ = __rtruediv__

    @with_doc(Quantity.__pow__, use_header=False)
    def __pow__(self, other):
        res = super(UncertainQuantity, self).__pow__(other)
        # d(x**n) = n * x**n * (dx/x)
        res.uncertainty = res.view(Quantity) * other * self.relative_uncertainty
        return res

    @with_doc(Quantity.__getitem__, use_header=False)
    def __getitem__(self, key):
        # Index data and uncertainty in parallel so they stay aligned.
        return UncertainQuantity(
            self.magnitude[key],
            self._dimensionality,
            self.uncertainty[key],
            copy=False
        )

    @with_doc(Quantity.__repr__, use_header=False)
    def __repr__(self):
        return '%s(%s, %s, %s)'%(
            self.__class__.__name__,
            repr(self.magnitude),
            self.dimensionality.string,
            repr(self.uncertainty.magnitude)
        )

    @with_doc(Quantity.__str__, use_header=False)
    def __str__(self):
        if markup.config.use_unicode:
            dims = self.dimensionality.unicode
        else:
            dims = self.dimensionality.string
        s = '%s %s\n+/-%s (1 sigma)'%(
            str(self.magnitude),
            dims,
            str(self.uncertainty)
        )
        if markup.config.use_unicode:
            return s.replace('+/-', '±').replace(' sigma', 'σ')
        return s

    @with_doc(np.ndarray.sum)
    def sum(self, axis=None, dtype=None, out=None):
        # The uncertainty of a sum is the quadrature sum along the axis.
        return UncertainQuantity(
            self.magnitude.sum(axis, dtype, out),
            self.dimensionality,
            (np.sum(self.uncertainty.magnitude**2, axis))**0.5,
            copy=False
        )

    def __getstate__(self):
        """
        Return the internal state of the quantity, for pickling
        purposes.
        """
        state = list(super(UncertainQuantity, self).__getstate__())
        state.append(self._uncertainty)
        return tuple(state)

    def __setstate__(self, state):
        # Restore ndarray state, then reattach dimensionality/uncertainty.
        (ver, shp, typ, isf, raw, units, sigma) = state
        np.ndarray.__setstate__(self, (shp, typ, isf, raw))
        self._dimensionality = units
        self._uncertainty = sigma
| mit |
kaiweifan/neutron | neutron/plugins/brocade/db/models.py | 24 | 4645 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Shiv Haris (sharis@brocade.com)
# Varma Bhupatiraju (vbhupati@#brocade.com)
"""Brocade specific database schema/model."""
import sqlalchemy as sa
from neutron.db import model_base
from neutron.db import models_v2
class BrocadeNetwork(model_base.BASEV2, models_v2.HasId):
    """Schema for brocade network."""
    # VLAN id associated with the network, stored as a string.
    vlan = sa.Column(sa.String(10))
class BrocadePort(model_base.BASEV2):
    """Schema for brocade port."""
    # Truncated (11-character) neutron port id; the truncation matches the
    # linux-bridge tap device naming, see create_port() below.
    port_id = sa.Column(sa.String(36), primary_key=True, default="")
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("brocadenetworks.id"),
                           nullable=False)
    admin_state_up = sa.Column(sa.Boolean, nullable=False)
    physical_interface = sa.Column(sa.String(36))
    vlan_id = sa.Column(sa.String(36))
    tenant_id = sa.Column(sa.String(36))
def create_network(context, net_id, vlan):
    """Create a brocade specific network/port-profiles."""
    session = context.session
    with session.begin(subtransactions=True):
        net = BrocadeNetwork(id=net_id, vlan=vlan)
        session.add(net)
    return net
def delete_network(context, net_id):
    """Delete a brocade specific network/port-profiles."""
    session = context.session
    with session.begin(subtransactions=True):
        net = (session.query(BrocadeNetwork).filter_by(id=net_id).first())
        # Deleting a non-existent network is a silent no-op.
        if net is not None:
            session.delete(net)
def get_network(context, net_id, fields=None):
    """Get brocade specific network, with vlan extension."""
    # 'fields' is accepted for API symmetry but currently unused.
    session = context.session
    return (session.query(BrocadeNetwork).filter_by(id=net_id).first())
def get_networks(context, filters=None, fields=None):
    """Get all brocade specific networks."""
    # 'filters' and 'fields' are accepted for API symmetry but unused.
    session = context.session
    try:
        nets = session.query(BrocadeNetwork).all()
        return nets
    except sa.exc.SQLAlchemyError:
        # NOTE(review): database errors are swallowed and reported as None
        # instead of an empty list -- confirm callers handle both.
        return None
def create_port(context, port_id, network_id, physical_interface,
                vlan_id, tenant_id, admin_state_up):
    """Create a brocade specific port, has policy like vlan."""
    # port_id is truncated: since the linux-bridge tap device names are
    # based on truncated port id, this enables port lookups using
    # tap devices
    port_id = port_id[0:11]
    session = context.session
    with session.begin(subtransactions=True):
        port = BrocadePort(port_id=port_id,
                           network_id=network_id,
                           physical_interface=physical_interface,
                           vlan_id=vlan_id,
                           admin_state_up=admin_state_up,
                           tenant_id=tenant_id)
        session.add(port)
    return port
def get_port(context, port_id):
    """get a brocade specific port."""
    # Lookups apply the same 11-character truncation as create_port().
    port_id = port_id[0:11]
    session = context.session
    port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
    return port
def get_ports(context, network_id=None):
    """get all brocade specific ports on a network."""
    session = context.session
    ports = (session.query(BrocadePort).filter_by(network_id=network_id).all())
    return ports
def delete_port(context, port_id):
    """delete brocade specific port."""
    # Same 11-character truncation as create_port(); missing port is a no-op.
    port_id = port_id[0:11]
    session = context.session
    with session.begin(subtransactions=True):
        port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
        if port is not None:
            session.delete(port)
def get_port_from_device(session, port_id):
    """get port from the tap device."""
    # device is same as truncated port_id
    port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
    return port
def update_port_state(context, port_id, admin_state_up):
    """Update port attributes."""
    port_id = port_id[0:11]
    session = context.session
    session.query(BrocadePort).filter_by(
        port_id=port_id).update({'admin_state_up': admin_state_up})
| apache-2.0 |
bitcommoditiz/P2Pool | wstools/Utility.py | 292 | 50865 | # Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id$"
import sys, types, httplib, urllib, socket, weakref
from os.path import isfile
from string import join, strip, split
from UserDict import UserDict
from cStringIO import StringIO
from TimeoutSocket import TimeoutSocket, TimeoutError
from urlparse import urlparse
from httplib import HTTPConnection, HTTPSConnection
from exceptions import Exception
try:
from ZSI import _get_idstr
except:
def _get_idstr(pyobj):
'''Python 2.3.x generates a FutureWarning for negative IDs, so
we use a different prefix character to ensure uniqueness, and
call abs() to avoid the warning.'''
x = id(pyobj)
if x < 0:
return 'x%x' % abs(x)
return 'o%x' % x
import xml.dom.minidom
from xml.dom import Node
import logging
from c14n import Canonicalize
from Namespaces import SCHEMA, SOAP, XMLNS, ZSI_SCHEMA_URI
try:
from xml.dom.ext import SplitQName
except:
def SplitQName(qname):
'''SplitQName(qname) -> (string, string)
Split Qualified Name into a tuple of len 2, consisting
of the prefix and the local name.
(prefix, localName)
Special Cases:
xmlns -- (localName, 'xmlns')
None -- (None, localName)
'''
l = qname.split(':')
if len(l) == 1:
l.insert(0, None)
elif len(l) == 2:
if l[0] == 'xmlns':
l.reverse()
else:
return
return tuple(l)
#
# python2.3 urllib.basejoin does not remove current directory ./
# from path and this causes problems on subsequent basejoins.
#
# NOTE(review): urllib.basejoin exists only on Python 2 -- this shim is
# not usable on Python 3.
basejoin = urllib.basejoin
if sys.version_info[0:2] < (2, 4, 0, 'final', 0)[0:2]:
    #basejoin = lambda base,url: urllib.basejoin(base,url.lstrip('./'))
    token = './'
    def basejoin(base, url):
        # Python < 2.4 keeps a leading './' in the joined path, which
        # breaks subsequent basejoin calls; strip it here.
        if url.startswith(token) is True:
            return urllib.basejoin(base,url[2:])
        return urllib.basejoin(base,url)
class NamespaceError(Exception):
    """Raised when an XML namespace problem is encountered."""

class RecursionError(Exception):
    """Raised when a circular chain of HTTP redirects is detected."""

class ParseError(Exception):
    """Raised when an XML document cannot be parsed."""

class DOMException(Exception):
    """Raised when processing a DOM tree fails."""
class Base:
    """Mix-in that equips each instance with its own named logger."""
    def __init__(self, module=__name__):
        # Logger name embeds module, class and a per-instance id so log
        # lines from different instances can be told apart.
        tag = '%s-%s(%s)' % (module, self.__class__, _get_idstr(self))
        self.logger = logging.getLogger(tag)
class HTTPResponse:
    """Captures the information in an HTTP response message."""
    def __init__(self, response):
        # Snapshot the interesting fields, then drain and release the
        # underlying connection.  An empty body is normalized to None.
        self.status = response.status
        self.reason = response.reason
        self.headers = response.msg
        body = response.read()
        self.body = body or None
        response.close()
class TimeoutHTTP(HTTPConnection):
    """An http connection whose socket honours a configurable timeout."""
    def __init__(self, host, port=None, timeout=20):
        # timeout -- seconds before TimeoutSocket gives up on I/O
        HTTPConnection.__init__(self, host, port)
        self.timeout = timeout

    def connect(self):
        # Replace the stock socket with the timeout-aware wrapper.
        sock = TimeoutSocket(self.timeout)
        sock.connect((self.host, self.port))
        self.sock = sock
class TimeoutHTTPS(HTTPSConnection):
    """A custom https object that supports socket timeout. Note that this
    is not really complete. The builtin SSL support in the Python socket
    module requires a real socket (type) to be passed in to be hooked to
    SSL. That means our fake socket won't work and our timeout hacks are
    bypassed for send and recv calls. Since our hack _is_ in place at
    connect() time, it should at least provide some timeout protection."""
    def __init__(self, host, port=None, timeout=20, **kwargs):
        # timeout -- seconds before TimeoutSocket gives up at connect time
        HTTPSConnection.__init__(self, str(host), port, **kwargs)
        self.timeout = timeout

    def connect(self):
        # Establish the TCP connection through the timeout-aware socket.
        sock = TimeoutSocket(self.timeout)
        sock.connect((self.host, self.port))
        # socket.ssl() needs the real underlying socket object, so unwrap
        # the TimeoutSocket (and the _sock attribute where present).
        realsock = getattr(sock.sock, '_sock', sock.sock)
        ssl = socket.ssl(realsock, self.key_file, self.cert_file)
        # FakeSocket lets httplib talk to the SSL layer through a
        # file-like interface; send/recv bypass the timeout wrapper here.
        self.sock = httplib.FakeSocket(sock, ssl)
def urlopen(url, timeout=20, redirects=None):
    """A minimal urlopen replacement hack that supports timeouts for http.
    Note that this supports GET only.

    Keyword arguments:
        timeout -- socket timeout in seconds
        redirects -- internal accumulator dict of redirect locations already
            visited, used to detect redirect loops; callers pass None

    Returns a StringIO with the response body, or raises HTTPResponse
    (used here as an exception carrying the response) on failure.
    """
    scheme, host, path, params, query, frag = urlparse(url)

    # Delegate anything that is not http(s) to the stock urllib opener
    # (which has no timeout support).
    if not scheme in ('http', 'https'):
        return urllib.urlopen(url)
    # Reassemble the request path from the parsed pieces.
    if params: path = '%s;%s' % (path, params)
    if query: path = '%s?%s' % (path, query)
    if frag: path = '%s#%s' % (path, frag)

    if scheme == 'https':
        # If ssl is not compiled into Python, you will not get an exception
        # until a conn.endheaders() call.   We need to know sooner, so use
        # getattr.
        try:
            import M2Crypto
        except ImportError:
            if not hasattr(socket, 'ssl'):
                raise RuntimeError, 'no built-in SSL Support'

            conn = TimeoutHTTPS(host, None, timeout)
        else:
            # Prefer M2Crypto's SSL stack when it is available.
            ctx = M2Crypto.SSL.Context()
            ctx.set_session_timeout(timeout)
            conn = M2Crypto.httpslib.HTTPSConnection(host, ssl_context=ctx)
            # NOTE(review): debug output is left enabled on this path --
            # confirm this is intentional.
            conn.set_debuglevel(1)
    else:
        conn = TimeoutHTTP(host, None, timeout)

    conn.putrequest('GET', path)
    conn.putheader('Connection', 'close')
    conn.endheaders()

    response = None
    while 1:
        # Skip any interim "100 Continue" responses; httplib's private
        # state must be reset so getresponse() can be called again.
        response = conn.getresponse()
        if response.status != 100:
            break
        conn._HTTPConnection__state = httplib._CS_REQ_SENT
        conn._HTTPConnection__response = None

    status = response.status

    # If we get an HTTP redirect, we will follow it automatically.
    if status >= 300 and status < 400:
        location = response.msg.getheader('location')
        if location is not None:
            response.close()
            if redirects is not None and redirects.has_key(location):
                raise RecursionError(
                    'Circular HTTP redirection detected.'
                    )
            if redirects is None:
                redirects = {}
            redirects[location] = 1
            return urlopen(location, timeout, redirects)
        # Redirect status without a Location header: report the response.
        raise HTTPResponse(response)

    if not (status >= 200 and status < 300):
        raise HTTPResponse(response)

    body = StringIO(response.read())
    response.close()
    return body
class DOM:
"""The DOM singleton defines a number of XML related constants and
provides a number of utility methods for DOM related tasks. It
also provides some basic abstractions so that the rest of the
package need not care about actual DOM implementation in use."""
# Namespace stuff related to the SOAP specification.
NS_SOAP_ENV_1_1 = 'http://schemas.xmlsoap.org/soap/envelope/'
NS_SOAP_ENC_1_1 = 'http://schemas.xmlsoap.org/soap/encoding/'
NS_SOAP_ENV_1_2 = 'http://www.w3.org/2001/06/soap-envelope'
NS_SOAP_ENC_1_2 = 'http://www.w3.org/2001/06/soap-encoding'
NS_SOAP_ENV_ALL = (NS_SOAP_ENV_1_1, NS_SOAP_ENV_1_2)
NS_SOAP_ENC_ALL = (NS_SOAP_ENC_1_1, NS_SOAP_ENC_1_2)
NS_SOAP_ENV = NS_SOAP_ENV_1_1
NS_SOAP_ENC = NS_SOAP_ENC_1_1
_soap_uri_mapping = {
NS_SOAP_ENV_1_1 : '1.1',
NS_SOAP_ENV_1_2 : '1.2',
}
SOAP_ACTOR_NEXT_1_1 = 'http://schemas.xmlsoap.org/soap/actor/next'
SOAP_ACTOR_NEXT_1_2 = 'http://www.w3.org/2001/06/soap-envelope/actor/next'
SOAP_ACTOR_NEXT_ALL = (SOAP_ACTOR_NEXT_1_1, SOAP_ACTOR_NEXT_1_2)
def SOAPUriToVersion(self, uri):
"""Return the SOAP version related to an envelope uri."""
value = self._soap_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP envelope uri: %s' % uri
)
def GetSOAPEnvUri(self, version):
"""Return the appropriate SOAP envelope uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENV_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPEncUri(self, version):
"""Return the appropriate SOAP encoding uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENC_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPActorNextUri(self, version):
"""Return the right special next-actor uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'SOAP_ACTOR_NEXT_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
# Namespace stuff related to XML Schema.
NS_XSD_99 = 'http://www.w3.org/1999/XMLSchema'
NS_XSI_99 = 'http://www.w3.org/1999/XMLSchema-instance'
NS_XSD_00 = 'http://www.w3.org/2000/10/XMLSchema'
NS_XSI_00 = 'http://www.w3.org/2000/10/XMLSchema-instance'
NS_XSD_01 = 'http://www.w3.org/2001/XMLSchema'
NS_XSI_01 = 'http://www.w3.org/2001/XMLSchema-instance'
NS_XSD_ALL = (NS_XSD_99, NS_XSD_00, NS_XSD_01)
NS_XSI_ALL = (NS_XSI_99, NS_XSI_00, NS_XSI_01)
NS_XSD = NS_XSD_01
NS_XSI = NS_XSI_01
_xsd_uri_mapping = {
NS_XSD_99 : NS_XSI_99,
NS_XSD_00 : NS_XSI_00,
NS_XSD_01 : NS_XSI_01,
}
for key, value in _xsd_uri_mapping.items():
_xsd_uri_mapping[value] = key
def InstanceUriForSchemaUri(self, uri):
"""Return the appropriate matching XML Schema instance uri for
the given XML Schema namespace uri."""
return self._xsd_uri_mapping.get(uri)
def SchemaUriForInstanceUri(self, uri):
"""Return the appropriate matching XML Schema namespace uri for
the given XML Schema instance namespace uri."""
return self._xsd_uri_mapping.get(uri)
# Namespace stuff related to WSDL.
NS_WSDL_1_1 = 'http://schemas.xmlsoap.org/wsdl/'
NS_WSDL_ALL = (NS_WSDL_1_1,)
NS_WSDL = NS_WSDL_1_1
NS_SOAP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/soap/'
NS_HTTP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/http/'
NS_MIME_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/mime/'
NS_SOAP_BINDING_ALL = (NS_SOAP_BINDING_1_1,)
NS_HTTP_BINDING_ALL = (NS_HTTP_BINDING_1_1,)
NS_MIME_BINDING_ALL = (NS_MIME_BINDING_1_1,)
NS_SOAP_BINDING = NS_SOAP_BINDING_1_1
NS_HTTP_BINDING = NS_HTTP_BINDING_1_1
NS_MIME_BINDING = NS_MIME_BINDING_1_1
NS_SOAP_HTTP_1_1 = 'http://schemas.xmlsoap.org/soap/http'
NS_SOAP_HTTP_ALL = (NS_SOAP_HTTP_1_1,)
NS_SOAP_HTTP = NS_SOAP_HTTP_1_1
_wsdl_uri_mapping = {
NS_WSDL_1_1 : '1.1',
}
def WSDLUriToVersion(self, uri):
"""Return the WSDL version related to a WSDL namespace uri."""
value = self._wsdl_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP envelope uri: %s' % uri
)
def GetWSDLUri(self, version):
attr = 'NS_WSDL_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLSoapBindingUri(self, version):
attr = 'NS_SOAP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpBindingUri(self, version):
attr = 'NS_HTTP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLMimeBindingUri(self, version):
attr = 'NS_MIME_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpTransportUri(self, version):
attr = 'NS_SOAP_HTTP_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
# Other xml namespace constants.
NS_XMLNS = 'http://www.w3.org/2000/xmlns/'
def isElement(self, node, name, nsuri=None):
"""Return true if the given node is an element with the given
name and optional namespace uri."""
if node.nodeType != node.ELEMENT_NODE:
return 0
return node.localName == name and \
(nsuri is None or self.nsUriMatch(node.namespaceURI, nsuri))
def getElement(self, node, name, nsuri=None, default=join):
"""Return the first child of node with a matching name and
namespace uri, or the default if one is provided."""
nsmatch = self.nsUriMatch
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and
(nsuri is None or nsmatch(child.namespaceURI, nsuri))
):
return child
if default is not join:
return default
raise KeyError, name
def getElementById(self, node, id, default=join):
"""Return the first child of node matching an id reference."""
attrget = self.getAttr
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if attrget(child, 'id') == id:
return child
if default is not join:
return default
raise KeyError, name
def getMappingById(self, document, depth=None, element=None,
mapping=None, level=1):
"""Create an id -> element mapping of those elements within a
document that define an id attribute. The depth of the search
may be controlled by using the (1-based) depth argument."""
if document is not None:
element = document.documentElement
mapping = {}
attr = element._attrs.get('id', None)
if attr is not None:
mapping[attr.value] = element
if depth is None or depth > level:
level = level + 1
ELEMENT_NODE = element.ELEMENT_NODE
for child in element.childNodes:
if child.nodeType == ELEMENT_NODE:
self.getMappingById(None, depth, child, mapping, level)
return mapping
def getElements(self, node, name, nsuri=None):
"""Return a sequence of the child elements of the given node that
match the given name and optional namespace uri."""
nsmatch = self.nsUriMatch
result = []
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and (
(nsuri is None) or nsmatch(child.namespaceURI, nsuri))):
result.append(child)
return result
def hasAttr(self, node, name, nsuri=None):
"""Return true if element has attribute with the given name and
optional nsuri. If nsuri is not specified, returns true if an
attribute exists with the given name with any namespace."""
if nsuri is None:
if node.hasAttribute(name):
return True
return False
return node.hasAttributeNS(nsuri, name)
def getAttr(self, node, name, nsuri=None, default=join):
"""Return the value of the attribute named 'name' with the
optional nsuri, or the default if one is specified. If
nsuri is not specified, an attribute that matches the
given name will be returned regardless of namespace."""
if nsuri is None:
result = node._attrs.get(name, None)
if result is None:
for item in node._attrsNS.keys():
if item[1] == name:
result = node._attrsNS[item]
break
else:
result = node._attrsNS.get((nsuri, name), None)
if result is not None:
return result.value
if default is not join:
return default
return ''
def getAttrs(self, node):
"""Return a Collection of all attributes
"""
attrs = {}
for k,v in node._attrs.items():
attrs[k] = v.value
return attrs
def getElementText(self, node, preserve_ws=None):
"""Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value."""
result = []
for child in node.childNodes:
nodetype = child.nodeType
if nodetype == child.TEXT_NODE or \
nodetype == child.CDATA_SECTION_NODE:
result.append(child.nodeValue)
value = join(result, '')
if preserve_ws is None:
value = strip(value)
return value
def findNamespaceURI(self, prefix, node):
"""Find a namespace uri given a prefix and a context node."""
attrkey = (self.NS_XMLNS, prefix)
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node is None:
raise DOMException('Value for prefix %s not found.' % prefix)
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Value for prefix %s not found.' % prefix)
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Value for prefix %s not found.' % prefix)
def findDefaultNS(self, node):
"""Return the current default namespace uri for the given node."""
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Cannot determine default namespace.')
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine default namespace.')
def findTargetNS(self, node):
"""Return the defined target namespace uri for the given node."""
attrget = self.getAttr
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = attrget(node, 'targetNamespace', default=None)
if result is not None:
return result
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine target namespace.')
def getTypeRef(self, element):
"""Return (namespaceURI, name) for a type attribue of the given
element, or None if the element does not have a type attribute."""
typeattr = self.getAttr(element, 'type', default=None)
if typeattr is None:
return None
parts = typeattr.split(':', 1)
if len(parts) == 2:
nsuri = self.findNamespaceURI(parts[0], element)
else:
nsuri = self.findDefaultNS(element)
return (nsuri, parts[1])
def importNode(self, document, node, deep=0):
"""Implements (well enough for our purposes) DOM node import."""
nodetype = node.nodeType
if nodetype in (node.DOCUMENT_NODE, node.DOCUMENT_TYPE_NODE):
raise DOMException('Illegal node type for importNode')
if nodetype == node.ENTITY_REFERENCE_NODE:
deep = 0
clone = node.cloneNode(deep)
self._setOwnerDoc(document, clone)
clone.__imported__ = 1
return clone
def _setOwnerDoc(self, document, node):
node.ownerDocument = document
for child in node.childNodes:
self._setOwnerDoc(document, child)
def nsUriMatch(self, value, wanted, strict=0, tt=type(())):
"""Return a true value if two namespace uri values match."""
if value == wanted or (type(wanted) is tt) and value in wanted:
return 1
if not strict and value is not None:
wanted = type(wanted) is tt and wanted or (wanted,)
value = value[-1:] != '/' and value or value[:-1]
for item in wanted:
if item == value or item[:-1] == value:
return 1
return 0
def createDocument(self, nsuri, qname, doctype=None):
"""Create a new writable DOM document object."""
impl = xml.dom.minidom.getDOMImplementation()
return impl.createDocument(nsuri, qname, doctype)
def loadDocument(self, data):
"""Load an xml file from a file-like object and return a DOM
document instance."""
return xml.dom.minidom.parse(data)
def loadFromURL(self, url):
"""Load an xml file from a URL and return a DOM document."""
if isfile(url) is True:
file = open(url, 'r')
else:
file = urlopen(url)
try:
result = self.loadDocument(file)
except Exception, ex:
file.close()
raise ParseError(('Failed to load document %s' %url,) + ex.args)
else:
file.close()
return result
DOM = DOM()
class MessageInterface:
'''Higher Level Interface, delegates to DOM singleton, must
be subclassed and implement all methods that throw NotImplementedError.
'''
def __init__(self, sw):
'''Constructor, May be extended, do not override.
sw -- soapWriter instance
'''
self.sw = None
if type(sw) != weakref.ReferenceType and sw is not None:
self.sw = weakref.ref(sw)
else:
self.sw = sw
def AddCallback(self, func, *arglist):
self.sw().AddCallback(func, *arglist)
def Known(self, obj):
return self.sw().Known(obj)
def Forget(self, obj):
return self.sw().Forget(obj)
def canonicalize(self):
'''canonicalize the underlying DOM, and return as string.
'''
raise NotImplementedError, ''
def createDocument(self, namespaceURI=SOAP.ENV, localName='Envelope'):
'''create Document
'''
raise NotImplementedError, ''
def createAppendElement(self, namespaceURI, localName):
'''create and append element(namespaceURI,localName), and return
the node.
'''
raise NotImplementedError, ''
def findNamespaceURI(self, qualifiedName):
raise NotImplementedError, ''
def resolvePrefix(self, prefix):
raise NotImplementedError, ''
def setAttributeNS(self, namespaceURI, localName, value):
'''set attribute (namespaceURI, localName)=value
'''
raise NotImplementedError, ''
def setAttributeType(self, namespaceURI, localName):
'''set attribute xsi:type=(namespaceURI, localName)
'''
raise NotImplementedError, ''
def setNamespaceAttribute(self, namespaceURI, prefix):
'''set namespace attribute xmlns:prefix=namespaceURI
'''
raise NotImplementedError, ''
class ElementProxy(Base, MessageInterface):
'''
'''
_soap_env_prefix = 'SOAP-ENV'
_soap_enc_prefix = 'SOAP-ENC'
_zsi_prefix = 'ZSI'
_xsd_prefix = 'xsd'
_xsi_prefix = 'xsi'
_xml_prefix = 'xml'
_xmlns_prefix = 'xmlns'
_soap_env_nsuri = SOAP.ENV
_soap_enc_nsuri = SOAP.ENC
_zsi_nsuri = ZSI_SCHEMA_URI
_xsd_nsuri = SCHEMA.XSD3
_xsi_nsuri = SCHEMA.XSI3
_xml_nsuri = XMLNS.XML
_xmlns_nsuri = XMLNS.BASE
standard_ns = {\
_xml_prefix:_xml_nsuri,
_xmlns_prefix:_xmlns_nsuri
}
reserved_ns = {\
_soap_env_prefix:_soap_env_nsuri,
_soap_enc_prefix:_soap_enc_nsuri,
_zsi_prefix:_zsi_nsuri,
_xsd_prefix:_xsd_nsuri,
_xsi_prefix:_xsi_nsuri,
}
name = None
namespaceURI = None
def __init__(self, sw, message=None):
'''Initialize.
sw -- SoapWriter
'''
self._indx = 0
MessageInterface.__init__(self, sw)
Base.__init__(self)
self._dom = DOM
self.node = None
if type(message) in (types.StringType,types.UnicodeType):
self.loadFromString(message)
elif isinstance(message, ElementProxy):
self.node = message._getNode()
else:
self.node = message
self.processorNss = self.standard_ns.copy()
self.processorNss.update(self.reserved_ns)
def __str__(self):
return self.toString()
def evaluate(self, expression, processorNss=None):
'''expression -- XPath compiled expression
'''
from Ft.Xml import XPath
if not processorNss:
context = XPath.Context.Context(self.node, processorNss=self.processorNss)
else:
context = XPath.Context.Context(self.node, processorNss=processorNss)
nodes = expression.evaluate(context)
return map(lambda node: ElementProxy(self.sw,node), nodes)
#############################################
# Methods for checking/setting the
# classes (namespaceURI,name) node.
#############################################
def checkNode(self, namespaceURI=None, localName=None):
'''
namespaceURI -- namespace of element
localName -- local name of element
'''
namespaceURI = namespaceURI or self.namespaceURI
localName = localName or self.name
check = False
if localName and self.node:
check = self._dom.isElement(self.node, localName, namespaceURI)
if not check:
raise NamespaceError, 'unexpected node type %s, expecting %s' %(self.node, localName)
def setNode(self, node=None):
if node:
if isinstance(node, ElementProxy):
self.node = node._getNode()
else:
self.node = node
elif self.node:
node = self._dom.getElement(self.node, self.name, self.namespaceURI, default=None)
if not node:
raise NamespaceError, 'cant find element (%s,%s)' %(self.namespaceURI,self.name)
self.node = node
else:
#self.node = self._dom.create(self.node, self.name, self.namespaceURI, default=None)
self.createDocument(self.namespaceURI, localName=self.name, doctype=None)
self.checkNode()
#############################################
# Wrapper Methods for direct DOM Element Node access
#############################################
def _getNode(self):
return self.node
def _getElements(self):
return self._dom.getElements(self.node, name=None)
def _getOwnerDocument(self):
return self.node.ownerDocument or self.node
def _getUniquePrefix(self):
'''I guess we need to resolve all potential prefixes
because when the current node is attached it copies the
namespaces into the parent node.
'''
while 1:
self._indx += 1
prefix = 'ns%d' %self._indx
try:
self._dom.findNamespaceURI(prefix, self._getNode())
except DOMException, ex:
break
return prefix
def _getPrefix(self, node, nsuri):
'''
Keyword arguments:
node -- DOM Element Node
nsuri -- namespace of attribute value
'''
try:
if node and (node.nodeType == node.ELEMENT_NODE) and \
(nsuri == self._dom.findDefaultNS(node)):
return None
except DOMException, ex:
pass
if nsuri == XMLNS.XML:
return self._xml_prefix
if node.nodeType == Node.ELEMENT_NODE:
for attr in node.attributes.values():
if attr.namespaceURI == XMLNS.BASE \
and nsuri == attr.value:
return attr.localName
else:
if node.parentNode:
return self._getPrefix(node.parentNode, nsuri)
raise NamespaceError, 'namespaceURI "%s" is not defined' %nsuri
def _appendChild(self, node):
'''
Keyword arguments:
node -- DOM Element Node
'''
if node is None:
raise TypeError, 'node is None'
self.node.appendChild(node)
def _insertBefore(self, newChild, refChild):
'''
Keyword arguments:
child -- DOM Element Node to insert
refChild -- DOM Element Node
'''
self.node.insertBefore(newChild, refChild)
def _setAttributeNS(self, namespaceURI, qualifiedName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
qualifiedName -- qualified name of new attribute value
value -- value of attribute
'''
self.node.setAttributeNS(namespaceURI, qualifiedName, value)
#############################################
#General Methods
#############################################
def isFault(self):
'''check to see if this is a soap:fault message.
'''
return False
def getPrefix(self, namespaceURI):
try:
prefix = self._getPrefix(node=self.node, nsuri=namespaceURI)
except NamespaceError, ex:
prefix = self._getUniquePrefix()
self.setNamespaceAttribute(prefix, namespaceURI)
return prefix
def getDocument(self):
return self._getOwnerDocument()
def setDocument(self, document):
self.node = document
def importFromString(self, xmlString):
doc = self._dom.loadDocument(StringIO(xmlString))
node = self._dom.getElement(doc, name=None)
clone = self.importNode(node)
self._appendChild(clone)
def importNode(self, node):
if isinstance(node, ElementProxy):
node = node._getNode()
return self._dom.importNode(self._getOwnerDocument(), node, deep=1)
def loadFromString(self, data):
self.node = self._dom.loadDocument(StringIO(data))
def canonicalize(self):
return Canonicalize(self.node)
def toString(self):
return self.canonicalize()
def createDocument(self, namespaceURI, localName, doctype=None):
'''If specified must be a SOAP envelope, else may contruct an empty document.
'''
prefix = self._soap_env_prefix
if namespaceURI == self.reserved_ns[prefix]:
qualifiedName = '%s:%s' %(prefix,localName)
elif namespaceURI is localName is None:
self.node = self._dom.createDocument(None,None,None)
return
else:
raise KeyError, 'only support creation of document in %s' %self.reserved_ns[prefix]
document = self._dom.createDocument(nsuri=namespaceURI, qname=qualifiedName, doctype=doctype)
self.node = document.childNodes[0]
#set up reserved namespace attributes
for prefix,nsuri in self.reserved_ns.items():
self._setAttributeNS(namespaceURI=self._xmlns_nsuri,
qualifiedName='%s:%s' %(self._xmlns_prefix,prefix),
value=nsuri)
#############################################
#Methods for attributes
#############################################
def hasAttribute(self, namespaceURI, localName):
return self._dom.hasAttr(self._getNode(), name=localName, nsuri=namespaceURI)
def setAttributeType(self, namespaceURI, localName):
'''set xsi:type
Keyword arguments:
namespaceURI -- namespace of attribute value
localName -- name of new attribute value
'''
self.logger.debug('setAttributeType: (%s,%s)', namespaceURI, localName)
value = localName
if namespaceURI:
value = '%s:%s' %(self.getPrefix(namespaceURI),localName)
xsi_prefix = self.getPrefix(self._xsi_nsuri)
self._setAttributeNS(self._xsi_nsuri, '%s:type' %xsi_prefix, value)
def createAttributeNS(self, namespace, name, value):
document = self._getOwnerDocument()
##this function doesn't exist!! it has only two arguments
attrNode = document.createAttributeNS(namespace, name, value)
def setAttributeNS(self, namespaceURI, localName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute to create, None is for
attributes in no namespace.
localName -- local name of new attribute
value -- value of new attribute
'''
prefix = None
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except KeyError, ex:
prefix = 'ns2'
self.setNamespaceAttribute(prefix, namespaceURI)
qualifiedName = localName
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
self._setAttributeNS(namespaceURI, qualifiedName, value)
def setNamespaceAttribute(self, prefix, namespaceURI):
'''
Keyword arguments:
prefix -- xmlns prefix
namespaceURI -- value of prefix
'''
self._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
#############################################
#Methods for elements
#############################################
def createElementNS(self, namespace, qname):
'''
Keyword arguments:
namespace -- namespace of element to create
qname -- qualified name of new element
'''
document = self._getOwnerDocument()
node = document.createElementNS(namespace, qname)
return ElementProxy(self.sw, node)
def createAppendSetElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, then set it to be the current node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
node = self.createAppendElement(namespaceURI, localName, prefix=None)
node=node._getNode()
self._setNode(node._getNode())
def createAppendElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, and return the newly created node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
declare = False
qualifiedName = localName
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except:
declare = True
prefix = prefix or self._getUniquePrefix()
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
if declare:
node._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
self._appendChild(node=node._getNode())
return node
def createInsertBefore(self, namespaceURI, localName, refChild):
qualifiedName = localName
prefix = self.getPrefix(namespaceURI)
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
self._insertBefore(newChild=node._getNode(), refChild=refChild._getNode())
return node
def getElement(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of element
localName -- local name of element
'''
node = self._dom.getElement(self.node, localName, namespaceURI, default=None)
if node:
return ElementProxy(self.sw, node)
return None
def getAttributeValue(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
localName -- local name of attribute
'''
if self.hasAttribute(namespaceURI, localName):
attr = self.node.getAttributeNodeNS(namespaceURI,localName)
return attr.value
return None
def getValue(self):
return self._dom.getElementText(self.node, preserve_ws=True)
#############################################
#Methods for text nodes
#############################################
def createAppendTextNode(self, pyobj):
node = self.createTextNode(pyobj)
self._appendChild(node=node._getNode())
return node
def createTextNode(self, pyobj):
document = self._getOwnerDocument()
node = document.createTextNode(pyobj)
return ElementProxy(self.sw, node)
#############################################
#Methods for retrieving namespaceURI's
#############################################
def findNamespaceURI(self, qualifiedName):
parts = SplitQName(qualifiedName)
element = self._getNode()
if len(parts) == 1:
return (self._dom.findTargetNS(element), value)
return self._dom.findNamespaceURI(parts[0], element)
def resolvePrefix(self, prefix):
element = self._getNode()
return self._dom.findNamespaceURI(prefix, element)
def getSOAPEnvURI(self):
return self._soap_env_nsuri
def isEmpty(self):
return not self.node
class Collection(UserDict):
    """Helper class for maintaining ordered named collections."""
    default = lambda self,k: k.name

    def __init__(self, parent, key=None):
        """parent -- owning object, held only through a weakref
        key -- callable mapping an item to its dictionary key
               (defaults to the item's .name attribute)
        """
        UserDict.__init__(self)
        self.parent = weakref.ref(parent)
        self.list = []
        self._func = key or self.default

    def __getitem__(self, key):
        # Integers index by insertion order; anything else is a name lookup.
        if type(key) is type(1):
            return self.list[key]
        return self.data[key]

    def __setitem__(self, key, item):
        item.parent = weakref.ref(self)
        self.list.append(item)
        self.data[key] = item

    def keys(self):
        return map(self._func, self.list)

    def items(self):
        return map(lambda item: (self._func(item), item), self.list)

    def values(self):
        return self.list
class CollectionNS(UserDict):
    """Helper class for maintaining ordered named collections, with the
    items additionally partitioned by their target namespace."""
    # Default key function: an item's .name attribute.
    default = lambda self,k: k.name

    def __init__(self, parent, key=None):
        # parent -- owning object, held only through a weakref
        # key -- optional callable mapping an item to its dictionary key
        UserDict.__init__(self)
        self.parent = weakref.ref(parent)
        self.targetNamespace = None
        self.list = []
        self._func = key or self.default

    def __getitem__(self, key):
        # Integer -> positional lookup; (nsuri, name) pair -> lookup in
        # that namespace; bare name -> parent's current targetNamespace.
        self.targetNamespace = self.parent().targetNamespace
        if type(key) is types.IntType:
            return self.list[key]
        elif self.__isSequence(key):
            nsuri,name = key
            return self.data[nsuri][name]
        return self.data[self.parent().targetNamespace][key]

    def __setitem__(self, key, item):
        item.parent = weakref.ref(self)
        self.list.append(item)
        # Items may carry their own targetNamespace; fall back to parent's.
        targetNamespace = getattr(item, 'targetNamespace', self.parent().targetNamespace)
        if not self.data.has_key(targetNamespace):
            self.data[targetNamespace] = {}
        self.data[targetNamespace][key] = item

    def __isSequence(self, key):
        # True for a 2-element tuple/list, i.e. a (nsuri, name) pair.
        return (type(key) in (types.TupleType,types.ListType) and len(key) == 2)

    def keys(self):
        # NOTE(review): returns a list of per-namespace lists of
        # (nsuri, name) pairs rather than a flat key list -- confirm
        # callers expect this nested shape.
        keys = []
        for tns in self.data.keys():
            keys.append(map(lambda i: (tns,self._func(i)), self.data[tns].values()))
        return keys

    def items(self):
        return map(lambda i: (self._func(i), i), self.list)

    def values(self):
        return self.list
# This is a runtime guerilla patch for pulldom (used by minidom) so
# that xml namespace declaration attributes are not lost in parsing.
# We need them to do correct QName linking for XML Schema and WSDL.
# The patch has been submitted to SF for the next Python version.
from xml.dom.pulldom import PullDOM, START_ELEMENT
if 1:
    def startPrefixMapping(self, prefix, uri):
        """Record an xmlns declaration so startElementNS can re-attach it.

        A declaration with no prefix is stored under the literal name
        'xmlns'; the current namespace context is snapshotted before the
        new mapping takes effect.
        """
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        declared_name = prefix or 'xmlns'
        self._xmlns_attrs.append((declared_name, uri))
        # Push a copy of the context as it was before this mapping applied.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or ''
    PullDOM.startPrefixMapping = startPrefixMapping
    def startElementNS(self, name, tagName , attrs):
        """SAX callback (monkey-patched onto PullDOM): build a DOM element
        for a namespaced start tag, re-attaching any xmlns declaration
        attributes recorded by startPrefixMapping so they survive into the
        DOM tree."""
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            # Re-inject the buffered declarations as ordinary attributes,
            # then reset the buffer for the next element.
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            # buildDocument is used for the very first element, before a
            # document node exists.
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)
        # Copy every attribute onto the new node, reconstructing qualified
        # names for namespaced attributes from the current prefix context.
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                # Namespace declaration: qualify as 'xmlns' or 'xmlns:prefix'.
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value
        # Append a START_ELEMENT event to the pulldom event chain and make
        # the new node the current parent.
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    PullDOM.startElementNS = startElementNS
#
# This is a runtime guerrilla patch for minidom so
# that xmlns-prefixed attributes don't raise AttributeErrors
# during cloning.
#
# Namespace declarations can appear in any start-tag, so we must look for
# xmlns-prefixed attribute names during cloning.
#
# key (attr.namespaceURI, tag)
# ('http://www.w3.org/2000/xmlns/', u'xsd') <xml.dom.minidom.Attr instance at 0x82227c4>
# ('http://www.w3.org/2000/xmlns/', 'xmlns') <xml.dom.minidom.Attr instance at 0x8414b3c>
#
# xml.dom.minidom.Attr.nodeName = xmlns:xsd
# xml.dom.minidom.Attr.value = http://www.w3.org/2001/XMLSchema
if 1:
    def _clone_node(node, deep, newOwnerDocument):
        """
        Clone a node and give it the new owner document.
        Called by Node.cloneNode and Document.importNode

        @param node: the node to clone
        @param deep: if true, recursively clone child nodes
        @param newOwnerDocument: document that will own the clone
        @return: the cloned node
        """
        # The operation code tells registered user-data handlers whether this
        # is an in-document clone or an import from another document.
        if node.ownerDocument.isSameNode(newOwnerDocument):
            operation = xml.dom.UserDataHandler.NODE_CLONED
        else:
            operation = xml.dom.UserDataHandler.NODE_IMPORTED
        if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
            clone = newOwnerDocument.createElementNS(node.namespaceURI,
                                                     node.nodeName)
            for attr in node.attributes.values():
                clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
                # xmlns-prefixed attributes are stored under their local part,
                # so look them up by that name to copy the .specified flag
                # (this lookup is what the stock minidom got wrong).
                prefix, tag = xml.dom.minidom._nssplit(attr.nodeName)
                if prefix == 'xmlns':
                    a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
                elif prefix:
                    a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
                else:
                    a = clone.getAttributeNodeNS(attr.namespaceURI, attr.nodeName)
                a.specified = attr.specified
            if deep:
                for child in node.childNodes:
                    c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
                    clone.appendChild(c)
        elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_FRAGMENT_NODE:
            clone = newOwnerDocument.createDocumentFragment()
            if deep:
                for child in node.childNodes:
                    c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
                    clone.appendChild(c)
        elif node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
            clone = newOwnerDocument.createTextNode(node.data)
        elif node.nodeType == xml.dom.minidom.Node.CDATA_SECTION_NODE:
            clone = newOwnerDocument.createCDATASection(node.data)
        elif node.nodeType == xml.dom.minidom.Node.PROCESSING_INSTRUCTION_NODE:
            clone = newOwnerDocument.createProcessingInstruction(node.target,
                                                                 node.data)
        elif node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
            clone = newOwnerDocument.createComment(node.data)
        elif node.nodeType == xml.dom.minidom.Node.ATTRIBUTE_NODE:
            clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
                                                       node.nodeName)
            clone.specified = True
            clone.value = node.value
        elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_TYPE_NODE:
            assert node.ownerDocument is not newOwnerDocument
            operation = xml.dom.UserDataHandler.NODE_IMPORTED
            clone = newOwnerDocument.implementation.createDocumentType(
                node.name, node.publicId, node.systemId)
            clone.ownerDocument = newOwnerDocument
            if deep:
                clone.entities._seq = []
                clone.notations._seq = []
                for n in node.notations._seq:
                    notation = xml.dom.minidom.Notation(n.nodeName, n.publicId, n.systemId)
                    notation.ownerDocument = newOwnerDocument
                    clone.notations._seq.append(notation)
                    if hasattr(n, '_call_user_data_handler'):
                        n._call_user_data_handler(operation, n, notation)
                for e in node.entities._seq:
                    entity = xml.dom.minidom.Entity(e.nodeName, e.publicId, e.systemId,
                                                    e.notationName)
                    entity.actualEncoding = e.actualEncoding
                    entity.encoding = e.encoding
                    entity.version = e.version
                    entity.ownerDocument = newOwnerDocument
                    clone.entities._seq.append(entity)
                    if hasattr(e, '_call_user_data_handler'):
                        # BUG FIX: the original passed 'n', the loop variable
                        # of the *notations* loop above (stale, or unbound if
                        # there are no notations). The handler must receive
                        # the source entity 'e', as in current CPython minidom.
                        e._call_user_data_handler(operation, e, entity)
        else:
            # Note the cloning of Document and DocumentType nodes is
            # implementation specific. minidom handles those cases
            # directly in the cloneNode() methods.
            raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
        # Check for _call_user_data_handler() since this could conceivably
        # be used with other DOM implementations (one of the FourThought
        # DOMs, perhaps?).
        if hasattr(node, '_call_user_data_handler'):
            node._call_user_data_handler(operation, node, clone)
        return clone
    xml.dom.minidom._clone_node = _clone_node
| gpl-3.0 |
geopython/QGIS | python/plugins/processing/algs/qgis/Datasources2Vrt.py | 15 | 8905 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2015
Copyright : (C) 2015 by Luigi Pirelli
Email : luipir at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Luigi Pirelli'
__date__ = 'May 2015'
__copyright__ = '(C) 2015, Luigi Pirelli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import codecs
import xml.sax.saxutils
from osgeo import ogr
from qgis.core import (QgsProcessingFeedback,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterBoolean,
QgsProcessing,
QgsProcessingParameterVectorDestination,
QgsProcessingOutputString,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class Datasources2Vrt(QgisAlgorithm):
    """Processing algorithm that merges several vector datasources into a
    single OGR VRT (virtual vector) file, optionally wrapping all layers in
    one "unioned" layer. Returns both the written file and the VRT XML."""

    INPUT = 'INPUT'
    UNIONED = 'UNIONED'
    OUTPUT = 'OUTPUT'
    VRT_STRING = 'VRT_STRING'

    def group(self):
        return self.tr('Vector general')

    def groupId(self):
        return 'vectorgeneral'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the input layers, the union flag and the VRT outputs."""
        self.addParameter(QgsProcessingParameterMultipleLayers(self.INPUT,
                                                               self.tr('Input datasources'),
                                                               QgsProcessing.TypeVector))
        self.addParameter(QgsProcessingParameterBoolean(self.UNIONED,
                                                        self.tr('Create "unioned" VRT'),
                                                        defaultValue=False))

        class ParameterVectorVrtDestination(QgsProcessingParameterVectorDestination):
            # Local destination parameter whose default file extension is .vrt.

            def __init__(self, name, description):
                super().__init__(name, description)

            def clone(self):
                copy = ParameterVectorVrtDestination(self.name(), self.description())
                return copy

            def type(self):
                return 'vrt_vector_destination'

            def defaultFileExtension(self):
                return 'vrt'

        self.addParameter(ParameterVectorVrtDestination(self.OUTPUT,
                                                        self.tr('Virtual vector')))
        self.addOutput(QgsProcessingOutputString(self.VRT_STRING,
                                                 self.tr('Virtual string')))

    def name(self):
        return 'buildvirtualvector'

    def displayName(self):
        return self.tr('Build virtual vector')

    def processAlgorithm(self, parameters, context, feedback):
        """Resolve parameters and delegate to mergeDataSources2Vrt."""
        input_layers = self.parameterAsLayerList(parameters, self.INPUT, context)
        unioned = self.parameterAsBool(parameters, self.UNIONED, context)
        vrtPath = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)

        vrtString = self.mergeDataSources2Vrt(input_layers,
                                              vrtPath,
                                              union=unioned,
                                              relative=False,
                                              schema=False,
                                              feedback=feedback)
        return {self.OUTPUT: vrtPath, self.VRT_STRING: vrtString}

    def mergeDataSources2Vrt(self, dataSources, outFile, union=False, relative=False,
                             schema=False, feedback=None):
        '''Function to do the work of merging datasources in a single vrt format

        @param dataSources: list of map layers whose source() OGR can open
        @param outFile: the output vrt file to generate (None to skip writing)
        @param union: wrap every layer in a single OGRVRTUnionLayer
        @param relative: Write relative flag. DOES NOT relativise paths. They have to be already relative
        @param schema: Schema flag; emit layer schemas with '@dummy@' sources
        @param feedback: optional QgsProcessingFeedback for progress/cancel
        @return: vrt in string format
        '''
        if feedback is None:
            feedback = QgsProcessingFeedback()

        vrt = '<OGRVRTDataSource>'
        if union:
            vrt += '<OGRVRTUnionLayer name="UnionedLayer">'

        total = 100.0 / len(dataSources) if dataSources else 1
        for current, inLayer in enumerate(dataSources):
            if feedback.isCanceled():
                break

            feedback.setProgress(int(current * total))

            inFile = inLayer.source()
            srcDS = ogr.Open(inFile, 0)
            if srcDS is None:
                raise QgsProcessingException(
                    self.tr('Invalid datasource: {}'.format(inFile)))

            if schema:
                # Schema-only VRTs carry no real source path.
                inFile = '@dummy@'

            # 'ogrLayer' (not 'layer') so the OGR layer does not shadow the
            # outer QGIS layer variable.
            for ogrLayer in srcDS:
                layerDef = ogrLayer.GetLayerDefn()
                layerName = layerDef.GetName()
                vrt += '<OGRVRTLayer name="{}">'.format(self.XmlEsc(layerName))
                vrt += '<SrcDataSource relativeToVRT="{}" shared="{}">{}</SrcDataSource>'.format(1 if relative else 0, not schema, self.XmlEsc(inFile))
                if schema:
                    vrt += '<SrcLayer>@dummy@</SrcLayer>'
                else:
                    vrt += '<SrcLayer>{}</SrcLayer>'.format(self.XmlEsc(layerName))
                vrt += '<GeometryType>{}</GeometryType>'.format(self.GeomType2Name(layerDef.GetGeomType()))

                crs = ogrLayer.GetSpatialRef()
                if crs is not None:
                    vrt += '<LayerSRS>{}</LayerSRS>'.format(self.XmlEsc(crs.ExportToWkt()))

                # Process all the fields.
                for fieldIdx in range(layerDef.GetFieldCount()):
                    fieldDef = layerDef.GetFieldDefn(fieldIdx)
                    vrt += '<Field name="{}" type="{}"'.format(self.XmlEsc(fieldDef.GetName()), self.fieldType2Name(fieldDef.GetType()))
                    if not schema:
                        vrt += ' src="{}"'.format(self.XmlEsc(fieldDef.GetName()))
                    if fieldDef.GetWidth() > 0:
                        vrt += ' width="{}"'.format(fieldDef.GetWidth())
                    if fieldDef.GetPrecision() > 0:
                        vrt += ' precision="{}"'.format(fieldDef.GetPrecision())
                    vrt += '/>'
                vrt += '</OGRVRTLayer>'

            srcDS.Destroy()

        if union:
            vrt += '</OGRVRTUnionLayer>'
        vrt += '</OGRVRTDataSource>'

        #TODO: pretty-print XML
        if outFile is not None:
            # Write UTF-8 explicitly: the VRT carries no XML declaration, so
            # UTF-8 is what readers will assume; the previous code wrote in
            # the locale's default encoding.
            with codecs.open(outFile, 'w', encoding='utf-8') as f:
                f.write(vrt)

        return vrt

    def GeomType2Name(self, geomType):
        """Map an OGR geometry type constant to its VRT <GeometryType> name;
        unknown constants fall back to 'wkbUnknown'."""
        names = {
            ogr.wkbUnknown: 'wkbUnknown',
            ogr.wkbPoint: 'wkbPoint',
            ogr.wkbLineString: 'wkbLineString',
            ogr.wkbPolygon: 'wkbPolygon',
            ogr.wkbMultiPoint: 'wkbMultiPoint',
            ogr.wkbMultiLineString: 'wkbMultiLineString',
            ogr.wkbMultiPolygon: 'wkbMultiPolygon',
            ogr.wkbGeometryCollection: 'wkbGeometryCollection',
            ogr.wkbNone: 'wkbNone',
            ogr.wkbLinearRing: 'wkbLinearRing',
        }
        return names.get(geomType, 'wkbUnknown')

    def fieldType2Name(self, fieldType):
        """Map an OGR field type constant to its VRT <Field type=""> name;
        unknown constants fall back to 'String'."""
        names = {
            ogr.OFTInteger: 'Integer',
            ogr.OFTString: 'String',
            ogr.OFTReal: 'Real',
            ogr.OFTStringList: 'StringList',
            ogr.OFTIntegerList: 'IntegerList',
            ogr.OFTRealList: 'RealList',
            ogr.OFTBinary: 'Binary',
            ogr.OFTDate: 'Date',
            ogr.OFTTime: 'Time',
            ogr.OFTDateTime: 'DateTime',
        }
        return names.get(fieldType, 'String')

    def XmlEsc(self, text):
        # Escape '"' as well: the escaped text is interpolated into
        # double-quoted XML attribute values (layer and field names), where
        # a bare quote would break the VRT document.
        return xml.sax.saxutils.escape(text, {'"': '&quot;'})
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.