repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
jbking/demo-appengine-django-golang | myproject/django/utils/formats.py | 104 | 7799 | import decimal
import datetime
from django.conf import settings
from django.utils import dateformat, numberformat, datetime_safe
from django.utils.importlib import import_module
from django.utils.encoding import force_str
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import get_language, to_locale, check_for_language
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}  # (format_type, lang) -> format value; None means "no locale module defines it"
_format_modules_cache = {}  # lang -> list of imported locale format modules
# ISO 8601 style formats that must always be accepted as *input* formats,
# even when the active locale's format module does not list them
# (see the ISO_INPUT_FORMATS handling inside get_format below).
ISO_INPUT_FORMATS = {
    'DATE_INPUT_FORMATS': ('%Y-%m-%d',),
    'TIME_INPUT_FORMATS': ('%H:%M:%S', '%H:%M'),
    'DATETIME_INPUT_FORMATS': (
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%d %H:%M',
        '%Y-%m-%d'
    ),
}
def reset_format_cache():
    """Clear any cached formats.

    This method is provided primarily for testing purposes,
    so that the effects of cached formats can be removed.
    """
    global _format_cache, _format_modules_cache
    _format_cache, _format_modules_cache = {}, {}
def iter_format_modules(lang):
    """
    Does the heavy lifting of finding format modules.

    Yields the imported ``formats`` module for every candidate location,
    most specific first: project FORMAT_MODULE_PATH (if set) before
    django.conf.locale, and the full locale (e.g. ``pt_BR``) before the
    bare language (``pt``).  Locations that fail to import are skipped.
    """
    if not check_for_language(lang):
        return
    locations = ['django.conf.locale.%s']
    if settings.FORMAT_MODULE_PATH:
        locations.append(settings.FORMAT_MODULE_PATH + '.%s')
    # Reversed so that the project-specific path is tried first.
    locations.reverse()
    locale = to_locale(lang)
    candidates = [locale]
    if '_' in locale:
        # Also fall back to the plain language code (e.g. "pt" for "pt_BR").
        candidates.append(locale.split('_')[0])
    for pattern in locations:
        for loc in candidates:
            try:
                yield import_module('.formats', pattern % loc)
            except ImportError:
                continue
def get_format_modules(lang=None, reverse=False):
    """
    Returns a list of the format modules found for ``lang`` (the active
    language when ``lang`` is None), memoized in _format_modules_cache.
    """
    if lang is None:
        lang = get_language()
    modules = _format_modules_cache.setdefault(
        lang, list(iter_format_modules(lang)))
    return list(reversed(modules)) if reverse else modules
def get_format(format_type, lang=None, use_l10n=None):
    """
    For a specific format type, returns the format for the current
    language (locale), defaults to the format in the settings.
    format_type is the name of the format, e.g. 'DATE_FORMAT'
    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_type = force_str(format_type)
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        if lang is None:
            lang = get_language()
        cache_key = (format_type, lang)
        try:
            cached = _format_cache[cache_key]
            if cached is not None:
                return cached
            else:
                # None is the negative-cache sentinel set below: no locale
                # module defines this format, so skip the module scan.
                # Return the general setting by default
                return getattr(settings, format_type)
        except KeyError:
            # Cache miss: scan the locale format modules for this attribute.
            for module in get_format_modules(lang):
                try:
                    val = getattr(module, format_type)
                    # Always accept the ISO input formats, even if the locale
                    # module omitted them from its own list.
                    for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
                        if iso_input not in val:
                            if isinstance(val, tuple):
                                # Tuples are immutable; switch to a list so
                                # the ISO formats can be appended.
                                val = list(val)
                            val.append(iso_input)
                    _format_cache[cache_key] = val
                    return val
                except AttributeError:
                    # This module doesn't define the format; try the next one.
                    pass
            # Negative-cache the miss so future lookups return the setting
            # without re-scanning the modules.
            _format_cache[cache_key] = None
    # Non-localized path (or cached miss): use the global setting.
    return getattr(settings, format_type)
# Lazy variant for use at import time, before translations are ready.
get_format_lazy = lazy(get_format, six.text_type, list, tuple)
def date_format(value, format=None, use_l10n=None):
    """
    Formats a datetime.date or datetime.datetime object using a
    localizable format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    fmt = get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)
    return dateformat.format(value, fmt)
def time_format(value, format=None, use_l10n=None):
    """
    Formats a datetime.time object using a localizable format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    fmt = get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)
    return dateformat.time_format(value, fmt)
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
    """
    Formats a numeric value using localization settings.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    localized = use_l10n or (use_l10n is None and settings.USE_L10N)
    lang = get_language() if localized else None
    decimal_sep = get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n)
    grouping = get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n)
    thousand_sep = get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n)
    return numberformat.format(
        value,
        decimal_sep,
        decimal_pos,
        grouping,
        thousand_sep,
        force_grouping=force_grouping
    )
def localize(value, use_l10n=None):
    """
    Checks if value is a localizable type (date, number...) and returns it
    formatted as a string using current locale format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    # bool is an int subclass, so it must be tested before the numeric types
    # to be rendered as text ("True"/"False") rather than as a number.
    if isinstance(value, bool):
        return mark_safe(six.text_type(value))
    number_types = (decimal.Decimal, float) + six.integer_types
    if isinstance(value, number_types):
        return number_format(value, use_l10n=use_l10n)
    # datetime is a date subclass: check it first.
    if isinstance(value, datetime.datetime):
        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
    if isinstance(value, datetime.date):
        return date_format(value, use_l10n=use_l10n)
    if isinstance(value, datetime.time):
        return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
    return value
def localize_input(value, default=None):
    """
    Checks if an input value is a localizable type and returns it
    formatted with the appropriate formatting string of the current locale.
    """
    if isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value)
    # datetime is a date subclass, so it has to be tested first.
    if isinstance(value, datetime.datetime):
        safe_value = datetime_safe.new_datetime(value)
        fmt = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
        return safe_value.strftime(fmt)
    if isinstance(value, datetime.date):
        safe_value = datetime_safe.new_date(value)
        fmt = force_str(default or get_format('DATE_INPUT_FORMATS')[0])
        return safe_value.strftime(fmt)
    if isinstance(value, datetime.time):
        fmt = force_str(default or get_format('TIME_INPUT_FORMATS')[0])
        return value.strftime(fmt)
    return value
def sanitize_separators(value):
    """
    Sanitizes a value according to the current decimal and
    thousand separator setting. Used with form field input.
    """
    if not settings.USE_L10N:
        return value
    decimal_separator = get_format('DECIMAL_SEPARATOR')
    if isinstance(value, six.string_types):
        parts = []
        if decimal_separator in value:
            # Split off the decimals; only the integer part may carry
            # thousand separators.
            value, decimals = value.split(decimal_separator, 1)
            parts.append(decimals)
        if settings.USE_THOUSAND_SEPARATOR:
            thousand_sep = get_format('THOUSAND_SEPARATOR')
            parts.append(value.replace(thousand_sep, ''))
        else:
            parts.append(value)
        # Reassemble as "<integer>.<decimals>" with a plain dot.
        value = '.'.join(reversed(parts))
    return value
| mit |
antiface/mne-python | examples/time_frequency/plot_compute_raw_data_spectrum.py | 16 | 2573 | """
==================================================
Compute the power spectral density of raw data
==================================================
This script shows how to compute the power spectral density (PSD)
of measurements on a raw dataset. It also show the effect of applying SSP
to the data to reduce ECG and EOG artifacts.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif'

# Setup for reading the raw data (preload so notch_filter can modify in place)
raw = io.Raw(raw_fname, preload=True)
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more

# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)

tmin, tmax = 0, 60  # use the first 60s of data
fmin, fmax = 2, 300  # look at frequencies between 2 and 300Hz
n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
plt.ion()  # interactive mode: figures display without blocking

# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0)

# Now let's focus on a smaller subset:
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
                       stim=False, exclude='bads', selection=selection)

# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]

plt.figure()
ax = plt.axes()
# Baseline PSD, projections NOT applied (blue)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
             n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks)

# And now do the same with SSP applied (green)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
             n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks)

# And now do the same with SSP + notch filtering (red)
# Notch at 60 Hz and its harmonics up to 240 Hz
raw.notch_filter(np.arange(60, 241, 60), picks=picks, n_jobs=1)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
             n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks)

ax.set_title('Four left-temporal magnetometers')
plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
| bsd-3-clause |
kamcpp/tensorflow | tensorflow/tools/dist_test/server/grpc_tensorflow_server.py | 31 | 3644 | #!/usr/bin/python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python-based TensorFlow GRPC server.
Takes input arguments cluster_spec, job_name and task_id, and start a blocking
TensorFlow GRPC server.
Usage:
grpc_tensorflow_server.py --cluster_spec=SPEC --job_name=NAME --task_id=ID
Where:
SPEC is <JOB>(,<JOB>)*
JOB is <NAME>|<HOST:PORT>(;<HOST:PORT>)*
NAME is a valid job name ([a-z][0-9a-z]*)
HOST is a hostname or IP address
PORT is a port number
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("cluster_spec", "",
"""Cluster spec: SPEC.
SPEC is <JOB>(,<JOB>)*,"
JOB is <NAME>|<HOST:PORT>(;<HOST:PORT>)*,"
NAME is a valid job name ([a-z][0-9a-z]*),"
HOST is a hostname or IP address,"
PORT is a port number."
E.g., local|localhost:2222;localhost:2223, ps|ps0:2222;ps1:2222""")
tf.app.flags.DEFINE_string("job_name", "", "Job name: e.g., local")
tf.app.flags.DEFINE_integer("task_id", 0, "Task index, e.g., 0")
tf.app.flags.DEFINE_boolean("verbose", False, "Verbose mode")
def parse_cluster_spec(cluster_spec, cluster):
  """Parse content of cluster_spec string and inject info into cluster protobuf.

  Args:
    cluster_spec: cluster specification string, e.g.,
          "local|localhost:2222;localhost:2223"
    cluster: cluster protobuf.

  Raises:
    ValueError: if the cluster_spec string is invalid.
  """
  # Reject an empty spec up front, before doing any work on it.
  if not cluster_spec:
    raise ValueError("Empty cluster_spec string")

  job_strings = cluster_spec.split(",")
  for job_string in job_strings:
    # Validate the job string *before* mutating the protobuf, so that a
    # malformed spec does not leave a half-built job behind in `cluster`.
    if job_string.count("|") != 1:
      raise ValueError("Not exactly one instance of '|' in cluster_spec")

    job_name, _, task_string = job_string.partition("|")
    if not job_name:
      raise ValueError("Empty job_name in cluster_spec")

    job_def = cluster.job.add()
    job_def.name = job_name
    if FLAGS.verbose:
      print("Added job named \"%s\"" % job_name)

    job_tasks = task_string.split(";")
    for i in range(len(job_tasks)):
      if not job_tasks[i]:
        raise ValueError("Empty task string at position %d" % i)
      # tasks is a proto map field: task index -> "host:port".
      job_def.tasks[i] = job_tasks[i]
      if FLAGS.verbose:
        print("  Added task \"%s\" to job \"%s\"" % (job_tasks[i], job_name))
def main(unused_args):
  """Builds a ServerDef from the command-line flags and runs a blocking
  gRPC TensorFlow server."""
  # Create Protobuf ServerDef
  server_def = tf.train.ServerDef(protocol="grpc")

  # Cluster info: filled in-place from the --cluster_spec flag.
  parse_cluster_spec(FLAGS.cluster_spec, server_def.cluster)

  # Job name
  if not FLAGS.job_name:
    raise ValueError("Empty job_name")
  server_def.job_name = FLAGS.job_name

  # Task index
  if FLAGS.task_id < 0:
    raise ValueError("Invalid task_id: %d" % FLAGS.task_id)
  server_def.task_index = FLAGS.task_id

  # Create GRPC Server instance
  server = tf.train.Server(server_def)

  # join() is blocking, unlike start()
  server.join()


if __name__ == "__main__":
  tf.app.run()
| apache-2.0 |
luistorresm/odoo | openerp/tools/__init__.py | 337 | 1447 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import copy
import win32
import appdirs
from config import config
from misc import *
from convert import *
from translate import *
from graph import graph
from image import *
from amount_to_text import *
from amount_to_text_en import *
from pdf_utils import *
from yaml_import import *
from sql import *
from float_utils import *
from mail import *
from func import *
from debugger import *
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sumspr/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """Upper-quadrant contour of the L1 penalty: |w1| = 1 - |w0|."""
    pts = np.asarray(xs, dtype=float)
    # Same operations as the original per-element form, vectorized:
    # sqrt(x**2) == |x| and sqrt((.)**2) == |.|
    return np.sqrt((1 - np.sqrt(pts ** 2.0)) ** 2.0)
def l2(xs):
    """Upper-half contour of the L2 penalty (unit circle): w1 = sqrt(1 - w0**2)."""
    pts = np.asarray(xs, dtype=float)
    return np.sqrt(1.0 - pts ** 2.0)
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
    """Draw the x and y axes as black lines through the origin,
    spanning [-ext, ext] in both directions."""
    plt.plot([-ext, ext], [0, 0], "k-")
    plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
# Just above 0.5: el() divides by (2 - 4 * z), which is zero at z == 0.5.
alpha = 0.501

cross(1.2)

# L1 contour (red), mirrored into all four quadrants.
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")

# L2 contour (blue), mirrored into all four quadrants.
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")

# Elastic-net contour (yellow) for mixing parameter alpha.
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")

plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
kthordarson/youtube-dl-ruv | youtube_dl/extractor/canalplus.py | 3 | 4443 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
url_basename,
qualities,
)
class CanalplusIE(InfoExtractor):
    """Extractor for canalplus.fr, piwiplus.fr, d8.tv and the
    player.canalplus.fr embedded player."""
    IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
    _VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
    # REST endpoint: % (site_id, video_id)
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
    # Maps the URL's domain to the site id used by the info REST service.
    _SITE_ID_MAP = {
        'canalplus.fr': 'cplus',
        'piwiplus.fr': 'teletoon',
        'd8.tv': 'd8',
    }
    _TESTS = [{
        'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
        'md5': '3db39fb48b9685438ecf33a1078023e4',
        'info_dict': {
            'id': '922470',
            'ext': 'flv',
            'title': 'Zapping - 26/08/13',
            'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
            'upload_date': '20130826',
        },
    }, {
        'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
        'info_dict': {
            'id': '1108190',
            'ext': 'flv',
            'title': 'Le labyrinthe - Boing super ranger',
            'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
            'upload_date': '20140724',
        },
        'skip': 'Only works from France',
    }, {
        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
        'info_dict': {
            'id': '966289',
            'ext': 'flv',
            'title': 'Campagne intime - Documentaire exceptionnel',
            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
            'upload_date': '20131108',
        },
        'skip': 'videos get deleted after a while',
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.groupdict().get('id')
        # NOTE(review): the fallback key 'canal' (used for player.canalplus.fr
        # URLs, where the 'site' group is None) is not present in
        # _SITE_ID_MAP, so that branch would raise KeyError -- confirm.
        site_id = self._SITE_ID_MAP[mobj.group('site') or 'canal']
        # Beware, some subclasses do not define an id group
        display_id = url_basename(mobj.group('path'))
        if video_id is None:
            # No numeric id in the URL: scrape it from the page's player tag.
            webpage = self._download_webpage(url, display_id)
            video_id = self._search_regex(
                r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
        info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
        doc = self._download_xml(info_url, video_id, 'Downloading video XML')
        # The endpoint returns related videos too; keep the matching one.
        video_info = [video for video in doc if video.find('ID').text == video_id][0]
        media = video_info.find('MEDIA')
        infos = video_info.find('INFOS')
        preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
        formats = []
        for fmt in media.find('VIDEOS'):
            format_url = fmt.text
            if not format_url:
                continue
            format_id = fmt.tag
            if format_id == 'HLS':
                hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
                # NOTE: the loop variable `fmt` is reused here, shadowing the
                # outer XML element for the rest of this iteration.
                for fmt in hls_formats:
                    fmt['preference'] = preference(format_id)
                formats.extend(hls_formats)
            elif format_id == 'HDS':
                hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
                for fmt in hds_formats:
                    fmt['preference'] = preference(format_id)
                formats.extend(hds_formats)
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'preference': preference(format_id),
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'display_id': display_id,
            'title': '%s - %s' % (infos.find('TITRAGE/TITRE').text,
                                  infos.find('TITRAGE/SOUS_TITRE').text),
            'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
            'thumbnail': media.find('IMAGES/GRAND').text,
            'description': infos.find('DESCRIPTION').text,
            'view_count': int(infos.find('NB_VUES').text),
            'like_count': int(infos.find('NB_LIKES').text),
            'comment_count': int(infos.find('NB_COMMENTS').text),
            'formats': formats,
        }
NewpTone/stacklab-cinder | cinder/db/base.py | 5 | 1430 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for classes that need modular database access."""
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import importutils
db_driver_opt = cfg.StrOpt('db_driver',
default='cinder.db',
help='driver to use for database access')
FLAGS = flags.FLAGS
FLAGS.register_opt(db_driver_opt)
class Base(object):
    """Base class whose DB driver module is injected at construction time."""

    def __init__(self, db_driver=None):
        # Fall back to the configured default driver when none is supplied.
        driver_name = db_driver or FLAGS.db_driver
        self.db = importutils.import_module(driver_name)  # pylint: disable=C0103
| apache-2.0 |
antiface/audiolazy | audiolazy/lazy_io.py | 1 | 14038 | # -*- coding: utf-8 -*-
# This file is part of AudioLazy, the signal processing Python package.
# Copyright (C) 2012-2014 Danilo de Jesus da Silva Bellini
#
# AudioLazy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Created on Fri Jul 20 2012
# danilo [dot] bellini [at] gmail [dot] com
"""
Audio recording input and playing output module
"""
import threading
import struct
import array
# Audiolazy internal imports
from ._internals import deprecate
from .lazy_stream import Stream
from .lazy_misc import DEFAULT_SAMPLE_RATE, blocks
from .lazy_compat import xrange, xmap
from .lazy_math import inf
from .lazy_core import StrategyDict
__all__ = ["chunks", "RecStream", "AudioIO", "AudioThread"]
# Conversion dict from structs.Struct() format symbols to PyAudio constants
_STRUCT2PYAUDIO = {"f": 1, #pyaudio.paFloat32
                   "i": 2, #pyaudio.paInt32
                   "h": 8, #pyaudio.paInt16
                   "b": 16, #pyaudio.paInt8
                   "B": 32, #pyaudio.paUInt8
                  }

# StrategyDict gathering the interchangeable "chunks" implementations below;
# the default strategy is the one used by AudioIO/AudioThread playback.
chunks = StrategyDict("chunks")
# Default chunk size in samples, stored on the StrategyDict *class* so it is
# shared and reachable as ``chunks.size``.
chunks.__class__.size = 2048 # Samples
@chunks.strategy("struct")
def chunks(seq, size=None, dfmt="f", byte_order=None, padval=0.):
  """
  Chunk generator based on the struct module (Python standard library).

  Serializes the homogeneous iterable ``seq`` into packed byte blocks of
  ``size`` samples each, to help writing an iterable into a file.

  The dfmt should be one char, chosen from the ones in link:
  `<http://docs.python.org/library/struct.html#format-characters>`_

  Useful examples (integer are signed, use upper case for unsigned ones):

  - "b" for 8 bits (1 byte) integer
  - "h" for 16 bits (2 bytes) integer
  - "i" for 32 bits (4 bytes) integer
  - "f" for 32 bits (4 bytes) float (default)
  - "d" for 64 bits (8 bytes) float (double)

  Byte order follows native system defaults. Other options are in the site:
  `<http://docs.python.org/library/struct.html#struct-alignment>`_

  They are:

  - "<" means little-endian
  - ">" means big-endian

  Note
  ----
  Default chunk size can be accessed (and changed) via chunks.size.

  """
  if size is None:
    size = chunks.size
  fmt = str(size) + dfmt
  if byte_order is not None:
    fmt = byte_order + fmt
  packer = struct.Struct(fmt)
  # blocks() pads the last block with padval, so every pack call is full-size.
  for block in blocks(seq, size, padval=padval):
    yield packer.pack(*block)
@chunks.strategy("array")
def chunks(seq, size=None, dfmt="f", byte_order=None, padval=0.):
  """
  Chunk generator based on the array module (Python standard library).
  See chunk.struct for more help. This strategy uses array.array (random access
  by indexing management) instead of struct.Struct and blocks/deque (circular
  queue appending) from the chunks.struct strategy.

  Hint
  ----
  Try each one to find the faster one for your machine, and chooses
  the default one by assigning ``chunks.default = chunks.strategy_name``.
  It'll be the one used by the AudioIO/AudioThread playing mechanism.

  Note
  ----
  The ``dfmt`` symbols for arrays might differ from structs' defaults.
  """
  if size is None:
    size = chunks.size
  # One reusable typed buffer of ``size`` slots, filled in place per chunk.
  # NOTE(review): unlike the struct strategy, ``byte_order`` is accepted but
  # never used here -- output is always in native byte order.
  chunk = array.array(dfmt, xrange(size))
  idx = 0
  for el in seq:
    chunk[idx] = el
    idx += 1
    if idx == size:
      yield chunk.tostring()  # py2-compatible spelling of tobytes()
      idx = 0
  if idx != 0:
    # Last, partial chunk: pad the tail with padval before emitting.
    for idx in xrange(idx, size):
      chunk[idx] = padval
    yield chunk.tostring()
class RecStream(Stream):
  """
  Recording Stream

  A common Stream class with a ``stop`` method for input data recording
  and a ``recording`` read-only property for status.
  """
  def __init__(self, device_manager, file_obj, chunk_size, dfmt):
    if chunk_size is None:
      chunk_size = chunks.size
    # Unpacker for one chunk of ``chunk_size`` samples of format ``dfmt``.
    s = struct.Struct("{0}{1}".format(chunk_size, dfmt))

    def rec():
      # Generator feeding the Stream: reads chunks from the device file
      # object and yields individual samples until stop() clears the flag.
      try:
        while self._recording:
          for k in s.unpack(file_obj.read(chunk_size)):
            yield k
      finally:
        # Cleanup runs whether stopped, exhausted or garbage-collected.
        file_obj.close()
        self._recording = False # Loop can be broken by StopIteration
        self.device_manager.recording_finished(self)

    super(RecStream, self).__init__(rec())
    self._recording = True
    self.device_manager = device_manager

  def stop(self):
    """ Finishes the recording stream, so it can raise StopIteration """
    self._recording = False

  @property
  def recording(self):
    # True while the generator above is still pulling data from the device.
    return self._recording
class AudioIO(object):
  """
  Multi-thread stream manager wrapper for PyAudio.
  """

  def __init__(self, wait=False, api=None):
    """
    Constructor to PyAudio Multi-thread manager audio IO interface.
    The "wait" input is a boolean about the behaviour on closing the
    instance, if it should or not wait for the streaming audio to finish.
    Defaults to False. Only works if the close method is explicitly
    called.
    """
    import pyaudio
    self._pa = pa = pyaudio.PyAudio()
    self._threads = []    # Open AudioThread instances (guarded by self.lock)
    self.wait = wait # Wait threads to finish at end (constructor parameter)
    self._recordings = [] # Open RecStream instances

    # Lockers
    self.halting = threading.Lock() # Only for "close" method
    self.lock = threading.Lock() # "_threads" access locking
    self.finished = False

    # Choosing the PortAudio API (needed to use Jack)
    if not (api is None):
      api_count = pa.get_host_api_count()
      apis_gen = xmap(pa.get_host_api_info_by_index, xrange(api_count))
      try:
        # First host API whose name starts with the given prefix (case
        # insensitive), e.g. api="jack".
        self.api = next(el for el in apis_gen
                        if el["name"].lower().startswith(api))
      except StopIteration:
        raise RuntimeError("API '{}' not found".format(api))

  def __del__(self):
    """
    Default destructor. Use close method instead, or use the class
    instance as the expression of a with block.
    """
    self.close()

  def __exit__(self, etype, evalue, etraceback):
    """
    Closing destructor for use internally in a with-expression.
    """
    self.close()

  def __enter__(self):
    """
    To be used only internally, in the with-expression protocol.
    """
    return self

  def close(self):
    """
    Destructor for this audio interface. Waits the threads to finish their
    streams, if desired.
    """
    with self.halting: # Avoid simultaneous "close" threads
      if not self.finished: # Ignore all "close" calls, but the first,
        self.finished = True # and any call to play would raise ThreadError
        # Closes all playing AudioThread instances
        while True:
          with self.lock: # Ensure there's no other thread messing around
            try:
              thread = self._threads[0] # Needless to say: pop = deadlock
            except IndexError: # Empty list
              break # No more threads
          if not self.wait:
            thread.stop()
          # join() outside the lock: the thread itself needs the lock to
          # deregister when it finishes.
          thread.join()
        # Closes all recording RecStream instances
        while self._recordings:
          recst = self._recordings[-1]
          recst.stop()
          recst.take(inf) # Ensure it'll be closed
        # Finishes
        assert not self._pa._streams # No stream should survive
        self._pa.terminate()

  def terminate(self):
    """
    Same as "close".
    """
    self.close() # Avoids direct calls to inherited "terminate"

  def play(self, audio, **kwargs):
    """
    Start another thread playing the given audio sample iterable (e.g. a
    list, a generator, a NumPy np.ndarray with samples), and play it.
    The arguments are used to customize behaviour of the new thread, as
    parameters directly sent to PyAudio's new stream opening method, see
    AudioThread.__init__ for more.
    """
    with self.lock:
      if self.finished:
        raise threading.ThreadError("Trying to play an audio stream while "
                                    "halting the AudioIO manager object")
      new_thread = AudioThread(self, audio, **kwargs)
      self._threads.append(new_thread)
    new_thread.start()
    return new_thread

  def thread_finished(self, thread):
    """
    Updates internal status about open threads. Should be called only by
    the internal closing mechanism of AudioThread instances.
    """
    with self.lock:
      self._threads.remove(thread)

  def recording_finished(self, recst):
    """
    Updates internal status about open recording streams. Should be called
    only by the internal closing mechanism of children RecStream instances.
    """
    self._recordings.remove(recst)

  def record(self, chunk_size = None,
                   dfmt = "f",
                   channels = 1,
                   rate = DEFAULT_SAMPLE_RATE,
                   **kwargs
            ):
    """
    Records audio from device into a Stream.

    Parameters
    ----------
    chunk_size :
      Number of samples per chunk (block sent to device).
    dfmt :
      Format, as in chunks(). Default is "f" (Float32).
    channels :
      Channels in audio stream (serialized).
    rate :
      Sample rate (same input used in sHz).

    Returns
    -------
    Endless Stream instance that gather data from the audio input device.
    """
    if chunk_size is None:
      chunk_size = chunks.size
    if hasattr(self, "api"):
      # Constrain the input device to the chosen host API's default.
      kwargs.setdefault("input_device_index", self.api["defaultInputDevice"])
    channels = kwargs.pop("nchannels", channels) # Backwards compatibility
    input_stream = RecStream(self,
                             self._pa.open(format=_STRUCT2PYAUDIO[dfmt],
                                           channels=channels,
                                           rate=rate,
                                           frames_per_buffer=chunk_size,
                                           input=True,
                                           **kwargs),
                             chunk_size,
                             dfmt
                            )
    self._recordings.append(input_stream)
    return input_stream
class AudioThread(threading.Thread):
  """
  Audio output thread.

  This class is a wrapper to ease the use of PyAudio using iterables of
  numbers (Stream instances, lists, tuples, NumPy 1D arrays, generators) as
  audio data streams.
  """
  def __init__(self, device_manager, audio,
                     chunk_size = None,
                     dfmt = "f",
                     channels = 1,
                     rate = DEFAULT_SAMPLE_RATE,
                     daemon = True, # This shouldn't survive after crashes
                     **kwargs
              ):
    """
    Sets a new thread to play the given audio.

    Parameters
    ----------
    chunk_size :
      Number of samples per chunk (block sent to device).
    dfmt :
      Format, as in chunks(). Default is "f" (Float32).
    channels :
      Channels in audio stream (serialized).
    rate :
      Sample rate (same input used in sHz).
    daemon :
      Boolean telling if thread should be daemon. Default is True.
    """
    super(AudioThread, self).__init__()
    self.daemon = daemon # threading.Thread property, couldn't be assigned
                         # before the superclass constructor

    # Stores data needed by the run method
    self.audio = audio
    self.device_manager = device_manager
    self.dfmt = dfmt
    # NOTE(review): a legacy "nchannels" kwarg overrides self.channels here,
    # but the pa.open call below still uses the ``channels`` parameter, so
    # the two can disagree when "nchannels" is passed -- confirm intended.
    self.channels = kwargs.pop("nchannels", channels)
    self.chunk_size = chunks.size if chunk_size is None else chunk_size

    # Lockers
    self.lock = threading.Lock() # Avoid control methods simultaneous call
    self.go = threading.Event() # Communication between the 2 threads
    self.go.set()
    self.halting = False # The stop message

    # Get the streaming function
    import _portaudio # Just to be slightly faster (per chunk!)
    self.write_stream = _portaudio.write_stream

    if hasattr(device_manager, "api"):
      kwargs.setdefault("output_device_index",
                        device_manager.api["defaultOutputDevice"])

    # Open a new audio output stream
    self.stream = device_manager._pa.open(format=_STRUCT2PYAUDIO[dfmt],
                                          channels=channels,
                                          rate=rate,
                                          frames_per_buffer=self.chunk_size,
                                          output=True,
                                          **kwargs)

  # Backwards compatibility (deprecated alias for self.channels)
  nchannels = property(deprecate(lambda self: self.channels))

  def run(self):
    """
    Plays the audio. This method plays the audio, and shouldn't be called
    explicitly, let the constructor do so.
    """
    # From now on, it's multi-thread. Let the force be with them.
    st = self.stream._stream
    for chunk in chunks(self.audio,
                        size=self.chunk_size*self.nchannels,
                        dfmt=self.dfmt):
      #Below is a faster way to call:
      #  self.stream.write(chunk, self.chunk_size)
      self.write_stream(st, chunk, self.chunk_size, False)
      if not self.go.is_set():
        # pause() or stop() was called: halt the device stream and either
        # leave the loop (stop) or block until play() sets the event again.
        self.stream.stop_stream()
        if self.halting:
          break
        self.go.wait()
        self.stream.start_stream()

    # Finished playing! Destructor-like step: let's close the thread
    with self.lock:
      if self in self.device_manager._threads: # If not already closed
        self.stream.close()
        self.device_manager.thread_finished(self)

  def stop(self):
    """ Stops the playing thread and close """
    with self.lock:
      self.halting = True
      self.go.clear()

  def pause(self):
    """ Pauses the audio. """
    with self.lock:
      self.go.clear()

  def play(self):
    """ Resume playing the audio. """
    with self.lock:
      self.go.set()
| gpl-3.0 |
Answeror/aip | aip/imfs/cascade.py | 1 | 1705 | from .base import NameMixin
def load_ext(name, bases):
    """Load the raw content of *name* through the cascade of *bases*."""
    return need_raw(name, bases, lambda base: base.load(name))
def thumbnail_ext(name, width, height, bases):
    """Produce a *width* x *height* thumbnail of *name* via the cascade."""
    return need_raw(
        name, bases, lambda base: base.thumbnail(name, width, height))
def mtime_ext(name, bases):
    """Fetch the modification time of *name* through the cascade."""
    return need_raw(name, bases, lambda base: base.mtime(name))
def need_raw(name, bases, f):
    """Apply *f* to the first base, falling back through the cascade.

    The first base acts as a cache: if ``f(bases[0])`` fails or yields
    ``None``, the raw content of *name* is pulled from the remaining bases,
    saved (best-effort) into the first base, and *f* is applied again.

    :param name: key identifying the stored object.
    :param bases: non-empty sequence of storage backends.
    :param f: callable taking one base and returning the wanted data.
    """
    assert bases
    if len(bases) == 1:
        return f(bases[0])
    try:
        data = f(bases[0])
        if data is not None:
            return data
    except Exception:
        # Bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
        # only ordinary errors should trigger the fallback.
        pass
    # Pull the raw content from the lower layers (equivalent to the module's
    # load_ext(name, bases[1:]), inlined as the same recursive call).
    data = need_raw(name, bases[1:], lambda base: base.load(name))
    if data is not None:
        try:
            # Best-effort warm-up of the first (cache) layer.
            bases[0].save(name, data)
        except Exception:
            pass
    return f(bases[0])
class Cascade(NameMixin):
    """Chain several storage backends; reads are answered by the first
    layer able to provide the data, writes go to every layer."""

    def __init__(self, *args):
        self.bases = args
        assert self.bases

    def _load(self, name):
        return load_ext(name, self.bases)

    def _save(self, name, data):
        for layer in self.bases:
            layer.save(name, data)

    def _thumbnail(self, name, width, height):
        return thumbnail_ext(name, width, height, self.bases)

    def _has(self, name):
        return any(layer.has(name) for layer in self.bases)

    def _remove(self, name):
        for layer in self.bases:
            layer.remove(name)

    def _mtime(self, name):
        return mtime_ext(name, self.bases)

    def _cache_timeout(self, name):
        for layer in self.bases:
            timeout = layer.cache_timeout(name)
            if timeout is not None:
                return timeout
        return None
| mit |
escapewindow/mozharness | scripts/mobile_l10n.py | 2 | 21172 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""mobile_l10n.py
This currently supports nightly and release single locale repacks for
Android. This also creates nightly updates.
"""
from copy import deepcopy
import os
import re
import subprocess
import sys
try:
import simplejson as json
assert json
except ImportError:
import json
# load modules from parent dir
sys.path.insert(1, os.path.dirname(sys.path[0]))
from mozharness.base.errors import BaseErrorList, MakefileErrorList
from mozharness.base.log import OutputParser
from mozharness.base.transfer import TransferMixin
from mozharness.mozilla.buildbot import BuildbotMixin
from mozharness.mozilla.purge import PurgeMixin
from mozharness.mozilla.release import ReleaseMixin
from mozharness.mozilla.signing import MobileSigningMixin
from mozharness.mozilla.tooltool import TooltoolMixin
from mozharness.base.vcs.vcsbase import MercurialScript
from mozharness.mozilla.l10n.locales import LocalesMixin
from mozharness.mozilla.mock import MockMixin
# MobileSingleLocale {{{1
class MobileSingleLocale(MockMixin, LocalesMixin, ReleaseMixin,
                         MobileSigningMixin, TransferMixin, TooltoolMixin,
                         BuildbotMixin, PurgeMixin, MercurialScript):
    """Single-locale repack script for mobile (Android) builds.

    Drives the clobber/pull/setup/repack/upload/snippet actions declared in
    ``__init__``; command line options below select locales, chunking, and
    release configuration.
    """

    # Extra command line options merged into the mozharness option parser.
    config_options = [[
        ['--locale', ],
        {"action": "extend",
         "dest": "locales",
         "type": "string",
         "help": "Specify the locale(s) to sign and update"
         }
    ], [
        ['--locales-file', ],
        {"action": "store",
         "dest": "locales_file",
         "type": "string",
         "help": "Specify a file to determine which locales to sign and update"
         }
    ], [
        ['--tag-override', ],
        {"action": "store",
         "dest": "tag_override",
         "type": "string",
         "help": "Override the tags set for all repos"
         }
    ], [
        ['--user-repo-override', ],
        {"action": "store",
         "dest": "user_repo_override",
         "type": "string",
         "help": "Override the user repo path for all repos"
         }
    ], [
        ['--release-config-file', ],
        {"action": "store",
         "dest": "release_config_file",
         "type": "string",
         "help": "Specify the release config file to use"
         }
    ], [
        ['--key-alias', ],
        {"action": "store",
         "dest": "key_alias",
         "type": "choice",
         "default": "nightly",
         "choices": ["nightly", "release"],
         "help": "Specify the signing key alias"
         }
    ], [
        ['--this-chunk', ],
        {"action": "store",
         "dest": "this_locale_chunk",
         "type": "int",
         "help": "Specify which chunk of locales to run"
         }
    ], [
        ['--total-chunks', ],
        {"action": "store",
         "dest": "total_locale_chunks",
         "type": "int",
         "help": "Specify the total number of chunks of locales"
         }
    ]]
    def __init__(self, require_config_file=True):
        """Declare the script's actions and initialize per-run caches."""
        LocalesMixin.__init__(self)
        MercurialScript.__init__(
            self,
            config_options=self.config_options,
            all_actions=[
                "clobber",
                "pull",
                "list-locales",
                "setup",
                "repack",
                "upload-repacks",
                "create-nightly-snippets",
                "upload-nightly-snippets",
                "summary",
            ],
            require_config_file=require_config_file
        )
        # Lazily-populated caches; each query_* helper fills its attribute
        # on first use (most are only valid after the setup action).
        self.base_package_name = None
        self.buildid = None
        self.make_ident_output = None
        self.repack_env = None
        self.revision = None
        self.upload_env = None
        self.version = None
        # locale -> uploaded package url, filled by upload_repacks.
        self.upload_urls = {}
        # locale -> "Success"/"Failed", reported as a buildbot property.
        self.locales_property = {}
    # Helper methods {{{2
    def query_repack_env(self):
        """Build (and cache) the environment used for configure/repack.

        Substitutes version/buildnum from the release config when one is in
        use, and enables jar signing when the signing servers are exported.
        """
        if self.repack_env:
            return self.repack_env
        c = self.config
        replace_dict = {}
        if c.get('release_config_file'):
            rc = self.query_release_config()
            replace_dict = {
                'version': rc['version'],
                'buildnum': rc['buildnum']
            }
        repack_env = self.query_env(partial_env=c.get("repack_env"),
                                    replace_dict=replace_dict)
        if c.get('base_en_us_binary_url') and c.get('release_config_file'):
            rc = self.query_release_config()
            repack_env['EN_US_BINARY_URL'] = c['base_en_us_binary_url'] % replace_dict
        if 'MOZ_SIGNING_SERVERS' in os.environ:
            # Signing only happens when the master exports the servers.
            repack_env['MOZ_SIGN_CMD'] = subprocess.list2cmdline(self.query_moz_sign_cmd(formats='jar'))
        self.repack_env = repack_env
        return self.repack_env
def query_upload_env(self):
if self.upload_env:
return self.upload_env
c = self.config
buildid = self.query_buildid()
version = self.query_version()
upload_env = self.query_env(partial_env=c.get("upload_env"),
replace_dict={'buildid': buildid,
'version': version})
if 'MOZ_SIGNING_SERVERS' in os.environ:
upload_env['MOZ_SIGN_CMD'] = subprocess.list2cmdline(self.query_moz_sign_cmd())
self.upload_env = upload_env
return self.upload_env
    def _query_make_ident_output(self):
        """Get |make ident| output from the objdir.

        Only valid after setup is run. The raw output is cached and also fed
        through an OutputParser so Makefile errors are logged.
        """
        if self.make_ident_output:
            return self.make_ident_output
        env = self.query_repack_env()
        dirs = self.query_abs_dirs()
        output = self.get_output_from_command_m(["make", "ident"],
                                                cwd=dirs['abs_locales_dir'],
                                                env=env,
                                                silent=True,
                                                halt_on_failure=True)
        # Re-scan the captured output for Makefile errors (silent=True above
        # means they were not logged while the command ran).
        parser = OutputParser(config=self.config, log_obj=self.log_obj,
                              error_list=MakefileErrorList)
        parser.add_lines(output)
        self.make_ident_output = output
        return output
def query_buildid(self):
"""Get buildid from the objdir.
Only valid after setup is run.
"""
if self.buildid:
return self.buildid
r = re.compile("buildid (\d+)")
output = self._query_make_ident_output()
for line in output.splitlines():
m = r.match(line)
if m:
self.buildid = m.groups()[0]
return self.buildid
def query_revision(self):
"""Get revision from the objdir.
Only valid after setup is run.
"""
if self.revision:
return self.revision
r = re.compile(r"gecko_revision ([0-9a-f]{12}\+?)")
output = self._query_make_ident_output()
for line in output.splitlines():
m = r.match(line)
if m:
self.revision = m.groups()[0]
return self.revision
    def _query_make_variable(self, variable, make_args=None):
        """Return the value of a make variable via |make echo-variable-X|.

        ``make_args`` is a list of extra arguments (e.g. AB_CD=...).
        """
        make = self.query_exe('make')
        env = self.query_repack_env()
        dirs = self.query_abs_dirs()
        if make_args is None:
            make_args = []
        # TODO error checking
        output = self.get_output_from_command_m(
            [make, "echo-variable-%s" % variable] + make_args,
            cwd=dirs['abs_locales_dir'], silent=True,
            env=env
        )
        # Surface any Makefile errors hidden by silent=True.
        parser = OutputParser(config=self.config, log_obj=self.log_obj,
                              error_list=MakefileErrorList)
        parser.add_lines(output)
        return output.strip()
def query_base_package_name(self):
"""Get the package name from the objdir.
Only valid after setup is run.
"""
if self.base_package_name:
return self.base_package_name
self.base_package_name = self._query_make_variable(
"PACKAGE",
make_args=['AB_CD=%(locale)s']
)
return self.base_package_name
    def query_version(self):
        """Get the app version, from the release config if present,
        otherwise from MOZ_APP_VERSION in the objdir.

        Only valid after setup is run.
        """
        if self.version:
            return self.version
        c = self.config
        if c.get('release_config_file'):
            rc = self.query_release_config()
            self.version = rc['version']
        else:
            self.version = self._query_make_variable("MOZ_APP_VERSION")
        return self.version
def query_upload_url(self, locale):
if locale in self.upload_urls:
return self.upload_urls[locale]
if 'snippet_base_url' in self.config:
return self.config['snippet_base_url'] % {'locale': locale}
self.error("Can't determine the upload url for %s!" % locale)
self.error("You either need to run --upload-repacks before --create-nightly-snippets, or specify the 'snippet_base_url' in self.config!")
def add_failure(self, locale, message, **kwargs):
self.locales_property[locale] = "Failed"
prop_key = "%s_failure" % locale
prop_value = self.query_buildbot_property(prop_key)
if prop_value:
prop_value = "%s %s" % (prop_value, message)
else:
prop_value = message
self.set_buildbot_property(prop_key, prop_value, write_to_file=True)
MercurialScript.add_failure(self, locale, message=message, **kwargs)
    def summary(self):
        """Emit the standard summary, plus a per-locale status property."""
        MercurialScript.summary(self)
        # TODO we probably want to make this configurable on/off
        # Locales without a recorded failure default to "Success".
        locales = self.query_locales()
        for locale in locales:
            self.locales_property.setdefault(locale, "Success")
        self.set_buildbot_property("locales", json.dumps(self.locales_property), write_to_file=True)
    # Actions {{{2
    def clobber(self):
        """Clobber action: always remove the objdir before building."""
        self.read_buildbot_config()
        dirs = self.query_abs_dirs()
        c = self.config
        objdir = os.path.join(dirs['abs_work_dir'], c['mozilla_dir'],
                              c['objdir'])
        super(MobileSingleLocale, self).clobber(always_clobber_dirs=[objdir])
    def pull(self):
        """Pull action: check out the configured repos and locale sources,
        honoring any user repo / tag overrides."""
        c = self.config
        dirs = self.query_abs_dirs()
        repos = []
        replace_dict = {}
        if c.get("user_repo_override"):
            replace_dict['user_repo_override'] = c['user_repo_override']
            # deepcopy() needed because of self.config lock bug :(
            for repo_dict in deepcopy(c['repos']):
                repo_dict['repo'] = repo_dict['repo'] % replace_dict
                repos.append(repo_dict)
        else:
            repos = c['repos']
        self.vcs_checkout_repos(repos, parent_dir=dirs['abs_work_dir'],
                                tag_override=c.get('tag_override'))
        self.pull_locale_source()
    # list_locales() is defined in LocalesMixin.

    def _setup_configure(self, buildid=None):
        """Run |make -f client.mk configure| plus any configured make_dirs.

        When *buildid* is given, an extra |make export MOZ_BUILD_DATE=...|
        pins the build date in each make_dir.
        """
        c = self.config
        dirs = self.query_abs_dirs()
        env = self.query_repack_env()
        make = self.query_exe("make")
        if self.run_command_m([make, "-f", "client.mk", "configure"],
                              cwd=dirs['abs_mozilla_dir'],
                              env=env,
                              error_list=MakefileErrorList):
            self.fatal("Configure failed!")
        for make_dir in c.get('make_dirs', []):
            self.run_command_m([make],
                               cwd=os.path.join(dirs['abs_objdir'], make_dir),
                               env=env,
                               error_list=MakefileErrorList,
                               halt_on_failure=True)
            if buildid:
                self.run_command_m([make, 'export',
                                    'MOZ_BUILD_DATE=%s' % str(buildid)],
                                   cwd=os.path.join(dirs['abs_objdir'], make_dir),
                                   env=env,
                                   error_list=MakefileErrorList)
    def setup(self):
        """Setup action: install the mozconfig, fetch tooltool deps,
        configure, download/unpack the en-US build, then update the tree to
        the revision that en-US build was made from and re-configure with
        its buildid."""
        c = self.config
        dirs = self.query_abs_dirs()
        mozconfig_path = os.path.join(dirs['abs_mozilla_dir'], '.mozconfig')
        self.copyfile(os.path.join(dirs['abs_work_dir'], c['mozconfig']),
                      mozconfig_path)
        # TODO stop using cat
        cat = self.query_exe("cat")
        hg = self.query_exe("hg")
        make = self.query_exe("make")
        # Log the mozconfig contents for debugging.
        self.run_command_m([cat, mozconfig_path])
        env = self.query_repack_env()
        if self.config.get("tooltool_config"):
            self.tooltool_fetch(
                self.config['tooltool_config']['manifest'],
                bootstrap_cmd=self.config['tooltool_config']['bootstrap_cmd'],
                output_dir=self.config['tooltool_config']['output_dir'] % self.query_abs_dirs(),
            )
        self._setup_configure()
        self.run_command_m([make, "wget-en-US"],
                           cwd=dirs['abs_locales_dir'],
                           env=env,
                           error_list=MakefileErrorList,
                           halt_on_failure=True)
        self.run_command_m([make, "unpack"],
                           cwd=dirs['abs_locales_dir'],
                           env=env,
                           error_list=MakefileErrorList,
                           halt_on_failure=True)
        # Match the local tree to the revision of the downloaded en-US build.
        revision = self.query_revision()
        if not revision:
            self.fatal("Can't determine revision!")
        # TODO do this through VCSMixin instead of hardcoding hg
        self.run_command_m([hg, "update", "-r", revision],
                           cwd=dirs["abs_mozilla_dir"],
                           env=env,
                           error_list=BaseErrorList,
                           halt_on_failure=True)
        self.set_buildbot_property('revision', revision, write_to_file=True)
        # Configure again since the hg update may have invalidated it.
        buildid = self.query_buildid()
        self._setup_configure(buildid=buildid)
    def repack(self):
        """Repack action: for each locale, run compare-locales, build the
        localized installer, and verify its Android signature; failures are
        recorded per-locale and do not stop the loop."""
        # TODO per-locale logs and reporting.
        c = self.config
        dirs = self.query_abs_dirs()
        locales = self.query_locales()
        make = self.query_exe("make")
        repack_env = self.query_repack_env()
        base_package_name = self.query_base_package_name()
        base_package_dir = os.path.join(dirs['abs_objdir'], 'dist')
        success_count = total_count = 0
        for locale in locales:
            total_count += 1
            self.enable_mock()
            result = self.run_compare_locales(locale)
            self.disable_mock()
            if result:
                self.add_failure(locale, message="%s failed in compare-locales!" % locale)
                continue
            if self.run_command_m([make, "installers-%s" % locale],
                                  cwd=dirs['abs_locales_dir'],
                                  env=repack_env,
                                  error_list=MakefileErrorList,
                                  halt_on_failure=False):
                self.add_failure(locale, message="%s failed in make installers-%s!" % (locale, locale))
                continue
            signed_path = os.path.join(base_package_dir,
                                       base_package_name % {'locale': locale})
            # We need to wrap what this function does with mock, since
            # MobileSigningMixin doesn't know about mock
            self.enable_mock()
            status = self.verify_android_signature(
                signed_path,
                script=c['signature_verification_script'],
                env=repack_env,
                key_alias=c['key_alias'],
            )
            self.disable_mock()
            if status:
                self.add_failure(locale, message="Errors verifying %s binary!" % locale)
                # No need to rm because upload is per-locale
                continue
            success_count += 1
        self.summarize_success_count(success_count, total_count,
                                     message="Repacked %d of %d binaries successfully.")
def upload_repacks(self):
c = self.config
dirs = self.query_abs_dirs()
locales = self.query_locales()
make = self.query_exe("make")
base_package_name = self.query_base_package_name()
version = self.query_version()
upload_env = self.query_upload_env()
success_count = total_count = 0
buildnum = None
if c.get('release_config_file'):
rc = self.query_release_config()
buildnum = rc['buildnum']
for locale in locales:
if self.query_failure(locale):
self.warning("Skipping previously failed locale %s." % locale)
continue
total_count += 1
if c.get('base_post_upload_cmd'):
upload_env['POST_UPLOAD_CMD'] = c['base_post_upload_cmd'] % {'version': version, 'locale': locale, 'buildnum': str(buildnum)}
output = self.get_output_from_command_m(
# Ugly hack to avoid |make upload| stderr from showing up
# as get_output_from_command errors
"%s upload AB_CD=%s 2>&1" % (make, locale),
cwd=dirs['abs_locales_dir'],
env=upload_env,
silent=True
)
parser = OutputParser(config=self.config, log_obj=self.log_obj,
error_list=MakefileErrorList)
parser.add_lines(output)
if parser.num_errors:
self.add_failure(locale, message="%s failed in make upload!" % (locale))
continue
package_name = base_package_name % {'locale': locale}
r = re.compile("(http.*%s)" % package_name)
success = False
for line in output.splitlines():
m = r.match(line)
if m:
self.upload_urls[locale] = m.groups()[0]
self.info("Found upload url %s" % self.upload_urls[locale])
success = True
if not success:
self.add_failure(locale, message="Failed to detect %s url in make upload!" % (locale))
print output
continue
success_count += 1
self.summarize_success_count(success_count, total_count,
message="Uploaded %d of %d binaries successfully.")
    def create_nightly_snippets(self):
        """Snippet action: write a complete-update AUS snippet per locale
        under work_dir/update/, using the urls recorded by upload_repacks
        (or snippet_base_url)."""
        c = self.config
        dirs = self.query_abs_dirs()
        locales = self.query_locales()
        base_package_name = self.query_base_package_name()
        buildid = self.query_buildid()
        version = self.query_version()
        binary_dir = os.path.join(dirs['abs_objdir'], 'dist')
        success_count = total_count = 0
        replace_dict = {
            'buildid': buildid,
            'build_target': c['build_target'],
        }
        for locale in locales:
            total_count += 1
            replace_dict['locale'] = locale
            aus_base_dir = c['aus_base_dir'] % replace_dict
            aus_abs_dir = os.path.join(dirs['abs_work_dir'], 'update',
                                       aus_base_dir)
            binary_path = os.path.join(binary_dir,
                                       base_package_name % {'locale': locale})
            url = self.query_upload_url(locale)
            if not url:
                self.add_failure(locale, "Can't create a snippet for %s without an upload url." % locale)
                continue
            if not self.create_complete_snippet(binary_path, version, buildid, url, aus_abs_dir):
                self.add_failure(locale, message="Errors creating snippet for %s! Removing snippet directory." % locale)
                self.rmtree(aus_abs_dir)
                continue
            # An empty partial.txt marks "no partial update" for this dir.
            self.run_command_m(["touch", os.path.join(aus_abs_dir, "partial.txt")])
            success_count += 1
        self.summarize_success_count(success_count, total_count,
                                     message="Created %d of %d snippets successfully.")
def upload_nightly_snippets(self):
c = self.config
dirs = self.query_abs_dirs()
update_dir = os.path.join(dirs['abs_work_dir'], 'update')
if not os.path.exists(update_dir):
self.error("No such directory %s! Skipping..." % update_dir)
return
if self.rsync_upload_directory(update_dir, c['aus_ssh_key'],
c['aus_user'], c['aus_server'],
c['aus_upload_base_dir']):
self.return_code += 1
# main {{{1
if __name__ == '__main__':
    # Parse options, run the selected actions, and exit with their status.
    single_locale = MobileSingleLocale()
    single_locale.run_and_exit()
| mpl-2.0 |
HusseinReda/Valduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
# Number of most-frequent character orders tracked; also the dimension of the
# precedence matrix indexed in SingleByteCharSetProber.feed().
SAMPLE_SIZE = 64
# Minimum number of observed sequences before feed() may shortcut detection.
SB_ENOUGH_REL_THRESHOLD = 1024
# Confidence above this -> eFoundIt; below the negative one -> eNotMe.
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Orders at or above this value are symbols and are not counted as characters.
SYMBOL_CAT_ORDER = 250
# Number of sequence-likelihood categories in the precedence matrix values.
NUMBER_OF_SEQ_CAT = 4
# The highest category: a "very likely" sequence for this model.
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Probes for a single-byte charset using a language model.

    The model maps each byte to a frequency "order" and scores consecutive
    pairs of frequent characters against a precedence matrix; confidence is
    the ratio of positive pairs, scaled by the model's typical ratio.
    """

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        # char order of last character (255 = no previous frequent char)
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Defer to the name prober (e.g. for Hebrew variants) when present.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Consume *aBuf* and return the detection state."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Score the (previous, current) pair when both are frequent.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    # NOTE(review): the adjacent string literals below
                    # concatenate without a space ("awinner") — debug output
                    # only, but worth fixing upstream.
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        # Ratio of "very likely" pairs over all observed pairs, normalized by
        # the model's typical positive ratio and the frequent-char density.
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
            if r >= 1.0:
                r = 0.99
        return r
| lgpl-2.1 |
sugartom/tensorflow-alien | tensorflow/python/util/example_parser_configuration_test.py | 157 | 2775 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExampleParserConfiguration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.example import example_parser_configuration_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util.example_parser_configuration import extract_example_parser_configuration
BASIC_PROTO = """
feature_map {
key: "x"
value {
fixed_len_feature {
dtype: DT_FLOAT
shape {
dim {
size: 1
}
}
default_value {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 33.0
}
values_output_tensor_name: "ParseExample/ParseExample:3"
}
}
}
feature_map {
key: "y"
value {
var_len_feature {
dtype: DT_STRING
values_output_tensor_name: "ParseExample/ParseExample:1"
indices_output_tensor_name: "ParseExample/ParseExample:0"
shapes_output_tensor_name: "ParseExample/ParseExample:2"
}
}
}
"""
class ExampleParserConfigurationTest(test.TestCase):
  """Checks extract_example_parser_configuration against a golden proto."""

  def testBasic(self):
    # Parse the expected configuration from its text-format form.
    golden_config = example_parser_configuration_pb2.ExampleParserConfiguration(
    )
    text_format.Parse(BASIC_PROTO, golden_config)
    with session.Session() as sess:
      examples = array_ops.placeholder(dtypes.string, shape=[1])
      feature_to_type = {
          'x': parsing_ops.FixedLenFeature([1], dtypes.float32, 33.0),
          'y': parsing_ops.VarLenFeature(dtypes.string)
      }
      # Build the op in the graph; only its configuration is inspected.
      _ = parsing_ops.parse_example(examples, feature_to_type)
      parse_example_op = sess.graph.get_operation_by_name(
          'ParseExample/ParseExample')
      config = extract_example_parser_configuration(parse_example_op, sess)
      self.assertProtoEquals(golden_config, config)
if __name__ == '__main__':
  # Run the test suite via the TensorFlow test runner.
  test.main()
| apache-2.0 |
JustRamon/SpeechController | SC.py | 1 | 1113 | #!/usr/bin/env python3
import speech_recognition as sr
import ksr10
import time
arm = ksr10.ksr10_class()
while 1:
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
try:
rn = r.recognize_google(audio)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
if rn == "up":
arm.move("elbow","up")
time.sleep(1.5)
arm.stop()
if rn == "down":
arm.move("elbow","down")
time.sleep(1.5)
arm.stop()
if rn == "light":
arm.lights()
if rn == "grip":
with open ("Save.txt", "r") as file_:
oc = file_.read()
if oc == "1":
arm.move("grip","close")
time.sleep(1.6)
arm.stop()
with open ("Save.txt", "w") as file_:
file_.write("0")
elif oc == "0":
arm.move("grip","open")
time.sleep(1.4)
arm.stop()
with open ("Save.txt", "w") as file_:
file_.write("1")
else:
print "Error, file contains: " + oc
if rn == "stop":
break
| gpl-2.0 |
alqfahad/odoo | addons/hw_posbox_upgrade/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aleida/django | tests/regressiontests/string_lookup/tests.py | 65 | 2413 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from .models import Foo, Whiz, Bar, Article, Base, Child
class StringLookupTests(TestCase):

    def test_string_form_referencing(self):
        """
        Regression test for #1661 and #1662

        String-form referencing of models must work both as a forward and as
        a backward reference, on all RelatedField types.
        """
        foo_one = Foo(name="Foo1")
        foo_one.save()
        foo_two = Foo(name="Foo2")
        foo_two.save()

        whiz = Whiz(name="Whiz1")
        whiz.save()

        bar = Bar(name="Bar1", normal=foo_one, fwd=whiz, back=foo_two)
        bar.save()

        self.assertEqual(bar.normal, foo_one)
        self.assertEqual(bar.fwd, whiz)
        self.assertEqual(bar.back, foo_two)

        parent = Base(name="Base1")
        parent.save()
        child = Child(name="Child1", parent=parent)
        child.save()
        self.assertEqual(child.parent, parent)

    def test_unicode_chars_in_queries(self):
        """
        Regression tests for #3937

        Unicode characters must be usable in queries.
        If these tests fail on MySQL, it's a problem with the test setup.
        A properly configured UTF-8 database can handle this.
        """
        bjorn = Foo(name='Bjorn', friend='François')
        bjorn.save()
        # Lookup via the unicode code point...
        self.assertEqual(Foo.objects.get(friend__contains='\xe7'), bjorn)
        # ...and via the equivalent UTF-8 byte string.
        self.assertEqual(Foo.objects.get(friend__contains=b'\xc3\xa7'), bjorn)

    def test_queries_on_textfields(self):
        """
        Regression tests for #5087

        Queries on TextFields must work.
        """
        article = Article(name='Test', text='The quick brown fox jumps over the lazy dog.')
        article.save()
        self.assertEqual(
            Article.objects.get(text__exact='The quick brown fox jumps over the lazy dog.'),
            article)
        self.assertEqual(
            Article.objects.get(text__contains='quick brown fox'), article)

    def test_ipaddress_on_postgresql(self):
        """
        Regression test for #708

        "like" queries on IP address fields require casting to text (on PostgreSQL).
        """
        article = Article(name='IP test', text='The body', submitted_from='192.0.2.100')
        article.save()
        self.assertEqual(repr(Article.objects.filter(submitted_from__contains='192.0.2')),
                         repr([article]))
| bsd-3-clause |
allotria/intellij-community | python/helpers/py3only/docutils/statemachine.py | 44 | 57608 | # $Id: statemachine.py 7464 2012-06-25 13:16:03Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import re
import sys
import unicodedata
from docutils import utils
from docutils.utils.error_reporting import ErrorOutput
class StateMachine:

    """
    A finite state machine for text filters using regular expressions.

    The input is provided in the form of a list of one-line strings (no
    newlines). States are subclasses of the `State` class. Transitions consist
    of regular expression patterns and transition methods, and are defined in
    each state.

    The state machine is started with the `run()` method, which returns the
    results of processing in a list.
    """

    def __init__(self, state_classes, initial_state, debug=False):
        """
        Initialize a `StateMachine` object; add state objects.

        Parameters:

        - `state_classes`: a list of `State` (sub)classes.
        - `initial_state`: a string, the class name of the initial state.
        - `debug`: a boolean; produce verbose output if true (nonzero).
        """

        self.input_lines = None
        """`StringList` of input lines (without newlines).
        Filled by `self.run()`."""

        self.input_offset = 0
        """Offset of `self.input_lines` from the beginning of the file."""

        self.line = None
        """Current input line."""

        self.line_offset = -1
        """Current input line offset from beginning of `self.input_lines`."""

        self.debug = debug
        """Debugging mode on/off."""

        self.initial_state = initial_state
        """The name of the initial state (key to `self.states`)."""

        self.current_state = initial_state
        """The name of the current state (key to `self.states`)."""

        self.states = {}
        """Mapping of {state_name: State_object}."""

        self.add_states(state_classes)

        self.observers = []
        """List of bound methods or functions to call whenever the current
        line changes.  Observers are called with one argument, ``self``.
        Cleared at the end of `run()`."""

        self._stderr = ErrorOutput()
        """Wrapper around sys.stderr catching en-/decoding errors"""

    def unlink(self):
        """Remove circular references to objects no longer required."""
        for state in list(self.states.values()):
            state.unlink()
        self.states = None

    def run(self, input_lines, input_offset=0, context=None,
            input_source=None, initial_state=None):
        """
        Run the state machine on `input_lines`. Return results (a list).

        Reset `self.line_offset` and `self.current_state`. Run the
        beginning-of-file transition. Input one line at a time and check for a
        matching transition. If a match is found, call the transition method
        and possibly change the state. Store the context returned by the
        transition method to be passed on to the next transition matched.
        Accumulate the results returned by the transition methods in a list.
        Run the end-of-file transition. Finally, return the accumulated
        results.

        Parameters:

        - `input_lines`: a list of strings without newlines, or `StringList`.
        - `input_offset`: the line offset of `input_lines` from the beginning
          of the file.
        - `context`: application-specific storage.
        - `input_source`: name or path of source of `input_lines`.
        - `initial_state`: name of initial state.
        """
        self.runtime_init()
        if isinstance(input_lines, StringList):
            self.input_lines = input_lines
        else:
            self.input_lines = StringList(input_lines, source=input_source)
        self.input_offset = input_offset
        self.line_offset = -1
        self.current_state = initial_state or self.initial_state
        if self.debug:
            print((
                '\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
                % (self.line_offset, '\n| '.join(self.input_lines))), file=self._stderr)
        # `transitions` restricts which transitions check_line() tries;
        # None means "use the state's full transition_order".
        transitions = None
        results = []
        state = self.get_state()
        try:
            if self.debug:
                print('\nStateMachine.run: bof transition', file=self._stderr)
            context, result = state.bof(context)
            results.extend(result)
            while True:
                try:
                    try:
                        self.next_line()
                        if self.debug:
                            source, offset = self.input_lines.info(
                                self.line_offset)
                            print((
                                '\nStateMachine.run: line (source=%r, '
                                'offset=%r):\n| %s'
                                % (source, offset, self.line)), file=self._stderr)
                        context, next_state, result = self.check_line(
                            context, state, transitions)
                    except EOFError:
                        # next_line() signals end-of-input; run the state's
                        # eof transition and stop the main loop.
                        if self.debug:
                            print((
                                '\nStateMachine.run: %s.eof transition'
                                % state.__class__.__name__), file=self._stderr)
                        result = state.eof(context)
                        results.extend(result)
                        break
                    else:
                        results.extend(result)
                except TransitionCorrection as exception:
                    # Retry the same line with a single forced transition.
                    self.previous_line() # back up for another try
                    transitions = (exception.args[0],)
                    if self.debug:
                        print((
                            '\nStateMachine.run: TransitionCorrection to '
                            'state "%s", transition %s.'
                            % (state.__class__.__name__, transitions[0])), file=self._stderr)
                    continue
                except StateCorrection as exception:
                    # Retry the same line in a different state, optionally
                    # with a single forced transition.
                    self.previous_line() # back up for another try
                    next_state = exception.args[0]
                    if len(exception.args) == 1:
                        transitions = None
                    else:
                        transitions = (exception.args[1],)
                    if self.debug:
                        # NOTE(review): with a 1-argument StateCorrection,
                        # `transitions` is None here, so this debug print
                        # would raise TypeError -- confirm before relying
                        # on debug output for StateCorrection.
                        print((
                            '\nStateMachine.run: StateCorrection to state '
                            '"%s", transition %s.'
                            % (next_state, transitions[0])), file=self._stderr)
                else:
                    transitions = None
                state = self.get_state(next_state)
        except:
            # Report details in debug mode, then always re-raise.
            if self.debug:
                self.error()
            raise
        self.observers = []
        return results

    def get_state(self, next_state=None):
        """
        Return current state object; set it first if `next_state` given.

        Parameter `next_state`: a string, the name of the next state.

        Exception: `UnknownStateError` raised if `next_state` unknown.
        """
        if next_state:
            if self.debug and next_state != self.current_state:
                print((
                    '\nStateMachine.get_state: Changing state from '
                    '"%s" to "%s" (input line %s).'
                    % (self.current_state, next_state,
                       self.abs_line_number())), file=self._stderr)
            self.current_state = next_state
        try:
            return self.states[self.current_state]
        except KeyError:
            raise UnknownStateError(self.current_state)

    def next_line(self, n=1):
        """Load `self.line` with the `n`'th next line and return it.

        Raises EOFError (after setting `self.line` to None) when input is
        exhausted.
        """
        try:
            try:
                self.line_offset += n
                self.line = self.input_lines[self.line_offset]
            except IndexError:
                self.line = None
                raise EOFError
            return self.line
        finally:
            # Observers are notified even when EOFError propagates.
            self.notify_observers()

    def is_next_line_blank(self):
        """Return 1 if the next line is blank or non-existant."""
        try:
            return not self.input_lines[self.line_offset + 1].strip()
        except IndexError:
            return 1

    def at_eof(self):
        """Return 1 if the input is at or past end-of-file."""
        return self.line_offset >= len(self.input_lines) - 1

    def at_bof(self):
        """Return 1 if the input is at or before beginning-of-file."""
        return self.line_offset <= 0

    def previous_line(self, n=1):
        """Load `self.line` with the `n`'th previous line and return it."""
        self.line_offset -= n
        if self.line_offset < 0:
            self.line = None
        else:
            self.line = self.input_lines[self.line_offset]
        self.notify_observers()
        return self.line

    def goto_line(self, line_offset):
        """Jump to absolute line offset `line_offset`, load and return it.

        Raises EOFError if the offset lies outside the input.
        """
        try:
            try:
                self.line_offset = line_offset - self.input_offset
                self.line = self.input_lines[self.line_offset]
            except IndexError:
                self.line = None
                raise EOFError
            return self.line
        finally:
            self.notify_observers()

    def get_source(self, line_offset):
        """Return source of line at absolute line offset `line_offset`."""
        return self.input_lines.source(line_offset - self.input_offset)

    def abs_line_offset(self):
        """Return line offset of current line, from beginning of file."""
        return self.line_offset + self.input_offset

    def abs_line_number(self):
        """Return line number of current line (counting from 1)."""
        return self.line_offset + self.input_offset + 1

    def get_source_and_line(self, lineno=None):
        """Return (source, line) tuple for current or given line number.

        Looks up the source and line number in the `self.input_lines`
        StringList instance to count for included source files.

        If the optional argument `lineno` is given, convert it from an
        absolute line number to the corresponding (source, line) pair.
        """
        if lineno is None:
            offset = self.line_offset
        else:
            offset = lineno - self.input_offset - 1
        try:
            src, srcoffset = self.input_lines.info(offset)
            srcline = srcoffset + 1
        except (TypeError):
            # line is None if index is "Just past the end"
            # Recurse on the previous line and report one past its number.
            src, srcline = self.get_source_and_line(offset + self.input_offset)
            return src, srcline + 1
        except (IndexError): # `offset` is off the list
            src, srcline = None, None
            # raise AssertionError('cannot find line %d in %s lines' %
            #                      (offset, len(self.input_lines)))
            #                      # list(self.input_lines.lines()))) 
        # assert offset == srcoffset, str(self.input_lines)
        # print "get_source_and_line(%s):" % lineno,
        # print offset + 1, '->', src, srcline
        # print self.input_lines
        return (src, srcline)

    def insert_input(self, input_lines, source):
        # Wrap the inserted block in blank padding lines so adjacent text
        # is not merged with it, then splice the block in between.
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding after '+source,
                                offset=len(input_lines))
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding before '+source,
                                offset=-1)
        self.input_lines.insert(self.line_offset + 2,
                                StringList(input_lines, source))

    def get_text_block(self, flush_left=False):
        """
        Return a contiguous block of text.

        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        try:
            block = self.input_lines.get_text_block(self.line_offset,
                                                    flush_left)
            self.next_line(len(block) - 1)
            return block
        except UnexpectedIndentationError as err:
            block = err.args[0]
            self.next_line(len(block) - 1) # advance to last line of block
            raise

    def check_line(self, context, state, transitions=None):
        """
        Examine one line of input for a transition match & execute its method.

        Parameters:

        - `context`: application-dependent storage.
        - `state`: a `State` object, the current state.
        - `transitions`: an optional ordered list of transition names to try,
          instead of ``state.transition_order``.

        Return the values returned by the transition method:

        - context: possibly modified from the parameter `context`;
        - next state name (`State` subclass name);
        - the result output of the transition, a list.

        When there is no match, ``state.no_match()`` is called and its return
        value is returned.
        """
        if transitions is None:
            transitions = state.transition_order
        state_correction = None
        if self.debug:
            print((
                '\nStateMachine.check_line: state="%s", transitions=%r.'
                % (state.__class__.__name__, transitions)), file=self._stderr)
        # First matching transition wins; order is significant.
        for name in transitions:
            pattern, method, next_state = state.transitions[name]
            match = pattern.match(self.line)
            if match:
                if self.debug:
                    print((
                        '\nStateMachine.check_line: Matched transition '
                        '"%s" in state "%s".'
                        % (name, state.__class__.__name__)), file=self._stderr)
                return method(match, context, next_state)
        else:
            if self.debug:
                print((
                    '\nStateMachine.check_line: No match in state "%s".'
                    % state.__class__.__name__), file=self._stderr)
            return state.no_match(context, transitions)

    def add_state(self, state_class):
        """
        Initialize & add a `state_class` (`State` subclass) object.

        Exception: `DuplicateStateError` raised if `state_class` was already
        added.
        """
        statename = state_class.__name__
        if statename in self.states:
            raise DuplicateStateError(statename)
        self.states[statename] = state_class(self, self.debug)

    def add_states(self, state_classes):
        """
        Add `state_classes` (a list of `State` subclasses).
        """
        for state_class in state_classes:
            self.add_state(state_class)

    def runtime_init(self):
        """
        Initialize `self.states`.
        """
        for state in list(self.states.values()):
            state.runtime_init()

    def error(self):
        """Report error details."""
        type, value, module, line, function = _exception_data()
        print('%s: %s' % (type, value), file=self._stderr)
        print('input line %s' % (self.abs_line_number()), file=self._stderr)
        print(('module %s, line %s, function %s' %
               (module, line, function)), file=self._stderr)

    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes two
        arguments, the source and offset of the current line.
        """
        self.observers.append(observer)

    def detach_observer(self, observer):
        self.observers.remove(observer)

    def notify_observers(self):
        for observer in self.observers:
            try:
                info = self.input_lines.info(self.line_offset)
            except IndexError:
                # Current offset is outside the input; observers still get
                # a (source, offset) pair, with both values None.
                info = (None, None)
            observer(*info)
class State:

    """
    State superclass. Contains a list of transitions, and transition methods.

    Transition methods all have the same signature. They take 3 parameters:

    - An `re` match object. ``match.string`` contains the matched input line,
      ``match.start()`` gives the start index of the match, and
      ``match.end()`` gives the end index.
    - A context object, whose meaning is application-defined (initial value
      ``None``). It can be used to store any information required by the state
      machine, and the returned context is passed on to the next transition
      method unchanged.
    - The name of the next state, a string, taken from the transitions list;
      normally it is returned unchanged, but it may be altered by the
      transition method if necessary.

    Transition methods all return a 3-tuple:

    - A context object, as (potentially) modified by the transition method.
    - The next state name (a return value of ``None`` means no state change).
    - The processing result, a list, which is accumulated by the state
      machine.

    Transition methods may raise an `EOFError` to cut processing short.

    There are two implicit transitions, and corresponding transition methods
    are defined: `bof()` handles the beginning-of-file, and `eof()` handles
    the end-of-file. These methods have non-standard signatures and return
    values. `bof()` returns the initial context and results, and may be used
    to return a header string, or do any other processing needed. `eof()`
    should handle any remaining context and wrap things up; it returns the
    final processing result.

    Typical applications need only subclass `State` (or a subclass), set the
    `patterns` and `initial_transitions` class attributes, and provide
    corresponding transition methods. The default object initialization will
    take care of constructing the list of transitions.
    """

    patterns = None
    """
    {Name: pattern} mapping, used by `make_transition()`. Each pattern may
    be a string or a compiled `re` pattern. Override in subclasses.
    """

    initial_transitions = None
    """
    A list of transitions to initialize when a `State` is instantiated.
    Each entry is either a transition name string, or a (transition name, next
    state name) pair. See `make_transitions()`. Override in subclasses.
    """

    nested_sm = None
    """
    The `StateMachine` class for handling nested processing.

    If left as ``None``, `nested_sm` defaults to the class of the state's
    controlling state machine. Override it in subclasses to avoid the default.
    """

    nested_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `nested_sm` constructor.

    Two keys must have entries in the dictionary:

    - Key 'state_classes' must be set to a list of `State` classes.
    - Key 'initial_state' must be set to the name of the initial state class.

    If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
    class of the current state, and 'initial_state' defaults to the name of
    the class of the current state. Override in subclasses to avoid the
    defaults.
    """

    def __init__(self, state_machine, debug=False):
        """
        Initialize a `State` object; make & add initial transitions.

        Parameters:

        - `statemachine`: the controlling `StateMachine` object.
        - `debug`: a boolean; produce verbose output if true.
        """

        self.transition_order = []
        """A list of transition names in search order."""

        self.transitions = {}
        """
        A mapping of transition names to 3-tuples containing
        (compiled_pattern, transition_method, next_state_name). Initialized as
        an instance attribute dynamically (instead of as a class attribute)
        because it may make forward references to patterns and methods in this
        or other classes.
        """

        self.add_initial_transitions()

        self.state_machine = state_machine
        """A reference to the controlling `StateMachine` object."""

        self.debug = debug
        """Debugging mode on/off."""

        if self.nested_sm is None:
            self.nested_sm = self.state_machine.__class__
        if self.nested_sm_kwargs is None:
            self.nested_sm_kwargs = {'state_classes': [self.__class__],
                                     'initial_state': self.__class__.__name__}

    def runtime_init(self):
        """
        Initialize this `State` before running the state machine; called from
        `self.state_machine.run()`.
        """
        pass

    def unlink(self):
        """Remove circular references to objects no longer required."""
        self.state_machine = None

    def add_initial_transitions(self):
        """Make and add transitions listed in `self.initial_transitions`."""
        if self.initial_transitions:
            names, transitions = self.make_transitions(
                self.initial_transitions)
            self.add_transitions(names, transitions)

    def add_transitions(self, names, transitions):
        """
        Add a list of transitions to the start of the transition list.

        Parameters:

        - `names`: a list of transition names.
        - `transitions`: a mapping of names to transition tuples.

        Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
        """
        for name in names:
            if name in self.transitions:
                raise DuplicateTransitionError(name)
            if name not in transitions:
                raise UnknownTransitionError(name)
        # Prepend, so the new names are searched before existing ones.
        self.transition_order[:0] = names
        self.transitions.update(transitions)

    def add_transition(self, name, transition):
        """
        Add a transition to the start of the transition list.

        Parameter `transition`: a ready-made transition 3-tuple.

        Exception: `DuplicateTransitionError`.
        """
        if name in self.transitions:
            raise DuplicateTransitionError(name)
        self.transition_order[:0] = [name]
        self.transitions[name] = transition

    def remove_transition(self, name):
        """
        Remove a transition by `name`.

        Exception: `UnknownTransitionError`.
        """
        try:
            del self.transitions[name]
            self.transition_order.remove(name)
        except (KeyError, ValueError):
            # `del` raises KeyError for an unknown name; `list.remove()`
            # raises ValueError.  (Previously a bare ``except:``, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            raise UnknownTransitionError(name)

    def make_transition(self, name, next_state=None):
        """
        Make & return a transition tuple based on `name`.

        This is a convenience function to simplify transition creation.

        Parameters:

        - `name`: a string, the name of the transition pattern & method. This
          `State` object must have a method called '`name`', and a dictionary
          `self.patterns` containing a key '`name`'.
        - `next_state`: a string, the name of the next `State` object for this
          transition. A value of ``None`` (or absent) implies no state change
          (i.e., continue with the same state).

        Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
        """
        if next_state is None:
            next_state = self.__class__.__name__
        try:
            pattern = self.patterns[name]
            if not hasattr(pattern, 'match'):
                # A plain string pattern: compile it lazily, in place.
                pattern = re.compile(pattern)
        except KeyError:
            raise TransitionPatternNotFound(
                '%s.patterns[%r]' % (self.__class__.__name__, name))
        try:
            method = getattr(self, name)
        except AttributeError:
            raise TransitionMethodNotFound(
                '%s.%s' % (self.__class__.__name__, name))
        return (pattern, method, next_state)

    def make_transitions(self, name_list):
        """
        Return a list of transition names and a transition mapping.

        Parameter `name_list`: a list, where each entry is either a transition
        name string, or a 1- or 2-tuple (transition name, optional next state
        name).
        """
        names = []
        transitions = {}
        for namestate in name_list:
            if isinstance(namestate, str):
                # Bare name: no explicit next state.
                # (Replaces the dated ``type(namestate) is type('')`` check.)
                transitions[namestate] = self.make_transition(namestate)
                names.append(namestate)
            else:
                transitions[namestate[0]] = self.make_transition(*namestate)
                names.append(namestate[0])
        return names, transitions

    def no_match(self, context, transitions):
        """
        Called when there is no match from `StateMachine.check_line()`.

        Return the same values returned by transition methods:

        - context: unchanged;
        - next state name: ``None``;
        - empty result list.

        Override in subclasses to catch this event.
        """
        return context, None, []

    def bof(self, context):
        """
        Handle beginning-of-file. Return unchanged `context`, empty result.

        Override in subclasses.

        Parameter `context`: application-defined storage.
        """
        return context, []

    def eof(self, context):
        """
        Handle end-of-file. Return empty result.

        Override in subclasses.

        Parameter `context`: application-defined storage.
        """
        return []

    def nop(self, match, context, next_state):
        """
        A "do nothing" transition method.

        Return unchanged `context` & `next_state`, empty result. Useful for
        simple state changes (actionless transitions).
        """
        return context, next_state, []
class StateMachineWS(StateMachine):

    """
    `StateMachine` subclass specialized for whitespace recognition.

    There are three methods provided for extracting indented text blocks:

    - `get_indented()`: use when the indent is unknown.
    - `get_known_indented()`: use when the indent is known for all lines.
    - `get_first_known_indented()`: use when only the first line's indent is
      known.
    """

    def get_indented(self, until_blank=False, strip_indent=True):
        """
        Return a block of indented lines of text, and info.

        Extract an indented block where the indent is unknown for all lines.

        :Parameters:
            - `until_blank`: Stop collecting at the first blank line if true.
            - `strip_indent`: Strip common leading indent if true (default).

        :Return:
            - the indented block (a list of lines of text),
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
            self.line_offset, until_blank, strip_indent)
        if indented:
            self.next_line(len(indented) - 1) # advance to last indented line
        # Drop leading blank lines, adjusting the reported offset to the
        # first real line of the block.
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, indent, offset, blank_finish

    def get_known_indented(self, indent, until_blank=False, strip_indent=True):
        """
        Return an indented block and info.

        Extract an indented block where the indent is known for all lines.
        Starting with the current line, extract the entire text block with at
        least `indent` indentation (which must be whitespace, except for the
        first line).

        :Parameters:
            - `indent`: The number of indent columns/characters.
            - `until_blank`: Stop collecting at the first blank line if true.
            - `strip_indent`: Strip `indent` characters of indentation if true
              (default).

        :Return:
            - the indented block,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
            self.line_offset, until_blank, strip_indent,
            block_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        # Drop leading blank lines (always, unlike get_first_known_indented
        # where this is optional via `strip_top`).
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, offset, blank_finish

    def get_first_known_indented(self, indent, until_blank=False,
                                 strip_indent=True, strip_top=True):
        """
        Return an indented block and info.

        Extract an indented block where the indent is known for the first line
        and unknown for all other lines.

        :Parameters:
            - `indent`: The first line's indent (# of columns/characters).
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip `indent` characters of indentation if true
              (1, default).
            - `strip_top`: Strip blank lines from the beginning of the block.

        :Return:
            - the indented block,
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
            self.line_offset, until_blank, strip_indent,
            first_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        if strip_top:
            while indented and not indented[0].strip():
                indented.trim_start()
                offset += 1
        return indented, indent, offset, blank_finish
class StateWS(State):

    """
    State superclass specialized for whitespace (blank lines & indents).

    Use this class with `StateMachineWS`.  The transitions 'blank' (for blank
    lines) and 'indent' (for indented text blocks) are added automatically,
    before any other transitions.  The transition method `blank()` handles
    blank lines and `indent()` handles nested indented blocks.  Indented
    blocks trigger a new state machine to be created by `indent()` and run.
    The class of the state machine to be created is in `indent_sm`, and the
    constructor keyword arguments are in the dictionary `indent_sm_kwargs`.

    The methods `known_indent()` and `first_known_indent()` are provided for
    indented blocks where the indent (all lines' and first line's only,
    respectively) is known to the transition method, along with the attributes
    `known_indent_sm` and `known_indent_sm_kwargs`.  Neither transition method
    is triggered automatically.
    """

    indent_sm = None
    """
    The `StateMachine` class handling indented text blocks.

    If left as ``None``, `indent_sm` defaults to the value of
    `State.nested_sm`.  Override it in subclasses to avoid the default.
    """

    indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `indent_sm` constructor.

    If left as ``None``, `indent_sm_kwargs` defaults to the value of
    `State.nested_sm_kwargs`.  Override it in subclasses to avoid the default.
    """

    known_indent_sm = None
    """
    The `StateMachine` class handling known-indented text blocks.

    If left as ``None``, `known_indent_sm` defaults to the value of
    `indent_sm`.  Override it in subclasses to avoid the default.
    """

    known_indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `known_indent_sm` constructor.

    If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
    `indent_sm_kwargs`.  Override it in subclasses to avoid the default.
    """

    ws_patterns = {'blank': ' *$',
                   'indent': ' +'}
    """Patterns for default whitespace transitions.  May be overridden in
    subclasses."""

    ws_initial_transitions = ('blank', 'indent')
    """Default initial whitespace transitions, added before those listed in
    `State.initial_transitions`.  May be overridden in subclasses."""

    def __init__(self, state_machine, debug=False):
        """
        Initialize a `StateWS` object; extends `State.__init__()`.

        Check for indent state machine attributes, set defaults if not set.
        """
        State.__init__(self, state_machine, debug)
        if self.indent_sm is None:
            self.indent_sm = self.nested_sm
        if self.indent_sm_kwargs is None:
            self.indent_sm_kwargs = self.nested_sm_kwargs
        if self.known_indent_sm is None:
            self.known_indent_sm = self.indent_sm
        if self.known_indent_sm_kwargs is None:
            self.known_indent_sm_kwargs = self.indent_sm_kwargs

    def add_initial_transitions(self):
        """
        Add whitespace-specific transitions before those defined in subclass.

        Extends `State.add_initial_transitions()`.
        """
        State.add_initial_transitions(self)
        if self.patterns is None:
            self.patterns = {}
        self.patterns.update(self.ws_patterns)
        names, transitions = self.make_transitions(
            self.ws_initial_transitions)
        # Prepended, so 'blank' and 'indent' are checked first.
        self.add_transitions(names, transitions)

    def blank(self, match, context, next_state):
        """Handle blank lines. Does nothing. Override in subclasses."""
        return self.nop(match, context, next_state)

    def indent(self, match, context, next_state):
        """
        Handle an indented text block. Extend or override in subclasses.

        Recursively run the registered state machine for indented blocks
        (`self.indent_sm`).
        """
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_indented()
        sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results

    def known_indent(self, match, context, next_state):
        """
        Handle a known-indent text block. Extend or override in subclasses.

        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
              self.state_machine.get_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                  **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results

    def first_known_indent(self, match, context, next_state):
        """
        Handle an indented text block (first line's indent known).

        Extend or override in subclasses.

        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                  **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
    """`StateMachine` which uses `re.search()` instead of `re.match()`."""
    # All behavior comes from the _SearchOverride mix-in and StateMachine.
    pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
    """`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
    # All behavior comes from the _SearchOverride mix-in and StateMachineWS.
    pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
             parent=None, parent_offset=None):
    """
    Initialize from an optional existing list or `ViewList`.

    Parameters:

    - `initlist`: a `ViewList` (copied), or any iterable of lines.
    - `source`: default source name used to build `self.items` when
      `items` is not given.
    - `items`: explicit (source, offset) pairs, one per data line.
    - `parent`, `parent_offset`: link to a parent list for "view"
      semantics (set by slicing, not usually by callers).
    """
    self.data = []
    """The actual list of data, flattened from various sources."""

    self.items = []
    """A list of (source, offset) pairs, same length as `self.data`: the
    source of each line and the offset of each line from the beginning of
    its source."""

    self.parent = parent
    """The parent list."""

    self.parent_offset = parent_offset
    """Offset of this list from the beginning of the parent list."""

    if isinstance(initlist, ViewList):
        # Copy both data and per-line bookkeeping from the other ViewList.
        self.data = initlist.data[:]
        self.items = initlist.items[:]
    elif initlist is not None:
        self.data = list(initlist)
        if items:
            self.items = items
        else:
            # Default bookkeeping: every line comes from `source`,
            # numbered from 0.
            self.items = [(source, i) for i in range(len(initlist))]
    # Invariant relied upon throughout the class.
    assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
    # Render as the underlying flat list of lines.
    return str(self.data)
def __repr__(self):
    # Include the (source, offset) bookkeeping for debugging.
    return '%s(%s, items=%s)' % (self.__class__.__name__,
                                 self.data, self.items)
# Rich comparisons delegate to the underlying data list; `__cast`
# unwraps a ViewList operand so ViewLists compare with plain lists too.
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
    # Unwrap a ViewList operand to its data list for comparisons;
    # any other operand is passed through unchanged.
    if isinstance(other, ViewList):
        return other.data
    else:
        return other
# Container protocol: membership and length delegate to the data list.
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)

# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
    if isinstance(i, slice):
        # Only contiguous slices are supported: the result is a child
        # "view" linked back to this list via parent/parent_offset.
        assert i.step in (None, 1), 'cannot handle slice with stride'
        return self.__class__(self.data[i.start:i.stop],
                              items=self.items[i.start:i.stop],
                              parent=self, parent_offset=i.start or 0)
    else:
        # Integer index: return the bare line, not a view.
        return self.data[i]
def __setitem__(self, i, item):
    if isinstance(i, slice):
        assert i.step in (None, 1), 'cannot handle slice with stride'
        if not isinstance(item, ViewList):
            raise TypeError('assigning non-ViewList to ViewList slice')
        self.data[i.start:i.stop] = item.data
        self.items[i.start:i.stop] = item.items
        assert len(self.data) == len(self.items), 'data mismatch'
        if self.parent:
            # Propagate the change into the parent's coordinate space.
            self.parent[(i.start or 0) + self.parent_offset
                        : (i.stop or len(self)) + self.parent_offset] = item
    else:
        self.data[i] = item
        # NOTE(review): the (source, offset) entry in self.items is left
        # unchanged for single-item assignment -- confirm this is the
        # intended bookkeeping.
        if self.parent:
            self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
    # Concatenation is only defined between ViewLists, so the
    # (source, offset) bookkeeping can be concatenated too.
    if isinstance(other, ViewList):
        return self.__class__(self.data + other.data,
                              items=(self.items + other.items))
    else:
        raise TypeError('adding non-ViewList to a ViewList')

def __radd__(self, other):
    # Reflected form: other + self, same ViewList-only restriction.
    if isinstance(other, ViewList):
        return self.__class__(other.data + self.data,
                              items=(other.items + self.items))
    else:
        raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
    def extend(self, other):
        """Append all of `other` (a ViewList), mirroring into the parent."""
        if not isinstance(other, ViewList):
            raise TypeError('extending a ViewList with a non-ViewList')
        if self.parent:
            # Mirror into the parent *before* our own length changes, since
            # the insertion point is computed from the current length.
            self.parent.insert(len(self.data) + self.parent_offset, other)
        self.data.extend(other.data)
        self.items.extend(other.items)
    def append(self, item, source=None, offset=0):
        """Append one value with its (source, offset), or merge a ViewList.

        Without `source`, `item` must be a ViewList and is extend()ed.
        """
        if source is None:
            self.extend(item)
        else:
            if self.parent:
                self.parent.insert(len(self.data) + self.parent_offset, item,
                                   source, offset)
            self.data.append(item)
            self.items.append((source, offset))
    def insert(self, i, item, source=None, offset=0):
        """Insert at index `i`; same source/ViewList convention as append()."""
        if source is None:
            if not isinstance(item, ViewList):
                raise TypeError('inserting non-ViewList with no source given')
            self.data[i:i] = item.data
            self.items[i:i] = item.items
            if self.parent:
                # Normalize a negative index against the *post-insert* length
                # before mirroring into the parent.
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item)
        else:
            self.data.insert(i, item)
            self.items.insert(i, (source, offset))
            if self.parent:
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item,
                                   source, offset)
    def pop(self, i=-1):
        """Remove and return ``self[i]``, mirroring into the parent."""
        if self.parent:
            # Translate a possibly-negative index to a parent position.
            index = (len(self.data) + i) % len(self.data)
            self.parent.pop(index + self.parent_offset)
        self.items.pop(i)
        return self.data.pop(i)
    def trim_start(self, n=1):
        """
        Remove items from the start of the list, without touching the parent.
        """
        if n > len(self.data):
            raise IndexError("Size of trim too large; can't trim %s items "
                             "from a list of size %s." % (n, len(self.data)))
        elif n < 0:
            raise IndexError('Trim size must be >= 0.')
        del self.data[:n]
        del self.items[:n]
        if self.parent:
            # Keep the view anchored to the same parent region.
            self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
    def remove(self, item):
        """Remove the first occurrence of `item` (mirrors via __delitem__)."""
        index = self.index(item)
        del self[index]
    def count(self, item): return self.data.count(item)
    def index(self, item): return self.data.index(item)
    def reverse(self):
        """Reverse in place; the parent link is dropped (order diverged)."""
        self.data.reverse()
        self.items.reverse()
        self.parent = None
    def sort(self, *args):
        """Sort data and items together; the parent link is dropped."""
        tmp = list(zip(self.data, self.items))
        tmp.sort(*args)
        self.data = [entry[0] for entry in tmp]
        self.items = [entry[1] for entry in tmp]
        self.parent = None
    def info(self, i):
        """Return source & offset for index `i`."""
        try:
            return self.items[i]
        except IndexError:
            if i == len(self.data):     # Just past the end
                return self.items[i - 1][0], None
            else:
                raise
    def source(self, i):
        """Return source for index `i`."""
        return self.info(i)[0]
    def offset(self, i):
        """Return offset for index `i`."""
        return self.info(i)[1]
    def disconnect(self):
        """Break link between this list and parent list."""
        self.parent = None
    def xitems(self):
        """Return iterator yielding (source, offset, value) tuples."""
        for (value, (source, offset)) in zip(self.data, self.items):
            yield (source, offset, value)
    def pprint(self):
        """Print the list in `grep` format (`source:offset:value` lines)"""
        for line in self.xitems():
            print("%s:%d:%s" % line)
class StringList(ViewList):
    """A `ViewList` with string-specific methods."""
    def trim_left(self, length, start=0, end=sys.maxsize):
        """
        Trim `length` characters off the beginning of each item, in-place,
        from index `start` to `end`. No whitespace-checking is done on the
        trimmed text. Does not affect slice parent.
        """
        self.data[start:end] = [line[length:]
                                for line in self.data[start:end]]
    def get_text_block(self, start, flush_left=False):
        """
        Return a contiguous block of text.
        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        end = start
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if not line.strip():        # blank line terminates the block
                break
            if flush_left and (line[0] == ' '):
                source, offset = self.info(end)
                raise UnexpectedIndentationError(self[start:end], source,
                                                 offset + 1)
            end += 1
        return self[start:end]
    def get_indented(self, start=0, until_blank=False, strip_indent=True,
                     block_indent=None, first_indent=None):
        """
        Extract and return a StringList of indented lines of text.
        Collect all lines with indentation, determine the minimum indentation,
        remove the minimum indentation from all indented lines (unless
        `strip_indent` is false), and return them. All lines up to but not
        including the first unindented line will be returned.
        :Parameters:
          - `start`: The index of the first line to examine.
          - `until_blank`: Stop collecting at the first blank line if true.
          - `strip_indent`: Strip common leading indent if true (default).
          - `block_indent`: The indent of the entire block, if known.
          - `first_indent`: The indent of the first line, if known.
        :Return:
          - a StringList of indented lines with minimum indent removed;
          - the amount of the indent;
          - a boolean: did the indented block finish with a blank line or EOF?
        """
        indent = block_indent           # start with None if unknown
        end = start
        if block_indent is not None and first_indent is None:
            first_indent = block_indent
        if first_indent is not None:
            # The first line's indent is known, so skip examining it.
            end += 1
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if line and (line[0] != ' '
                         or (block_indent is not None
                             and line[:block_indent].strip())):
                # Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line blank:
                blank_finish = ((end > start)
                                and not self.data[end - 1].strip())
                break
            stripped = line.lstrip()
            if not stripped:            # blank line
                if until_blank:
                    blank_finish = 1
                    break
            elif block_indent is None:
                # Track the minimum indent seen so far.
                line_indent = len(line) - len(stripped)
                if indent is None:
                    indent = line_indent
                else:
                    indent = min(indent, line_indent)
            end += 1
        else:
            blank_finish = 1            # block ends at end of lines
        block = self[start:end]
        if first_indent is not None and block:
            block.data[0] = block.data[0][first_indent:]
        if indent and strip_indent:
            # Skip line 0 if its indent was already handled above.
            block.trim_left(indent, start=(first_indent is not None))
        return block, indent or 0, blank_finish
    def get_2D_block(self, top, left, bottom, right, strip_indent=True):
        """Return a rectangular region [top:bottom, left:right] as a block.

        Column positions are adjusted for combining characters via
        ``utils.column_indices`` (project helper; assumed imported at module
        level -- not visible in this chunk).
        """
        block = self[top:bottom]
        indent = right
        for i in range(len(block.data)):
            # get slice from line, care for combining characters
            ci = utils.column_indices(block.data[i])
            # NOTE(review): ``left``/``right`` are rebound here, so later
            # iterations use the *adjusted* values from the previous line --
            # confirm against upstream docutils whether this is intended.
            try:
                left = ci[left]
            except IndexError:
                left += len(block.data[i]) - len(ci)
            try:
                right = ci[right]
            except IndexError:
                right += len(block.data[i]) - len(ci)
            block.data[i] = line = block.data[i][left:right].rstrip()
            if line:
                indent = min(indent, len(line) - len(line.lstrip()))
        if strip_indent and 0 < indent < right:
            block.data = [line[indent:] for line in block.data]
        return block
    def pad_double_width(self, pad_char):
        """
        Pad all double-width characters in self by appending `pad_char` to each.
        For East Asian language support.
        """
        # ``unicodedata`` is assumed to be imported at module level (not
        # visible in this chunk); the hasattr guard dates from pre-2.4.
        if hasattr(unicodedata, 'east_asian_width'):
            east_asian_width = unicodedata.east_asian_width
        else:
            return                      # new in Python 2.4
        for i in range(len(self.data)):
            line = self.data[i]
            if isinstance(line, str):
                new = []
                for char in line:
                    new.append(char)
                    if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
                        new.append(pad_char)
                self.data[i] = ''.join(new)
    def replace(self, old, new):
        """Replace all occurrences of substring `old` with `new`."""
        for i in range(len(self.data)):
            self.data[i] = self.data[i].replace(old, new)
# Exception hierarchy for the state machine; every concrete error derives
# from StateMachineError so callers can catch the whole family at once.
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
# The following two exceptions are control-flow signals, not errors.
class TransitionCorrection(Exception):
    """
    Raise from within a transition method to switch to another transition.
    Raise with one argument, the new transition name.
    """
class StateCorrection(Exception):
    """
    Raise from within a transition method to switch to another state.
    Raise with one or two arguments: new state name, and an optional new
    transition name.
    """
def string2lines(astring, tab_width=8, convert_whitespace=False,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| apache-2.0 |
QuLogic/specfem3d | EXAMPLES/layered_halfspace/2lay_mesh_boundary_fig8-nodoubling.py | 5 | 4016 | #!/usr/bin/env python
###########################################################################
#### TNM: This is the mesh generation, adapted from a journal file
#### specific to the settings of Komatitsch and Tromp 1999, Fig.8
#### Aug 2009
###########################################################################
import cubit
import boundary_definition
import cubit2specfem3d
import os
import sys
cubit.cmd('reset')
cubit.cmd('brick x 134000 y 134000 z 60000')
# This seems to conflict with boundary_definition.py
# ....which leaves the model space at e.g. x=[-67,67] km
cubit.cmd('volume 1 move x 67000 y 67000 z -30000')
# create vertices for discontinuity
cubit.cmd('split curve 9 distance 3000')
cubit.cmd('split curve 10 distance 3000')
cubit.cmd('split curve 11 distance 3000')
cubit.cmd('split curve 12 distance 3000')
# create surface for interface
cubit.cmd('create surface vertex 9 10 12 11')
cubit.cmd('section volume 1 with surface 7 keep normal')
cubit.cmd('section volume 1 with surface 7 reverse')
# create vertices for auxiliary interface to allow for refinement
cubit.cmd('split curve 29 distance 9000')
cubit.cmd('split curve 31 distance 9000')
cubit.cmd('split curve 32 distance 9000')
cubit.cmd('split curve 36 distance 9000')
# create surface for buffer interface to refine BELOW the discontinuity
cubit.cmd('create surface vertex 25 26 28 27')
cubit.cmd('section volume 3 with surface 19 keep normal')
cubit.cmd('section volume 3 with surface 19 reverse')
cubit.cmd('delete volume 2 4')
cubit.cmd('merge all')
cubit.cmd('imprint all')
# Meshing the volumes
#elementsize = 1196.4 #hi-resolution
#elementsize = 1500.0 # mid-resolution
elementsize = 3000.0 # low-resolution
cubit.cmd('volume 3 size '+str(elementsize))
cubit.cmd('mesh volume 3')
#cubit.cmd('refine surface 8 numsplit 1 bias 1.0 depth 1')
cubit.cmd('volume 1 size '+str(elementsize))
cubit.cmd('mesh volume 1')
cubit.cmd('volume 5 size '+str(elementsize))
cubit.cmd('mesh volume 5')
#### End of meshing
###### This is boundary_definition.py of GEOCUBIT
#..... which extracts the bounding faces and defines them into blocks
boundary_definition.entities=['face']
boundary_definition.define_bc(boundary_definition.entities,parallel=True)
#### Define material properties for the 3 volumes ################
cubit.cmd('#### DEFINE MATERIAL PROPERTIES #######################')
cubit.cmd('block 1 name "elastic 1" ') # elastic material region
cubit.cmd('block 1 attribute count 6')
cubit.cmd('block 1 attribute index 1 1 ') # volume 1
cubit.cmd('block 1 attribute index 2 2800 ') # vp
cubit.cmd('block 1 attribute index 3 1500 ') # vs
cubit.cmd('block 1 attribute index 4 2300 ') # rho
cubit.cmd('block 1 attribute index 5 9000. ') # Q_mu
cubit.cmd('block 1 attribute index 6 0 ') # anisotropy_flag
cubit.cmd('block 2 name "elastic 2" ') # elastic material region
cubit.cmd('block 2 attribute count 6')
cubit.cmd('block 2 attribute index 1 2 ') # volume 2
cubit.cmd('block 2 attribute index 2 7500 ') # vp
cubit.cmd('block 2 attribute index 3 4300 ') # vs
cubit.cmd('block 2 attribute index 4 3200 ') # rho
cubit.cmd('block 2 attribute index 5 9000.0') # Q_mu
cubit.cmd('block 2 attribute index 6 0 ') # anisotropy_flag
cubit.cmd('block 3 name "elastic 3" ') # elastic material region
cubit.cmd('block 3 attribute count 6')
cubit.cmd('block 3 attribute index 1 3 ') # same material properties as for volume 2
cubit.cmd('block 3 attribute index 2 7500 ')
cubit.cmd('block 3 attribute index 3 4300 ')
cubit.cmd('block 3 attribute index 4 3200 ')
cubit.cmd('block 3 attribute index 5 9000.0')
cubit.cmd('block 3 attribute index 6 0')
cubit.cmd('export mesh "top.e" dimension 3 overwrite')
cubit.cmd('save as "meshing.cub" overwrite')
#### Export to SPECFEM3D format using cubit2specfem3d.py of GEOCUBIT
os.system('mkdir -p MESH')
cubit2specfem3d.export2SPECFEM3D('MESH')
# all files needed by SCOTCH are now in directory MESH
| gpl-2.0 |
ebaskoro/node-gyp | gyp/test/generator-output/gyptest-top-all.py | 74 | 1455 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a project hierarchy created when the --generator-output=
option is used to put the build configuration files in a separate
directory tree.
"""
import TestGyp
# Ninja and Android don't support --generator-output.
# Ninja and Android don't support --generator-output.
test = TestGyp.TestGyp(formats=['!ninja', '!android'])
# Make the source tree read-only to prove that generation and the build
# never write outside the --generator-output directory ...
test.writable(test.workpath('src'), False)
test.run_gyp('prog1.gyp',
             '-Dset_symroot=1',
             '--generator-output=' + test.workpath('gypfiles'),
             chdir='src')
# ... except for the per-subdir build directories, which must stay writable.
test.writable(test.workpath('src/build'), True)
test.writable(test.workpath('src/subdir2/build'), True)
test.writable(test.workpath('src/subdir3/build'), True)
test.build('prog1.gyp', test.ALL, chdir='gypfiles')
chdir = 'gypfiles'
# Expected stdout of each program; %s is filled with the source file name.
expect = """\
Hello from %s
Hello from inc.h
Hello from inc1/include1.h
Hello from inc2/include2.h
Hello from inc3/include3.h
Hello from subdir2/deeper/deeper.h
"""
# Xcode places built products relative to each project directory.
if test.format == 'xcode':
  chdir = 'src'
test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
if test.format == 'xcode':
  chdir = 'src/subdir2'
test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
if test.format == 'xcode':
  chdir = 'src/subdir3'
test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
test.pass_test()
| mit |
Orav/kbengine | kbe/res/scripts/common/Lib/msilib/__init__.py | 2 | 18010 | # Copyright (C) 2005 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
from _msi import *
import os, string, re, sys
# Platform flags derived from the interpreter's build string.
AMD64 = "AMD64" in sys.version
Itanium = "Itanium" in sys.version
Win64 = AMD64 or Itanium
# MSI column-type bit masks. Partially taken from Wine.
datasizemask=      0x00ff   # low byte: declared column width
type_valid=        0x0100
type_localizable=  0x0200
typemask=          0x0c00   # selects one of the four base types below
type_long=         0x0000
type_short=        0x0400
type_string=       0x0c00
type_binary=       0x0800
type_nullable=     0x1000
type_key=          0x2000   # column is part of the primary key
# XXX temporary, localizable?
knownbits = datasizemask | type_valid | type_localizable | \
            typemask | type_nullable | type_key
class Table:
    """Description of an MSI database table: its name plus typed columns."""

    def __init__(self, name):
        self.name = name
        self.fields = []        # list of (1-based index, name, type-bits)

    def add_field(self, index, name, type):
        """Register column `name` at 1-based position `index` with MSI
        type bits `type` (see the type_* constants above)."""
        self.fields.append((index,name,type))

    def sql(self):
        """Return the CREATE TABLE statement for this table.

        Unknown type bits are reported on stdout but do not abort.
        """
        fields = []
        keys = []
        self.fields.sort()
        fields = [None]*len(self.fields)
        for index, name, type in self.fields:
            index -= 1
            unk = type & ~knownbits
            if unk:
                print("%s.%s unknown bits %x" % (self.name, name, unk))
            size = type & datasizemask
            dtype = type & typemask
            if dtype == type_string:
                if size:
                    tname="CHAR(%d)" % size
                else:
                    tname="CHAR"
            elif dtype == type_short:
                assert size==2
                tname = "SHORT"
            elif dtype == type_long:
                assert size==4
                tname="LONG"
            elif dtype == type_binary:
                assert size==0
                tname="OBJECT"
            else:
                tname="unknown"
                # Fixed message: a space was missing between the column name
                # and the text ("%s.%sunknown integer type ...").
                print("%s.%s unknown integer type %d" % (self.name, name, size))
            if type & type_nullable:
                flags = ""
            else:
                flags = " NOT NULL"
            if type & type_localizable:
                flags += " LOCALIZABLE"
            fields[index] = "`%s` %s%s" % (name, tname, flags)
            if type & type_key:
                keys.append("`%s`" % name)
        fields = ", ".join(fields)
        keys = ", ".join(keys)
        return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys)

    def create(self, db):
        """Execute the CREATE TABLE statement against open database `db`."""
        v = db.OpenView(self.sql())
        v.Execute(None)
        v.Close()
class _Unspecified:pass
def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified):
    """Change the sequence number of `action` in sequence list `seq`.

    `seq` holds (action, condition, sequence-number) tuples and is updated
    in place; only the first matching entry is changed.  `seqno` and `cond`
    default to the sentinel `_Unspecified`, meaning "keep the existing
    value".  Raises ValueError if `action` does not occur in `seq`.
    """
    for pos, entry in enumerate(seq):
        if entry[0] != action:
            continue
        new_cond = entry[1] if cond is _Unspecified else cond
        new_seqno = entry[2] if seqno is _Unspecified else seqno
        seq[pos] = (action, new_cond, new_seqno)
        return
    raise ValueError("Action not found in sequence")
def add_data(db, table, values):
    """Insert each row of `values` into `table` of MSI database `db`.

    Each row must have exactly as many entries as the table has columns.
    Entries may be int, str, None (leave field empty), or `Binary`
    (stored as a stream).  Raises MSIError on insertion failure and
    TypeError for unsupported field types.
    """
    v = db.OpenView("SELECT * FROM `%s`" % table)
    count = v.GetColumnInfo(MSICOLINFO_NAMES).GetFieldCount()
    r = CreateRecord(count)
    for value in values:
        assert len(value) == count, value
        for i in range(count):
            field = value[i]
            if isinstance(field, int):
                r.SetInteger(i+1,field)
            elif isinstance(field, str):
                r.SetString(i+1,field)
            elif field is None:
                pass
            elif isinstance(field, Binary):
                r.SetStream(i+1, field.name)
            else:
                raise TypeError("Unsupported type %s" % field.__class__.__name__)
        try:
            v.Modify(MSIMODIFY_INSERT, r)
        except Exception as e:
            # Chain the underlying _msi error so the root cause isn't lost.
            raise MSIError("Could not insert "+repr(values)+" into "+table) from e
        r.ClearData()
    v.Close()
def add_stream(db, name, path):
    """Store the file at `path` in `db`'s _Streams table under `name`."""
    v = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name)
    r = CreateRecord(1)
    r.SetStream(1, path)
    v.Execute(r)
    v.Close()
def init_database(name, schema,
                  ProductName, ProductCode, ProductVersion,
                  Manufacturer):
    """Create MSI database file `name` from `schema` and return it open.

    `schema` must provide ``tables`` (Table objects) and
    ``_Validation_records``.  Summary information and the basic Property
    rows are filled in from the remaining arguments.  Any existing file
    at `name` is removed first.
    """
    try:
        os.unlink(name)
    except OSError:
        pass
    # MSI product codes are canonically upper-case GUIDs.
    ProductCode = ProductCode.upper()
    # Create the database
    db = OpenDatabase(name, MSIDBOPEN_CREATE)
    # Create the tables
    for t in schema.tables:
        t.create(db)
    # Fill the validation table
    add_data(db, "_Validation", schema._Validation_records)
    # Initialize the summary information, allowing at most 20 properties
    si = db.GetSummaryInformation(20)
    si.SetProperty(PID_TITLE, "Installation Database")
    si.SetProperty(PID_SUBJECT, ProductName)
    si.SetProperty(PID_AUTHOR, Manufacturer)
    # PID_TEMPLATE encodes target platform and language (1033 = en-US).
    if Itanium:
        si.SetProperty(PID_TEMPLATE, "Intel64;1033")
    elif AMD64:
        si.SetProperty(PID_TEMPLATE, "x64;1033")
    else:
        si.SetProperty(PID_TEMPLATE, "Intel;1033")
    si.SetProperty(PID_REVNUMBER, gen_uuid())
    si.SetProperty(PID_WORDCOUNT, 2) # long file names, compressed, original media
    si.SetProperty(PID_PAGECOUNT, 200)
    si.SetProperty(PID_APPNAME, "Python MSI Library")
    # XXX more properties
    si.Persist()
    add_data(db, "Property", [
        ("ProductName", ProductName),
        ("ProductCode", ProductCode),
        ("ProductVersion", ProductVersion),
        ("Manufacturer", Manufacturer),
        ("ProductLanguage", "1033")])
    db.Commit()
    return db
def add_tables(db, module):
    """Populate `db` from `module`: for every table name in `module.tables`,
    insert the rows stored in the same-named attribute of `module`."""
    for table in module.tables:
        add_data(db, table, getattr(module, table))
def make_id(str):
    """Map an arbitrary string onto a legal MSI identifier.

    Characters outside [A-Za-z0-9._] become underscores, and a leading
    digit or dot gets an underscore prefix.  (The parameter name ``str``
    is kept for interface compatibility.)
    """
    allowed = string.ascii_letters + string.digits + "._"
    cleaned = "".join(c if c in allowed else "_" for c in str)
    if cleaned[0] in (string.digits + "."):
        cleaned = "_" + cleaned
    assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", cleaned), "FILE"+cleaned
    return cleaned
def gen_uuid():
    """Return a fresh upper-case GUID in registry format ({...})."""
    return "{"+UuidCreate().upper()+"}"
class CAB:
    """Accumulate files for a CAB archive embedded in the MSI as a stream."""
    def __init__(self, name):
        self.name = name            # stream / Media name of the cabinet
        self.files = []             # (full path, logical id) pairs, in order
        self.filenames = set()      # logical ids already handed out
        self.index = 0              # sequence number of the last file added
    def gen_id(self, file):
        """Return a unique MSI identifier derived from `file`."""
        logical = _logical = make_id(file)
        pos = 1
        # Disambiguate by appending ".1", ".2", ... until unused.
        while logical in self.filenames:
            logical = "%s.%d" % (_logical, pos)
            pos += 1
        self.filenames.add(logical)
        return logical
    def append(self, full, file, logical):
        """Schedule file `full` for inclusion under id `logical` (generated
        from `file` if empty).  Returns (sequence, logical); directories are
        silently skipped (returns None)."""
        if os.path.isdir(full):
            return
        if not logical:
            logical = self.gen_id(file)
        self.index += 1
        self.files.append((full, logical))
        return self.index, logical
    def commit(self, db):
        """Build the cabinet and embed it into `db` (Media row + stream)."""
        from tempfile import mktemp
        # NOTE(review): mktemp() is race-prone/insecure; FCICreate appears to
        # need a path it creates itself, but a mkstemp-based scheme would be
        # safer -- confirm before changing.
        filename = mktemp()
        FCICreate(filename, self.files)
        add_data(db, "Media",
                [(1, self.index, None, "#"+self.name, None, None)])
        add_stream(db, self.name, filename)
        os.unlink(filename)
        db.Commit()
# Logical directory names already allocated (module-wide, across all
# Directory instances), used to keep Directory-table keys unique.
_directories = set()
class Directory:
    def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None):
        """Create a new directory in the Directory table. There is a current component
        at each point in time for the directory, which is either explicitly created
        through start_component, or implicitly when files are added for the first
        time. Files are added into the current component, and into the cab file.
        To create a directory, a base directory object needs to be specified (can be
        None), the path to the physical directory, and a logical directory name.
        Default specifies the DefaultDir slot in the directory table. componentflags
        specifies the default flags that new components get."""
        index = 1
        _logical = make_id(_logical)
        logical = _logical
        # Disambiguate against directories created earlier (module-wide set).
        while logical in _directories:
            logical = "%s%d" % (_logical, index)
            index += 1
        _directories.add(logical)
        self.db = db
        self.cab = cab
        self.basedir = basedir
        self.physical = physical
        self.logical = logical
        self.component = None       # current component; see start_component
        self.short_names = set()    # 8.3 names already used in this dir
        self.ids = set()            # File-table ids already used
        self.keyfiles = {}          # file name -> pre-allocated key id
        self.componentflags = componentflags
        if basedir:
            self.absolute = os.path.join(basedir.absolute, physical)
            blogical = basedir.logical
        else:
            self.absolute = physical
            blogical = None
        add_data(db, "Directory", [(logical, blogical, default)])

    def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None):
        """Add an entry to the Component table, and make this component the current for this
        directory. If no component name is given, the directory name is used. If no feature
        is given, the current feature is used. If no flags are given, the directory's default
        flags are used. If no keyfile is given, the KeyPath is left null in the Component
        table."""
        if flags is None:
            flags = self.componentflags
        if uuid is None:
            uuid = gen_uuid()
        else:
            uuid = uuid.upper()
        if component is None:
            component = self.logical
        self.component = component
        if Win64:
            # msidbComponentAttributes64bit
            flags |= 256
        if keyfile:
            # Bug fix: CAB.gen_id() takes only the file name.  The old call
            # passed self.absolute as an extra argument, raising TypeError.
            keyid = self.cab.gen_id(keyfile)
            self.keyfiles[keyfile] = keyid
        else:
            keyid = None
        add_data(self.db, "Component",
                        [(component, uuid, self.logical, flags, None, keyid)])
        if feature is None:
            feature = current_feature
        add_data(self.db, "FeatureComponents",
                        [(feature.id, component)])

    def make_short(self, file):
        """Return a unique 8.3 short name for `file` within this directory."""
        oldfile = file
        file = file.replace('+', '_')
        file = ''.join(c for c in file if not c in ' "/\[]:;=,')
        parts = file.split(".")
        if len(parts) > 1:
            prefix = "".join(parts[:-1]).upper()
            suffix = parts[-1].upper()
            if not prefix:
                prefix = suffix
                suffix = None
        else:
            prefix = file.upper()
            suffix = None
        if len(parts) < 3 and len(prefix) <= 8 and file == oldfile and (
                                                not suffix or len(suffix) <= 3):
            # Already a legal, unmodified 8.3 name.
            if suffix:
                file = prefix+"."+suffix
            else:
                file = prefix
        else:
            file = None
        if file is None or file in self.short_names:
            # Build PREFIX~N[.SUF] candidates until an unused one is found.
            prefix = prefix[:6]
            if suffix:
                suffix = suffix[:3]
            pos = 1
            while 1:
                if suffix:
                    file = "%s~%d.%s" % (prefix, pos, suffix)
                else:
                    file = "%s~%d" % (prefix, pos)
                if file not in self.short_names: break
                pos += 1
                assert pos < 10000
                if pos in (10, 100, 1000):
                    # Keep total length within 8 chars as N grows.
                    prefix = prefix[:-1]
        self.short_names.add(file)
        assert not re.search(r'[\?|><:/*"+,;=\[\]]', file) # restrictions on short names
        return file

    def add_file(self, file, src=None, version=None, language=None):
        """Add a file to the current component of the directory, starting a new one
        if there is no current component. By default, the file name in the source
        and the file table will be identical. If the src file is specified, it is
        interpreted relative to the current directory. Optionally, a version and a
        language can be specified for the entry in the File table."""
        if not self.component:
            self.start_component(self.logical, current_feature, 0)
        if not src:
            # Allow relative paths for file if src is not specified
            src = file
            file = os.path.basename(file)
        absolute = os.path.join(self.absolute, src)
        assert not re.search(r'[\?|><:/*]"', file) # restrictions on long names
        if file in self.keyfiles:
            logical = self.keyfiles[file]
        else:
            logical = None
        sequence, logical = self.cab.append(absolute, file, logical)
        assert logical not in self.ids
        self.ids.add(logical)
        short = self.make_short(file)
        full = "%s|%s" % (short, file)
        filesize = os.stat(absolute).st_size
        # constants.msidbFileAttributesVital
        # Compressed omitted, since it is the database default
        # could add r/o, system, hidden
        attributes = 512
        add_data(self.db, "File",
                [(logical, self.component, full, filesize, version,
                  language, attributes, sequence)])
        #if not version:
        #    # Add hash if the file is not versioned
        #    filehash = FileHash(absolute, 0)
        #    add_data(self.db, "MsiFileHash",
        #             [(logical, 0, filehash.IntegerData(1),
        #               filehash.IntegerData(2), filehash.IntegerData(3),
        #               filehash.IntegerData(4))])
        # Automatically remove .pyc/.pyo files on uninstall (2)
        # XXX: adding so many RemoveFile entries makes installer unbelievably
        # slow. So instead, we have to use wildcard remove entries
        if file.endswith(".py"):
            add_data(self.db, "RemoveFile",
                    [(logical+"c", self.component, "%sC|%sc" % (short, file),
                      self.logical, 2),
                     (logical+"o", self.component, "%sO|%so" % (short, file),
                      self.logical, 2)])
        return logical

    def glob(self, pattern, exclude = None):
        """Add a list of files to the current component as specified in the
        glob pattern. Individual files can be excluded in the exclude list."""
        # Bug fix: this module never imported ``glob``, so the former
        # ``glob.glob1(...)`` call raised NameError.  fnmatch.filter over
        # os.listdir() is the supported equivalent (it additionally matches
        # dot-files, which glob would hide).
        import fnmatch
        files = fnmatch.filter(os.listdir(self.absolute), pattern)
        for f in files:
            if exclude and f in exclude: continue
            self.add_file(f)
        return files

    def remove_pyc(self):
        "Remove .pyc/.pyo files on uninstall"
        add_data(self.db, "RemoveFile",
                [(self.component+"c", self.component, "*.pyc", self.logical, 2),
                 (self.component+"o", self.component, "*.pyo", self.logical, 2)])
class Binary:
    """Wrap a file name so add_data() stores the file's contents as a
    binary stream rather than as a string value."""

    def __init__(self, fname):
        self.name = fname

    def __repr__(self):
        template = 'msilib.Binary(os.path.join(dirname,"%s"))'
        return template % self.name
class Feature:
    """Row in the Feature table; remembers its id for FeatureComponents."""
    def __init__(self, db, id, title, desc, display, level = 1,
                 parent=None, directory = None, attributes=0):
        self.id = id
        if parent:
            # The Feature table references the parent by its string id.
            parent = parent.id
        add_data(db, "Feature",
                        [(id, parent, title, desc, display,
                          level, directory, attributes)])
    def set_current(self):
        """Make this the feature that new components get attached to."""
        global current_feature
        current_feature = self
class Control:
    """Base wrapper for a control on dialog `dlg`; writes event rows."""
    def __init__(self, dlg, name):
        self.dlg = dlg
        self.name = name
    def event(self, event, argument, condition = "1", ordering = None):
        """Add a ControlEvent row (fires `event` when control is activated)."""
        add_data(self.dlg.db, "ControlEvent",
                 [(self.dlg.name, self.name, event, argument,
                   condition, ordering)])
    def mapping(self, event, attribute):
        """Add an EventMapping row (subscribe `attribute` to `event`)."""
        add_data(self.dlg.db, "EventMapping",
                 [(self.dlg.name, self.name, event, attribute)])
    def condition(self, action, condition):
        """Add a ControlCondition row (apply `action` when `condition` holds)."""
        add_data(self.dlg.db, "ControlCondition",
                 [(self.dlg.name, self.name, action, condition)])
class RadioButtonGroup(Control):
    """A radio-button group control; buttons are added via add()."""
    def __init__(self, dlg, name, property):
        self.dlg = dlg
        self.name = name
        self.property = property    # MSI property receiving the selection
        self.index = 1              # next 1-based button position
    def add(self, name, x, y, w, h, text, value = None):
        """Append one button; `value` defaults to the button's `name`."""
        if value is None:
            value = name
        add_data(self.dlg.db, "RadioButton",
                 [(self.property, self.index, value,
                   x, y, w, h, text, None)])
        self.index += 1
class Dialog:
    """Row in the Dialog table plus helpers to add typed controls."""
    def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel):
        self.db = db
        self.name = name
        self.x, self.y, self.w, self.h = x,y,w,h
        add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)])
    def control(self, name, type, x, y, w, h, attr, prop, text, next, help):
        """Add a Control row of arbitrary `type`; return its wrapper."""
        add_data(self.db, "Control",
                 [(self.name, name, type, x, y, w, h, attr, prop, text, next, help)])
        return Control(self, name)
    def text(self, name, x, y, w, h, attr, text):
        """Add a static Text control."""
        return self.control(name, "Text", x, y, w, h, attr, None,
                            text, None, None)
    def bitmap(self, name, x, y, w, h, text):
        """Add a Bitmap control (`text` names the Binary-table image)."""
        return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None)
    def line(self, name, x, y, w, h):
        """Add a horizontal Line separator."""
        return self.control(name, "Line", x, y, w, h, 1, None, None, None, None)
    def pushbutton(self, name, x, y, w, h, attr, text, next):
        """Add a PushButton; `next` is the control receiving focus after it."""
        return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None)
    def radiogroup(self, name, x, y, w, h, attr, prop, text, next):
        """Add a RadioButtonGroup bound to MSI property `prop`."""
        add_data(self.db, "Control",
                 [(self.name, name, "RadioButtonGroup",
                   x, y, w, h, attr, prop, text, next, None)])
        return RadioButtonGroup(self, name, prop)
    def checkbox(self, name, x, y, w, h, attr, prop, text, next):
        """Add a CheckBox bound to MSI property `prop`."""
        return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
| lgpl-3.0 |
starbt/flea_market | market/migrations/0006_auto_20161206_2033.py | 1 | 1232 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-06 12:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds contact/discount fields to the
    # Goods model and relaxes/defaults the picture_url fields.  Do not edit
    # the operation contents by hand; generate a follow-up migration instead.

    dependencies = [
        ('market', '0005_auto_20161206_1204'),
    ]

    operations = [
        migrations.AddField(
            model_name='goods',
            name='discount',
            field=models.IntegerField(blank=True, default=0),
        ),
        migrations.AddField(
            model_name='goods',
            name='goods_phone',
            field=models.IntegerField(blank=True, default=0),
        ),
        migrations.AddField(
            model_name='goods',
            name='goods_qq',
            field=models.IntegerField(blank=True, default=0),
        ),
        migrations.AlterField(
            model_name='goods',
            name='picture_url',
            field=models.CharField(blank=True, max_length=128),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='picture_url',
            field=models.CharField(blank=True, default='http://ershou.u.qiniudn.com/Android_1480732854630_186265.jpg?imageView2/5/w/800/h/800', max_length=128),
        ),
    ]
| mit |
semonte/intellij-community | python/lib/Lib/wsgiref/headers.py | 104 | 5916 | """Manage HTTP Response Headers
Much of this module is red-handedly pilfered from email.Message in the stdlib,
so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
written by Barry Warsaw.
"""
from types import ListType, TupleType
# Regular expression that matches `special' characters in parameters, the
# existance of which force quoting of the parameter value.
import re
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers:
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if type(headers) is not ListType:
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)
    def __setitem__(self, name, val):
        """Set the value of a header."""
        # Existing occurrences are removed first, so assignment always leaves
        # exactly one header with this name (appended at the end).
        del self[name]
        self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
self._headers[:] = [kv for kv in self._headers if kv[0].lower()<>name]
    def __getitem__(self,name):
        """Get the first header value for 'name'.

        Return None if the header is missing instead of raising an exception.
        Note that if the header appears multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
        """
        return self.get(name)
    def has_key(self, name):
        """Return true if the message contains the header."""
        return self.get(name) is not None
    __contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
#
| apache-2.0 |
shelsonjava/TeaJS | deps/v8/build/gyp/test/variables/filelist/gyptest-filelist-golden.py | 228 | 1584 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<|(list.txt ...)' syntax commands.
"""
import os
import sys
import TestGyp
# Fix: the original used Python-2-only `print` statements; parenthesized
# calls below behave identically on Python 2 and are valid on Python 3.
test = TestGyp.TestGyp(format='gypd')

expect = test.read('filelist.gyp.stdout')
if sys.platform == 'win32':
    # gyp emits backslashed paths and CRLF on Windows; normalize the
    # golden output to match.
    expect = expect.replace('/', r'\\').replace('\r\n', '\n')

test.run_gyp('src/filelist.gyp',
             '--debug', 'variables',
             stdout=expect, ignore_line_numbers=True)

# Verify the filelist.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system.  Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('src/filelist.gypd').replace(
    '\r', '').replace('\\\\', '/')
expect = test.read('filelist.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
    print("Unexpected contents of `src/filelist.gypd'")
    test.diff(expect, contents, 'src/filelist.gypd ')
    test.fail_test()

# The generated file list itself must match the expected names exactly.
contents = test.read('src/names.txt')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
    print("Unexpected contents of `src/names.txt'")
    test.diff(expect, contents, 'src/names.txt ')
    test.fail_test()

test.pass_test()
| bsd-3-clause |
UbiCastTeam/touchwizard | touchwizard/canvas.py | 1 | 16655 | # -*- coding: utf-8 -*
import clutter
import gobject
import easyevent
import logging
import os
import time
from touchwizard.loading import LoadingWidget
logger = logging.getLogger('touchwizard')
class Canvas(clutter.Actor, clutter.Container, easyevent.User):
    """Wizard main actor which manages the user interface and pages.

    Listen for event:

      - next_page (page_name)
          Request for a new page identified by its name passed as content.
          The current page becomes in top of the page history.

      - previous_page
          Request for displaying back the top of the page history. No content
          expected. If the history is empty, quit the wizard.

      - request_quit
          Request for quitting the wizard. Call prepare_quit callback
          if it exists and then launch the wizard_quit which should
          be handled by the user main script.

    Launch the event:

      - wizard_quit
          Sent after prepare_quit callback to notify the main script that it
          can end the process.
    """
    __gtype_name__ = 'Canvas'
    # infobar_height = 104
    # iconbar_height = 200

    def __init__(self, first_page):
        """Build the canvas chrome (background, infobar, iconbar, loading
        widget) and schedule page discovery plus display of *first_page*."""
        import touchwizard
        clutter.Actor.__init__(self)
        easyevent.User.__init__(self)
        self.session = touchwizard.Session()
        self.background = None
        self.last_page_name = None
        self.last_page_timestamp = None
        self.previous_page_locked = False
        self.previous_page_timeout_id = None
        if touchwizard.canvas_bg:
            if not os.path.exists(touchwizard.canvas_bg):
                logger.error('Canvas background %s not found.', touchwizard.canvas_bg)
            # NOTE(review): the texture is still created even when the file
            # is missing (original behaviour kept) — confirm clutter copes.
            self.background = clutter.Texture(touchwizard.canvas_bg)
            self.background.set_parent(self)
        self.infobar = touchwizard.InfoBar()
        self.infobar.set_parent(self)
        self.iconbar = touchwizard.IconBar()
        self.iconbar.set_parent(self)
        self.loading = LoadingWidget()
        self.loading.set_parent(self)
        self.loading.hide()
        self.loading_padding = 10
        self.home_icon = touchwizard.Icon('shutdown')
        self.home_icon.build()
        self.previous_icon = touchwizard.IconRef(touchwizard.Icon('previous'))
        # self.previous_icon.build()
        easyevent.forward_event('icon_previous_actioned', 'previous_page')
        self.history = list()
        self.first_page = first_page
        self.available_pages = dict()
        self.current_page = None
        self.register_event('next_page', 'previous_page', 'refresh_page', 'clear_history')
        self.register_event('request_quit')
        gobject.idle_add(self.lookup_pages)
        gobject.idle_add(self.display_page, first_page)

    def lookup_pages(self):
        """Discover Page subclasses in touchwizard.page_path (or next to the
        first page's module) and index them by name in available_pages."""
        import touchwizard
        origin = ''
        path = touchwizard.page_path
        if path is None:
            if self.first_page is None:
                return tuple()
            self.available_pages[self.first_page.name] = self.first_page
            import sys
            origin = sys.modules[self.first_page.__module__].__file__
            path = os.path.dirname(os.path.abspath(os.path.expanduser(origin)))
        import imp
        for f in os.listdir(path):
            if f.endswith('.py') and f != os.path.basename(origin):
                try:
                    module = imp.load_source(f[:-3], os.path.join(path, f))
                # Was a bare ``except:``; narrowed so KeyboardInterrupt /
                # SystemExit are no longer swallowed while keeping the
                # deliberate best-effort import of page modules.
                except Exception:
                    import traceback
                    logger.error('Cannot import page %s:\n%s', f[:-3], traceback.format_exc())
                    if not touchwizard.tolerant_to_page_import_error:
                        import sys
                        sys.exit(1)
                    continue
                for attr_name in dir(module):
                    if attr_name.startswith('__'):
                        continue
                    attribute = getattr(module, attr_name)
                    if isinstance(attribute, type) \
                            and issubclass(attribute, touchwizard.Page) \
                            and attribute is not touchwizard.Page:
                        self.available_pages[attribute.name] = attribute
        logger.info('%d pages found.', len(self.available_pages))
        # print self.available_pages

    def display_page(self, page, icons=None):
        """Instantiate (or reuse) *page*, parent its panel and show it.

        *page* may be a Page subclass (instantiated here, cached when its
        ``reuse`` flag is set) or an already-built page instance.
        """
        if isinstance(page, type):
            self.current_page = page(self.session)
            if self.current_page.reuse:
                logger.info('Storing reusable page %s in cache.', self.current_page.name)
                self.available_pages[self.current_page.name] = self.current_page
        else:
            self.current_page = page
            logger.info('Reusing already instanciated page %s from cache.', self.current_page.name)
        os.environ["TOUCHWIZARD_CURRENT_PAGE"] = self.current_page.name
        os.environ.pop("TOUCHWIZARD_REQUESTED_PAGE", None)
        if page.need_loading:
            self.loading.hide()
        self._build_iconbar(icons)
        self.current_page.panel.set_parent(self)
        self.current_page.panel.lower_bottom()
        if hasattr(self.current_page.panel, 'prepare') and callable(self.current_page.panel.prepare):
            self.current_page.panel.prepare()
        self.current_page.panel.show()
        self.previous_page_locked = False
        self.last_page_name = page.name

    def _build_iconbar(self, icons):
        """Populate the icon bar: previous slot, next slot, then the page's
        own icons.  *icons* (from history) overrides the page's lists."""
        import touchwizard
        self.iconbar.clear()
        if icons is not None:
            # cached icons
            previous_icon = icons[0]
            next_icon = icons[-1]
            icons = icons[1:-1]
        else:
            # uninstanciated icons
            icons = self.current_page.icons
            previous_icon = self.current_page.previous
            next_icon = self.current_page.next

        # Icon "previous": fall back to the last history page's icon, then
        # the generic previous icon; with no history show the home icon.
        self.home_icon.unregister_all_events()
        if previous_icon is None:
            if self.history:
                last_page, last_icons = self.history[-1]
                previous_icon = last_page.my_icon
                if previous_icon is None:
                    previous_icon = self.previous_icon
            else:
                self.home_icon.register_events()
                previous_icon = self.home_icon
        condition = True
        if isinstance(previous_icon, touchwizard.IconRef):
            if callable(previous_icon.condition):
                condition = previous_icon.condition()
            else:
                condition = previous_icon.condition
            previous_icon = previous_icon.get_icon()
        if condition:
            previous_icon.build()
            self.iconbar.set_previous(previous_icon)

        # Icon "next"
        condition = True
        if next_icon is not None:
            if isinstance(next_icon, touchwizard.IconRef):
                if callable(next_icon.condition):
                    condition = next_icon.condition()
                else:
                    condition = next_icon.condition
                next_icon = next_icon.get_icon()
            if condition:
                next_icon.build()
                self.iconbar.set_next(next_icon)

        # Other icons
        for icon in icons:
            if isinstance(icon, touchwizard.IconRef):
                if callable(icon.condition):
                    condition = icon.condition()
                else:
                    condition = icon.condition
                if not condition:
                    continue
                icon = icon.get_icon()
            icon.build()
            self.iconbar.append(icon)

    def evt_next_page(self, event):
        """Debounce identical consecutive requests, then schedule the page
        switch; 'next_page' is re-registered by do_next_page."""
        if self.last_page_name is None or self.last_page_name != event.content:
            gobject.timeout_add(100, self.do_next_page, event, priority=gobject.PRIORITY_HIGH)
            self.unregister_event('next_page')

    def do_next_page(self, event):
        """Push the current page onto history and display the requested one,
        unless another switch happened less than 500ms ago."""
        now = time.time()
        name = event.content
        if not self.last_page_timestamp or (now - self.last_page_timestamp) > 0.5:
            logger.info('Page %r requested.', name)
            os.environ["TOUCHWIZARD_REQUESTED_PAGE"] = name
            self.current_page.panel.hide()
            self.current_page.panel.unparent()
            icon_states = self.iconbar.get_icon_states()
            self.history.append((self.current_page, icon_states))
            new_page = self.available_pages[name]
            self.iconbar.clear(keep_back=True)
            if new_page.need_loading:
                self.loading.show()
            gobject.idle_add(self.display_page, new_page)
        else:
            logger.warning('Page %s requested too quickly twice in a row (less than 500ms), not displaying', name)
        self.register_event('next_page')
        self.last_page_timestamp = now

    def evt_previous_page(self, event):
        """Debounce 'previous' actions; only one pending unwind at a time."""
        if not self.previous_page_locked:
            self.previous_page_locked = True
            if self.previous_page_timeout_id is not None:
                gobject.source_remove(self.previous_page_timeout_id)
            self.previous_page_timeout_id = gobject.timeout_add(300, self.do_previous_page, event, priority=gobject.PRIORITY_HIGH)

    def do_previous_page(self, event):
        """Unwind history back to the named page (or just one step when the
        event carries no name); quit the wizard when history is empty."""
        name = None
        if event.content:
            name = event.content
        # Fix: with an empty history the loop below never ran, leaving
        # ``previous`` unbound and raising NameError instead of quitting
        # (the intent of the old dead IndexError handler).
        if not self.history:
            self.evt_request_quit(event)
            return
        for page, icons in self.history[::-1]:
            try:
                previous, icons = self.history.pop()
            except IndexError:
                # logger.error('Previous page requested but history is empty.')
                self.evt_request_quit(event)
                return
            logger.info('Back to %r page.', previous.name)
            os.environ["TOUCHWIZARD_REQUESTED_PAGE"] = previous.name
            self.current_page.panel.hide()
            gobject.idle_add(self.current_page.panel.unparent)
            if previous.need_loading:
                self.loading.show()
            if not self.current_page.reuse:
                gobject.idle_add(self.current_page.panel.destroy)
            if name is None or page.name == name:
                break
            self.current_page = page
        gobject.idle_add(self.display_page, previous, icons)

    def evt_refresh_page(self, event):
        """Schedule a rebuild of the current page; re-registered in
        do_refresh_page."""
        gobject.idle_add(self.do_refresh_page, event)
        self.unregister_event('refresh_page')

    def do_refresh_page(self, event):
        """Destroy the current panel and re-display the same page."""
        name = self.current_page.name
        logger.info('Page %r refresh requested.', name)
        self.current_page.panel.hide()
        self.current_page.panel.unparent()
        gobject.idle_add(self.current_page.panel.destroy)
        new_page = self.available_pages[name]
        self.iconbar.clear(keep_back=True)
        if new_page.need_loading:
            self.loading.show()
        gobject.idle_add(self.display_page, new_page)
        self.register_event('refresh_page')

    def evt_clear_history(self, event):
        """Drop the whole page history, destroying the stored panels."""
        for page, icons in self.history:
            gobject.idle_add(page.panel.destroy)
        self.history = list()

    def evt_request_quit(self, event):
        """Run the current page's optional prepare_quit hook once, then
        broadcast 'wizard_quit'; further requests are no-ops."""
        # Replace the handler so a second request cannot re-enter.
        self.evt_request_quit = self.evt_request_quit_fake
        logger.info('Quit requested.')
        try:
            prepare_quit = getattr(self.current_page, "prepare_quit", None)
            if prepare_quit:
                if not callable(prepare_quit):
                    # prepare_quit may name a method on the panel instead.
                    prepare_quit = getattr(self.current_page.panel, prepare_quit, None)
                if callable(prepare_quit):
                    logger.info('prepare_quit callback found')
                    prepare_quit()
        except Exception as e:  # was Python-2-only "except Exception, e"
            logger.warning("Failed to call prepare_quit method in page %s: %s", self.current_page, e)
        self.launch_event('wizard_quit')

    def evt_request_quit_fake(self, event):
        """Swallow quit requests issued after the first one."""
        logger.error('Quit request rejected.')

    def evt_request_session(self, event):
        # NOTE(review): 'request_session'/'update_session' are not in the
        # register_event calls of __init__ — confirm they are wired elsewhere.
        self.launch_event('dispatch_session', self.session)

    def evt_update_session(self, event):
        self.session.update(event)
        self.launch_event('dispatch_session', self.session)

    def do_remove(self, actor):
        # Fix: was ``logger.info.debug(...)`` which would raise
        # AttributeError (bound methods have no ``debug`` attribute).
        # NOTE(review): ``actor.__name__`` assumes the child exposes that
        # attribute — confirm against the panel classes.
        logger.debug('Panel "%s" removed.', actor.__name__)

    def do_get_preferred_width(self, for_height):
        """Fixed width taken from the touchwizard configuration."""
        import touchwizard
        width = float(touchwizard.canvas_width)
        return width, width

    def do_get_preferred_height(self, for_width):
        """Fixed height taken from the touchwizard configuration."""
        import touchwizard
        height = float(touchwizard.canvas_height)
        return height, height

    def do_allocate(self, box, flags):
        """Lay out infobar (top), iconbar (bottom), loading widget and the
        current panel (middle, same box as the background)."""
        canvas_width = box.x2 - box.x1
        canvas_height = box.y2 - box.y1

        infobar_height = round(self.infobar.get_preferred_height(canvas_width)[1])
        infobar_box = clutter.ActorBox()
        infobar_box.x1 = 0
        infobar_box.y1 = 0
        infobar_box.x2 = canvas_width
        infobar_box.y2 = infobar_height
        self.infobar.allocate(infobar_box, flags)

        iconbar_height = round(self.iconbar.get_preferred_height(canvas_width)[1])
        iconbar_box = clutter.ActorBox()
        iconbar_box.x1 = 0
        iconbar_box.y1 = canvas_height - iconbar_height
        iconbar_box.x2 = canvas_width
        iconbar_box.y2 = canvas_height
        self.iconbar.allocate(iconbar_box, flags)

        loading_box = clutter.ActorBox()
        loading_box.x1 = self.loading_padding
        loading_box.y1 = infobar_height + self.loading_padding
        loading_box.x2 = canvas_width - self.loading_padding
        loading_box.y2 = canvas_height - iconbar_height - self.loading_padding
        self.loading.allocate(loading_box, flags)

        panel_box = clutter.ActorBox()
        panel_box.x1 = 0
        panel_box.y1 = infobar_height
        panel_box.x2 = canvas_width
        panel_box.y2 = canvas_height - iconbar_height
        if self.background is not None:
            self.background.allocate(panel_box, flags)
        if self.current_page is not None:
            self.current_page.panel.allocate(panel_box, flags)
        clutter.Actor.do_allocate(self, box, flags)

    def do_foreach(self, func, data=None):
        """Apply *func* to every child actor (Container protocol)."""
        children = [self.infobar, self.iconbar, self.loading]
        if self.background:
            children.append(self.background)
        if self.current_page:
            children.append(self.current_page.panel)
        for child in children:
            func(child, data)

    def do_paint(self):
        """Paint children back-to-front: background, iconbar, panel,
        infobar, loading widget."""
        if self.background:
            self.background.paint()
        self.iconbar.paint()
        if self.current_page:
            self.current_page.panel.paint()
        self.infobar.paint()
        self.loading.paint()

    def do_pick(self, color):
        self.do_paint()
def quick_launch(page, width=None, height=None, overlay=None, main_loop_run_cb=None, main_loop_stop_cb=None):
    """Create a clutter stage hosting a Canvas for *page* and run a main loop.

    :param page: first Page class to display (may be None for an empty shell)
    :param width/height: stage size; both None means use the touchwizard
        configured canvas size, otherwise the configuration is overwritten
    :param overlay: optional actor added on top of the canvas
    :param main_loop_run_cb/main_loop_stop_cb: alternative main-loop hooks;
        default to clutter.main / clutter.main_quit
    """
    if not logging._handlers:
        # NOTE(review): logging._handlers is a private stdlib attribute;
        # intent is "no handler configured yet" — confirm before upgrading
        # to a public API.  Install a default stderr handler.
        import sys
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(name)-12s %(levelname)s %(message)s',
                            stream=sys.stderr)
    logger.info('Initializing touchwizard app.')
    import touchwizard
    stage = clutter.Stage()
    # Fix: identity comparison with None ("is") instead of "== None".
    if width is None and height is None:
        width = touchwizard.canvas_width
        height = touchwizard.canvas_height
    else:
        touchwizard.canvas_width = width
        touchwizard.canvas_height = height
    stage.set_size(width, height)
    if page is not None:
        stage.set_title(page.title)
    canvas = Canvas(page)
    stage.add(canvas)
    if overlay is not None:
        logger.info('Adding overlay %s', overlay)
        stage.add(overlay)
        overlay.show()
    stage.show()
    main_loop_name = 'External'
    if main_loop_run_cb is None:
        main_loop_run_cb = clutter.main
        main_loop_name = 'Clutter'
    if main_loop_stop_cb is None:
        main_loop_stop_cb = clutter.main_quit

    # Renamed from ``quit`` to avoid shadowing the builtin.
    def on_stage_destroy(*args):
        logger.info('Quitting %s main loop by stage destroy', main_loop_name)
        main_loop_stop_cb()
        import sys
        # Hard-exit fallback in case the main loop does not stop.
        gobject.timeout_add_seconds(2, sys.exit)
    stage.connect('destroy', on_stage_destroy)

    class Quitter(easyevent.Listener):
        """Stops the main loop when the wizard broadcasts 'wizard_quit'."""
        def __init__(self):
            easyevent.Listener.__init__(self)
            self.register_event('wizard_quit')

        def evt_wizard_quit(self, event):
            # Fix: use the module logger (was logging.info on the root
            # logger, inconsistent with every other message here).
            logger.info('Quitting %s main loop by touchwizard button', main_loop_name)
            main_loop_stop_cb()
            import sys
            gobject.timeout_add_seconds(2, sys.exit)
    Quitter()
    logger.info('Running %s main loop.', main_loop_name)
    main_loop_run_cb()
if __name__ == '__main__':
    # Launch an empty wizard shell (no start page) when run directly.
    quick_launch(None)
| gpl-3.0 |
cernops/neutron | neutron/plugins/nuage/extensions/nuage_subnet.py | 15 | 1769 | # Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Attribute map merged into Neutron's 'subnets' resource by this extension:
#   - net_partition: settable on POST and PUT, free-form string or None
#   - nuage_subnet_template: settable on POST only, UUID or None
EXTENDED_ATTRIBUTES_2_0 = {
    'subnets': {
        'net_partition': {
            'allow_post': True,
            'allow_put': True,
            'is_visible': True,
            'default': None,
            'validate': {'type:string_or_none': None}
        },
        'nuage_subnet_template': {
            'allow_post': True,
            'allow_put': False,
            'is_visible': True,
            'default': None,
            'validate': {'type:uuid_or_none': None}
        },
    },
}
class Nuage_subnet(object):
    """Extension class supporting Nuage subnet."""

    @classmethod
    def get_name(cls):
        """Human-readable name of the extension."""
        return "Nuage subnet"

    @classmethod
    def get_alias(cls):
        """Machine alias used in API requests."""
        return "nuage-subnet"

    @classmethod
    def get_description(cls):
        """Short description of the extension."""
        return "Nuage subnet"

    @classmethod
    def get_namespace(cls):
        """XML namespace URI of the extension."""
        return "http://nuagenetworks.net/ext/subnets/api/v1.0"

    @classmethod
    def get_updated(cls):
        """Timestamp of the last extension update."""
        return "2014-01-01T10:00:00-00:00"

    def get_extended_resources(self, version):
        """Return the extra subnet attributes for API 2.0, else nothing."""
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| apache-2.0 |
cuedpc/edpcmentoring | edpcmentoring/mentoring/test_models.py | 2 | 1986 | from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import TestCase
from cuedmembers.models import Member
from .models import Relationship
class RelationshipActiveInactiveTestCase(TestCase):
    """Exercise the active()/inactive() queryset filters on Relationship."""
    fixtures = ['cuedmembers/test_users_and_members']

    def setUp(self):
        # Three active CUED members from the fixture data: one mentor,
        # two mentees (one active relationship, one inactive).
        mentor, first_mentee, second_mentee = Member.objects.active().all()[:3]
        self.active_relationship = Relationship(
            mentor=mentor.user, mentee=first_mentee.user, is_active=True)
        self.active_relationship.save()
        self.inactive_relationship = Relationship(
            mentor=mentor.user, mentee=second_mentee.user, is_active=False)
        self.inactive_relationship.save()

    def test_active(self):
        matches = list(Relationship.objects.active())
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].id, self.active_relationship.id)

    def test_inactive(self):
        matches = list(Relationship.objects.inactive())
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].id, self.inactive_relationship.id)
class RelationshipTestCase(TestCase):
    """Validation and uniqueness constraints on Relationship."""
    fixtures = ['cuedmembers/test_users_and_members']

    def test_cannot_have_identical_mentor_and_mentee(self):
        user = Member.objects.all()[0].user
        with self.assertRaises(ValidationError):
            Relationship(mentor=user, mentee=user).full_clean()

    def test_cannot_have_non_unique_mentor_mentee(self):
        first, second = [member.user for member in Member.objects.all()[:2]]
        # First save succeeds; a second row for the same pair must violate
        # the uniqueness constraint.
        Relationship(mentor=first, mentee=second, is_active=False).save()
        with self.assertRaises(IntegrityError):
            Relationship(mentor=first, mentee=second, is_active=True).save()
| mit |
bguillot/OpenUpgrade | openerp/tools/graph.py | 441 | 26118 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import operator
import math
class graph(object):
def __init__(self, nodes, transitions, no_ancester=None):
"""Initialize graph's object
@param nodes list of ids of nodes in the graph
@param transitions list of edges in the graph in the form (source_node, destination_node)
@param no_ancester list of nodes with no incoming edges
"""
self.nodes = nodes or []
self.edges = transitions or []
self.no_ancester = no_ancester or {}
trans = {}
for t in transitions:
trans.setdefault(t[0], [])
trans[t[0]].append(t[1])
self.transitions = trans
self.result = {}
def init_rank(self):
"""Computes rank of the nodes of the graph by finding initial feasible tree
"""
self.edge_wt = {}
for link in self.links:
self.edge_wt[link] = self.result[link[1]]['x'] - self.result[link[0]]['x']
tot_node = len(self.partial_order)
#do until all the nodes in the component are searched
while self.tight_tree()<tot_node:
list_node = []
list_edge = []
for node in self.nodes:
if node not in self.reachable_nodes:
list_node.append(node)
for edge in self.edge_wt:
if edge not in self.tree_edges:
list_edge.append(edge)
slack = 100
for edge in list_edge:
if ((edge[0] in self.reachable_nodes and edge[1] not in self.reachable_nodes) or
(edge[1] in self.reachable_nodes and edge[0] not in self.reachable_nodes)):
if slack > self.edge_wt[edge]-1:
slack = self.edge_wt[edge]-1
new_edge = edge
if new_edge[0] not in self.reachable_nodes:
delta = -(self.edge_wt[new_edge]-1)
else:
delta = self.edge_wt[new_edge]-1
for node in self.result:
if node in self.reachable_nodes:
self.result[node]['x'] += delta
for edge in self.edge_wt:
self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
self.init_cutvalues()
def tight_tree(self):
self.reachable_nodes = []
self.tree_edges = []
self.reachable_node(self.start)
return len(self.reachable_nodes)
def reachable_node(self, node):
"""Find the nodes of the graph which are only 1 rank apart from each other
"""
if node not in self.reachable_nodes:
self.reachable_nodes.append(node)
for edge in self.edge_wt:
if edge[0]==node:
if self.edge_wt[edge]==1:
self.tree_edges.append(edge)
if edge[1] not in self.reachable_nodes:
self.reachable_nodes.append(edge[1])
self.reachable_node(edge[1])
def init_cutvalues(self):
"""Initailize cut values of edges of the feasible tree.
Edges with negative cut-values are removed from the tree to optimize rank assignment
"""
self.cut_edges = {}
self.head_nodes = []
i=0
for edge in self.tree_edges:
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[i]
self.head_component(self.start, rest_edges)
i+=1
positive = 0
negative = 0
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
negative+=1
else:
for dest_node in self.transitions[source_node]:
if dest_node in self.head_nodes:
positive+=1
self.cut_edges[edge] = positive - negative
def head_component(self, node, rest_edges):
"""Find nodes which are reachable from the starting node, after removing an edge
"""
if node not in self.head_nodes:
self.head_nodes.append(node)
for edge in rest_edges:
if edge[0]==node:
self.head_component(edge[1],rest_edges)
def process_ranking(self, node, level=0):
"""Computes initial feasible ranking after making graph acyclic with depth-first search
"""
if node not in self.result:
self.result[node] = {'y': None, 'x':level, 'mark':0}
else:
if level > self.result[node]['x']:
self.result[node]['x'] = level
if self.result[node]['mark']==0:
self.result[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.process_ranking(sec_end, level+1)
def make_acyclic(self, parent, node, level, tree):
"""Computes Partial-order of the nodes with depth-first search
"""
if node not in self.partial_order:
self.partial_order[node] = {'level':level, 'mark':0}
if parent:
tree.append((parent, node))
if self.partial_order[node]['mark']==0:
self.partial_order[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.links.append((node, sec_end))
self.make_acyclic(node, sec_end, level+1, tree)
return tree
def rev_edges(self, tree):
"""reverse the direction of the edges whose source-node-partail_order> destination-node-partail_order
to make the graph acyclic
"""
Is_Cyclic = False
i=0
for link in self.links:
src = link[0]
des = link[1]
edge_len = self.partial_order[des]['level'] - self.partial_order[src]['level']
if edge_len < 0:
del self.links[i]
self.links.insert(i, (des, src))
self.transitions[src].remove(des)
self.transitions.setdefault(des, []).append(src)
Is_Cyclic = True
elif math.fabs(edge_len) > 1:
Is_Cyclic = True
i += 1
return Is_Cyclic
def exchange(self, e, f):
"""Exchange edges to make feasible-tree optimized
:param e: edge with negative cut-value
:param f: new edge with minimum slack-value
"""
del self.tree_edges[self.tree_edges.index(e)]
self.tree_edges.append(f)
self.init_cutvalues()
def enter_edge(self, edge):
"""Finds a new_edge with minimum slack value to replace an edge with negative cut-value
@param edge edge with negative cut-value
"""
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[rest_edges.index(edge)]
self.head_component(self.start, rest_edges)
if edge[1] in self.head_nodes:
l = []
for node in self.result:
if node not in self.head_nodes:
l.append(node)
self.head_nodes = l
slack = 100
new_edge = edge
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
if slack>(self.edge_wt[edge]-1):
slack = self.edge_wt[edge]-1
new_edge = (source_node, dest_node)
return new_edge
def leave_edge(self):
"""Returns the edge with negative cut_value(if exists)
"""
if self.critical_edges:
for edge in self.critical_edges:
self.cut_edges[edge] = 0
for edge in self.cut_edges:
if self.cut_edges[edge]<0:
return edge
return None
def finalize_rank(self, node, level):
self.result[node]['x'] = level
for destination in self.optimal_edges.get(node, []):
self.finalize_rank(destination, level+1)
def normalize(self):
"""The ranks are normalized by setting the least rank to zero.
"""
least_rank = min(map(lambda x: x['x'], self.result.values()))
if least_rank!=0:
for node in self.result:
self.result[node]['x']-=least_rank
def make_chain(self):
"""Edges between nodes more than one rank apart are replaced by chains of unit
length edges between temporary nodes.
"""
for edge in self.edge_wt:
if self.edge_wt[edge]>1:
self.transitions[edge[0]].remove(edge[1])
start = self.result[edge[0]]['x']
end = self.result[edge[1]]['x']
for rank in range(start+1, end):
if not self.result.get((rank, 'temp'), False):
self.result[(rank, 'temp')] = {'y': None, 'x': rank, 'mark': 0}
for rank in range(start, end):
if start==rank:
self.transitions[edge[0]].append((rank+1, 'temp'))
elif rank==end-1:
self.transitions.setdefault((rank, 'temp'), []).append(edge[1])
else:
self.transitions.setdefault((rank, 'temp'), []).append((rank+1, 'temp'))
def init_order(self, node, level):
"""Initialize orders the nodes in each rank with depth-first search
"""
if not self.result[node]['y']:
self.result[node]['y'] = self.order[level]
self.order[level] += 1
for sec_end in self.transitions.get(node, []):
if node!=sec_end:
self.init_order(sec_end, self.result[sec_end]['x'])
def order_heuristic(self):
for i in range(12):
self.wmedian()
def wmedian(self):
"""Applies median heuristic to find optimzed order of the nodes with in their ranks
"""
for level in self.levels:
node_median = []
nodes = self.levels[level]
for node in nodes:
node_median.append((node, self.median_value(node, level-1)))
sort_list = sorted(node_median, key=operator.itemgetter(1))
new_list = [tuple[0] for tuple in sort_list]
self.levels[level] = new_list
order = 0
for node in nodes:
self.result[node]['y'] = order
order +=1
def median_value(self, node, adj_rank):
"""Returns median value of a vertex , defined as the median position of the adjacent vertices
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
adj_nodes = self.adj_position(node, adj_rank)
l = len(adj_nodes)
m = l/2
if l==0:
return -1.0
elif l%2 == 1:
return adj_nodes[m]#median of the middle element
elif l==2:
return (adj_nodes[0]+adj_nodes[1])/2
else:
left = adj_nodes[m-1] - adj_nodes[0]
right = adj_nodes[l-1] - adj_nodes[m]
return ((adj_nodes[m-1]*right) + (adj_nodes[m]*left))/(left+right)
def adj_position(self, node, adj_rank):
"""Returns list of the present positions of the nodes adjacent to node in the given adjacent rank.
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
pre_level_nodes = self.levels.get(adj_rank, [])
adj_nodes = []
if pre_level_nodes:
for src in pre_level_nodes:
if self.transitions.get(src) and node in self.transitions[src]:
adj_nodes.append(self.result[src]['y'])
return adj_nodes
def preprocess_order(self):
levels = {}
for r in self.partial_order:
l = self.result[r]['x']
levels.setdefault(l,[])
levels[l].append(r)
self.levels = levels
def graph_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
mid_pos = 0.0
max_level = max(map(lambda x: len(x), self.levels.values()))
for level in self.levels:
if level:
no = len(self.levels[level])
factor = (max_level - no) * 0.10
list = self.levels[level]
list.reverse()
if no%2==0:
first_half = list[no/2:]
factor = -factor
else:
first_half = list[no/2+1:]
if max_level==1:#for the case when horizontal graph is there
self.result[list[no/2]]['y'] = mid_pos + (self.result[list[no/2]]['x']%2 * 0.5)
else:
self.result[list[no/2]]['y'] = mid_pos + factor
last_half = list[:no/2]
i=1
for node in first_half:
self.result[node]['y'] = mid_pos - (i + factor)
i += 1
i=1
for node in last_half:
self.result[node]['y'] = mid_pos + (i + factor)
i += 1
else:
self.max_order += max_level+1
mid_pos = self.result[self.start]['y']
def tree_order(self, node, last=0):
mid_pos = self.result[node]['y']
l = self.transitions.get(node, [])
l.reverse()
no = len(l)
rest = no%2
first_half = l[no/2+rest:]
last_half = l[:no/2]
for i, child in enumerate(first_half):
self.result[child]['y'] = mid_pos - (i+1 - (0 if rest else 0.5))
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
last = self.tree_order(child, last)
if rest:
mid_node = l[no/2]
self.result[mid_node]['y'] = mid_pos
if self.transitions.get(mid_node, False):
if last:
self.result[mid_node]['y'] = last + len(self.transitions[mid_node])/2 + 1
if node!=mid_node:
last = self.tree_order(mid_node)
else:
if last:
self.result[mid_node]['y'] = last + 1
self.result[node]['y'] = self.result[mid_node]['y']
mid_pos = self.result[node]['y']
i=1
last_child = None
for child in last_half:
self.result[child]['y'] = mid_pos + (i - (0 if rest else 0.5))
last_child = child
i += 1
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
if node!=child:
last = self.tree_order(child, last)
if last_child:
last = self.result[last_child]['y']
return last
def process_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
if self.Is_Cyclic:
max_level = max(map(lambda x: len(x), self.levels.values()))
if max_level%2:
self.result[self.start]['y'] = (max_level+1)/2 + self.max_order + (self.max_order and 1)
else:
self.result[self.start]['y'] = max_level /2 + self.max_order + (self.max_order and 1)
self.graph_order()
else:
self.result[self.start]['y'] = 0
self.tree_order(self.start, 0)
min_order = math.fabs(min(map(lambda x: x['y'], self.result.values())))
index = self.start_nodes.index(self.start)
same = False
roots = []
if index>0:
for start in self.start_nodes[:index]:
same = True
for edge in self.tree_list[start][1:]:
if edge in self.tree_list[self.start]:
continue
else:
same = False
break
if same:
roots.append(start)
if roots:
min_order += self.max_order
else:
min_order += self.max_order + 1
for level in self.levels:
for node in self.levels[level]:
self.result[node]['y'] += min_order
if roots:
roots.append(self.start)
one_level_el = self.tree_list[self.start][0][1]
base = self.result[one_level_el]['y']# * 2 / (index + 2)
no = len(roots)
first_half = roots[:no/2]
if no%2==0:
last_half = roots[no/2:]
else:
last_half = roots[no/2+1:]
factor = -math.floor(no/2)
for start in first_half:
self.result[start]['y'] = base + factor
factor += 1
if no%2:
self.result[roots[no/2]]['y'] = base + factor
factor +=1
for start in last_half:
self.result[start]['y'] = base + factor
factor += 1
self.max_order = max(map(lambda x: x['y'], self.result.values()))
    def find_starts(self):
        """Finds other start nodes of the graph in the case when graph is disconnected
        """
        # Nodes not reached from the first component's traversal.
        rem_nodes = []
        for node in self.nodes:
            if not self.partial_order.get(node):
                rem_nodes.append(node)
        cnt = 0  # NOTE(review): never used.
        while True:
            if len(rem_nodes)==1:
                self.start_nodes.append(rem_nodes[0])
                break
            else:
                count = 0
                new_start = rem_nodes[0]
                largest_tree = []
                # Choose the remaining node whose acyclic spanning tree
                # covers the most nodes as this component's start node.
                for node in rem_nodes:
                    self.partial_order = {}
                    tree = self.make_acyclic(None, node, 0, [])
                    if len(tree)+1 > count:
                        count = len(tree) + 1
                        new_start = node
                        largest_tree = tree
                else:
                    # for/else: runs after the loop finishes normally.
                    if not largest_tree:
                        new_start = rem_nodes[0]
                rem_nodes.remove(new_start)
                self.start_nodes.append(new_start)
                # Everything covered by the winning tree belongs to this
                # component, so it needs no start node of its own.
                for edge in largest_tree:
                    if edge[0] in rem_nodes:
                        rem_nodes.remove(edge[0])
                    if edge[1] in rem_nodes:
                        rem_nodes.remove(edge[1])
                if not rem_nodes:
                    break
    def rank(self):
        """Finds the optimized rank of the nodes using Network-simplex algorithm

        Assigns an 'x' coordinate (rank) to every node of the current
        component and stores per-edge rank spans in ``self.edge_wt``.
        """
        self.levels = {}
        self.critical_edges = []
        self.partial_order = {}
        self.links = []
        self.Is_Cyclic = False
        # Build a spanning tree from the start node and reverse any back
        # edges found; a reversal marks the component as cyclic.
        self.tree_list[self.start] = self.make_acyclic(None, self.start, 0, [])
        self.Is_Cyclic = self.rev_edges(self.tree_list[self.start])
        self.process_ranking(self.start)
        self.init_rank()
        #make cut values of all tree edges to 0 to optimize feasible tree
        e = self.leave_edge()
        while e :
            f = self.enter_edge(e)
            if e==f:
                # No improving replacement exists for this edge.
                self.critical_edges.append(e)
            else:
                self.exchange(e,f)
            e = self.leave_edge()
        #finalize rank using optimum feasible tree
        #        self.optimal_edges = {}
        #        for edge in self.tree_edges:
        #            source = self.optimal_edges.setdefault(edge[0], [])
        #            source.append(edge[1])
        #        self.finalize_rank(self.start, 0)
        #normalization
        self.normalize()
        # Edge weight becomes the number of ranks the edge crosses.
        for edge in self.edge_wt:
            self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
def order_in_rank(self):
"""Finds optimized order of the nodes within their ranks using median heuristic
"""
self.make_chain()
self.preprocess_order()
self.order = {}
max_rank = max(map(lambda x: x, self.levels.keys()))
for i in range(max_rank+1):
self.order[i] = 0
self.init_order(self.start, self.result[self.start]['x'])
for level in self.levels:
self.levels[level].sort(lambda x, y: cmp(self.result[x]['y'], self.result[y]['y']))
self.order_heuristic()
self.process_order()
    def process(self, starting_node):
        """Process the graph to find ranks and order of the nodes.

        Top-level driver: for every connected component it runs the
        network-simplex ranking and then the in-rank ordering heuristic.

        @param starting_node list of nodes from where to start the graph search
        """
        self.start_nodes = starting_node or []
        self.partial_order = {}
        self.links = []
        self.tree_list = {}
        if self.nodes:
            if self.start_nodes:
                #add dummy edges to the nodes which does not have any incoming edges
                tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
                for node in self.no_ancester:
                    for sec_node in self.transitions.get(node, []):
                        if sec_node in self.partial_order.keys():
                            self.transitions[self.start_nodes[0]].append(node)
                            break
                # Re-run the traversal now that the dummy edges are in place.
                self.partial_order = {}
                tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
            # if graph is disconnected or no start-node is given
            #than to find starting_node for each component of the node
            if len(self.nodes) > len(self.partial_order):
                self.find_starts()
            self.max_order = 0
            #for each component of the graph find ranks and order of the nodes
            for s in self.start_nodes:
                self.start = s
                self.rank()   # First step: network simplex algorithm
                self.order_in_rank()    #Second step: ordering nodes within ranks
def __str__(self):
result = ''
for l in self.levels:
result += 'PosY: ' + str(l) + '\n'
for node in self.levels[l]:
result += '\tPosX: '+ str(self.result[node]['y']) + ' - Node:' + str(node) + "\n"
return result
def scale(self, maxx, maxy, nwidth=0, nheight=0, margin=20):
"""Computes actual co-ordiantes of the nodes
"""
#for flat edges ie. source an destination nodes are on the same rank
for src in self.transitions:
for des in self.transitions[src]:
if self.result[des]['x'] - self.result[src]['x'] == 0:
self.result[src]['x'] += 0.08
self.result[des]['x'] -= 0.08
factorX = maxx + nheight
factorY = maxy + nwidth
for node in self.result:
self.result[node]['y'] = (self.result[node]['y']) * factorX + margin
self.result[node]['x'] = (self.result[node]['x']) * factorY + margin
def result_get(self):
return self.result
# Ad-hoc demo / smoke test: lay out a small module-dependency graph and
# render it to graph.png with PIL (the layout 'y' is used as the screen X
# coordinate and 'x' as the screen Y coordinate).
if __name__=='__main__':
    starting_node = ['profile'] # put here nodes with flow_start=True
    nodes = ['project','account','hr','base','product','mrp','test','profile']
    transitions = [
        ('profile','mrp'),
        ('mrp','project'),
        ('project','product'),
        ('mrp','hr'),
        ('mrp','test'),
        ('project','account'),
        ('project','hr'),
        ('product','base'),
        ('account','product'),
        ('account','test'),
        ('account','base'),
        ('hr','base'),
        ('test','base')
    ]
    radius = 20
    g = graph(nodes, transitions)
    g.process(starting_node)
    g.scale(radius*3,radius*3, radius, radius)
    from PIL import Image
    from PIL import ImageDraw
    img = Image.new("RGB", (800, 600), "#ffffff")
    draw = ImageDraw.Draw(img)
    result = g.result_get()
    node_res = {}
    for node in nodes:
        node_res[node] = result[node]
    # Draw each node as a circle with its name, then the edges as lines.
    for name,node in node_res.items():
        draw.arc( (int(node['y']-radius), int(node['x']-radius),int(node['y']+radius), int(node['x']+radius) ), 0, 360, (128,128,128))
        draw.text( (int(node['y']), int(node['x'])), str(name), (128,128,128))
    for t in transitions:
        draw.line( (int(node_res[t[0]]['y']), int(node_res[t[0]]['x']),int(node_res[t[1]]['y']),int(node_res[t[1]]['x'])),(128,128,128) )
    img.save("graph.png", "PNG")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bdang2012/taiga-back | taiga/projects/votes/serializers.py | 10 | 1172 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api import serializers
from taiga.users.models import User
class VoterSerializer(serializers.ModelSerializer):
    """Serializer exposing the minimal public info about a voting user."""
    # full_name is sourced from User.get_full_name() and is optional on
    # input (required=False).
    full_name = serializers.CharField(source='get_full_name', required=False)
    class Meta:
        model = User
        fields = ('id', 'username', 'full_name')
| agpl-3.0 |
samzhang111/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 8 | 12376 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
                      strategy, statistics, missing_values):
    """Utility function for testing imputation for a given strategy.

    Test:
        - along the two axes
        - with dense and sparse arrays
    Check that:
        - the statistics (mean, median, mode) are correct
        - the missing values are imputed correctly

    X: input with missing values; X_true: expected imputed output;
    statistics: expected per-feature statistic (NaN for dropped features);
    missing_values: the placeholder ("NaN" or 0)."""
    err_msg = "Parameters: strategy = %s, missing_values = %s, " \
              "axis = {0}, sparse = {1}" % (strategy, missing_values)
    # Normal matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    X_trans = imputer.fit(X).transform(X.copy())
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, False))
    assert_array_equal(X_trans, X_true, err_msg.format(0, False))
    # Normal matrix, axis = 1
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(X.transpose())
    # With axis=1 an all-missing row cannot be imputed, so a statistic
    # containing NaN must make transform() raise.
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform, X.copy().transpose())
    else:
        X_trans = imputer.transform(X.copy().transpose())
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, False))
    # Sparse matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    imputer.fit(sparse.csc_matrix(X))
    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
    if sparse.issparse(X_trans):
        X_trans = X_trans.toarray()
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, True))
    assert_array_equal(X_trans, X_true, err_msg.format(0, True))
    # Sparse matrix, axis = 1
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(sparse.csc_matrix(X.transpose()))
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform,
                      sparse.csc_matrix(X.copy().transpose()))
    else:
        X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
        if sparse.issparse(X_trans):
            X_trans = X_trans.toarray()
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, True))
def test_imputation_shape():
    """Imputation must preserve the (n_samples, n_features) shape for
    every strategy, on dense as well as sparse input."""
    data = np.random.randn(10, 2)
    data[::2] = np.nan
    for strategy in ['mean', 'median', 'most_frequent']:
        imp = Imputer(strategy=strategy)
        for X_in in (data, sparse.csr_matrix(data)):
            assert_equal(imp.fit_transform(X_in).shape, (10, 2))
def test_imputation_mean_median_only_zero():
    # Test imputation using the mean and median strategies, when
    # missing_values == 0.
    X = np.array([
        [np.nan, 0, 0, 0, 5],
        [np.nan, 1, 0, np.nan, 3],
        [np.nan, 2, 0, 0, 0],
        [np.nan, 6, 0, 5, 13],
    ])
    # Columns whose statistic comes out as NaN (see statistics_mean below)
    # are dropped, so only two columns remain in the expected output.
    X_imputed_mean = np.array([
        [3, 5],
        [1, 3],
        [2, 7],
        [6, 13],
    ])
    statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
    # Behaviour of median with NaN is undefined, e.g. different results in
    # np.median and np.ma.median
    X_for_median = X[:, [0, 1, 2, 4]]
    X_imputed_median = np.array([
        [2, 5],
        [1, 3],
        [2, 5],
        [6, 13],
    ])
    statistics_median = [np.nan, 2, np.nan, 5]
    _check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
    _check_statistics(X_for_median, X_imputed_median, "median",
                      statistics_median, 0)
def safe_median(arr, *args, **kwargs):
    """np.median that yields NaN on empty input instead of raising
    (np.median([]) raises a TypeError for numpy >= 1.10.1)."""
    n = arr.size if hasattr(arr, 'size') else len(arr)
    if n == 0:
        return np.nan
    return np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
    """np.mean that yields NaN on empty input instead of warning
    (np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1)."""
    n = arr.size if hasattr(arr, 'size') else len(arr)
    if n == 0:
        return np.nan
    return np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
    # Test imputation using the mean and median strategies, when
    # missing_values != 0.
    rng = np.random.RandomState(0)
    dim = 10
    dec = 10
    shape = (dim * dim, dim + dec)
    zeros = np.zeros(shape[0])
    values = np.arange(1, shape[0]+1)
    values[4::2] = - values[4::2]
    # Each case: (strategy, missing placeholder, reference statistic fn).
    tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
             ("mean", 0, lambda z, v, p: np.mean(v)),
             ("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
             ("median", 0, lambda z, v, p: np.median(v))]
    for strategy, test_missing_values, true_value_fun in tests:
        X = np.empty(shape)
        X_true = np.empty(shape)
        true_statistics = np.empty(shape[1])
        # Create a matrix X with columns
        #    - with only zeros,
        #    - with only missing values
        #    - with zeros, missing values and values
        # And a matrix X_true containing all true values
        for j in range(shape[1]):
            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
            nb_missing_values = max(shape[0] + dec * dec
                                    - (j + dec) * (j + dec), 0)
            nb_values = shape[0] - nb_zeros - nb_missing_values
            z = zeros[:nb_zeros]
            p = np.repeat(test_missing_values, nb_missing_values)
            v = values[rng.permutation(len(values))[:nb_values]]
            true_statistics[j] = true_value_fun(z, v, p)
            # Create the columns
            X[:, j] = np.hstack((v, z, p))
            if 0 == test_missing_values:
                X_true[:, j] = np.hstack((v,
                                          np.repeat(
                                              true_statistics[j],
                                              nb_missing_values + nb_zeros)))
            else:
                X_true[:, j] = np.hstack((v,
                                          z,
                                          np.repeat(true_statistics[j],
                                                    nb_missing_values)))
            # Shuffle them the same way
            np.random.RandomState(j).shuffle(X[:, j])
            np.random.RandomState(j).shuffle(X_true[:, j])
        # Mean doesn't support columns containing NaNs, median does
        if strategy == "median":
            cols_to_keep = ~np.isnan(X_true).any(axis=0)
        else:
            cols_to_keep = ~np.isnan(X_true).all(axis=0)
        X_true = X_true[:, cols_to_keep]
        _check_statistics(X, X_true, strategy,
                          true_statistics, test_missing_values)
def test_imputation_median_special_cases():
    # Test median imputation with sparse boundary cases
    # (rows below are transposed into feature columns; the last entry of
    # each column is the NaN that must be replaced by that column's median).
    X = np.array([
        [0, np.nan, np.nan],  # odd: implicit zero
        [5, np.nan, np.nan],  # odd: explicit nonzero
        [0, 0, np.nan],    # even: average two zeros
        [-5, 0, np.nan],   # even: avg zero and neg
        [0, 5, np.nan],    # even: avg zero and pos
        [4, 5, np.nan],    # even: avg nonzeros
        [-4, -5, np.nan],  # even: avg negatives
        [-1, 2, np.nan],   # even: crossing neg and pos
    ]).transpose()
    X_imputed_median = np.array([
        [0, 0, 0],
        [5, 5, 5],
        [0, 0, 0],
        [-5, 0, -2.5],
        [0, 5, 2.5],
        [4, 5, 4.5],
        [-4, -5, -4.5],
        [-1, 2, .5],
    ]).transpose()
    statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
    _check_statistics(X, X_imputed_median, "median",
                      statistics_median, 'NaN')
def test_imputation_most_frequent():
    # Test imputation using the most-frequent strategy.
    # -1 is the missing-value placeholder; column 0 (all missing) is
    # dropped, matching the NaN in the expected statistics below.
    X = np.array([
        [-1, -1, 0, 5],
        [-1, 2, -1, 3],
        [-1, 1, 3, -1],
        [-1, 2, 3, 7],
    ])
    X_true = np.array([
        [2, 0, 5],
        [2, 3, 3],
        [1, 3, 3],
        [2, 3, 7],
    ])
    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test will fail after an update of scipy, Imputer will need to be updated
    # to be consistent with the new (correct) behaviour
    _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
    """Imputer must be usable as a Pipeline step under GridSearchCV."""
    pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
                         ('tree', tree.DecisionTreeRegressor(random_state=0))])
    param_grid = {
        'imputer__strategy': ["mean", "median", "most_frequent"],
        'imputer__axis': [0, 1]
    }
    n = 100
    features = sparse_random_matrix(n, n, density=0.10)
    target = sparse_random_matrix(n, 1, density=0.10).toarray()
    searcher = grid_search.GridSearchCV(pipeline, param_grid)
    searcher.fit(features, target)
def test_imputation_pickle():
    """A fitted Imputer must transform identically after a pickle
    round-trip, for every strategy."""
    import pickle
    n = 100
    X = sparse_random_matrix(n, n, density=0.10)
    for strategy in ("mean", "median", "most_frequent"):
        imputer = Imputer(missing_values=0, strategy=strategy)
        imputer.fit(X)
        restored = pickle.loads(pickle.dumps(imputer))
        assert_array_equal(imputer.transform(X.copy()),
                           restored.transform(X.copy()),
                           "Fail to transform the data after pickling "
                           "(strategy = %s)" % (strategy))
def test_imputation_copy():
    # Test imputation with copy: check for each combination of copy flag,
    # dense/sparse format and axis whether mutating the transformed output
    # is (or is not) reflected in the input.
    X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
    # copy=True, dense => copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_false(np.all(X == Xt))
    # copy=True, sparse csr => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, dense => no copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_true(np.all(X == Xt))
    # copy=False, sparse csr, axis=1 => no copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))
    # copy=False, sparse csc, axis=0 => no copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))
    # copy=False, sparse csr, axis=0 => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, sparse csc, axis=1 => copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, sparse csr, axis=1, missing_values=0 => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=0, strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    assert_false(sparse.issparse(Xt))
    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
    # made, even if copy=False.
| bsd-3-clause |
repotvsupertuga/tvsupertuga.repository | plugin.video.SportsDevil/lib/utils/mycrypt.py | 15 | 17416 | #!/usr/bin/env python2
#!python
'''
Implement openssl compatible AES-256 CBC mode encryption/decryption.
This module provides encrypt() and decrypt() functions that are compatible
with the openssl algorithms.
This is basically a python encoding of my C++ work on the Cipher class
using the Crypto.Cipher.AES class.
URL: http://projects.joelinoff.com/cipher-1.1/doxydocs/html/
'''
# LICENSE
#
# Copyright (c) 2014 Joe Linoff
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import base64
import hashlib
import os
import re
import sys
from getpass import getpass
try:
from Crypto.Cipher import AES
except ImportError:
import pyaes as AES
VERSION='1.1'
# ================================================================
# get_key_and_iv
# ================================================================
def get_key_and_iv(password, salt, klen=32, ilen=16, msgdgst='md5'):
    '''
    Derive the key and the IV from the given password and salt, using the
    OpenSSL EVP_BytesToKey scheme: D_1 = H(pw|salt),
    D_i = H(D_{i-1}|pw|salt), and key|iv is D_1|D_2|... truncated to
    klen+ilen bytes.

    CITATION: http://stackoverflow.com/questions/13907841/implement-openssl-aes-encryption-in-python

    @param password The password to use as the seed.
    @param salt     The salt (8 bytes for openssl compatibility).
    @param klen     The key length in bytes (32 for AES-256).
    @param ilen     The initialization vector length in bytes.
    @param msgdgst  The hashlib message digest algorithm name.
    @returns (key, iv), or (None, None) on a unicode decoding problem.
    '''
    # hashlib is already imported at module level; no need for __import__.
    mdf = getattr(hashlib, msgdgst)
    password = password.encode('ascii', 'ignore')  # convert to ASCII
    try:
        maxlen = klen + ilen
        block = mdf(password + salt).digest()
        keyiv = block
        # BUG FIX: the old loop compared the number of digest *blocks*
        # against a *byte* count, running ~45 unnecessary digest rounds.
        # Stop as soon as enough bytes have been produced; the generated
        # prefix (and thus the key/iv) is identical.
        while len(keyiv) < maxlen:
            block = mdf(block + password + salt).digest()
            keyiv += block
        key = keyiv[:klen]
        iv = keyiv[klen:klen + ilen]
        return key, iv
    except UnicodeDecodeError:
        return None, None
# ================================================================
# encrypt
# ================================================================
def encrypt(password, plaintext, chunkit=True, msgdgst='md5'):
    '''
    Encrypt the plaintext using the password using an openssl
    compatible encryption algorithm. It is the same as creating a file
    with plaintext contents and running openssl like this:
    $ cat plaintext
    <plaintext>
    $ openssl enc -e -aes-256-cbc -base64 -salt \\
        -pass pass:<password> -n plaintext
    @param password The password.
    @param plaintext The plaintext to encrypt.
    @param chunkit Flag that tells encrypt to split the ciphertext
                   into 64 character (MIME encoded) lines.
                   This does not affect the decrypt operation.
    @param msgdgst The message digest algorithm.
    @returns the base64-encoded ciphertext, or None if the key could
             not be derived.
    '''
    # Fresh random 8-byte salt per message, as openssl does with -salt.
    salt = os.urandom(8)
    key, iv = get_key_and_iv(password, salt, msgdgst=msgdgst)
    if key is None:
        return None
    # PKCS#7 padding: always pad, up to a full 16-byte AES block.
    padding_len = 16 - (len(plaintext) % 16)
    padded_plaintext = plaintext + (chr(padding_len) * padding_len)
    # Encrypt
    cipher = AES.new(key, AES.MODE_CBC, iv)
    ciphertext = cipher.encrypt(padded_plaintext)
    # Make openssl compatible: prepend the 'Salted__' magic marker and
    # the salt before base64 encoding, like openssl's salted format.
    # I first discovered this when I wrote the C++ Cipher class.
    # CITATION: http://projects.joelinoff.com/cipher-1.1/doxydocs/html/
    # NOTE(review): 'Salted__' + salt mixes str and bytes, and 'xrange'
    # below does not exist on Python 3 -- this module targets Python 2
    # only; confirm before running it under Python 3.
    openssl_ciphertext = 'Salted__' + salt + ciphertext
    b64 = base64.b64encode(openssl_ciphertext)
    if not chunkit:
        return b64
    # MIME-style wrapping at 64 characters per line.
    LINELEN = 64
    chunk = lambda s: '\n'.join(s[i:min(i+LINELEN, len(s))]
                                for i in xrange(0, len(s), LINELEN))
    return chunk(b64)
# ================================================================
# decrypt
# ================================================================
def decrypt(password, ciphertext, msgdgst='md5'):
    '''
    Decrypt the ciphertext using the password using an openssl
    compatible decryption algorithm. It is the same as creating a file
    with ciphertext contents and running openssl like this:
    $ cat ciphertext
    # ENCRYPTED
    <ciphertext>
    $ egrep -v '^#|^$' | \\
    openssl enc -d -aes-256-cbc -base64 -salt -pass pass:<password> -in ciphertext
    @param password   The password.
    @param ciphertext The ciphertext to decrypt.
    @param msgdgst    The message digest algorithm.
    @returns the decrypted data, or None if the key could not be derived.
    '''
    # unfilter -- ignore blank lines and comments
    filtered = ''
    for line in ciphertext.split('\n'):
        line = line.strip()
        if re.search('^\s*$', line) or re.search('^\s*#', line):
            continue
        filtered += line + '\n'
    # Base64 decode
    raw = base64.b64decode(filtered)
    # Openssl salted format: 8-byte magic marker, then the 8-byte salt.
    # NOTE(review): comparing raw[:8] with the str 'Salted__' and the
    # ord() call below assume Python 2 str data; on Python 3 these are
    # bytes operations -- this module targets Python 2 only.
    assert( raw[:8] == 'Salted__' )
    salt = raw[8:16] # get the salt
    # Now create the key and iv.
    key, iv = get_key_and_iv(password, salt, msgdgst=msgdgst)
    if key is None:
        return None
    # The original ciphertext
    ciphertext = raw[16:]
    # Decrypt
    cipher = AES.new(key, AES.MODE_CBC, iv)
    padded_plaintext = cipher.decrypt(ciphertext)
    # Strip PKCS#7 padding: the last byte encodes the pad length.
    padding_len = ord(padded_plaintext[-1])
    plaintext = padded_plaintext[:-padding_len]
    return plaintext
# ================================================================
# _open_ios
# ================================================================
def _open_ios(args):
'''
Open the IO files.
'''
ifp = sys.stdin
ofp = sys.stdout
if args.input is not None:
try:
ifp = open(args.input, 'r')
except IOError:
print 'ERROR: can\'t read file: %s' % (args.input)
sys.exit(1)
if args.output is not None:
try:
ifp = open(args.output, 'w')
except IOError:
print 'ERROR: can\'t write file: %s' % (args.output)
sys.exit(1)
return ifp, ofp
# ================================================================
# _close_ios
# ================================================================
def _close_ios(ifp, ofp):
'''
Close the IO files if necessary.
'''
if ifp != sys.stdin:
ifp.close()
if ofp != sys.stdout:
ofp.close()
# ================================================================
# _runenc
# ================================================================
def _runenc(args):
    '''
    Encrypt mode: read plaintext from the input stream and write the
    base64 ciphertext to the output stream. Prompts twice for a
    passphrase (until both entries match) unless one was given.

    Fixes: the Python 2-only `print` statements are replaced by
    single-argument print() calls, which behave identically on
    Python 2 (print('') emits a blank line) and Python 3.
    '''
    if args.passphrase is None:
        while True:
            passphrase = getpass('Passphrase: ')
            tmp = getpass('Re-enter passphrase: ')
            if passphrase == tmp:
                break
            print('')
            print('Passphrases don\'t match, please try again.')
    else:
        passphrase = args.passphrase
    ifp, ofp = _open_ios(args)
    text = ifp.read()
    out = encrypt(passphrase, text, msgdgst=args.msgdgst)
    ofp.write(out + '\n')
    _close_ios(ifp, ofp)
# ================================================================
# _rundec
# ================================================================
def _rundec(args):
    '''
    Decrypt mode: read base64 ciphertext from the input stream and write
    the recovered plaintext to the output stream. Prompts once for a
    passphrase unless one was given on the command line.
    '''
    if args.passphrase is not None:
        passphrase = args.passphrase
    else:
        passphrase = getpass('Passphrase: ')
    ifp, ofp = _open_ios(args)
    raw = ifp.read()
    ofp.write(decrypt(passphrase, raw, msgdgst=args.msgdgst))
    _close_ios(ifp, ofp)
# ================================================================
# _runtest
# ================================================================
def _runtest(args):
    '''
    Run a series of iteration where each iteration generates a random
    password from 8-32 characters and random text from 20 to 256
    characters. The encrypts and decrypts the random data. It then
    compares the results to make sure that everything works correctly.
    The test output looks like this:
       $ crypt 2000
       2000 of 2000 100.00%  15 139 2000    0
       $ #  ^       ^        ^   ^   ^      ^
       $ #  |       |        |   |   |      +-- num failed
       $ #  |       |        |   |   +---------- num passed
       $ #  |       |        |   +-------------- size of text for a test
       $ #  |       |        +----------------- size of passphrase for a test
       $ #  |       +-------------------------- percent completed
       $ #  +------------------------------- total
       # #+------------------------------------ current test
    @param args The args parse arguments.
    '''
    import string
    import random
    from random import randint
    # NOTE(review): this function still uses Python 2 `print` statements;
    # the module targets Python 2 only.
    # Encrypt/decrypt N random sets of plaintext and passwords.
    num = args.test
    ofp = sys.stdout
    if args.output is not None:
        try:
            ofp = open(args.output, 'w')
        except IOError:
            print 'ERROR: can open file for writing: %s' % (args.output)
            sys.exit(1)
    chset = string.printable
    passed = 0
    failed = []
    maxlen = len(str(num))
    for i in range(num):
        # Random passphrase (8-32 chars) and plaintext (20-256 chars).
        ran1 = randint(8,32)
        password = ''.join(random.choice(chset) for x in range(ran1))
        ran2 = randint(20, 256)
        plaintext = ''.join(random.choice(chset) for x in range(ran2))
        ciphertext = encrypt(password, plaintext, msgdgst=args.msgdgst)
        verification = decrypt(password, ciphertext, msgdgst=args.msgdgst)
        # A round-trip must reproduce the plaintext exactly.
        if plaintext != verification:
            failed.append( [password, plaintext] )
        else:
            passed += 1
        output = '%*d of %d  %6.2f%%  %3d %3d %*d %*d %s' % (maxlen,i+1,
                                                             num,
                                                             100*(i+1)/num,
                                                             len(password),
                                                             len(plaintext),
                                                             maxlen, passed,
                                                             maxlen, len(failed),
                                                             args.msgdgst)
        if args.output is None:
            # Interactive: rewrite the same status line in place.
            ofp.write('\b'*80)
            ofp.write(output)
            ofp.flush()
        else:
            ofp.write(output+'\n')
    ofp.write('\n')
    # Dump the failing password/plaintext pairs, if any.
    if len(failed):
        for i in range(len(failed)):
            ofp.write('%3d %2d %-34s %3d %s\n' % (i,
                                                  len(failed[i][0]),
                                                  '"'+failed[i][0]+'"',
                                                  len(failed[i][1]),
                                                  '"'+failed[i][1]+'"'))
        ofp.write('\n')
    if args.output is not None:
        ofp.close()
# ================================================================
# _cli_opts
# ================================================================
def _cli_opts():
    '''
    Parse command line options.

    Fixes: `unicode()` does not exist on Python 3 and was unnecessary
    here (mepath is only used for basename/%s formatting); argparse is
    imported at module level (its import had been commented out, which
    made this function fail with a NameError).

    @returns the parsed arguments namespace
    '''
    mepath = os.path.abspath(sys.argv[0])
    mebase = '%s' % (os.path.basename(mepath))
    description = '''
Implements encryption/decryption that is compatible with openssl
AES-256 CBC mode.
You can use it as follows:
EXAMPLE 1: %s -> %s (MD5)
    $ # Encrypt and decrypt using %s.
    $ echo 'Lorem ipsum dolor sit amet' | \\
         %s -e -p secret | \\
         %s -d -p secret
    Lorem ipsum dolor sit amet
EXAMPLE 2: %s -> openssl (MD5)
    $ # Encrypt using %s and decrypt using openssl.
    $ echo 'Lorem ipsum dolor sit amet' | \\
         %s -e -p secret | \\
         openssl enc -d -aes-256-cbc -md md5 -base64 -salt -pass pass:secret
    Lorem ipsum dolor sit amet
EXAMPLE 3: openssl -> %s (MD5)
    $ # Encrypt using openssl and decrypt using %s
    $ echo 'Lorem ipsum dolor sit amet' | \\
         openssl enc -e -aes-256-cbc -md md5 -base64 -salt -pass pass:secret
         %s -d -p secret
    Lorem ipsum dolor sit amet
EXAMPLE 4: openssl -> openssl (MD5)
    $ # Encrypt and decrypt using openssl
    $ echo 'Lorem ipsum dolor sit amet' | \\
         openssl enc -e -aes-256-cbc -md md5 -base64 -salt -pass pass:secret
         openssl enc -d -aes-256-cbc -md md5 -base64 -salt -pass pass:secret
    Lorem ipsum dolor sit amet
EXAMPLE 5: %s -> %s (SHA512)
    $ # Encrypt and decrypt using %s.
    $ echo 'Lorem ipsum dolor sit amet' | \\
         %s -e -m sha512 -p secret | \\
         %s -d -m sha512 -p secret
    Lorem ipsum dolor sit amet
EXAMPLE 6: %s -> openssl (SHA512)
    $ # Encrypt using %s and decrypt using openssl.
    $ echo 'Lorem ipsum dolor sit amet' | \\
         %s -e -m sha512 -p secret | \\
         openssl enc -d -aes-256-cbc -md sha1=512 -base64 -salt -pass pass:secret
    Lorem ipsum dolor sit amet
EXAMPLE 7:
    $ # Run internal tests.
    $ %s -t 2000
    2000 of 2000 100.00%%  21 104 2000    0 md5
    $ #  ^       ^         ^   ^   ^      ^ ^
    $ #  |       |         |   |   |      | +- message digest
    $ #  |       |         |   |   |      +--- num failed
    $ #  |       |         |   |   +----------- num passed
    $ #  |       |         |   +--------------- size of text for a test
    $ #  |       |         +------------------ size of passphrase for a test
    $ #  |       +--------------------------- percent completed
    $ #  +-------------------------------- total
    # #+------------------------------------- current test
''' % (mebase, mebase, mebase, mebase,
       mebase, mebase, mebase, mebase,
       mebase, mebase, mebase, mebase,
       mebase, mebase, mebase, mebase,
       mebase, mebase, mebase, mebase,
       )
    parser = argparse.ArgumentParser(prog=mebase,
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=description,
                                     )
    # Exactly one mode is required: encrypt, decrypt or self-test.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-d', '--decrypt',
                       action='store_true',
                       help='decryption mode')
    group.add_argument('-e', '--encrypt',
                       action='store_true',
                       help='encryption mode')
    parser.add_argument('-i', '--input',
                        action='store',
                        help='input file, default is stdin')
    parser.add_argument('-m', '--msgdgst',
                        action='store',
                        default='md5',
                        help='message digest (md5, sha, sha1, sha256, sha512), default is md5')
    parser.add_argument('-o', '--output',
                        action='store',
                        help='output file, default is stdout')
    parser.add_argument('-p', '--passphrase',
                        action='store',
                        help='passphrase for encrypt/decrypt operations')
    group.add_argument('-t', '--test',
                       action='store',
                       default=-1,
                       type=int,
                       help='test mode (TEST is an integer)')
    parser.add_argument('-v', '--verbose',
                        action='count',
                        help='the level of verbosity')
    parser.add_argument('-V', '--version',
                        action='version',
                        version='%(prog)s '+VERSION)
    args = parser.parse_args()
    return args
# ================================================================
# main
# ================================================================
def main():
args = _cli_opts()
if args.test > 0:
if args.input is not None:
print 'WARNING: input argument will be ignored.'
if args.passphrase is not None:
print 'WARNING: passphrase argument will be ignored.'
_runtest(args)
elif args.encrypt:
_runenc(args)
elif args.decrypt:
_rundec(args)
# ================================================================
# MAIN
# ================================================================
# Invoke the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| gpl-2.0 |
andrey-malets/web-page-replay | third_party/ipaddr/ipaddr.py | 92 | 60528 | #!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = '2.1.10'

import struct

# Bit widths of the two address families.
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
    """Raised when an IP address string or integer is malformed."""
class NetmaskValueError(ValueError):
    """Raised when a netmask (prefix or dotted-quad) is invalid."""
def IPAddress(address, version=None):
    """Build an IPv4Address or IPv6Address from a string or integer.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer, 4 or 6.  If set, don't try to automatically
          determine what the IP address type is (important for things
          like IPAddress(1), which could be IPv4 '0.0.0.1' or IPv6 '::1').

    Returns:
        An IPv4Address or IPv6Address object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.
    """
    if version == 4:
        return IPv4Address(address)
    if version == 6:
        return IPv6Address(address)
    # No (usable) explicit version: try each family in turn.
    for factory in (IPv4Address, IPv6Address):
        try:
            return factory(address)
        except (AddressValueError, NetmaskValueError):
            continue
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     address)
def IPNetwork(address, version=None, strict=False):
    """Build an IPv4Network or IPv6Network from a string or integer.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer; if set, don't try to automatically
          determine what the IP address type is (important for things
          like IPNetwork(1), which could be '0.0.0.1/32' or '::1/128').
        strict: A boolean, passed through to the network constructor;
          when True the address must be a true network address.

    Returns:
        An IPv4Network or IPv6Network object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address, or if a strict network was requested and a strict
          network wasn't given.
    """
    if version == 4:
        return IPv4Network(address, strict)
    if version == 6:
        return IPv6Network(address, strict)
    # No (usable) explicit version: try each family in turn.
    for factory in (IPv4Network, IPv6Network):
        try:
            return factory(address, strict)
        except (AddressValueError, NetmaskValueError):
            continue
    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
                     address)
def v4_int_to_packed(address):
    """Pack an integer IPv4 address into 4 network-order bytes.

    Args:
        address: An integer representation of an IPv4 IP address.

    Returns:
        The binary representation of this address (a Bytes object).

    Raises:
        ValueError: If the integer is too large to be an IPv4 IP
          address.
    """
    upper_bound = _BaseV4._ALL_ONES
    if address > upper_bound:
        raise ValueError('Address too large for IPv4')
    packed = struct.pack('!I', address)
    return Bytes(packed)
def v6_int_to_packed(address):
    """Pack an integer IPv6 address into 16 network-order bytes.

    Args:
        address: An integer representation of an IPv6 IP address.

    Returns:
        The binary representation of this address (a Bytes object).
    """
    # Split the 128-bit value into two 64-bit halves for struct.
    high, low = divmod(address, 2**64)
    return Bytes(struct.pack('!QQ', high, low))
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
def summarize_address_range(first, last):
    """Summarize a network range given the first and last IP addresses.

    Example:
        >>> summarize_address_range(IPv4Address('1.1.1.0'),
            IPv4Address('1.1.1.130'))
        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
        IPv4Network('1.1.1.130/32')]

    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.

    Returns:
        The address range collapsed to a list of IPv4Network's or
        IPv6Network's.

    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version is not 4 or 6.
    """
    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        raise TypeError("%s and %s are not of the same version" % (
                        str(first), str(last)))
    if first > last:
        raise ValueError('last IP address must be greater than first')
    networks = []
    if first.version == 4:
        ip = IPv4Network
    elif first.version == 6:
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')
    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    while first_int <= last_int:
        # Start from the largest block aligned at first_int (bounded by its
        # trailing-zero count) and shrink it until it fits within the range.
        nbits = _count_righthand_zero_bits(first_int, ip_bits)
        current = None
        while nbits >= 0:
            addend = 2**nbits - 1
            current = first_int + addend
            nbits -= 1
            if current <= last_int:
                break
        prefix = _get_prefix_length(first_int, current, ip_bits)
        net = ip('%s/%d' % (str(first), prefix))
        networks.append(net)
        if current == ip._ALL_ONES:
            # Reached the top of the address space; avoid wrapping around.
            break
        first_int = current + 1
        first = IPAddress(first_int, version=first._version)
    return networks
def _collapse_address_list_recursive(addresses):
    """One merging pass over sorted netblocks; recurse until stable.

    Example:
        ip1 = IPv4Network('1.1.0.0/24')
        ip2 = IPv4Network('1.1.1.0/24')
        ip3 = IPv4Network('1.1.2.0/24')
        ip4 = IPv4Network('1.1.3.0/24')
        ip5 = IPv4Network('1.1.4.0/24')
        ip6 = IPv4Network('1.1.0.1/22')
        _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
        [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]

    This shouldn't be called directly; it is called via
    collapse_address_list([]).

    Args:
        addresses: A list of IPv4Network's or IPv6Network's

    Returns:
        A list of IPv4Network's or IPv6Network's depending on what we were
        passed.
    """
    merged = []
    changed = False
    for net in addresses:
        if not merged:
            merged.append(net)
            continue
        tail = merged[-1]
        if net in tail:
            # `net` is wholly contained in the previous block; drop it.
            changed = True
        elif net == tail.supernet().subnet()[1]:
            # `net` is the upper-half sibling of `tail`; merge the pair.
            merged[-1] = tail.supernet()
            changed = True
        else:
            merged.append(net)
    # Another pass may enable further merges once siblings have fused.
    return _collapse_address_list_recursive(merged) if changed else merged
def collapse_address_list(addresses):
    """Collapse a list of IP objects.

    Example:
        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
        [IPv4('1.1.0.0/23')]

    Args:
        addresses: A list of IPv4Network or IPv6Network objects.

    Returns:
        A list of IPv4Network or IPv6Network objects depending on what we
        were passed.

    Raises:
        TypeError: If passed a list of mixed version objects.
    """
    i = 0
    addrs = []
    ips = []
    nets = []
    # split IP addresses and networks
    for ip in addresses:
        if isinstance(ip, _BaseIP):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                                str(ip), str(ips[-1])))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            # A /32 (or /128) network is really a single address.
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                                str(ip), str(ips[-1])))
            ips.append(ip.ip)
        else:
            # Bug fix: report against the last *network*. The original read
            # ips[-1] here, which produced the wrong message and raised
            # IndexError when ips was empty.
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                                str(ip), str(nets[-1])))
            nets.append(ip)
    # sort and dedup
    ips = sorted(set(ips))
    nets = sorted(set(nets))
    while i < len(ips):
        (first, last) = _find_address_range(ips[i:])
        i = ips.index(last) + 1
        addrs.extend(summarize_address_range(first, last))
    return _collapse_address_list_recursive(sorted(
        addrs + nets, key=_BaseNet._get_networks_key))
# backwards compatibility: legacy CamelCase alias for collapse_address_list
CollapseAddrList = collapse_address_list
# We need to distinguish between the string and packed-bytes representations
# of an IP address. For example, b'0::1' is the IPv4 address 48.58.58.49,
# while '0::1' is an IPv6 address.
#
# In Python 3, the native 'bytes' type already provides this functionality,
# so we use it directly. For earlier implementations where bytes is not a
# distinct type, we create a subclass of str to serve as a tag.
#
# Usage example (Python 2):
#   ip = ipaddr.IPAddress(ipaddr.Bytes('xxxx'))
#
# Usage example (Python 3):
#   ip = ipaddr.IPAddress(b'xxxx')
try:
    if bytes is str:
        raise TypeError("bytes is not a distinct type")
    Bytes = bytes
except (NameError, TypeError):
    class Bytes(str):
        """Tag type marking a str as packed binary data (Python 2 fallback)."""
        def __repr__(self):
            return 'Bytes(%s)' % str.__repr__(self)
def get_mixed_type_key(obj):
    """Return a key suitable for sorting between networks and addresses.

    Address and Network objects are not sortable by default; they're
    fundamentally different, so
        IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
    doesn't make sense.  When a mixed list must be ordered anyway, pass
    this function as the key= argument to sorted().

    Args:
        obj: either a Network or Address object.

    Returns:
        an appropriate sort key, or NotImplemented for foreign types.
    """
    for base_cls, key_method in ((_BaseNet, '_get_networks_key'),
                                 (_BaseIP, '_get_address_key')):
        if isinstance(obj, base_cls):
            return getattr(obj, key_method)()
    return NotImplemented
class _IPAddrBase(object):
    """The mother class: integer-conversion hooks shared by all IP types."""

    def __index__(self):
        # Allow use anywhere an integer is expected (e.g. hex(), bin()).
        return self._ip

    def __int__(self):
        return self._ip

    def __hex__(self):
        # Python 2 hex() protocol hook.
        return hex(self._ip)

    @property
    def exploded(self):
        """Return the longhand version of the IP address as a string."""
        return self._explode_shorthand_ip_string()

    @property
    def compressed(self):
        """Return the shorthand version of the IP address as a string."""
        return str(self)
class _BaseIP(_IPAddrBase):
    """A generic IP object.

    This IP class contains the version independent methods which are
    used by single IP addresses.
    """

    def __eq__(self, other):
        try:
            return (self._ip == other._ip
                    and self._version == other._version)
        except AttributeError:
            # Not an IP-like object; let the other operand try.
            return NotImplemented

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return NotImplemented
        return not eq

    def __le__(self, other):
        # Derived from __gt__ so ordering stays internally consistent.
        gt = self.__gt__(other)
        if gt is NotImplemented:
            return NotImplemented
        return not gt

    def __ge__(self, other):
        lt = self.__lt__(other)
        if lt is NotImplemented:
            return NotImplemented
        return not lt

    def __lt__(self, other):
        # NOTE(review): other._version is read before the isinstance check,
        # so comparing against a non-IP object raises AttributeError rather
        # than TypeError -- confirm before relying on the exception type.
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                            str(self), str(other)))
        if not isinstance(other, _BaseIP):
            raise TypeError('%s and %s are not of the same type' % (
                            str(self), str(other)))
        if self._ip != other._ip:
            return self._ip < other._ip
        return False

    def __gt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                            str(self), str(other)))
        if not isinstance(other, _BaseIP):
            raise TypeError('%s and %s are not of the same type' % (
                            str(self), str(other)))
        if self._ip != other._ip:
            return self._ip > other._ip
        return False

    # Shorthand for Integer addition and subtraction. This is not
    # meant to ever support addition/subtraction of addresses.
    def __add__(self, other):
        if not isinstance(other, int):
            return NotImplemented
        return IPAddress(int(self) + other, version=self._version)

    def __sub__(self, other):
        if not isinstance(other, int):
            return NotImplemented
        return IPAddress(int(self) - other, version=self._version)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))

    def __str__(self):
        return '%s' % self._string_from_ip_int(self._ip)

    def __hash__(self):
        # long() keeps the hash stable across Python 2 int/long widths.
        return hash(hex(long(self._ip)))

    def _get_address_key(self):
        # Sort key: version first, then the address itself.
        return (self._version, self)

    @property
    def version(self):
        raise NotImplementedError('BaseIP has no version')
class _BaseNet(_IPAddrBase):
    """A generic IP network object.

    This IP class contains the version independent methods which are
    used by networks.
    """

    def __init__(self, address):
        # Per-instance cache for derived addresses
        # (network / broadcast / hostmask).
        self._cache = {}

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))

    def iterhosts(self):
        """Generate Iterator over usable hosts in a network.

        This is like __iter__ except it doesn't return the network
        or broadcast addresses.
        """
        cur = int(self.network) + 1
        bcast = int(self.broadcast) - 1
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)

    def __iter__(self):
        # Yields every address in the block, network and broadcast included.
        cur = int(self.network)
        bcast = int(self.broadcast)
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)

    def __getitem__(self, n):
        # Sequence-style indexing into the block; negative indices count
        # back from the broadcast address.
        network = int(self.network)
        broadcast = int(self.broadcast)
        if n >= 0:
            if network + n > broadcast:
                raise IndexError
            return IPAddress(network + n, version=self._version)
        else:
            n += 1
            if broadcast + n < network:
                raise IndexError
            return IPAddress(broadcast + n, version=self._version)

    def __lt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                            str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                            str(self), str(other)))
        # Order by network address first, then by netmask (shorter prefix
        # sorts first).
        if self.network != other.network:
            return self.network < other.network
        if self.netmask != other.netmask:
            return self.netmask < other.netmask
        return False

    def __gt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                            str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                            str(self), str(other)))
        if self.network != other.network:
            return self.network > other.network
        if self.netmask != other.netmask:
            return self.netmask > other.netmask
        return False

    def __le__(self, other):
        gt = self.__gt__(other)
        if gt is NotImplemented:
            return NotImplemented
        return not gt

    def __ge__(self, other):
        lt = self.__lt__(other)
        if lt is NotImplemented:
            return NotImplemented
        return not lt

    def __eq__(self, other):
        try:
            return (self._version == other._version
                    and self.network == other.network
                    and int(self.netmask) == int(other.netmask))
        except AttributeError:
            # NOTE(review): a plain address compares equal when it matches
            # this network's base IP; falls through to an implicit None
            # (falsy) for anything else rather than NotImplemented.
            if isinstance(other, _BaseIP):
                return (self._version == other._version
                        and self._ip == other._ip)

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return NotImplemented
        return not eq

    def __str__(self):
        return '%s/%s' % (str(self.ip),
                          str(self._prefixlen))

    def __hash__(self):
        return hash(int(self.network) ^ int(self.netmask))

    def __contains__(self, other):
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if isinstance(other, _BaseNet):
            return (self.network <= other.network and
                    self.broadcast >= other.broadcast)
        # dealing with another address
        else:
            return (int(self.network) <= int(other._ip) <=
                    int(self.broadcast))

    def overlaps(self, other):
        """Tell if self is partly contained in other."""
        return self.network in other or self.broadcast in other or (
            other.network in self or other.broadcast in self)

    @property
    def network(self):
        # Lazily computed and cached: base address of the block.
        x = self._cache.get('network')
        if x is None:
            x = IPAddress(self._ip & int(self.netmask), version=self._version)
            self._cache['network'] = x
        return x

    @property
    def broadcast(self):
        # Lazily computed and cached: highest address of the block.
        x = self._cache.get('broadcast')
        if x is None:
            x = IPAddress(self._ip | int(self.hostmask), version=self._version)
            self._cache['broadcast'] = x
        return x

    @property
    def hostmask(self):
        # Lazily computed and cached: bitwise complement of the netmask.
        x = self._cache.get('hostmask')
        if x is None:
            x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
                          version=self._version)
            self._cache['hostmask'] = x
        return x

    @property
    def with_prefixlen(self):
        return '%s/%d' % (str(self.ip), self._prefixlen)

    @property
    def with_netmask(self):
        return '%s/%s' % (str(self.ip), str(self.netmask))

    @property
    def with_hostmask(self):
        return '%s/%s' % (str(self.ip), str(self.hostmask))

    @property
    def numhosts(self):
        """Number of hosts in the current subnet."""
        return int(self.broadcast) - int(self.network) + 1

    @property
    def version(self):
        raise NotImplementedError('BaseNet has no version')

    @property
    def prefixlen(self):
        return self._prefixlen

    def address_exclude(self, other):
        """Remove an address from a larger block.

        For example:
            addr1 = IPNetwork('10.1.1.0/24')
            addr2 = IPNetwork('10.1.1.0/26')
            addr1.address_exclude(addr2) =
                [IPNetwork('10.1.1.64/26'), IPNetwork('10.1.1.128/25')]

        or IPv6:
            addr1 = IPNetwork('::1/32')
            addr2 = IPNetwork('::1/128')
            addr1.address_exclude(addr2) = [IPNetwork('::0/128'),
                IPNetwork('::2/127'),
                IPNetwork('::4/126'),
                IPNetwork('::8/125'),
                ...
                IPNetwork('0:0:8000::/33')]

        Args:
            other: An IPvXNetwork object of the same type.

        Returns:
            A sorted list of IPvXNetwork objects addresses which is self
            minus other.

        Raises:
            TypeError: If self and other are of difffering address
              versions, or if other is not a network object.
            ValueError: If other is not completely contained by self.
        """
        if not self._version == other._version:
            raise TypeError("%s and %s are not of the same version" % (
                            str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError("%s is not a network object" % str(other))
        if other not in self:
            raise ValueError('%s not contained in %s' % (str(other),
                                                         str(self)))
        if other == self:
            return []
        ret_addrs = []
        # Make sure we're comparing the network of other.
        other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
                          version=other._version)
        # Repeatedly split self in half, keeping the half that does not
        # contain `other` and descending into the half that does.
        s1, s2 = self.subnet()
        while s1 != other and s2 != other:
            if other in s1:
                ret_addrs.append(s2)
                s1, s2 = s1.subnet()
            elif other in s2:
                ret_addrs.append(s1)
                s1, s2 = s2.subnet()
            else:
                # If we got here, there's a bug somewhere.
                assert True == False, ('Error performing exclusion: '
                                       's1: %s s2: %s other: %s' %
                                       (str(s1), str(s2), str(other)))
        if s1 == other:
            ret_addrs.append(s2)
        elif s2 == other:
            ret_addrs.append(s1)
        else:
            # If we got here, there's a bug somewhere.
            assert True == False, ('Error performing exclusion: '
                                   's1: %s s2: %s other: %s' %
                                   (str(s1), str(s2), str(other)))
        return sorted(ret_addrs, key=_BaseNet._get_networks_key)

    def compare_networks(self, other):
        """Compare two IP objects.

        This is only concerned about the comparison of the integer
        representation of the network addresses.  This means that the
        host bits aren't considered at all in this method.  If you want
        to compare host bits, you can easily enough do a
        'HostA._ip < HostB._ip'

        Args:
            other: An IP object.

        Returns:
            -1, 0 or 1 ordering first by version, then network address,
            then netmask (see the original docstring examples).
        """
        if self._version < other._version:
            return -1
        if self._version > other._version:
            return 1
        # self._version == other._version below here:
        if self.network < other.network:
            return -1
        if self.network > other.network:
            return 1
        # self.network == other.network below here:
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        # self.network == other.network and self.netmask == other.netmask
        return 0

    def _get_networks_key(self):
        """Network-only key function.

        Returns an object that identifies this address' network and
        netmask. This function is a suitable "key" argument for sorted()
        and list.sort().
        """
        return (self._version, self.network, self.netmask)

    def _ip_int_from_prefix(self, prefixlen=None):
        """Turn the prefix length netmask into a int for comparison.

        Args:
            prefixlen: An integer, the prefix length.

        Returns:
            An integer (the netmask; defaults to this network's prefix).
        """
        if not prefixlen and prefixlen != 0:
            prefixlen = self._prefixlen
        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)

    def _prefix_from_ip_int(self, ip_int, mask=32):
        """Return prefix length from the decimal netmask.

        Args:
            ip_int: An integer, the IP address.
            mask: The netmask.  Defaults to 32.

        Returns:
            An integer, the prefix length.
        """
        # Strip trailing zero bits; what remains of `mask` is the prefix.
        while mask:
            if ip_int & 1 == 1:
                break
            ip_int >>= 1
            mask -= 1
        return mask

    def _ip_string_from_prefix(self, prefixlen=None):
        """Turn a prefix length into a dotted decimal string.

        Args:
            prefixlen: An integer, the netmask prefix length.

        Returns:
            A string, the dotted decimal netmask string.
        """
        if not prefixlen:
            prefixlen = self._prefixlen
        return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))

    def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
        """The subnets which join to make the current subnet.

        In the case that self contains only one IP
        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), return a list with just ourself.

        Args:
            prefixlen_diff: An integer, the amount the prefix length
              should be increased by. This should not be set if
              new_prefix is also set.
            new_prefix: The desired new prefix length. This must be a
              larger number (smaller prefix) than the existing prefix.
              This should not be set if prefixlen_diff is also set.

        Returns:
            An iterator of IPv(4|6) objects.

        Raises:
            ValueError: The prefixlen_diff is too small or too large.
                OR
                prefixlen_diff and new_prefix are both set or new_prefix
                is a smaller number than the current prefix (smaller
                number means a larger network)
        """
        if self._prefixlen == self._max_prefixlen:
            # A single host cannot be subdivided; yield it unchanged.
            yield self
            return
        if new_prefix is not None:
            if new_prefix < self._prefixlen:
                raise ValueError('new prefix must be longer')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = new_prefix - self._prefixlen
        if prefixlen_diff < 0:
            raise ValueError('prefix length diff must be > 0')
        new_prefixlen = self._prefixlen + prefixlen_diff
        if not self._is_valid_netmask(str(new_prefixlen)):
            raise ValueError(
                'prefix length diff %d is invalid for netblock %s' % (
                    new_prefixlen, str(self)))
        first = IPNetwork('%s/%s' % (str(self.network),
                                     str(self._prefixlen + prefixlen_diff)),
                          version=self._version)
        yield first
        current = first
        # Walk forward one subnet at a time until we hit our own broadcast.
        while True:
            broadcast = current.broadcast
            if broadcast == self.broadcast:
                return
            new_addr = IPAddress(int(broadcast) + 1, version=self._version)
            current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
                                version=self._version)
            yield current

    def masked(self):
        """Return the network object with the host bits masked out."""
        return IPNetwork('%s/%d' % (self.network, self._prefixlen),
                         version=self._version)

    def subnet(self, prefixlen_diff=1, new_prefix=None):
        """Return a list of subnets, rather than an iterator."""
        return list(self.iter_subnets(prefixlen_diff, new_prefix))

    def supernet(self, prefixlen_diff=1, new_prefix=None):
        """The supernet containing the current network.

        Args:
            prefixlen_diff: An integer, the amount the prefix length of
              the network should be decreased by.  For example, given a
              /24 network and a prefixlen_diff of 3, a supernet with a
              /21 netmask is returned.

        Returns:
            An IPv4 network object.

        Raises:
            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
              a negative prefix length.
                OR
              If prefixlen_diff and new_prefix are both set or new_prefix is a
              larger number than the current prefix (larger number means a
              smaller network)
        """
        if self._prefixlen == 0:
            return self
        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix
        if self.prefixlen - prefixlen_diff < 0:
            raise ValueError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        return IPNetwork('%s/%s' % (str(self.network),
                                    str(self.prefixlen - prefixlen_diff)),
                         version=self._version)

    # backwards compatibility
    Subnet = subnet
    Supernet = supernet
    AddressExclude = address_exclude
    CompareNetworks = compare_networks
    Contains = __contains__
class _BaseV4(object):
    """Base IPv4 object.

    The following methods are used by IPv4 objects in both single IP
    addresses and networks.
    """

    # Equivalent to 255.255.255.255 or 32 bits of 1's.
    _ALL_ONES = (2**IPV4LENGTH) - 1
    _DECIMAL_DIGITS = frozenset('0123456789')

    def __init__(self, address):
        self._version = 4
        self._max_prefixlen = IPV4LENGTH

    def _explode_shorthand_ip_string(self):
        # IPv4 has no shorthand form; the exploded view is just str(self).
        return str(self)

    def _ip_int_from_string(self, ip_str):
        """Turn the given IP string into an integer for comparison.

        Args:
            ip_str: A string, the IP ip_str.

        Returns:
            The IP ip_str as an integer.

        Raises:
            AddressValueError: if ip_str isn't a valid IPv4 Address.
        """
        octets = ip_str.split('.')
        if len(octets) != 4:
            raise AddressValueError(ip_str)
        packed_ip = 0
        for oc in octets:
            try:
                packed_ip = (packed_ip << 8) | self._parse_octet(oc)
            except ValueError:
                raise AddressValueError(ip_str)
        return packed_ip

    def _parse_octet(self, octet_str):
        """Convert a decimal octet into an integer.

        Args:
            octet_str: A string, the number to parse.

        Returns:
            The octet as an integer.

        Raises:
            ValueError: if the octet isn't strictly a decimal from [0..255].
        """
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not self._DECIMAL_DIGITS.issuperset(octet_str):
            raise ValueError
        octet_int = int(octet_str, 10)
        # Disallow leading zeroes, because no clear standard exists on
        # whether these should be interpreted as decimal or octal.
        if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
            raise ValueError
        return octet_int

    def _string_from_ip_int(self, ip_int):
        """Turns a 32-bit integer into dotted decimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            The IP address as a string in dotted decimal notation.
        """
        octets = []
        for _ in xrange(4):
            # Peel off the low byte each pass, building right-to-left.
            octets.insert(0, str(ip_int & 0xFF))
            ip_int >>= 8
        return '.'.join(octets)

    @property
    def max_prefixlen(self):
        return self._max_prefixlen

    @property
    def packed(self):
        """The binary representation of this address."""
        return v4_int_to_packed(self._ip)

    @property
    def version(self):
        return self._version

    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within the
            reserved IPv4 Network range.
        """
        return self in IPv4Network('240.0.0.0/4')

    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per RFC 1918.
        """
        return (self in IPv4Network('10.0.0.0/8') or
                self in IPv4Network('172.16.0.0/12') or
                self in IPv4Network('192.168.0.0/16'))

    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is multicast.
            See RFC 3171 for details.
        """
        return self in IPv4Network('224.0.0.0/4')

    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 5735 3.
        """
        return self in IPv4Network('0.0.0.0')

    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback per RFC 3330.
        """
        return self in IPv4Network('127.0.0.0/8')

    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is link-local per RFC 3927.
        """
        return self in IPv4Network('169.254.0.0/16')
class IPv4Address(_BaseV4, _BaseIP):
    """Represent and manipulate single IPv4 Addresses."""

    def __init__(self, address):
        """Instantiate an IPv4 address.

        Args:
            address: A string or integer representing the IP, e.g.
              '192.168.1.1'.  Additionally, an integer can be passed, so
              IPv4Address('192.168.1.1') == IPv4Address(3232235777),
              or, more generally,
              IPv4Address(int(IPv4Address('192.168.1.1'))) ==
                IPv4Address('192.168.1.1').
              A Bytes object is interpreted as a 4-byte packed address.

        Raises:
            AddressValueError: If ipaddr isn't a valid IPv4 address.
        """
        _BaseV4.__init__(self, address)

        # Efficient constructor from integer.
        if isinstance(address, (int, long)):
            self._ip = address
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
            return

        # Constructing from a packed address
        if isinstance(address, Bytes):
            try:
                self._ip, = struct.unpack('!I', address)
            except struct.error:
                raise AddressValueError(address)  # Wrong length.
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = str(address)
        self._ip = self._ip_int_from_string(addr_str)
class IPv4Network(_BaseV4, _BaseNet):
"""This class represents and manipulates 32-bit IPv4 networks.
Attributes: [examples for IPv4Network('1.2.3.4/27')]
._ip: 16909060
.ip: IPv4Address('1.2.3.4')
.network: IPv4Address('1.2.3.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast: IPv4Address('1.2.3.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0))
def __init__(self, address, strict=False):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.168.1.1/24'
'192.168.1.1/255.255.255.0'
'192.168.1.1/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.168.1.1'
'192.168.1.1/255.255.255.255'
'192.168.1.1/32'
are also functionaly equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.168.1.1') == IPv4Network(3232235777).
or, more generally
IPv4Network(int(IPv4Network('192.168.1.1'))) ==
IPv4Network('192.168.1.1')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 192.168.1.0/24 and not an
IP address on a network, eg, 192.168.1.1/24.
Raises:
AddressValueError: If ipaddr isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNet.__init__(self, address)
_BaseV4.__init__(self, address)
# Constructing from an integer or packed bytes.
if isinstance(address, (int, long, Bytes)):
self.ip = IPv4Address(address)
self._ip = self.ip._ip
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError(address)
self._ip = self._ip_int_from_string(addr[0])
self.ip = IPv4Address(self._ip)
if len(addr) == 2:
mask = addr[1].split('.')
if len(mask) == 4:
# We have dotted decimal netmask.
if self._is_valid_netmask(addr[1]):
self.netmask = IPv4Address(self._ip_int_from_string(
addr[1]))
elif self._is_hostmask(addr[1]):
self.netmask = IPv4Address(
self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
else:
raise NetmaskValueError('%s is not a valid netmask'
% addr[1])
self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
else:
# We have a netmask in prefix length form.
if not self._is_valid_netmask(addr[1]):
raise NetmaskValueError(addr[1])
self._prefixlen = int(addr[1])
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
else:
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
if strict:
if self.ip != self.network:
raise ValueError('%s has host bits set' %
self.ip)
if self._prefixlen == (self._max_prefixlen - 1):
self.iterhosts = self.__iter__
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [int(x) for x in bits if int(x) in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _is_valid_netmask(self, netmask):
"""Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
"""
mask = netmask.split('.')
if len(mask) == 4:
if [x for x in mask if int(x) not in self._valid_mask_octets]:
return False
if [y for idx, y in enumerate(mask) if idx > 0 and
y > mask[idx - 1]]:
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= self._max_prefixlen
# Backwards-compatibility aliases for the pre-property API: each old
# method-style spelling simply forwards to the corresponding modern
# read-only property.
IsRFC1918 = lambda self: self.is_private
IsMulticast = lambda self: self.is_multicast
IsLoopback = lambda self: self.is_loopback
IsLinkLocal = lambda self: self.is_link_local
class _BaseV6(object):
    """Base IPv6 object.

    The following methods are used by IPv6 objects in both single IP
    addresses and networks.

    NOTE: this is Python 2 era code (``xrange``, ``0L`` long literals);
    it predates the stdlib ``ipaddress`` module.
    """

    # 128 bits of ones; the largest representable IPv6 integer.
    _ALL_ONES = (2**IPV6LENGTH) - 1
    # An IPv6 address is made up of 8 16-bit hextets.
    _HEXTET_COUNT = 8
    # Characters permitted in a hextet; int() alone would accept more.
    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')

    def __init__(self, address):
        # The address argument itself is parsed by subclasses; only the
        # version metadata is recorded here.
        self._version = 6
        self._max_prefixlen = IPV6LENGTH

    def _ip_int_from_string(self, ip_str):
        """Turn an IPv6 ip_str into an integer.

        Args:
            ip_str: A string, the IPv6 ip_str.

        Returns:
            A long, the IPv6 address as an integer.

        Raises:
            AddressValueError: if ip_str isn't a valid IPv6 Address.
        """
        parts = ip_str.split(':')

        # An IPv6 address needs at least 2 colons (3 parts).
        if len(parts) < 3:
            raise AddressValueError(ip_str)

        # If the address has an IPv4-style suffix, convert it to hexadecimal.
        if '.' in parts[-1]:
            ipv4_int = IPv4Address(parts.pop())._ip
            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
            parts.append('%x' % (ipv4_int & 0xFFFF))

        # An IPv6 address can't have more than 8 colons (9 parts).
        if len(parts) > self._HEXTET_COUNT + 1:
            raise AddressValueError(ip_str)

        # Disregarding the endpoints, find '::' with nothing in between.
        # This indicates that a run of zeroes has been skipped.
        try:
            # Single-element tuple unpacking raises ValueError when more
            # than one interior part is empty, i.e. multiple '::'.
            skip_index, = (
                [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
                [None])
        except ValueError:
            # Can't have more than one '::'
            raise AddressValueError(ip_str)

        # parts_hi is the number of parts to copy from above/before the '::'
        # parts_lo is the number of parts to copy from below/after the '::'
        if skip_index is not None:
            # If we found a '::', then check if it also covers the endpoints.
            parts_hi = skip_index
            parts_lo = len(parts) - skip_index - 1
            if not parts[0]:
                parts_hi -= 1
                if parts_hi:
                    raise AddressValueError(ip_str)  # ^: requires ^::
            if not parts[-1]:
                parts_lo -= 1
                if parts_lo:
                    raise AddressValueError(ip_str)  # :$ requires ::$
            parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
            if parts_skipped < 1:
                # '::' must stand for at least one zero hextet.
                raise AddressValueError(ip_str)
        else:
            # Otherwise, allocate the entire address to parts_hi. The
            # endpoints could still be empty, but _parse_hextet() will
            # check for that.
            if len(parts) != self._HEXTET_COUNT:
                raise AddressValueError(ip_str)
            parts_hi = len(parts)
            parts_lo = 0
            parts_skipped = 0

        try:
            # Now, parse the hextets into a 128-bit integer.
            ip_int = 0L
            for i in xrange(parts_hi):
                ip_int <<= 16
                ip_int |= self._parse_hextet(parts[i])
            ip_int <<= 16 * parts_skipped
            for i in xrange(-parts_lo, 0):
                ip_int <<= 16
                ip_int |= self._parse_hextet(parts[i])
            return ip_int
        except ValueError:
            raise AddressValueError(ip_str)

    def _parse_hextet(self, hextet_str):
        """Convert an IPv6 hextet string into an integer.

        Args:
            hextet_str: A string, the number to parse.

        Returns:
            The hextet as an integer.

        Raises:
            ValueError: if the input isn't strictly a hex number from
                [0..FFFF].
        """
        # Whitelist the characters, since int() allows a lot of bizarre
        # stuff ('0x' prefixes, signs, whitespace, ...).
        if not self._HEX_DIGITS.issuperset(hextet_str):
            raise ValueError
        # An empty hextet_str also raises ValueError here.
        hextet_int = int(hextet_str, 16)
        if hextet_int > 0xFFFF:
            raise ValueError
        return hextet_int

    def _compress_hextets(self, hextets):
        """Compresses a list of hextets.

        Compresses a list of strings, replacing the longest continuous
        sequence of "0" in the list with "" and adding empty strings at
        the beginning or at the end of the string such that subsequently
        calling ":".join(hextets) will produce the compressed version of
        the IPv6 address.

        Args:
            hextets: A list of strings, the hextets to compress.

        Returns:
            A list of strings.
        """
        best_doublecolon_start = -1
        best_doublecolon_len = 0
        doublecolon_start = -1
        doublecolon_len = 0
        for index in range(len(hextets)):
            if hextets[index] == '0':
                doublecolon_len += 1
                if doublecolon_start == -1:
                    # Start of a sequence of zeros.
                    doublecolon_start = index
                if doublecolon_len > best_doublecolon_len:
                    # This is the longest sequence of zeros so far.
                    best_doublecolon_len = doublecolon_len
                    best_doublecolon_start = doublecolon_start
            else:
                doublecolon_len = 0
                doublecolon_start = -1

        # '::' only ever replaces a run of two or more zero hextets.
        if best_doublecolon_len > 1:
            best_doublecolon_end = (best_doublecolon_start +
                                    best_doublecolon_len)
            # For zeros at the end of the address.
            if best_doublecolon_end == len(hextets):
                hextets += ['']
            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
            # For zeros at the beginning of the address.
            if best_doublecolon_start == 0:
                hextets = [''] + hextets

        return hextets

    def _string_from_ip_int(self, ip_int=None):
        """Turns a 128-bit integer into hexadecimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            A string, the hexadecimal representation of the address.

        Raises:
            ValueError: The address is bigger than 128 bits of all ones.
        """
        # Fall back to this object's own address when no argument is
        # given; the explicit 0-check keeps ip_int=0 from falling back.
        if not ip_int and ip_int != 0:
            ip_int = int(self._ip)

        if ip_int > self._ALL_ONES:
            raise ValueError('IPv6 address is too large')

        hex_str = '%032x' % ip_int
        hextets = []
        # Split into 8 groups of 4 hex digits, dropping each group's
        # leading zeros via the round-trip through int().
        for x in range(0, 32, 4):
            hextets.append('%x' % int(hex_str[x:x+4], 16))

        hextets = self._compress_hextets(hextets)
        return ':'.join(hextets)

    def _explode_shorthand_ip_string(self):
        """Expand a shortened IPv6 address.

        Returns:
            A string, the fully expanded (uncompressed) IPv6 address,
            with a '/prefixlen' suffix for network objects.
        """
        # Networks stringify with a prefix; expand only the IP part.
        if isinstance(self, _BaseNet):
            ip_str = str(self.ip)
        else:
            ip_str = str(self)

        ip_int = self._ip_int_from_string(ip_str)
        parts = []
        # Emit the 8 hextets zero-padded to 4 digits, low hextet first...
        for i in xrange(self._HEXTET_COUNT):
            parts.append('%04x' % (ip_int & 0xFFFF))
            ip_int >>= 16
        # ...then reverse into network byte order.
        parts.reverse()
        if isinstance(self, _BaseNet):
            return '%s/%d' % (':'.join(parts), self.prefixlen)
        return ':'.join(parts)

    @property
    def max_prefixlen(self):
        # Always IPV6LENGTH (128) for IPv6 objects.
        return self._max_prefixlen

    @property
    def packed(self):
        """The binary representation of this address."""
        return v6_int_to_packed(self._ip)

    @property
    def version(self):
        # Always 6; set in __init__.
        return self._version

    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is a multicast address.

        See RFC 2373 2.7 for details.
        """
        return self in IPv6Network('ff00::/8')

    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.
        """
        return (self in IPv6Network('::/8') or
                self in IPv6Network('100::/8') or
                self in IPv6Network('200::/7') or
                self in IPv6Network('400::/6') or
                self in IPv6Network('800::/5') or
                self in IPv6Network('1000::/4') or
                self in IPv6Network('4000::/3') or
                self in IPv6Network('6000::/3') or
                self in IPv6Network('8000::/3') or
                self in IPv6Network('A000::/3') or
                self in IPv6Network('C000::/3') or
                self in IPv6Network('E000::/4') or
                self in IPv6Network('F000::/5') or
                self in IPv6Network('F800::/6') or
                self in IPv6Network('FE00::/9'))

    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.
        """
        # The getattr default lets plain addresses (no _prefixlen) pass
        # the prefix check.
        return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128

    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.
        """
        return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128

    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is reserved per RFC 4291.
        """
        return self in IPv6Network('fe80::/10')

    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.

        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.

        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.
        """
        return self in IPv6Network('fec0::/10')

    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per RFC 4193.
        """
        return self in IPv6Network('fc00::/7')

    @property
    def ipv4_mapped(self):
        """Return the IPv4 mapped address.

        Returns:
            If the IPv6 address is a v4 mapped address (::ffff:a.b.c.d),
            return the IPv4 mapped address. Return None otherwise.
        """
        if (self._ip >> 32) != 0xFFFF:
            return None
        return IPv4Address(self._ip & 0xFFFFFFFF)

    @property
    def teredo(self):
        """Tuple of embedded teredo IPs.

        Returns:
            Tuple of the (server, client) IPs or None if the address
            doesn't appear to be a teredo address (doesn't start with
            2001::/32)
        """
        if (self._ip >> 96) != 0x20010000:
            return None
        # Per RFC 4380 the client address is stored bit-inverted.
        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
                IPv4Address(~self._ip & 0xFFFFFFFF))

    @property
    def sixtofour(self):
        """Return the IPv4 6to4 embedded address.

        Returns:
            The IPv4 6to4-embedded address if present or None if the
            address doesn't appear to contain a 6to4 embedded address.
        """
        if (self._ip >> 112) != 0x2002:
            return None
        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
class IPv6Address(_BaseV6, _BaseIP):
    """Represent and manipulate single IPv6 Addresses."""

    def __init__(self, address):
        """Instantiate a new IPv6 address object.

        Args:
            address: A string or integer representing the IP

              Additionally, an integer can be passed, so
              IPv6Address('2001:4860::') ==
                IPv6Address(42541956101370907050197289607612071936L).
              or, more generally
              IPv6Address(IPv6Address('2001:4860::')._ip) ==
                IPv6Address('2001:4860::')

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
        """
        _BaseV6.__init__(self, address)

        # Efficient constructor from integer.
        if isinstance(address, (int, long)):
            self._ip = address
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
            return

        # Constructing from a packed address (16 network-order bytes,
        # unpacked as two big-endian 64-bit halves).
        if isinstance(address, Bytes):
            try:
                hi, lo = struct.unpack('!QQ', address)
            except struct.error:
                raise AddressValueError(address)  # Wrong length.
            self._ip = (hi << 64) | lo
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = str(address)
        if not addr_str:
            raise AddressValueError('')
        self._ip = self._ip_int_from_string(addr_str)
class IPv6Network(_BaseV6, _BaseNet):
    """This class represents and manipulates 128-bit IPv6 networks.

    Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
        .ip: IPv6Address('2001:658:22a:cafe:200::1')
        .network: IPv6Address('2001:658:22a:cafe::')
        .hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
        .broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
        .netmask: IPv6Address('ffff:ffff:ffff:ffff::')
        .prefixlen: 64
    """

    def __init__(self, address, strict=False):
        """Instantiate a new IPv6 Network object.

        Args:
            address: A string or integer representing the IPv6 network or the
              IP and prefix/netmask.
              '2001:4860::/128'
              '2001:4860:0000:0000:0000:0000:0000:0000/128'
              '2001:4860::'
              are all functionally the same in IPv6.  That is to say,
              failing to provide a subnetmask will create an object with
              a mask of /128.

              Additionally, an integer can be passed, so
              IPv6Network('2001:4860::') ==
                IPv6Network(42541956101370907050197289607612071936L).
              or, more generally
              IPv6Network(IPv6Network('2001:4860::')._ip) ==
                IPv6Network('2001:4860::')

            strict: A boolean. If true, ensure that we have been passed
              a true network address, eg, 192.168.1.0/24 and not an
              IP address on a network, eg, 192.168.1.1/24.

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv6 address.
            ValueError: If strict was True and a network address was not
              supplied.
        """
        _BaseNet.__init__(self, address)
        _BaseV6.__init__(self, address)

        # Constructing from an integer or packed bytes.
        if isinstance(address, (int, long, Bytes)):
            self.ip = IPv6Address(address)
            self._ip = self.ip._ip
            self._prefixlen = self._max_prefixlen
            self.netmask = IPv6Address(self._ALL_ONES)
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = str(address).split('/')

        if len(addr) > 2:
            raise AddressValueError(address)

        self._ip = self._ip_int_from_string(addr[0])
        self.ip = IPv6Address(self._ip)

        if len(addr) == 2:
            # IPv6 netmasks are only accepted in prefix-length form.
            if self._is_valid_netmask(addr[1]):
                self._prefixlen = int(addr[1])
            else:
                raise NetmaskValueError(addr[1])
        else:
            self._prefixlen = self._max_prefixlen

        self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))

        if strict:
            if self.ip != self.network:
                raise ValueError('%s has host bits set' %
                                 self.ip)
        # In a /127 both addresses are usable hosts, so iterhosts is
        # aliased to plain iteration over the network.
        if self._prefixlen == (self._max_prefixlen - 1):
            self.iterhosts = self.__iter__

    def _is_valid_netmask(self, prefixlen):
        """Verify that the netmask/prefixlen is valid.

        Args:
            prefixlen: A string, the netmask in prefix length format.

        Returns:
            A boolean, True if the prefix represents a valid IPv6
            netmask.
        """
        try:
            prefixlen = int(prefixlen)
        except ValueError:
            return False
        return 0 <= prefixlen <= self._max_prefixlen

    @property
    def with_netmask(self):
        # IPv6 has no dotted netmask form, so the prefix form is used.
        return self.with_prefixlen
| apache-2.0 |
AnthillTech/python-mewa-client | examples/main.py | 1 | 1354 | '''
Created on 27 lip 2014
@author: Krzysztof Langner
'''
from mewa.client import Connection
HOST_URL = "ws://mewa.cc:9001/ws"
# HOST_URL = "ws://localhost:9000/ws"
connection = Connection(HOST_URL)
def onConnected():
    """Run the demo sequence once the server connection is established."""
    connection.getDevices()
    connection.sendEvent("serviceA.event2", "78", True)
    switch_params = [
        {"type": "org.fi24.switch", "name": "switch2"},
        {"type": "org.fi24.switch", "name": "switch1"},
        {"type": "org.fi24.switch", "name": "switch0"},
    ]
    connection.sendMessage("device66", "serviceA.level", switch_params)
def onEvent(timestamp, fromDevice, eventId, params):
    """Log an event broadcast by another device on the channel."""
    line = "received event {0} from {1} with params {2}".format(
        eventId, fromDevice, params)
    print(line)
def onMessage(timestamp, fromDevice, msgId, params):
    """Log a message addressed directly to this device.

    Fixes the original call, which interpolated four values into a
    format string containing only three '%s' placeholders (and relied
    on '+' concatenation, which binds looser than '%'), so every
    invocation raised TypeError.  The timestamp now has its own
    placeholder, mirroring the layout used by onDevicesEvent.
    """
    print("%s: received message %s from %s with params %s"
          % (timestamp, msgId, fromDevice, params))
def onDevicesEvent(timestamp, devices):
    """Print the list of devices currently present on the channel."""
    header = timestamp + ": Found devices:"
    print(header)
    print(devices)
def onError(reason):
    """Report an error raised by the server or the connection layer."""
    message = "Error: " + reason
    print(message)
def onAck():
    """Print an acknowledgement marker for the last delivered command."""
    print("ACK")
if __name__ == "__main__":
connection.onConnected = onConnected
connection.onEvent = onEvent
connection.onMessage = onMessage
connection.onDevicesEvent = onDevicesEvent
connection.onError = onError
connection.onAck = onAck
connection.connect("admin.test", "python", "l631vxqa", [""])
| bsd-2-clause |
thecodinghub/news-for-good | news/Lib/encodings/cp1258.py | 272 | 13364 | """ Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1258 codec backed by the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* to cp1258 bytes via the encoding table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode cp1258 bytes in *input* via the decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1258 encoder; the charmap codec carries no state."""

    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors,
                                                encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1258 decoder; the charmap codec carries no state."""

    def decode(self, input, final=False):
        text, _consumed = codecs.charmap_decode(input, self.errors,
                                                decoding_table)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # encode() is inherited from Codec; the stream plumbing comes from
    # codecs.StreamWriter, so no cp1258-specific code is needed here.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # decode() is inherited from Codec; the stream plumbing comes from
    # codecs.StreamReader, so no cp1258-specific code is needed here.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register this codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1258',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
'\u0303' # 0xDE -> COMBINING TILDE
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\u0323' # 0xF2 -> COMBINING DOT BELOW
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
'\u20ab' # 0xFE -> DONG SIGN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
# Inverse mapping (unicode character -> cp1258 byte), built once at
# import time from decoding_table for use by codecs.charmap_encode.
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause |
vishwaprakashmishra/xmatrix | vumi/transports/mtn_rwanda/mtn_rwanda_ussd.py | 4 | 8197 | # -*- test-case-name: vumi.transports.mtn_rwanda.tests.test_mtn_rwanda_ussd -*-
from datetime import datetime
from twisted.internet import reactor
from twisted.web import xmlrpc
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from vumi.message import TransportUserMessage
from vumi.transports.base import Transport
from vumi.config import (
ConfigServerEndpoint, ConfigInt, ConfigDict, ConfigText,
ServerEndpointFallback)
from vumi.components.session import SessionManager
from vumi.transports.httprpc.httprpc import HttpRpcHealthResource
from vumi.utils import build_web_site
class MTNRwandaUSSDTransportConfig(Transport.CONFIG_CLASS):
    """
    MTN Rwanda USSD transport configuration.

    Declarative config fields; each field's description string doubles
    as its user-facing documentation.
    """
    twisted_endpoint = ConfigServerEndpoint(
        "The listening endpoint that the remote client will connect to.",
        required=True, static=True,
        fallbacks=[ServerEndpointFallback()])
    timeout = ConfigInt(
        "No. of seconds to wait before removing a request that hasn't "
        "received a response yet.",
        default=30, static=True)
    redis_manager = ConfigDict(
        "Parameters to connect to redis with",
        default={}, static=True)
    session_timeout_period = ConfigInt(
        "Maximum length of a USSD session",
        default=600, static=True)
    web_path = ConfigText(
        "The path to serve this resource on.", required=True, static=True)
    health_path = ConfigText(
        "The path to serve the health resource on.", default='/health/',
        static=True)

    # TODO: Deprecate these fields when confmodel#5 is done.
    host = ConfigText(
        "*DEPRECATED* 'host' and 'port' fields may be used in place of the"
        " 'twisted_endpoint' field.", static=True)
    port = ConfigInt(
        "*DEPRECATED* 'host' and 'port' fields may be used in place of the"
        " 'twisted_endpoint' field.", static=True)
class RequestTimedOutError(Exception):
    """Errback reason used when an inbound request receives no reply
    within the configured timeout."""
    pass
class InvalidRequest(Exception):
    """Errback reason used when an inbound XML-RPC payload is missing
    one or more required fields."""
    pass
class MTNRwandaUSSDTransport(Transport):
    """USSD transport for MTN Rwanda.

    Inbound requests arrive as XML-RPC calls (via
    MTNRwandaXMLRPCResource); each one is published as a vumi message
    and its XML-RPC response is delivered later by firing the deferred
    stored per request id, or errbacked on timeout/validation failure.
    """

    transport_type = 'ussd'
    xmlrpc_server = None
    CONFIG_CLASS = MTNRwandaUSSDTransportConfig
    # NOTE(review): ENCODING is never referenced in this class; outbound
    # content is encoded with a hard-coded 'utf-8' below.
    ENCODING = 'UTF-8'

    @inlineCallbacks
    def setup_transport(self):
        """
        Transport specific setup - it sets up a connection.
        """
        # Pending inbound payloads and their response deferreds, both
        # keyed by the generated message id.
        self._requests = {}
        self._requests_deferreds = {}
        # Indirection so tests can substitute a fake reactor clock.
        self.callLater = reactor.callLater
        config = self.get_static_config()
        self.endpoint = config.twisted_endpoint
        self.timeout = config.timeout
        r_prefix = "vumi.transports.mtn_rwanda:%s" % self.transport_name
        self.session_manager = yield SessionManager.from_redis_config(
            config.redis_manager, r_prefix,
            config.session_timeout_period)
        # Web site serving the health check and the XML-RPC endpoint.
        self.factory = build_web_site({
            config.health_path: HttpRpcHealthResource(self),
            config.web_path: MTNRwandaXMLRPCResource(self),
        })
        self.xmlrpc_server = yield self.endpoint.listen(self.factory)

    @inlineCallbacks
    def teardown_transport(self):
        """
        Clean-up of setup done in setup_transport.
        """
        # NOTE(review): session_manager.stop() is not yielded here --
        # confirm against SessionManager whether it returns a deferred.
        self.session_manager.stop()
        if self.xmlrpc_server is not None:
            yield self.xmlrpc_server.stopListening()

    def get_health_response(self):
        # Body served by HttpRpcHealthResource on the health path.
        return "OK"

    def set_request(self, request_id, request_args):
        # Store (or replace) the raw payload for request_id and return it.
        self._requests[request_id] = request_args
        return request_args

    def get_request(self, request_id):
        # Returns the stored payload, or None if the id is unknown
        # (already finished or timed out).
        if request_id in self._requests:
            request = self._requests[request_id]
            return request

    def remove_request(self, request_id):
        # KeyError here would indicate a double-finish; callers only
        # invoke this for ids they know are present.
        del self._requests[request_id]

    def timed_out(self, request_id):
        # Reactor callback: no reply arrived within self.timeout, so
        # errback the XML-RPC deferred the remote client is waiting on.
        d = self._requests_deferreds[request_id]
        self.remove_request(request_id)
        d.errback(RequestTimedOutError(
            "Request %r timed out." % (request_id,)))

    # Field names every inbound payload must carry.
    REQUIRED_INBOUND_MESSAGE_FIELDS = set([
        'TransactionId', 'TransactionTime', 'MSISDN', 'USSDServiceCode',
        'USSDRequestString'])

    def validate_inbound_data(self, msg_params):
        # True iff every required field name is present in msg_params.
        missing_fields = (
            self.REQUIRED_INBOUND_MESSAGE_FIELDS - set(msg_params))
        if missing_fields:
            return False
        else:
            return True

    @inlineCallbacks
    def handle_raw_inbound_request(self, message_id, values, d):
        """
        Called by the XML-RPC server when it receives a payload that
        needs processing.
        """
        # NOTE(review): the timeout handle is kept on a single instance
        # attribute, so overlapping requests overwrite each other's
        # handles and handle_outbound_message cancels whichever was
        # stored last -- verify behaviour under concurrent sessions.
        self.timeout_request = self.callLater(self.timeout,
                                              self.timed_out, message_id)
        self._requests[message_id] = values
        self._requests_deferreds[message_id] = d
        if not self.validate_inbound_data(values.keys()):
            self.timeout_request.cancel()
            self.remove_request(message_id)
            d.errback(InvalidRequest("4001: Missing Parameters"))
        else:
            # An existing session means this request continues a USSD
            # dialogue; otherwise a new session is created.
            session_id = values['TransactionId']
            session = yield self.session_manager.load_session(session_id)
            if session:
                session_event = TransportUserMessage.SESSION_RESUME
                content = values['USSDRequestString']
            else:
                yield self.session_manager.create_session(
                    session_id, from_addr=values['MSISDN'],
                    to_addr=values['USSDServiceCode'])
                session_event = TransportUserMessage.SESSION_NEW
                content = None
            metadata = {
                'transaction_id': values['TransactionId'],
                'transaction_time': values['TransactionTime'],
            }
            res = yield self.publish_message(
                message_id=message_id,
                content=content,
                from_addr=values['MSISDN'],
                to_addr=values['USSDServiceCode'],
                session_event=session_event,
                transport_type=self.transport_type,
                transport_metadata={'mtn_rwanda_ussd': metadata}
            )
            returnValue(res)

    @inlineCallbacks
    def finish_request(self, request_id, data, session_event):
        # Turn the stored inbound payload into the XML-RPC response
        # structure and fire the waiting deferred.
        request = self.get_request(request_id)
        del request['USSDRequestString']
        request['USSDResponseString'] = data
        request['TransactionTime'] = datetime.now().isoformat()
        if session_event == TransportUserMessage.SESSION_NEW:
            request['action'] = 'request'
        elif session_event == TransportUserMessage.SESSION_CLOSE:
            request['action'] = 'end'
            # The dialogue is over; drop the session state from redis.
            yield self.session_manager.clear_session(request['TransactionId'])
        elif session_event == TransportUserMessage.SESSION_RESUME:
            request['action'] = 'notify'
        self.set_request(request_id, request)
        d = self._requests_deferreds[request_id]
        self.remove_request(request_id)
        d.callback(request)

    def handle_outbound_message(self, message):
        """
        Read outbound message and do what needs to be done with them.
        """
        request_id = message['in_reply_to']
        if self.get_request(request_id) is None:
            # Nothing is waiting for this reply (unknown id, or the
            # request already timed out).
            return self.publish_nack(user_message_id=message['message_id'],
                                     sent_message_id=message['message_id'],
                                     reason='Request not found')
        self.timeout_request.cancel()
        self.finish_request(request_id,
                            message.payload['content'].encode('utf-8'),
                            message['session_event'])
        return self.publish_ack(user_message_id=request_id,
                                sent_message_id=request_id)
class MTNRwandaXMLRPCResource(xmlrpc.XMLRPC):
    """
    A Resource object implementing XML-RPC, can be published using
    twisted.web.server.Site.

    Each 'handleUSSD' call is forwarded to the owning transport and answered
    asynchronously via a Deferred.
    """

    def __init__(self, transport):
        # Keep a handle on the owning transport so RPC calls can be
        # forwarded to it; allowNone=True lets responses marshal None.
        self.transport = transport
        xmlrpc.XMLRPC.__init__(self, allowNone=True)

    def xmlrpc_handleUSSD(self, request_data):
        """Entry point for the 'handleUSSD' XML-RPC method.

        Returns a Deferred that the transport fires once the outbound
        reply for this request is available.
        """
        request_id = Transport.generate_message_id()
        d = Deferred()
        self.transport.handle_raw_inbound_request(request_id, request_data, d)
        return d
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Coupled_Contact/Steady_State_Single_Foundation_Sysytem_Under_Tension/CoupledHardContact_NonLinHardSoftShear/n_0.3/compare_txt.py | 637 | 2094 | #!/usr/bin/python
# Compare a single scalar analytic solution against a numeric result file and
# print a colored pass report.  Python 2 script (print statements).
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
# both files are expected to hold exactly one scalar value
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
# NOTE(review): this is a fraction, although the table header below says
# 'error[%]' (no *100); it also divides by zero if the analytic value is 0.
rel_error = abs_error/analytic_sol
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
# print the results
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error[%]')
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
    print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
| cc0-1.0 |
edx-solutions/edx-platform | lms/djangoapps/verify_student/management/commands/retry_failed_photo_verifications.py | 4 | 3261 | """
Django admin commands related to verify_student
"""
import logging
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification, SSPVerificationRetryConfig
log = logging.getLogger('retry_photo_verification')
class Command(BaseCommand):
    """
    This method finds those PhotoVerifications with a status of
    MUST_RETRY and attempts to verify them.

    Ids may also be supplied explicitly (--verification-ids) or pulled from
    the SSPVerificationRetryConfig model (--args-from-database).
    """
    args = "<SoftwareSecurePhotoVerification id, SoftwareSecurePhotoVerification id, ...>"
    help = (
        "Retries SoftwareSecurePhotoVerifications passed as "
        "arguments, or if no arguments are supplied, all that "
        "are in a state of 'must_retry'"
    )

    def add_arguments(self, parser):
        # Explicit receipt ids to retry (space separated).
        parser.add_argument(
            '--verification-ids',
            dest='verification_ids',
            action='store',
            nargs='+',
            type=str,
            help='verifications id used to retry verification'
        )
        parser.add_argument(
            '--args-from-database',
            action='store_true',
            help='Use arguments from the SSPVerificationRetryConfig model instead of the command line.',
        )

    def get_args_from_database(self):
        """ Returns an options dictionary from the current SSPVerificationRetryConfig model. """
        sspv_retry_config = SSPVerificationRetryConfig.current()
        if not sspv_retry_config.enabled:
            log.warning('SSPVerificationRetryConfig is disabled or empty, but --args-from-database was requested.')
            # Empty dict -> handle() falls back to the 'must_retry' query.
            return {}
        # We don't need fancy shell-style whitespace/quote handling - none of our arguments are complicated
        argv = sspv_retry_config.arguments.split()
        parser = self.create_parser('manage.py', 'sspv_retry')
        return parser.parse_args(argv).__dict__  # we want a dictionary, not a non-iterable Namespace object

    def handle(self, *args, **options):
        """Retry failed photo verifications: either those named in the
        options, or every attempt currently in 'must_retry' state."""
        options = self.get_args_from_database() if options['args_from_database'] else options
        args = options.get('verification_ids', None)
        if args:
            attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(
                receipt_id__in=options['verification_ids']
            )
            # NOTE(review): this message claims the ids came from the config
            # model, but this branch also runs for command-line ids --
            # confirm the wording is intended.
            log.info(u"Fetching retry verification ids from config model")
            force_must_retry = True
        else:
            attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(status='must_retry')
            force_must_retry = False
        log.info(u"Attempting to retry {0} failed PhotoVerification submissions".format(len(attempts_to_retry)))
        for index, attempt in enumerate(attempts_to_retry):
            log.info(u"Retrying submission #{0} (ID: {1}, User: {2})".format(index, attempt.id, attempt.user))
            # Set the attempts status to 'must_retry' so that we can re-submit it
            if force_must_retry:
                attempt.status = 'must_retry'
            attempt.submit(copy_id_photo_from=attempt.copy_id_photo_from)
            log.info(u"Retry result: {0}".format(attempt.status))
        log.info("Done resubmitting failed photo verifications")
| agpl-3.0 |
tencia/deeptrackpy | utils.py | 1 | 10122 | import time
import sys
import os
from PIL import Image
import numpy as np
import lasagne as nn
import theano
import theano.tensor as T
import h5py
from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import ShuffledScheme, SequentialScheme
from fuel.streams import DataStream
# runs training loop, expects data in DataH5PYStreamer format
# tr_transform and te_transform must return list or tuple, to allow
# for situations where the functions require 2+ inputs
def train_with_hdf5(data, num_epochs, train_fn, test_fn,
        tr_transform = lambda x:x,
        te_transform = lambda x:x,
        verbose=True, train_shuffle=True,
        save_params_to=None,
        save_last_params=False,
        last_layer=None,
        use_tqdm=True,
        max_per_epoch=-1):
    """Generic epoch loop over a DataH5PyStreamer.

    train_fn / test_fn are callables applied to each (transformed) minibatch
    and must return a scalar error.  Returns a list of (train_err, val_err)
    pairs, one per epoch.  When save_params_to is set, last_layer must be the
    lasagne layer whose params are snapshotted (best validation error, or the
    final epoch's params when save_last_params is True).  max_per_epoch > 0
    caps the number of batches per epoch.
    """
    tr_stream = data.streamer(training=True, shuffled=train_shuffle)
    te_stream = data.streamer(training=False, shuffled=False)
    ret = []
    mve_params = None
    mve = None  # minimum validation error seen so far
    for epoch in range(num_epochs):
        start = time.time()
        tr_err, tr_batches = 0,0
        iterator = tr_stream.get_epoch_iterator()
        if use_tqdm:
            from tqdm import tqdm
            iterator = tqdm(iterator, total=data.ntrain/data.batch_size)
        for imb in iterator:
            # skip the ragged final minibatch: callers expect a fixed size
            if imb[0].shape[0] != data.batch_size:
                continue
            imb = tr_transform(imb)
            if not isinstance(imb, tuple):
                imb = (imb,)
            tr_err += train_fn(*imb)
            tr_batches += 1
            if max_per_epoch > 0 and tr_batches > max_per_epoch:
                break
        val_err, val_batches = 0,0
        iterator = te_stream.get_epoch_iterator()
        if use_tqdm:
            iterator = tqdm(iterator, total=data.ntest/data.batch_size)
        for imb in iterator:
            if imb[0].shape[0] != data.batch_size:
                continue
            imb = te_transform(imb)
            if not isinstance(imb, tuple):
                imb = (imb,)
            val_err += test_fn(*imb)
            val_batches += 1
            if max_per_epoch > 0 and val_batches > max_per_epoch:
                break
        # average accumulated errors over batches (guard against 0 batches)
        val_err /= (val_batches if val_batches > 0 else 1)
        tr_err /= (tr_batches if tr_batches > 0 else 1)
        if save_params_to is not None:
            # snapshot params whenever validation error improves
            if mve is None or val_err < mve:
                mve = val_err
                mve_params = [np.copy(p) for p in (nn.layers.get_all_param_values(last_layer))]
        if verbose:
            print('ep {}/{} - tl {:.5f} - vl {:.5f} - t {:.3f}s'.format(
                epoch, num_epochs, tr_err, val_err, time.time()-start))
        ret.append((tr_err, val_err))
    if save_params_to is not None:
        if save_last_params:
            # overwrite the best-validation snapshot with the final params
            mve_params = [np.copy(p) for p in (nn.layers.get_all_param_values(last_layer))]
        save_params(mve_params, save_params_to)
    return ret
# Converts a raw (typically uint8) image batch to floatX in the range
# [-pixel_shift, 1-pixel_shift].  With square=True the batch is cropped to
# the short edge: centered when center=True, otherwise at a random offset
# drawn from rng (np.random by default).
def raw_to_floatX(imb, pixel_shift=0.5, square=True, center=False, rng=None):
    rng = rng or np.random
    w, h = imb.shape[2], imb.shape[3]
    x = y = 0  # crop offsets
    if square and w != h:
        short_edge = min(w, h)
        excess = abs(w - h)
        offset = excess // 2 if center else rng.randint(excess)
        if w > h:
            x, w = offset, short_edge
        else:
            y, h = offset, short_edge
    return nn.utils.floatX(imb)[:,:,x:x+w,y:y+h]/ 255. - pixel_shift
# creates an hdf5 file from a dataset given a split in the form {'train':(0,n)}, etc
# appears to save in unpredictable order, so order must be verified after creation
def save_hd5py(dataset_dict, destfile, indices_dict_or_numfolds):
    """Write the arrays in dataset_dict to destfile as a fuel-style HDF5 file.

    indices_dict_or_numfolds is either a {split_name: (start, stop)} mapping
    or an int N, in which case N equal folds 'fold_0'..'fold_{N-1}' are built.
    (Python 2 code: print statement, dict iteritems/iterkeys.)
    """
    indices_dict = indices_dict_or_numfolds
    if isinstance(indices_dict, int):
        folds = indices_dict
        # fold boundaries come from the largest dataset in the dict
        n = max(len(it) for it in dataset_dict.values())
        fold_n = n // folds
        indices_dict = dict(('fold_{}'.format(i), (i*fold_n, (i+1)*fold_n)) \
                for i in range(folds))
        print indices_dict  # NOTE(review): debug print left in
    f = h5py.File(destfile, mode='w')
    for name, dataset in dataset_dict.iteritems():
        dat = f.create_dataset(name, dataset.shape, dtype=str(dataset.dtype))
        dat[...] = dataset
    # every split covers every dataset with the same (start, stop) range
    split_dict = dict((k, dict((name, v) for name in dataset_dict.iterkeys()))
            for k,v in indices_dict.iteritems())
    f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
    f.flush()
    f.close()
# for organizing an hdf5 file for streaming
class DataH5PyStreamer:
    # folds = None if dataset is separated into 'train', 'test'
    # folds = (10, 3) for ex if there are 10 total folds and #3 (zero_indexed) is validation set
    # folds = (10, -1) if we want to train on every fold (sets val = fold 0)
    def __init__(self, h5filename, ntrain=None, ntest=None, batch_size=1, folds=None):
        """Wrap a fuel HDF5 file and hand out train/test DataStreams."""
        if folds is None:
            te_sets = ('test',)
            tr_sets = ('train',)
        elif folds[1] == -1:
            # NOTE(review): in this mode fold_0 appears in both the training
            # and validation sets -- intentional per the comment above.
            te_sets = ('fold_0',)
            tr_sets = tuple(['fold_{}'.format(i) for i in range(folds[0])])
        else:
            te_sets = ('fold_{}'.format(folds[1]),)
            tr_sets = tuple(['fold_{}'.format(i) for i in range(folds[0]) if i != folds[1]])
        self.batch_size = batch_size
        self.tr_data = H5PYDataset(h5filename, which_sets=tr_sets)
        self.te_data = H5PYDataset(h5filename, which_sets=te_sets)
        # explicit ntrain/ntest can restrict how many examples are used
        self.ntrain = ntrain if ntrain is not None else self.tr_data.num_examples
        self.ntest = ntest if ntest is not None else self.te_data.num_examples

    def dataset(self, training=True):
        """Return the underlying H5PYDataset (train or test)."""
        return self.tr_data if training else self.te_data

    def streamer(self, training=True, shuffled=False):
        """Return a fuel DataStream over the train or test split."""
        n = self.ntrain if training else self.ntest
        sch = ShuffledScheme(examples=n, batch_size=self.batch_size) if shuffled else \
                SequentialScheme(examples=n, batch_size=self.batch_size)
        return DataStream(self.tr_data if training else self.te_data, \
                iteration_scheme = sch)
# helper function for building vae's: summed Gaussian log-density of tgt
# under N(mu, exp(ls)^2), where ls is the log standard deviation.
def log_likelihood(tgt, mu, ls):
    norm_const = np.float32(0.5 * np.log(2 * np.pi))
    sq_term = 0.5 * T.sqr(tgt - mu) / T.exp(2 * ls)
    return T.sum(-(norm_const + ls) - sq_term)
# From the array layout used for testing (N, C, W, H, floats shifted by
# -shift) to the (H, W, C) uint8 layout used by Image.fromarray(..);
# grayscale (C == 1) collapses to (H, W).
def get_picture_array(X, index, shift=0.5):
    n_channels, width, height = X.shape[1], X.shape[2], X.shape[3]
    img = (X[index] + shift) * 255.
    img = img.reshape(n_channels, width, height).transpose(2, 1, 0)
    img = img.clip(0, 255).astype(np.uint8)
    if n_channels == 1:
        return img.reshape(height, width)
    return img
# returns an Image with X on top, Xpr on bottom, index as requested or random if -1
def get_image_pair(X, Xpr, index=-1, shift=0.5):
    """Stack sample `index` of batch X above the same sample of Xpr (e.g. a
    reconstruction) in one PIL Image for side-by-side visual comparison."""
    mode = 'RGB' if X.shape[1] == 3 else 'L'
    index = np.random.randint(X.shape[0]) if index == -1 else index
    original_image = Image.fromarray(get_picture_array(X, index, shift=shift), mode=mode)
    # canvas is twice as tall: original on top, second array below
    new_size = (original_image.size[0], original_image.size[1]*2)
    new_im = Image.new(mode, new_size)
    new_im.paste(original_image, (0, 0))
    rec_image = Image.fromarray(get_picture_array(Xpr, index, shift=shift), mode=mode)
    new_im.paste(rec_image, (0, original_image.size[1]))
    return new_im
# gets array (in format used for storage) from an Image
def arr_from_img_storage(im):
    """Convert a PIL image to a uint8 array in (channels, width, height)
    storage layout."""
    w, h = im.size
    arr = np.asarray(im.getdata(), dtype=np.uint8)
    # Bug fix: was np.product(arr.size) / (w*h), which true-divides under
    # Python 3 yielding a float channel count that breaks reshape (and
    # np.product is removed in numpy 2).  Floor division is identical for
    # the original Python 2 semantics.
    c = arr.size // (w * h)
    return arr.reshape(h, w, c).transpose(2, 1, 0)
# gets array (in format used for testing) from an Image
def arr_from_img(im, shift=0.5):
    """Convert a PIL image to a floatX array in (channels, width, height)
    layout, scaled from [0, 255] to [-shift, 1-shift]."""
    w, h = im.size
    arr = np.asarray(im.getdata(), dtype=theano.config.floatX)
    # Bug fix: was np.product(arr.size) / (w*h) -- float under Python 3,
    # breaking reshape; np.product is also gone in numpy 2.
    c = arr.size // (w * h)
    return arr.reshape((h, w, c)).transpose(2, 1, 0) / 255. - shift
# loads params in npz (if filename is a .npz) or pickle if not
def load_params(model, fn):
    """Load parameter values from fn into the lasagne layer `model`.

    '.npz' files are read with numpy (arrays named arr_0, arr_1, ...);
    anything else is treated as a pickle of the parameter list.
    """
    if 'npz' in fn:
        with np.load(fn) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        nn.layers.set_all_param_values(model, param_values)
    else:
        # Bug fix: pickle files must be opened in binary mode ('rb'); text
        # mode fails under Python 3 and is harmless under Python 2.
        with open(fn, 'rb') as fh:
            import pickle
            nn.layers.set_all_param_values(model, pickle.load(fh))
# saves params in npz (if filename is a .npz) or pickle if not
def save_params(model, fn):
    """Save parameter values to fn: '.npz' via numpy, anything else pickled.

    `model` may be a lasagne layer (params are extracted) or an already
    extracted list of parameter arrays.
    """
    dirname = os.path.dirname(fn)
    # Guard: os.makedirs('') raises when fn has no directory component.
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    # Bug fix: param_vals was previously only computed inside the npz
    # branch, so the pickle branch raised NameError.  Hoist it.
    if isinstance(model, list):
        param_vals = model
    else:
        param_vals = nn.layers.get_all_param_values(model)
    if 'npz' in fn:
        np.savez(fn, *param_vals)
    else:
        # Bug fix: pickle requires binary mode ('wb'), not 'w'.
        with open(fn, 'wb') as wr:
            import pickle
            pickle.dump(param_vals, wr)
# Reset optimizer accumulator shared variables to zero (e.g. to recover
# from a NaN blow-up) while leaving the actual model parameters untouched.
def reset_accs(updates, params):
    for shared_var in updates:
        if shared_var in params:
            continue  # a real parameter, not an accumulator
        val = shared_var.get_value(borrow=True)
        shared_var.set_value(np.zeros(val.shape, dtype=val.dtype))
# build loss as in (Kingma, Welling 2014) Autoencoding Variational Bayes
def build_vae_loss(input_var, l_z_mu, l_z_ls, l_x_mu_list, l_x_ls_list, l_x_list, l_x,
        deterministic, binary, L):
    """Return (loss, prediction) symbolic expressions for a VAE.

    l_z_mu / l_z_ls: layers producing latent mean and log-sigma.
    l_x_mu_list / l_x_ls_list / l_x_list: per-latent-sample output layers
    (L of each).  binary selects Bernoulli (cross-entropy) vs Gaussian
    reconstruction; L is the number of latent samples averaged over.
    """
    # One get_output call collects every needed symbolic output at once.
    layer_outputs = nn.layers.get_output([l_z_mu, l_z_ls] + l_x_mu_list + l_x_ls_list
            + l_x_list + [l_x], deterministic=deterministic)
    z_mu = layer_outputs[0]
    z_ls = layer_outputs[1]
    # Slices below mirror the concatenation order of the list above.
    x_mu = [] if binary else layer_outputs[2:2+L]
    x_ls = [] if binary else layer_outputs[2+L:2+2*L]
    x_list = layer_outputs[2:2+L] if binary else layer_outputs[2+2*L:2+3*L]
    x = layer_outputs[-1]
    # Analytic KL between the diagonal Gaussian posterior and N(0, I).
    kl_div = 0.5 * T.sum(1 + 2*z_ls - T.sqr(z_mu) - T.exp(2 * z_ls))
    if binary:
        # Bernoulli log-likelihood, averaged over the L samples.
        logpxz = sum(nn.objectives.binary_crossentropy(x, input_var).sum()
                for x in x_list) * (-1./L)
        prediction = x_list[0] if deterministic else x
    else:
        # Gaussian log-likelihood, averaged over the L samples.
        logpxz = sum(log_likelihood(input_var.flatten(2), mu, ls)
                for mu, ls in zip(x_mu, x_ls))/L
        prediction = x_mu[0] if deterministic else T.sum(x_mu, axis=0)/L
    # Negative evidence lower bound.
    loss = -1 * (logpxz + kl_div)
    return loss, prediction
| mit |
beni55/sympy | sympy/external/tests/test_codegen.py | 13 | 11889 | # This tests the compilation and execution of the source code generated with
# utilities.codegen. The compilation takes place in a temporary directory that
# is removed after the test. By default the test directory is always removed,
# but this behavior can be changed by setting the environment variable
# SYMPY_TEST_CLEAN_TEMP to:
# export SYMPY_TEST_CLEAN_TEMP=always : the default behavior.
# export SYMPY_TEST_CLEAN_TEMP=success : only remove the directories of working tests.
# export SYMPY_TEST_CLEAN_TEMP=never : never remove the directories with the test code.
# When a directory is not removed, the necessary information is printed on
# screen to find the files that belong to the (failed) tests. If a test does
# not fail, py.test captures all the output and you will not see the directories
# corresponding to the successful tests. Use the --nocapture option to see all
# the output.
# All tests below have a counterpart in utilities/test/test_codegen.py. In the
# latter file, the resulting code is compared with predefined strings, without
# compilation or execution.
# All the generated Fortran code should conform with the Fortran 95 standard,
# and all the generated C code should be ANSI C, which facilitates the
# incorporation in various projects. The tests below assume that the binary cc
# is somewhere in the path and that it can compile ANSI C code.
from __future__ import print_function
from sympy.abc import x, y, z
from sympy.utilities.pytest import skip
from sympy.utilities.codegen import (codegen, make_routine, InputArgument,
Result, get_code_generator)
import sys
import os
import tempfile
import subprocess
# templates for the main program that will test the generated code.
main_template = {}
main_template['F95'] = """
program main
include "codegen.h"
integer :: result;
result = 0
%(statements)s
call exit(result)
end program
"""
main_template['C'] = """
#include "codegen.h"
#include <stdio.h>
#include <math.h>
int main() {
int result = 0;
%(statements)s
return result;
}
"""
# templates for the numerical tests
numerical_test_template = {}
numerical_test_template['C'] = """
if (fabs(%(call)s)>%(threshold)s) {
printf("Numerical validation failed: %(call)s=%%e threshold=%(threshold)s\\n", %(call)s);
result = -1;
}
"""
numerical_test_template['F95'] = """
if (abs(%(call)s)>%(threshold)s) then
write(6,"('Numerical validation failed:')")
write(6,"('%(call)s=',e15.5,'threshold=',e15.5)") %(call)s, %(threshold)s
result = -1;
end if
"""
# command sequences for supported compilers
compile_commands = {}
compile_commands['cc'] = [
"cc -c codegen.c -o codegen.o",
"cc -c main.c -o main.o",
"cc main.o codegen.o -lm -o test.exe"
]
compile_commands['gfortran'] = [
"gfortran -c codegen.f90 -o codegen.o",
"gfortran -ffree-line-length-none -c main.f90 -o main.o",
"gfortran main.o codegen.o -o test.exe"
]
compile_commands['g95'] = [
"g95 -c codegen.f90 -o codegen.o",
"g95 -ffree-line-length-huge -c main.f90 -o main.o",
"g95 main.o codegen.o -o test.exe"
]
compile_commands['ifort'] = [
"ifort -c codegen.f90 -o codegen.o",
"ifort -c main.f90 -o main.o",
"ifort main.o codegen.o -o test.exe"
]
combinations_lang_compiler = [
('C', 'cc'),
('F95', 'ifort'),
('F95', 'gfortran'),
('F95', 'g95')
]
def try_run(commands):
    """Run a series of shell commands; return True only if every one exits
    with status 0 (stdout/stderr are discarded)."""
    # Fix: the devnull handle was opened but never closed (leaked on every
    # call, including the early-return path).  A with-block closes it.
    with open(os.devnull, 'w') as null:
        for command in commands:
            retcode = subprocess.call(command, stdout=null, shell=True,
                stderr=subprocess.STDOUT)
            if retcode != 0:
                return False
    return True
def run_test(label, routines, numerical_tests, language, commands, friendly=True):
    """A driver for the codegen tests.

    This driver assumes that a suitable compiler is present in the PATH (for
    F95, at least a Fortran 90 compiler). The generated code is written in
    a temporary directory, together with a main program that validates the
    generated code. The test passes when the compilation and the validation
    run correctly.
    """
    # Check input arguments before touching the file system
    language = language.upper()
    assert language in main_template
    assert language in numerical_test_template
    # Check that the environment variable makes sense
    clean = os.getenv('SYMPY_TEST_CLEAN_TEMP', 'always').lower()
    if clean not in ('always', 'success', 'never'):
        raise ValueError("SYMPY_TEST_CLEAN_TEMP must be one of the following: 'always', 'success' or 'never'.")
    # Do all the magic to compile, run and validate the test code
    # 1) prepare the temporary working directory, switch to that dir
    work = tempfile.mkdtemp("_sympy_%s_test" % language, "%s_" % label)
    oldwork = os.getcwd()
    os.chdir(work)
    # 2) write the generated code
    if friendly:
        # interpret the routines as a name_expr list and call the friendly
        # function codegen
        codegen(routines, language, "codegen", to_files=True)
    else:
        code_gen = get_code_generator(language, "codegen")
        code_gen.write(routines, "codegen", to_files=True)
    # 3) write a simple main program that links to the generated code, and that
    #    includes the numerical tests
    test_strings = []
    for fn_name, args, expected, threshold in numerical_tests:
        call_string = "%s(%s)-(%s)" % (
            fn_name, ",".join(str(arg) for arg in args), expected)
        if language == "F95":
            call_string = fortranize_double_constants(call_string)
            threshold = fortranize_double_constants(str(threshold))
        test_strings.append(numerical_test_template[language] % {
            "call": call_string,
            "threshold": threshold,
        })
    if language == "F95":
        f_name = "main.f90"
    elif language == "C":
        f_name = "main.c"
    else:
        # Bug fix: previously raised NotImplemented (a constant, not an
        # exception class), which produced a confusing TypeError instead of
        # the intended message.
        raise NotImplementedError(
            "FIXME: filename extension unknown for language: %s" % language)
    with open(f_name, "w") as f:
        f.write(
            main_template[language] % {'statements': "".join(test_strings)})
    # 4) Compile and link
    compiled = try_run(commands)
    # 5) Run if compiled
    if compiled:
        executed = try_run(["./test.exe"])
    else:
        executed = False
    # 6) Clean up stuff
    if clean == 'always' or (clean == 'success' and compiled and executed):
        def safe_remove(filename):
            if os.path.isfile(filename):
                os.remove(filename)
        safe_remove("codegen.f90")
        safe_remove("codegen.c")
        safe_remove("codegen.h")
        safe_remove("codegen.o")
        safe_remove("main.f90")
        safe_remove("main.c")
        safe_remove("main.o")
        safe_remove("test.exe")
        os.chdir(oldwork)
        os.rmdir(work)
    else:
        print("TEST NOT REMOVED: %s" % work, file=sys.stderr)
        os.chdir(oldwork)
    # 7) Do the assertions in the end
    assert compiled, "failed to compile %s code with:\n%s" % (
        language, "\n".join(commands))
    assert executed, "failed to execute %s code from:\n%s" % (
        language, "\n".join(commands))
def fortranize_double_constants(code_string):
    """
    Replaces every literal float with literal doubles: scientific notation
    gets its 'e'/'E' turned into 'd', and plain floats get a 'd0' suffix so
    Fortran treats them as double precision.
    """
    import re
    sci_pattern = re.compile(r'\d+(\.)?\d*[eE]-?\d+')
    plain_pattern = re.compile(r'\d+\.\d*(?!\d*d)')
    code_string = sci_pattern.sub(
        lambda m: re.sub('[eE]', 'd', m.group(0)), code_string)
    return plain_pattern.sub(lambda m: '%sd0' % m.group(0), code_string)
def is_feasible(language, commands):
    """Probe whether `commands` can compile and run a trivial generated
    routine; used below to decide which language/compiler pairs to test."""
    # This test should always work, otherwise the compiler is not present.
    routine = make_routine("test", x)
    numerical_tests = [
        ("test", ( 1.0,), 1.0, 1e-15),
        ("test", (-1.0,), -1.0, 1e-15),
    ]
    try:
        run_test("is_feasible", [routine], numerical_tests, language, commands,
                 friendly=False)
        return True
    except AssertionError:
        # run_test asserts on compile/run success; failure means infeasible.
        return False
valid_lang_commands = []
invalid_lang_compilers = []
for lang, compiler in combinations_lang_compiler:
commands = compile_commands[compiler]
if is_feasible(lang, commands):
valid_lang_commands.append((lang, commands))
else:
invalid_lang_compilers.append((lang, compiler))
# We test all language-compiler combinations, just to report what is skipped

def test_C_cc():
    # Skip-reporter: marks the C/cc combination as skipped when the probe failed.
    if ("C", 'cc') in invalid_lang_compilers:
        skip("`cc' command didn't work as expected")
def test_F95_ifort():
    # Skip-reporter for the F95/ifort combination.
    if ("F95", 'ifort') in invalid_lang_compilers:
        skip("`ifort' command didn't work as expected")
def test_F95_gfortran():
    # Skip-reporter for the F95/gfortran combination.
    if ("F95", 'gfortran') in invalid_lang_compilers:
        skip("`gfortran' command didn't work as expected")
def test_F95_g95():
    # Skip-reporter for the F95/g95 combination.
    if ("F95", 'g95') in invalid_lang_compilers:
        skip("`g95' command didn't work as expected")
# Here comes the actual tests

def test_basic_codegen():
    # A minimal scalar expression (x+y)*z run through every feasible
    # language/compiler pair.
    numerical_tests = [
        ("test", (1.0, 6.0, 3.0), 21.0, 1e-15),
        ("test", (-1.0, 2.0, -2.5), -2.5, 1e-15),
    ]
    name_expr = [("test", (x + y)*z)]
    for lang, commands in valid_lang_commands:
        run_test("basic_codegen", name_expr, numerical_tests, lang, commands)
def test_intrinsic_math1_codegen():
    # Single-argument intrinsic math functions, evaluated at a few sample
    # points and compared against sympy's numeric evaluation.
    # not included: log10
    from sympy import acos, asin, atan, ceiling, cos, cosh, floor, log, ln, \
        sin, sinh, sqrt, tan, tanh, N
    name_expr = [
        ("test_fabs", abs(x)),
        ("test_acos", acos(x)),
        ("test_asin", asin(x)),
        ("test_atan", atan(x)),
        ("test_cos", cos(x)),
        ("test_cosh", cosh(x)),
        ("test_log", log(x)),
        ("test_ln", ln(x)),
        ("test_sin", sin(x)),
        ("test_sinh", sinh(x)),
        ("test_sqrt", sqrt(x)),
        ("test_tan", tan(x)),
        ("test_tanh", tanh(x)),
    ]
    numerical_tests = []
    for name, expr in name_expr:
        for xval in 0.2, 0.5, 0.8:
            expected = N(expr.subs(x, xval))
            numerical_tests.append((name, (xval,), expected, 1e-14))
    for lang, commands in valid_lang_commands:
        if lang == "C":
            # floor/ceiling variants are only generated for C here
            name_expr_C = [("test_floor", floor(x)), ("test_ceil", ceiling(x))]
        else:
            name_expr_C = []
        run_test("intrinsic_math1", name_expr + name_expr_C,
                 numerical_tests, lang, commands)
def test_instrinsic_math2_codegen():
    # Two-argument intrinsics (atan2, pow) at several sample points.
    # not included: frexp, ldexp, modf, fmod
    from sympy import atan2, N
    name_expr = [
        ("test_atan2", atan2(x, y)),
        ("test_pow", x**y),
    ]
    numerical_tests = []
    for name, expr in name_expr:
        for xval, yval in (0.2, 1.3), (0.5, -0.2), (0.8, 0.8):
            expected = N(expr.subs(x, xval).subs(y, yval))
            numerical_tests.append((name, (xval, yval), expected, 1e-14))
    for lang, commands in valid_lang_commands:
        run_test("intrinsic_math2", name_expr, numerical_tests, lang, commands)
def test_complicated_codegen():
    # Large expanded/nested expressions: stresses the printers more than
    # the numerics (note the looser 1e-12 threshold).
    from sympy import sin, cos, tan, N
    name_expr = [
        ("test1", ((sin(x) + cos(y) + tan(z))**7).expand()),
        ("test2", cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))),
    ]
    numerical_tests = []
    for name, expr in name_expr:
        for xval, yval, zval in (0.2, 1.3, -0.3), (0.5, -0.2, 0.0), (0.8, 2.1, 0.8):
            expected = N(expr.subs(x, xval).subs(y, yval).subs(z, zval))
            numerical_tests.append((name, (xval, yval, zval), expected, 1e-12))
    for lang, commands in valid_lang_commands:
        run_test(
            "complicated_codegen", name_expr, numerical_tests, lang, commands)
| bsd-3-clause |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/python/platform/test.py | 5 | 3624 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Unit tests
TensorFlow provides a convenience class inheriting from `unittest.TestCase`
which adds methods relevant to TensorFlow tests. Here is an example:
import tensorflow as tf
class SquareTest(tf.test.TestCase):
def testSquare(self):
with self.test_session():
x = tf.square([2, 3])
self.assertAllEqual(x.eval(), [4, 9])
if __name__ == '__main__':
tf.test.main()
`tf.test.TestCase` inherits from `unittest.TestCase` but adds a few additional
methods. We will document these methods soon.
@@main
## Utilities
@@assert_equal_graph_def
@@get_temp_dir
@@is_built_with_cuda
## Gradient checking
[`compute_gradient`](#compute_gradient) and
[`compute_gradient_error`](#compute_gradient_error) perform numerical
differentiation of graphs for comparison against registered analytic gradients.
@@compute_gradient
@@compute_gradient_error
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import device_lib
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.util.all_util import make_all
# pylint: disable=unused-import
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import
import sys
if sys.version_info.major == 2:
import mock # pylint: disable=g-import-not-at-top,unused-import
else:
from unittest import mock # pylint: disable=g-import-not-at-top
# Import Benchmark class
Benchmark = googletest.Benchmark # pylint: disable=invalid-name
def main():
    """Runs all unit tests (delegates to the googletest-based runner)."""
    return googletest.main()
def get_temp_dir():
    """Returns a temporary directory for use during tests.

    There is no need to delete the directory after the test.

    Returns:
      The temporary directory.
    """
    # Delegates so the directory's lifecycle/cleanup stays in googletest.
    return googletest.GetTempDir()
def test_src_dir_path(relative_path):
    """Creates an absolute test srcdir path given a relative path.

    Args:
      relative_path: a path relative to tensorflow root.
        e.g. "core/platform".

    Returns:
      An absolute path to the linked in runfiles.
    """
    # Thin wrapper: path resolution is implemented by googletest.
    return googletest.test_src_dir_path(relative_path)
def is_built_with_cuda():
    """Returns whether TensorFlow was built with CUDA (GPU) support."""
    # Build-time flag, not a runtime device check (see is_gpu_available).
    return test_util.IsGoogleCudaEnabled()
def is_gpu_available():
    """Returns whether TensorFlow can access a GPU."""
    for local_device in device_lib.list_local_devices():
        if local_device.device_type == 'GPU':
            return True
    return False
__all__ = make_all(__name__)
# TODO(irving,vrv): Remove once TestCase is documented
__all__.append('TestCase')
| mit |
cntnboys/410Lab6 | v1/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/tests/__init__.py | 62 | 12335 | """Tests for the 'setuptools' package"""
from unittest import TestSuite, TestCase, makeSuite, defaultTestLoader
import distutils.core, distutils.cmd
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils.errors import DistutilsSetupError
import setuptools, setuptools.dist
from setuptools import Feature
from distutils.core import Extension
extract_constant, get_module_constant = None, None
from setuptools.depends import *
from distutils.version import StrictVersion, LooseVersion
from distutils.util import convert_path
import sys, os.path
def additional_tests():
    """Build the doctest-based suite (api_tests.txt), adding the Windows
    script-wrapper doctests when running on win32."""
    import doctest, unittest
    suite = unittest.TestSuite((
        doctest.DocFileSuite(
            os.path.join('tests', 'api_tests.txt'),
            optionflags=doctest.ELLIPSIS, package='pkg_resources',
        ),
    ))
    if sys.platform == 'win32':
        suite.addTest(doctest.DocFileSuite('win_script_wrapper.txt'))
    return suite
def makeSetup(**args):
    """Return distribution from 'setup(**args)', without executing commands"""
    distutils.core._setup_stop_after = "commandline"
    # Don't let system command line leak into tests!
    args.setdefault('script_args', ['install'])
    try:
        return setuptools.setup(**args)
    finally:
        # Bug fix: this previously assigned ``distutils.core_setup_stop_after``
        # (creating a brand-new attribute on the distutils package) instead of
        # resetting ``distutils.core._setup_stop_after``, so the stop-after
        # flag leaked into every subsequent setup() call.
        distutils.core._setup_stop_after = None
class DependsTests(TestCase):
    """Tests for setuptools.depends: bytecode constant extraction, module
    discovery, and the Require dependency descriptor.  (Python 2 era code:
    ``func_code`` and ``self.assert_``.)"""

    def testExtractConst(self):
        if not extract_constant: return  # skip on non-bytecode platforms

        # f1 exists only so its code object can be inspected below.
        def f1():
            global x,y,z
            x = "test"
            y = z

        # unrecognized name
        self.assertEqual(extract_constant(f1.func_code,'q', -1), None)

        # constant assigned
        self.assertEqual(extract_constant(f1.func_code,'x', -1), "test")

        # expression assigned
        self.assertEqual(extract_constant(f1.func_code,'y', -1), -1)

        # recognized name, not assigned
        self.assertEqual(extract_constant(f1.func_code,'z', -1), None)

    def testFindModule(self):
        # Nonexistent modules raise; a real package yields an open file.
        self.assertRaises(ImportError, find_module, 'no-such.-thing')
        self.assertRaises(ImportError, find_module, 'setuptools.non-existent')
        f,p,i = find_module('setuptools.tests'); f.close()

    def testModuleExtract(self):
        if not get_module_constant: return  # skip on non-bytecode platforms
        from email import __version__
        self.assertEqual(
            get_module_constant('email','__version__'), __version__
        )
        self.assertEqual(
            get_module_constant('sys','version'), sys.version
        )
        self.assertEqual(
            get_module_constant('setuptools.tests','__doc__'),__doc__
        )

    def testRequire(self):
        if not extract_constant: return  # skip on non-bytecode platforms

        # Default attribute is __version__, checked with StrictVersion.
        req = Require('Email','1.0.3','email')

        self.assertEqual(req.name, 'Email')
        self.assertEqual(req.module, 'email')
        self.assertEqual(req.requested_version, '1.0.3')
        self.assertEqual(req.attribute, '__version__')
        self.assertEqual(req.full_name(), 'Email-1.0.3')

        from email import __version__
        self.assertEqual(req.get_version(), __version__)
        self.assert_(req.version_ok('1.0.9'))
        self.assert_(not req.version_ok('0.9.1'))
        self.assert_(not req.version_ok('unknown'))

        self.assert_(req.is_present())
        self.assert_(req.is_current())

        # LooseVersion format: present but the huge version is not current.
        req = Require('Email 3000','03000','email',format=LooseVersion)
        self.assert_(req.is_present())
        self.assert_(not req.is_current())
        self.assert_(not req.version_ok('unknown'))

        # A module that does not exist at all.
        req = Require('Do-what-I-mean','1.0','d-w-i-m')
        self.assert_(not req.is_present())
        self.assert_(not req.is_current())

        # No requested version: only presence is checked.
        req = Require('Tests', None, 'tests', homepage="http://example.com")
        self.assertEqual(req.format, None)
        self.assertEqual(req.attribute, None)
        self.assertEqual(req.requested_version, None)
        self.assertEqual(req.full_name(), 'Tests')
        self.assertEqual(req.homepage, 'http://example.com')

        paths = [os.path.dirname(p) for p in __path__]
        self.assert_(req.is_present(paths))
        self.assert_(req.is_current(paths))
class DistroTests(TestCase):
    """Tests for setuptools' Distribution include/exclude bookkeeping."""

    def setUp(self):
        # Two extensions; e1's package ('bar') is never excluded below,
        # e2 lives under package 'c' which several tests remove.
        self.e1 = Extension('bar.ext',['bar.c'])
        self.e2 = Extension('c.y', ['y.c'])
        self.dist = makeSetup(
            packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
            py_modules=['b.d','x'],
            ext_modules = (self.e1, self.e2),
            package_dir = {},
        )

    def testDistroType(self):
        self.assert_(isinstance(self.dist,setuptools.dist.Distribution))

    def testExcludePackage(self):
        """exclude_package() drops the package, its subpackages, modules
        and extensions; order of assertions tracks cumulative state."""
        self.dist.exclude_package('a')
        self.assertEqual(self.dist.packages, ['b','c'])
        self.dist.exclude_package('b')
        self.assertEqual(self.dist.packages, ['c'])
        self.assertEqual(self.dist.py_modules, ['x'])  # b.d removed with b
        self.assertEqual(self.dist.ext_modules, [self.e1, self.e2])
        self.dist.exclude_package('c')
        self.assertEqual(self.dist.packages, [])
        self.assertEqual(self.dist.py_modules, ['x'])
        self.assertEqual(self.dist.ext_modules, [self.e1])  # c.y removed
        # test removals from unspecified options
        makeSetup().exclude_package('x')

    def testIncludeExclude(self):
        # remove an extension
        self.dist.exclude(ext_modules=[self.e1])
        self.assertEqual(self.dist.ext_modules, [self.e2])
        # add it back in
        self.dist.include(ext_modules=[self.e1])
        self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])
        # should not add duplicate
        self.dist.include(ext_modules=[self.e1])
        self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])

    def testExcludePackages(self):
        """Bulk exclude() of all packages mirrors repeated exclude_package()."""
        self.dist.exclude(packages=['c','b','a'])
        self.assertEqual(self.dist.packages, [])
        self.assertEqual(self.dist.py_modules, ['x'])
        self.assertEqual(self.dist.ext_modules, [self.e1])

    def testEmpty(self):
        # include/exclude on a distribution with no options must not blow up.
        dist = makeSetup()
        dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
        dist = makeSetup()
        dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2])

    def testContents(self):
        """has_contents_for() turns False once a package is excluded."""
        self.assert_(self.dist.has_contents_for('a'))
        self.dist.exclude_package('a')
        self.assert_(not self.dist.has_contents_for('a'))
        self.assert_(self.dist.has_contents_for('b'))
        self.dist.exclude_package('b')
        self.assert_(not self.dist.has_contents_for('b'))
        self.assert_(self.dist.has_contents_for('c'))
        self.dist.exclude_package('c')
        self.assert_(not self.dist.has_contents_for('c'))

    def testInvalidIncludeExclude(self):
        """Unknown options and wrongly-typed values raise DistutilsSetupError."""
        self.assertRaises(DistutilsSetupError,
            self.dist.include, nonexistent_option='x'
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, nonexistent_option='x'
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.include, packages={'x':'y'}
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, packages={'x':'y'}
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.include, ext_modules={'x':'y'}
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, ext_modules={'x':'y'}
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.include, package_dir=['q']
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, package_dir=['q']
        )
class FeatureTests(TestCase):
    """Tests for the (legacy) setuptools Feature mechanism."""

    def setUp(self):
        self.req = Require('Distutils','1.0.3','distutils')
        # 'bar' is disabled on the command line below ('--without-bar'),
        # 'baz' is mandatory (optional=False), 'dwim' is unavailable.
        self.dist = makeSetup(
            features={
                'foo': Feature("foo",standard=True,require_features=['baz',self.req]),
                'bar': Feature("bar", standard=True, packages=['pkg.bar'],
                               py_modules=['bar_et'], remove=['bar.ext'],
                ),
                'baz': Feature(
                        "baz", optional=False, packages=['pkg.baz'],
                        scripts = ['scripts/baz_it'],
                        libraries=[('libfoo','foo/foofoo.c')]
                ),
                'dwim': Feature("DWIM", available=False, remove='bazish'),
            },
            script_args=['--without-bar', 'install'],
            packages = ['pkg.bar', 'pkg.foo'],
            py_modules = ['bar_et', 'bazish'],
            ext_modules = [Extension('bar.ext',['bar.c'])]
        )

    def testDefaults(self):
        """include_by_default() is True only for standard *available* features."""
        self.assert_(not
            Feature(
                "test",standard=True,remove='x',available=False
            ).include_by_default()
        )
        self.assert_(
            Feature("test",standard=True,remove='x').include_by_default()
        )
        # Feature must have either kwargs, removes, or require_features
        self.assertRaises(DistutilsSetupError, Feature, "test")

    def testAvailability(self):
        # Including an unavailable feature is a platform error.
        self.assertRaises(
            DistutilsPlatformError,
            self.dist.features['dwim'].include_in, self.dist
        )

    def testFeatureOptions(self):
        """Each optional feature grows --with-X/--without-X install options."""
        dist = self.dist
        self.assert_(
            ('with-dwim',None,'include DWIM') in dist.feature_options
        )
        self.assert_(
            ('without-dwim',None,'exclude DWIM (default)') in dist.feature_options
        )
        self.assert_(
            ('with-bar',None,'include bar (default)') in dist.feature_options
        )
        self.assert_(
            ('without-bar',None,'exclude bar') in dist.feature_options
        )
        self.assertEqual(dist.feature_negopt['without-foo'],'with-foo')
        self.assertEqual(dist.feature_negopt['without-bar'],'with-bar')
        self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim')
        # 'baz' is mandatory, so it gets no negative option at all.
        self.assert_(not 'without-baz' in dist.feature_negopt)

    def testUseFeatures(self):
        """Feature selection rewrites packages/modules/exts accordingly."""
        dist = self.dist
        self.assertEqual(dist.with_foo,1)
        self.assertEqual(dist.with_bar,0)   # disabled via --without-bar
        self.assertEqual(dist.with_baz,1)
        self.assert_(not 'bar_et' in dist.py_modules)
        self.assert_(not 'pkg.bar' in dist.packages)
        self.assert_('pkg.baz' in dist.packages)
        self.assert_('scripts/baz_it' in dist.scripts)
        self.assert_(('libfoo','foo/foofoo.c') in dist.libraries)
        self.assertEqual(dist.ext_modules,[])
        self.assertEqual(dist.require_features, [self.req])
        # If we ask for bar, it should fail because we explicitly disabled
        # it on the command line
        self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar')

    def testFeatureWithInvalidRemove(self):
        # Removing an item that no feature provides aborts setup entirely.
        self.assertRaises(
            SystemExit, makeSetup, features = {'x':Feature('x', remove='y')}
        )
class TestCommandTests(TestCase):
    """Behaviour of the setuptools 'test' command and its options."""

    def testTestIsCommand(self):
        """The 'test' command object is a genuine distutils Command."""
        cmd = makeSetup().get_command_obj('test')
        self.assert_(isinstance(cmd, distutils.cmd.Command))

    def testLongOptSuiteWNoDefault(self):
        """--test-suite on the command line sets test_suite."""
        dist = makeSetup(script_args=['test','--test-suite=foo.tests.suite'])
        cmd = dist.get_command_obj('test')
        cmd.ensure_finalized()
        self.assertEqual(cmd.test_suite, 'foo.tests.suite')

    def testDefaultSuite(self):
        """test_suite given to setup() becomes the command default."""
        cmd = makeSetup(test_suite='bar.tests.suite').get_command_obj('test')
        cmd.ensure_finalized()
        self.assertEqual(cmd.test_suite, 'bar.tests.suite')

    def testDefaultWModuleOnCmdLine(self):
        """A -m module on the command line overrides the setup() default."""
        cmd = makeSetup(
            test_suite='bar.tests',
            script_args=['test','-m','foo.tests']
        ).get_command_obj('test')
        cmd.ensure_finalized()
        self.assertEqual(cmd.test_module, 'foo.tests')
        self.assertEqual(cmd.test_suite, 'foo.tests.test_suite')

    def testConflictingOptions(self):
        """Specifying both -m and -s is rejected at finalization time."""
        cmd = makeSetup(
            script_args=['test','-m','bar.tests', '-s','foo.tests.suite']
        ).get_command_obj('test')
        self.assertRaises(DistutilsOptionError, cmd.ensure_finalized)

    def testNoSuite(self):
        """With no options at all, test_suite stays None."""
        cmd = makeSetup().get_command_obj('test')
        cmd.ensure_finalized()
        self.assertEqual(cmd.test_suite, None)
| apache-2.0 |
zxsted/scipy | benchmarks/benchmarks/interpolate.py | 37 | 6314 | from __future__ import division, absolute_import, print_function
import numpy as np
from .common import run_monitored, set_mem_rlimit, Benchmark
try:
from scipy.stats import spearmanr
except ImportError:
pass
try:
import scipy.interpolate as interpolate
except ImportError:
pass
class Leaks(Benchmark):
    # Reported metric: peak-memory growth ratio across increasing repeats.
    unit = "relative increase with repeats"

    def track_leaks(self):
        """Run griddata in a monitored subprocess with growing repeat counts
        and flag a probable leak if peak memory correlates with repeats."""
        set_mem_rlimit()
        # Setup temp file, make it fit in memory
        repeats = [2, 5, 10, 50, 200]
        peak_mems = []
        for repeat in repeats:
            # The benchmark body is executed as source text in a fresh
            # monitored process so its peak RSS can be measured in isolation.
            code = """
import numpy as np
from scipy.interpolate import griddata

def func(x, y):
    return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2

grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])

for t in range(%(repeat)d):
    for method in ['nearest', 'linear', 'cubic']:
        griddata(points, values, (grid_x, grid_y), method=method)
""" % dict(repeat=repeat)
            _, peak_mem = run_monitored(code)
            peak_mems.append(peak_mem)
        # Significant positive rank correlation between the repeat count and
        # peak memory strongly suggests memory is not being released.
        corr, p = spearmanr(repeats, peak_mems)
        if p < 0.05:
            print("*"*79)
            print("PROBABLE MEMORY LEAK")
            print("*"*79)
        else:
            print("PROBABLY NO MEMORY LEAK")
        return max(peak_mems) / min(peak_mems)
class BenchPPoly(Benchmark):
    """Time evaluation of a random piecewise cubic polynomial."""

    def setup(self):
        np.random.seed(1234)
        num_intervals, order = 55, 3
        breakpoints = np.sort(np.random.random(num_intervals + 1))
        coefficients = np.random.random((3, num_intervals))
        self.pp = interpolate.PPoly(coefficients, breakpoints)
        self.xp = np.linspace(0, 1, 100)

    def time_evaluation(self):
        self.pp(self.xp)
class GridData(Benchmark):
    """Time griddata() over several grid sizes and interpolation methods."""

    param_names = ['n_grids', 'method']
    params = [
        [10j, 100j, 1000j],
        ['nearest', 'linear', 'cubic'],
    ]

    def setup(self, n_grids, method):
        def sample_func(x, y):
            return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
        self.func = sample_func
        self.grid_x, self.grid_y = np.mgrid[0:1:n_grids, 0:1:n_grids]
        self.points = np.random.rand(1000, 2)
        self.values = sample_func(self.points[:, 0], self.points[:, 1])

    def time_evaluation(self, n_grids, method):
        interpolate.griddata(self.points, self.values,
                             (self.grid_x, self.grid_y), method=method)
class Interpolate1d(Benchmark):
    """Time construction of interp1d for various kinds and sample counts."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100],
        ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'],
    ]

    def setup(self, n_samples, method):
        abscissa = np.arange(n_samples)
        self.x = abscissa
        self.y = np.exp(-abscissa / 3.0)

    def time_interpolate(self, n_samples, method):
        interpolate.interp1d(self.x, self.y, kind=method)
class Interpolate2d(Benchmark):
    """Time construction of interp2d on a sin(r^2) surface."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100],
        ['linear', 'cubic', 'quintic'],
    ]

    def setup(self, n_samples, method):
        half_span = n_samples / 2.
        self.x = np.arange(-half_span, half_span, 0.25)
        self.y = np.arange(-half_span, half_span, 0.25)
        self.xx, self.yy = np.meshgrid(self.x, self.y)
        self.z = np.sin(self.xx**2 + self.yy**2)

    def time_interpolate(self, n_samples, method):
        interpolate.interp2d(self.x, self.y, self.z, kind=method)
class Rbf(Benchmark):
    """Time radial basis function construction in 1-D and 2-D."""

    param_names = ['n_samples', 'function']
    params = [
        [10, 50, 100],
        ['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate'],
    ]

    def setup(self, n_samples, function):
        half_span = n_samples / 2.
        # 1-D problem: sin() sampled on integers.
        self.x = np.arange(n_samples)
        self.y = np.sin(self.x)
        # 2-D problem: Gaussian bump on a finer grid.
        self.X = np.arange(-half_span, half_span, 0.25)
        self.Y = np.arange(-half_span, half_span, 0.25)
        self.z = np.exp(-self.X**2 - self.Y**2)

    def time_rbf_1d(self, n_samples, function):
        interpolate.Rbf(self.x, self.y, function=function)

    def time_rbf_2d(self, n_samples, function):
        interpolate.Rbf(self.X, self.Y, self.z, function=function)
class UnivariateSpline(Benchmark):
    """Time UnivariateSpline fits for several degrees and sample counts."""

    param_names = ['n_samples', 'degree']
    params = [
        [10, 50, 100],
        [3, 4, 5],
    ]

    def setup(self, n_samples, degree):
        half_span = n_samples / 2.
        self.x = np.arange(-half_span, half_span, 0.25)
        self.y = np.exp(-self.x**2) + 0.1 * np.random.randn(*self.x.shape)

    def time_univariate_spline(self, n_samples, degree):
        interpolate.UnivariateSpline(self.x, self.y, k=degree)
class BivariateSpline(Benchmark):
    """
    Author: josef-pktd and scipy mailinglist example
    'http://scipy-user.10969.n7.nabble.com/BivariateSpline-examples\
    -and-my-crashing-python-td14801.html'
    """
    param_names = ['n_samples']
    params = [
        [10, 20, 30]
    ]

    def setup(self, n_samples):
        # Flattened regular grid of sample points.
        x = np.arange(0, n_samples, 0.5)
        y = np.arange(0, n_samples, 0.5)
        x, y = np.meshgrid(x, y)
        x = x.ravel()
        y = y.ravel()
        # Domain bounds padded by 1 so all samples are interior.
        xmin = x.min()-1
        xmax = x.max()+1
        ymin = y.min()-1
        ymax = y.max()+1
        # Knots pulled inward by s to keep them strictly inside the data.
        s = 1.1
        self.yknots = np.linspace(ymin+s,ymax-s,10)
        self.xknots = np.linspace(xmin+s,xmax-s,10)
        # Noisy sin() surface to fit.
        self.z = np.sin(x) + 0.1*np.random.normal(size=x.shape)
        self.x = x
        self.y = y

    def time_smooth_bivariate_spline(self, n_samples):
        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)

    def time_lsq_bivariate_spline(self, n_samples):
        interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)
class Interpolate(Benchmark):
    """Compare the cost of linear interpolation in numpy vs scipy."""

    param_names = ['n_samples', 'module']
    params = [
        [10, 50, 100],
        ['numpy', 'scipy'],
    ]

    def setup(self, n_samples, module):
        self.x = np.arange(n_samples)
        self.y = np.exp(-self.x / 3.0)
        self.z = np.random.normal(size=self.x.shape)

    def time_interpolate(self, n_samples, module):
        if module == 'numpy':
            np.interp(self.z, self.x, self.y)
        else:
            interpolate.interp1d(self.x, self.y, kind="linear")
| bsd-3-clause |
mikelikespie/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/case/case3_2.py | 12 | 1614 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case3_2(Case):
    """Send a valid text frame, the same frame with reserved bit RSV2 set,
    then a Ping.

    RFC 6455 requires the RSV bits to be zero unless an extension negotiated
    during the opening handshake defines their meaning; on receiving a
    nonzero RSV otherwise, the peer must fail the connection (close code
    1002, protocol error), so the trailing Ping must never be answered.
    """

    DESCRIPTION = """Send small text message, then send again with <b>RSV = 2</b>, then send Ping."""

    # Typo fix: "negoiated" -> "negotiated" in the user-facing report text.
    EXPECTATION = """Echo for first message is received, but then connection is failed immediately, since RSV must be 0, when no extension defining RSV meaning has been negotiated. The Pong is not received."""

    def onOpen(self):
        # The first (legal) frame must be echoed back verbatim.
        payload = "Hello, world!"
        self.expected[Case.OK] = [("message", payload, False)]
        self.expected[Case.NON_STRICT] = []
        # Peer must fail the connection with 1002 after the RSV-violating
        # frame; a clean TCP close is not required.
        self.expectedClose = {"closedByMe":False,"closeCode":[self.p.CLOSE_STATUS_CODE_PROTOCOL_ERROR],"requireClean":False}
        self.p.sendFrame(opcode = 1, payload = payload)
        self.p.sendFrame(opcode = 1, payload = payload, rsv = 2)  # violation
        self.p.sendFrame(opcode = 9)  # Ping; no Pong expected
        self.p.killAfter(1)
| apache-2.0 |
andreyvit/pyjamas | examples/toggle/Toggle.py | 6 | 2646 | """ testint our demo slider
"""
import pyjd # dummy in pyjs
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.ButtonBase import ButtonBase
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.ToggleButton import ToggleButton
from pyjamas.ui.PushButton import PushButton
from pyjamas import DOM
from pyjamas.ui.Image import Image
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
class Toggle:
    """Demo app showing ToggleButton and PushButton widgets."""

    def onModuleLoad(self):
        """Build the widget tree and attach it to the root panel."""
        self.label = Label("Not set yet")
        self.button = Button("Probe button", self)
        # Same logo image is used for every button face.
        self.image_up = Image("./images/logo.png")
        self.image_up3 = Image("./images/logo.png")
        self.image_down = Image("./images/logo.png")
        self.image_down3 = Image("./images/logo.png")
        # Image-faced toggle routes clicks to this object's onClick.
        self.toggle = ToggleButton(self.image_up, self.image_down, self)
        # Text-faced toggle uses a dedicated handler instead.
        self.toggle2 = ToggleButton("up", "down", getattr(self, "onToggleUD"))
        self.push = PushButton(self.image_up3, self.image_down3)
        self.vpanel = VerticalPanel()
        self.togglePanel = HorizontalPanel()
        self.togglePanel.setSpacing(10)
        self.togglePanel.add(self.toggle)
        self.togglePanel.add(self.toggle2)
        self.togglePanel.add(self.push)
        self.vpanel.add(self.label)
        self.vpanel.add(self.button)
        self.vpanel.add(self.togglePanel)
        RootPanel().add(self.vpanel)
        # Tracks the up/down state mirrored by the label text.
        self.i = False

    def onToggleUD(self, sender):
        """Click handler for the text toggle: report its down state."""
        self.label.setText(" Toggle2 isdown: "+str(self.toggle2.isDown()))

    def onClick(self, sender):
        """Shared click handler for the probe button and the image toggle."""
        if sender == self.button:
            # Probe button flips the image toggle programmatically.
            if self.i:
                self.i = False
                text = ">>>>UP<<<<"
                self.toggle.setCurrentFace(self.toggle.getUpFace())
            else:
                self.i = True
                text = ">>>DOWN<<<"
                self.toggle.setCurrentFace(self.toggle.getDownFace())
            #self.label.setText("self.toggle.style_name: "+
            #    self.toggle.style_name+", self.toggle.getStyleName():"+
            #    self.toggle.getStyleName()+" ")
            self.label.setText(text)
        elif sender == self.toggle:
            # Direct click on the toggle: sync self.i to the new state.
            text = ">>>DOWN<<<"
            if self.i: text = ">>>>UP<<<<"
            self.i = not self.i
            self.label.setText(text+" isdown: "+str(self.toggle.isDown()))
if __name__ == "__main__":
    # Desktop entry point: pyjd boots a browser widget when running under
    # pyjamas-desktop and is a no-op stub when compiled by pyjs.
    pyjd.setup("./public/Toggle.html")
    app = Toggle()
    app.onModuleLoad()
    pyjd.run()
| apache-2.0 |
vaygr/ansible | docs/bin/dump_keywords.py | 27 | 2748 | #!/usr/bin/env python
import optparse
import re
from distutils.version import LooseVersion
import jinja2
import yaml
from jinja2 import Environment, FileSystemLoader
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
template_file = 'playbooks_keywords.rst.j2'
oblist = {}
clist = []
class_list = [Play, Role, Block, Task]
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options]',
description='Generate playbook keyword documentation from code and descriptions',
)
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="../templates", help="directory containing Jinja2 templates")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files")
p.add_option("-d", "--docs-source", action="store", dest="docs", default=None, help="Source for attribute docs")
(options, args) = p.parse_args()
# Collect the documented attributes of each playbook object type, then
# render them through the Jinja2 template into an rst page.
for aclass in class_list:
    aobj = aclass()
    name = type(aobj).__name__

    if options.docs:
        with open(options.docs) as f:
            docs = yaml.safe_load(f)
    else:
        docs = {}

    # build ordered list to loop over and dict with attributes
    # NOTE(review): the keys here are attribute-name strings, so `x.private`
    # would raise AttributeError if a name ever contained 'private'; the
    # filter likely meant to inspect the Attribute object -- confirm upstream.
    clist.append(name)
    oblist[name] = dict((x, aobj.__dict__['_attributes'][x])
                        for x in aobj.__dict__['_attributes']
                        if 'private' not in x or not x.private)

    # pick up docs if they exist
    for a in oblist[name]:
        if a in docs:
            oblist[name][a] = docs[a]
        else:
            oblist[name][a] = ' UNDOCUMENTED!! '

    # loop is really with_ for users
    if name == 'Task':
        # BUG FIX: the second string literal used to sit on its own line as a
        # separate no-op statement, silently truncating this description;
        # parenthesizing makes the two literals concatenate into one value.
        oblist[name]['with_<lookup_plugin>'] = (
            'DEPRECATED: use ``loop`` instead, ``with_`` used to be how loops were defined, '
            'it can use any available lookup plugin to generate the item list')

    # local_action is implicit with action
    if 'action' in oblist[name]:
        oblist[name]['local_action'] = 'Same as action but also implies ``delegate_to: localhost``'

    # remove unusable (used to be private?)
    # BUG FIX: ('loop_args') is just a parenthesized string, so the loop
    # iterated over its characters and never removed anything; a one-element
    # tuple removes the intended attribute.
    for nouse in ('loop_args',):
        if nouse in oblist[name]:
            del oblist[name][nouse]

env = Environment(loader=FileSystemLoader(options.template_dir), trim_blocks=True,)
template = env.get_template(template_file)
outputname = options.output_dir + template_file.replace('.j2', '')

tempvars = {'oblist': oblist, 'clist': clist}

keyword_page = template.render(tempvars)

if LooseVersion(jinja2.__version__) < LooseVersion('2.10'):
    # jinja2 < 2.10's indent filter indents blank lines.  Cleanup
    keyword_page = re.sub(' +\n', '\n', keyword_page)

with open(outputname, 'w') as f:
    f.write(keyword_page)
alanjw/GreenOpenERP-Win-X86 | openerp/addons/document_webdav/cache.py | 67 | 1333 | import time
import heapq
def memoize(maxsize):
    """Decorator to 'memoize' a function, caching up to `maxsize` results.

    Results are keyed on ``repr(args)``; when the cache is full the entry
    with the oldest insertion timestamp is evicted (FIFO via a heap).
    The wrapper exposes ``hits`` and ``misses`` counters.

    BUG FIX: the original tracked the cache size in a ``cursize`` variable
    in the enclosing scope that was never actually updated (the wrapper only
    incremented a local copy), so the eviction branch never ran and the
    cache grew without bound.  Using ``len(cache)`` directly fixes that and
    also drops the Python-2-only ``dict.has_key``.
    """
    def decorating_function(f):
        cache = {}   # map from key to value
        heap = []    # (timestamp, key) pairs, oldest first

        def wrapper(*args):
            key = repr(args)
            if key not in cache:
                if len(cache) >= maxsize:
                    # pop oldest element
                    _, oldkey = heapq.heappop(heap)
                    cache.pop(oldkey)
                # insert this element
                cache[key] = f(*args)
                heapq.heappush(heap, (time.time(), key))
                wrapper.misses += 1
            else:
                wrapper.hits += 1
            return cache[key]

        wrapper.__doc__ = f.__doc__
        wrapper.__name__ = f.__name__
        wrapper.hits = wrapper.misses = 0
        return wrapper
    return decorating_function
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JackDanger/sentry | src/sentry/utils/managers.py | 40 | 1799 | """
sentry.utils.db
~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
class InstanceManager(object):
    """Lazily imports, instantiates and caches a configurable class list."""

    def __init__(self, class_list=None, instances=True):
        # `instances` controls whether all() yields instances or classes.
        self.instances = instances
        self.update(class_list if class_list is not None else [])

    def get_class_list(self):
        return self.class_list

    def add(self, class_path):
        """Append a dotted class path and invalidate the cache."""
        self.cache = None
        self.class_list.append(class_path)

    def remove(self, class_path):
        """Remove a dotted class path and invalidate the cache."""
        self.cache = None
        self.class_list.remove(class_path)

    def update(self, class_list):
        """
        Updates the class list and wipes the cache.
        """
        self.cache = None
        self.class_list = class_list

    def all(self):
        """
        Returns a list of cached instances.
        """
        paths = list(self.get_class_list())
        if not paths:
            self.cache = []
            return []
        if self.cache is not None:
            return self.cache

        loaded = []
        for path in paths:
            module_name, class_name = path.rsplit('.', 1)
            try:
                module = __import__(module_name, {}, {}, class_name)
                cls = getattr(module, class_name)
                loaded.append(cls() if self.instances else cls)
            except Exception:
                # Import failures are logged and skipped, never fatal.
                logging.getLogger('sentry.errors').exception(
                    'Unable to import %s', path)
                continue

        self.cache = loaded
        return loaded
| bsd-3-clause |
WillChilds-Klein/mistress-mapreduce | mrs/master.py | 4 | 34507 | # Mrs
# Copyright 2008-2012 Brigham Young University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mrs Master"""
from __future__ import division, print_function
# Note that the actual backlog may be limited by the OS--in Linux see:
# /proc/sys/net/core/somaxconn (which seems to be 128 by default)
import collections
import os
import socket
import sys
import threading
import time
from . import http
from . import registry
from . import computed_data
from . import runner
from . import tasks
from .version import __version__
try:
from xmlrpc.client import Fault, ProtocolError
except ImportError:
from xmlrpclib import Fault, ProtocolError
import logging
logger = logging.getLogger('mrs')
del logging
# Python 3 compatibility
PY3 = sys.version_info[0] == 3
if not PY3:
range = xrange
INITIAL_PEON_THREADS = 4
MAX_PEON_THREADS = 20
class MasterRunner(runner.TaskRunner):
    """A TaskRunner that assigns tasks to remote slaves.

    Attributes:
        idle_slaves: a set of slaves that are ready to be assigned
        result_maps: a dict mapping a dataset id to the corresponding result
            map, which keeps track of which slaves produced which data
    """

    def __init__(self, *args):
        super(MasterRunner, self).__init__(*args)
        self.slaves = None          # Slaves registry; built in run()
        self.idle_slaves = IdleSlaves()
        self.dead_slaves = set()
        self.result_maps = {}       # dataset id -> ResultMap
        self.rpc_interface = None
        self.rpc_thread = None
        self.sched_pipe = None      # read end; written by Slaves to wake us

    def run(self):
        """Main loop: start workers and RPC, then drive the event loop."""
        for i in range(INITIAL_PEON_THREADS):
            self.start_peon_thread()
        self.sched_timing_stats()
        # The write end goes to Slaves; any write wakes read_sched_pipe().
        self.sched_pipe, sched_write_pipe = os.pipe()
        self.event_loop.register_fd(self.sched_pipe, self.read_sched_pipe)
        self.slaves = Slaves(sched_write_pipe, self.chore_queue,
                self.opts.mrs__timeout, self.opts.mrs__pingdelay)
        try:
            self.start_rpc_server()
            self.event_loop.run(timeout_function=self.maintain_chore_queue)
        finally:
            if self.opts.mrs__runfile:
                # Rewrite the runfile with a hyphen to signify that
                # execution is complete.
                with open(self.opts.mrs__runfile, 'w') as f:
                    print('-', file=f)
            self.slaves.disconnect_all()
        return self.exitcode

    def start_rpc_server(self):
        """Start the XML-RPC server thread and advertise its port."""
        program_hash = registry.object_hash(self.program_class)
        self.rpc_interface = MasterInterface(self.slaves, program_hash,
                self.opts, self.args, self.jobdir)
        # Port 0 lets the OS pick a free port; read it back below.
        port = getattr(self.opts, 'mrs__port', 0)
        rpc_server = http.ThreadingRPCServer(('', port), self.rpc_interface)
        if port == 0:
            port = rpc_server.server_address[1]
        self.rpc_thread = threading.Thread(target=rpc_server.serve_forever,
                name='RPC Server')
        self.rpc_thread.daemon = True
        self.rpc_thread.start()
        logger.info('Listening on port %s.' % port)
        if self.opts.mrs__runfile:
            # First runfile write: the live port, for slaves/scripts to read.
            with open(self.opts.mrs__runfile, 'w') as f:
                print(port, file=f)

    def maintain_chore_queue(self):
        """Maintains the chore_queue and returns the timeout value for poll."""
        chore_queue = self.chore_queue
        timeleft = chore_queue.time_to_reschedule()
        if (timeleft is not None) and (timeleft <= 0):
            chore_queue.reschedule()
            timeleft = chore_queue.time_to_reschedule()
        return timeleft

    def read_sched_pipe(self):
        """Reads currently available data from sched_pipe.

        The actual data is ignored--the pipe is just a mechanism for alerting
        the runner that the schedule method needs to be called.
        """
        os.read(self.sched_pipe, 4096)
        self.schedule()

    def schedule(self):
        """Check for any changed slaves and make task assignments."""
        # Re-queue tasks whose slaves reported failure.
        for dataset_id, task_index in self.slaves.get_failed_tasks():
            self.task_lost(dataset_id, task_index)
        changed_slaves = self.slaves.get_changed_slaves()
        results = self.slaves.get_results()
        # Reconcile liveness: move slaves between idle/dead bookkeeping sets.
        for slave in changed_slaves:
            if slave.alive():
                self.dead_slaves.discard(slave)
                if not slave.busy():
                    logger.debug('Adding slave %s to idle_slaves.' % slave.id)
                    self.idle_slaves.add(slave)
            else:
                self.dead_slaves.add(slave)
                self.idle_slaves.discard(slave)
                # A dead slave's in-flight task must be rescheduled.
                assignment = slave.current_assignment()
                if assignment is not None:
                    dataset_id, task_index = assignment
                    self.task_lost(dataset_id, task_index)
        # Record newly produced sources and notify the task bookkeeping.
        for slave, dataset_id, source, urls in results:
            try:
                self.result_maps[dataset_id].add(slave, source)
            except KeyError:
                # Dataset already deleted, so this source should be removed.
                # NOTE(review): remove_sources() iterates pairs; passing the
                # bare tuple (slave, source) instead of [(slave, source)]
                # looks wrong -- confirm against remove_sources().
                self.remove_sources(dataset_id, (slave, source), delete=True)
            # Note: if this is the last task in the dataset, this will wake
            # up datasets.  Thus this happens _after_ slaves are added to
            # the idle_slaves set.
            success = self.task_done(dataset_id, source, urls)
            if not success:
                logger.info('Ignoring a redundant result (%s, %s).' %
                        (dataset_id, source))
        # Add one peon thread for each new active slave (minus dead slaves).
        if self.peon_thread_count < MAX_PEON_THREADS:
            slave_count = len(self.slaves) - len(self.dead_slaves)
            new_peon_thread_count = slave_count + INITIAL_PEON_THREADS
            for i in range(new_peon_thread_count - self.peon_thread_count):
                self.start_peon_thread()
        # Assign as many pending tasks as there are idle slaves.
        chore_list = []
        while self.idle_slaves:
            # find the next job to run
            next = self.next_task()
            if next is None:
                # TODO: duplicate currently assigned tasks here (while adding
                # a mechanism for unassigning duplicate tasks once the result
                # comes back).
                break
            dataset_id, source = next
            dataset = self.datasets[dataset_id]
            slave = None
            if dataset.affinity:
                # Slave-task affinity: when possible, assign to the slave that
                # computed the task with the same source id in the input
                # dataset.
                input_id = dataset.input_id
                try:
                    input_results = self.result_maps[input_id]
                except KeyError:
                    input_results = None
                if input_results is not None:
                    for s in input_results.get(source):
                        if s in self.idle_slaves:
                            self.idle_slaves.remove(s)
                            slave = s
                            break
            if slave is None:
                slave = self.idle_slaves.pop()
            if slave.busy():
                # Defensive: should not happen; drop the slave and retry task.
                logger.error('Slave %s mistakenly in idle_slaves.' % slave.id)
                self.task_lost(*next)
                continue
            slave.prepare_assignment(next, self.datasets)
            # Actual RPC send happens on the chore queue, off this thread.
            chore_item = slave.send_assignment, ()
            chore_list.append(chore_item)
        self.chore_queue.do_many(chore_list)

    def available_workers(self):
        """Returns the total number of idle workers."""
        return len(self.idle_slaves)

    def make_tasklist(self, dataset):
        # Also create the per-dataset ResultMap used for affinity/cleanup.
        tasklist = super(MasterRunner, self).make_tasklist(dataset)
        self.result_maps[dataset.id] = ResultMap()
        return tasklist

    def remove_dataset(self, ds):
        # For computed datasets, tell each producing slave to drop its data
        # (deleting files only if the dataset is not marked permanent).
        if isinstance(ds, computed_data.ComputedData):
            delete = not ds.permanent
            slave_source_list = self.result_maps[ds.id].all()
            self.remove_sources(ds.id, slave_source_list, delete)
            del self.result_maps[ds.id]
        super(MasterRunner, self).remove_dataset(ds)

    def remove_sources(self, dataset_id, slave_source_list, delete):
        """Remove a single source from a slave.

        The `slave_source_list` parameter is a list of slave-source pairs.
        The `delete` parameter specifies whether the file should actually be
        removed from disk.
        """
        items = [(slave.remove, (dataset_id, source, delete))
                for slave, source in slave_source_list]
        self.chore_queue.do_many(items)

    def sched_timing_stats(self):
        # Periodically emit timing stats, if an interval was configured.
        if self.opts.mrs__timing_interval > 0:
            self.chore_queue.do(self.do_timing_stats,
                    delay=self.opts.mrs__timing_interval)

    def do_timing_stats(self):
        # Emit one report, then reschedule the next one.
        self.timing_stats()
        self.sched_timing_stats()

    def debug_status(self):
        """Print slave bookkeeping to stderr for interactive debugging."""
        super(MasterRunner, self).debug_status()
        print('Idle slaves:', (', '.join(str(slave.id)
                for slave in self.idle_slaves._all_slaves)), file=sys.stderr)
        print('Dead slaves:', (', '.join(str(slave.id)
                for slave in self.dead_slaves)), file=sys.stderr)
class ResultMap(object):
    """Track which slaves produced which sources in a single dataset."""

    def __init__(self):
        self._sources = collections.defaultdict(list)

    def add(self, slave, source):
        """Records that the given slave computed the given source."""
        self._sources[source].append(slave)

    def get(self, source):
        """Returns the list of slaves that computed the given source."""
        return self._sources[source]

    def all(self):
        """Iterate over slave, source pairs."""
        return ((slave, source)
                for source, slaves in self._sources.items()
                for slave in slaves)
class MasterInterface(object):
    """Public XML-RPC Interface

    Note that any method beginning with "xmlrpc_" will be exposed to
    remote hosts.
    """

    def __init__(self, slaves, program_hash, opts, args, jobdir):
        """Initialize the master's RPC interface.

        Requires `master`, `program_hash` (the result of registry.object_hash
        on the program class), and `opts` (which is a optparse.Values instance
        containing command-line arguments on the master.
        """
        self.slaves = slaves
        self.program_hash = program_hash
        # Only non-None options are forwarded to signing-in slaves.
        opts_iter = vars(opts).items()
        self.opts_dict = dict((k, v) for k, v in opts_iter if v is not None)
        self.args = args
        self.jobdir = jobdir

    def xmlrpc_whoami(self, request):
        """Return the host of the connecting client.

        The client can't always tell which IP address they're actually using
        from the server's perspective.  This solves that problem.
        """
        host = request.client.host
        return host

    @http.uses_host
    def xmlrpc_signin(self, version, cookie, slave_port, program_hash,
            host=None):
        """Slave reporting for duty.

        It returns the slave_id and option dictionary.  Returns
        (-1, '', '', {}, []) if the signin is rejected.
        """
        # Version and code-hash must both match exactly, or the slave is
        # running incompatible code and is turned away.
        if version != __version__:
            logger.warning('Slave tried to sign in with mismatched version.')
            return -1, '', '', {}, []
        if self.program_hash != program_hash:
            # The slaves are running different code than the master is.
            logger.warning('Slave tried to sign in with nonmatching code.')
            return -1, '', '', {}, []
        slave = self.slaves.new_slave(host, slave_port, cookie)
        if slave is None:
            logger.warning('Slave tried to sign in during shutdown.')
            return -1, '', '', {}, []
        logger.info('New slave %s on host %s' % (slave.id, host))
        return (slave.id, host, self.jobdir, self.opts_dict, self.args)

    @http.uses_host
    def xmlrpc_ready(self, slave_id, cookie, host=None):
        """Slave is ready for work."""
        # The cookie authenticates the caller; lookups fail on a mismatch.
        slave = self.slaves.get_slave(slave_id, cookie)
        if slave is not None:
            logger.debug('Slave %s ready.' % slave_id)
            slave.update_timestamp()
            self.slaves.slave_ready(slave)
            return True
        else:
            logger.error('Invalid slave reported ready (host %s, id %s).'
                    % (host, slave_id))
            return False

    @http.uses_host
    def xmlrpc_done(self, slave_id, dataset_id, source, urls, cookie,
            host=None):
        """Slave is done with the task it was working on.

        The output is available in the list of urls.
        """
        slave = self.slaves.get_slave(slave_id, cookie)
        if slave is not None:
            logger.debug('Slave %s reported completion of task: %s, %s'
                    % (slave_id, dataset_id, source))
            slave.update_timestamp()
            self.slaves.slave_result(slave, dataset_id, source, urls)
            return True
        else:
            logger.error('Invalid slave reported done (host %s, id %s).'
                    % (host, slave_id))
            return False

    @http.uses_host
    def xmlrpc_failed(self, slave_id, dataset_id, task_index, cookie,
            host=None):
        """Slave failed to complete the task it was working on."""
        slave = self.slaves.get_slave(slave_id, cookie)
        if slave is not None:
            logger.error('Slave %s reported failure of task: %s, %s'
                    % (slave_id, dataset_id, task_index))
            slave.update_timestamp()
            self.slaves.slave_failed(slave, dataset_id, task_index)
            return True
        else:
            logger.error('Invalid slave reported failed (host %s, id %s).'
                    % (host, slave_id))
            return False

    @http.uses_host
    def xmlrpc_ping(self, slave_id, cookie, host=None):
        """Slave checking if we're still here."""
        # A successful ping only refreshes the slave's liveness timestamp.
        slave = self.slaves.get_slave(slave_id, cookie)
        if slave is not None:
            logger.debug('Received a ping from slave %s.' % slave_id)
            slave.update_timestamp()
            return True
        else:
            logger.error('Invalid slave sent ping (host %s, id %s).'
                    % (host, slave_id))
            return False
class RemoteSlave(object):
    """The master's view of a remote slave.

    The master can use this object to make assignments, check status, etc.

    Concurrency notes (from the code below):
    - All RPC traffic to the slave goes through ``_rpc`` and is serialized
      by ``_rpc_lock``.
    - The current task assignment is guarded by ``_assignment_lock``.
    - ``_pinglock`` coordinates the ping loop with ``resurrect`` so that
      exactly one ping "task" is active at a time.
    """

    def __init__(self, slave_id, host, port, cookie, slaves):
        self.id = slave_id
        self.host = host
        self.port = port
        # Shared secret; echoed on every RPC to authenticate both sides.
        self.cookie = cookie
        # Back-reference to the owning Slaves container.
        self.slaves = slaves
        self.chore_queue = slaves.chore_queue
        self.pingdelay = slaves.pingdelay
        uri = "http://%s:%s" % (host, port)
        self._rpc = http.TimeoutServerProxy(uri, slaves.rpc_timeout)
        self._rpc_lock = threading.Lock()
        self._assignment = None
        self._assignment_lock = threading.Lock()
        # A staged RPC call (callable and its args), prepared by
        # prepare_assignment and fired later by send_assignment.
        self._rpc_func = None
        self._rpc_args = None
        # The `_state` is either 'alive', 'failed', 'exiting', or 'exited'
        self._state = 'alive'
        # The pinging_active variable assures that only one ping "task" is
        # active at a time.
        self._pinging_active = False
        self._pinglock = threading.Lock()
        self._schedule_ping(self.pingdelay)
        self.update_timestamp()

    def check_cookie(self, cookie):
        # True when the caller presented this slave's shared secret.
        return (cookie == self.cookie)

    def busy(self):
        """Indicates whether the slave has a current assignment."""
        return (self._assignment is not None)

    def pop_assignment(self):
        """Removes and returns the current assignment."""
        with self._assignment_lock:
            assignment = self._assignment
            self._assignment = None
            return assignment

    def current_assignment(self):
        """Returns the current assignment.

        Note that this could change in another thread (so be careful).
        You usually want pop_assignment instead.
        """
        return self._assignment

    def set_assignment(self, new_assignment):
        """Sets the assignment to new_assignment.

        Assumes that the assignment is currently None.  Returns True if the
        operation succeeds.
        """
        with self._assignment_lock:
            if self._assignment is None:
                self._assignment = new_assignment
                return True
            else:
                return False

    def clear_assignment(self, old_assignment):
        """Sets the assignment to None.

        Assumes that the assignment is currently `old_assignment` or `None`.
        Returns True if the old assignment was set or False if it was None.
        """
        with self._assignment_lock:
            if self._assignment == old_assignment:
                self._assignment = None
                return True
            elif self._assignment is None:
                # Already cleared (e.g. a duplicate completion report).
                return False
            else:
                logger.error("clear_assignment called with an invalid argument")
                return False

    def prepare_assignment(self, assignment, datasets):
        """Sets up an RPC request to make the slave work on the assignment.

        Called from the Runner.  Note that the assignment will _not_ actually
        happen until `send_assignment` is subsequently called.  This is the
        responsibility of the caller.
        """
        success = self.set_assignment(assignment)
        # The caller must only prepare an assignment for an idle slave.
        assert success
        dataset_id, task_index = assignment
        logger.debug('Assigning task to slave %s: %s, %s'
                % (self.id, dataset_id, task_index))
        dataset = datasets[dataset_id]
        task = dataset.get_task(task_index, datasets, '')
        task_args = task.to_args()
        with self._rpc_lock:
            # Only one staged RPC at a time; send_assignment clears these.
            assert self._rpc_args is None
            self._rpc_func = self._rpc.start_task
            self._rpc_args = task_args + (self.cookie,)

    def send_assignment(self):
        # Fire the RPC staged by prepare_assignment.  On any failure the
        # slave is declared dead via critical_failure (outside the lock).
        with self._rpc_lock:
            if not self.alive():
                logger.warning('Canceling RPC call because slave %s is no'
                        ' longer alive.' % self.id)
                return
            logger.debug('Sending assignment to slave %s: %s, %s'
                    % (self.id, self._rpc_args[2], self._rpc_args[3]))
            try:
                success = self._rpc_func(*self._rpc_args)
            except Fault as f:
                logger.error('Fault in RPC to slave %s: %s' %
                        (self.id, f.faultString))
                success = False
            except ProtocolError as e:
                logger.error('Protocol error in RPC to slave %s: %s' %
                        (self.id, e.errmsg))
                success = False
            except http.ConnectionFailed:
                logger.error('Connection failed in RPC to slave %s' % self.id)
                success = False
            if success:
                self.update_timestamp()
            # The staged call has been consumed either way.
            self._rpc_func = None
            self._rpc_args = None
        if not success:
            logger.info('Failed to assign a task to slave %s.' % self.id)
            self.critical_failure()

    def remove(self, dataset_id, source, delete):
        # Ask the slave to drop (and optionally delete) data for a source.
        with self._rpc_lock:
            if self._state not in ('alive', 'exiting'):
                # Note: the master may disconnect the slave while remove
                # requests are still pending--this isn't really a bad thing.
                return
            logger.debug('Sending remove request to slave %s: %s, %s'
                    % (self.id, dataset_id, source))
            try:
                self._rpc.remove(dataset_id, source, delete, self.cookie)
                success = True
            except Fault as f:
                logger.error('Fault in remove call to slave %s: %s'
                        % (self.id, f.faultString))
                success = False
            except ProtocolError as e:
                logger.error('Protocol error in remove call to slave %s: %s'
                        % (self.id, e.errmsg))
                success = False
            except http.ConnectionFailed:
                logger.error('Connection failed in remove call to slave %s' %
                        self.id)
                success = False
            if success:
                self.update_timestamp()
        if not success:
            logger.info('Failed to remove data on slave %s.' % self.id)
            self.critical_failure()

    def update_timestamp(self):
        """Set the timestamp to the current time."""
        if self._state in ('exiting', 'exited'):
            # A departing slave's liveness no longer matters.
            return
        if self._state == 'failed':
            # Activity from a slave we thought was dead: bring it back.
            logger.warning('Updating timestamp of the failed slave %s.'
                    % self.id)
            self.resurrect()
        self.timestamp = time.time()

    def critical_failure(self):
        """Report that a slave had a critical failure.

        Note that we can get multiple failures for one slave.
        """
        if self._state == 'alive':
            self._state = 'failed'
            logger.error('Lost slave %s (%s).' % (self.id, self.host))
            self.slaves.slave_dead(self)

    def alive(self):
        """Checks whether the Slave is responding."""
        return self._state == 'alive'

    def exited(self):
        """Checks whether the Slave has been given an exit request."""
        return self._state == 'exited'

    def resurrect(self):
        # Return a failed slave to the 'alive' state, restarting the ping
        # loop if it had stopped.  Returns True if a resurrection happened.
        if self._state == 'failed':
            logger.warning('Resurrected slave %s (%s)' %
                    (self.id, self.host))
            with self._pinglock:
                self._state = 'alive'
                restart_pinging = not self._pinging_active
                if restart_pinging:
                    self._pinging_active = True
            # Scheduling happens outside the pinglock.
            if restart_pinging:
                self._schedule_ping(self.pingdelay)
            return True
        else:
            return False

    def ping(self):
        """Ping the slave and schedule a follow-up ping."""
        # The only place where we can change from not alive to alive is in
        # resurrect, which holds the pinglock to ensure that we don't
        # accidentally stop pinging during a resurrect.
        if not self.alive():
            with self._pinglock:
                if not self.alive():
                    self._pinging_active = False
                    return
        delta = time.time() - self.timestamp
        if delta < self.pingdelay:
            # Recent traffic already proved liveness; just reschedule.
            self._schedule_ping(self.pingdelay - delta)
            return
        if not self._rpc_lock.acquire(False):
            # RPC socket busy; try again later.
            self._schedule_ping(self.pingdelay)
            return
        try:
            logger.debug('Sending ping to slave %s.' % self.id)
            self._rpc.ping(self.cookie)
            success = True
        except Fault as f:
            logger.error('Fault in ping to slave %s: %s'
                    % (self.id, f.faultString))
            success = False
        except ProtocolError as e:
            logger.error('Protocol error in ping to slave %s: %s'
                    % (self.id, e.errmsg))
            success = False
        except http.ConnectionFailed:
            logger.error('Connection failed in ping to slave %s' % self.id)
            success = False
        finally:
            self._rpc_lock.release()
        if success:
            self.update_timestamp()
            self._schedule_ping(self.pingdelay)
        else:
            # Mark pinging as inactive _before_ setting slave as failed.
            self._pinging_active = False
            self.critical_failure()

    def _schedule_ping(self, delay=None):
        """Schedules a ping to occur after the given delay.

        Ensures that the ping keeps repeating, i.e., when a ping finishes,
        a new ping is immediately scheduled.
        """
        logger.debug('Scheduling a ping to slave %s.' % self.id)
        self._pinging_active = True
        self.chore_queue.do(self.ping, delay=delay)

    def disconnect(self, write_pipe=None):
        """Disconnect the slave by sending a quit request."""
        if self._state not in ('exiting', 'exited'):
            self._state = 'exiting'
            # The actual exit RPC runs on the chore queue.
            self.chore_queue.do(self.send_exit, (write_pipe,))

    def send_exit(self, write_pipe=None):
        # Best-effort exit request: errors are logged but the slave is
        # marked 'exited' regardless.
        with self._rpc_lock:
            try:
                logger.debug('Sending a exit request to slave %s' % self.id)
                self._rpc.exit(self.cookie)
            except Fault as f:
                logger.error('Fault in exit to slave %s: %s'
                        % (self.id, f.faultString))
            except ProtocolError as e:
                logger.error('Protocol error in exit to slave %s: %s'
                        % (self.id, e.errmsg))
            except http.ConnectionFailed:
                logger.error('Connection failed in exit to slave %s' % self.id)
            self._state = 'exited'
            self._rpc = None
        if write_pipe is not None:
            # Signal completion to a waiter (see Slaves.disconnect_all).
            os.write(write_pipe, b'\0')

    def __repr__(self):
        return ('RemoteSlave(%s, %s, %s, %s, %s)' % (self.id, self.host,
                self.port, self.cookie, repr(self.slaves)))
class Slaves(object):
    """List of remote slaves.

    Thread-safety: the slave map is guarded by ``_lock``; the result,
    changed-slave, and failed-task queues are deques, which support
    thread-safe appends and pops.
    """

    def __init__(self, sched_pipe, chore_queue, rpc_timeout, pingdelay):
        # Write end of a pipe used to wake up the runner's scheduler loop.
        self._sched_pipe = sched_pipe
        self.chore_queue = chore_queue
        self.rpc_timeout = rpc_timeout
        self.pingdelay = pingdelay
        self._lock = threading.Lock()
        self._next_slave_id = 0
        self._slaves = {}
        self._accepting_new_slaves = True
        # Note that collections.deque is documented to be thread-safe.
        self._changed_slaves = collections.deque()
        self._results = collections.deque()
        self._failed_tasks = collections.deque()

    def trigger_sched(self):
        """Wakes up the runner for scheduling by sending it a byte."""
        os.write(self._sched_pipe, b'\0')

    def get_slave(self, slave_id, cookie):
        """Find the slave associated with the given slave_id.

        Returns None for an unknown id or a mismatched cookie.
        """
        with self._lock:
            slave = self._slaves.get(slave_id, None)
        if slave is not None and slave.check_cookie(cookie):
            return slave
        else:
            return None

    def new_slave(self, host, slave_port, cookie):
        """Add and return a new slave.

        Also set slave.id for the new slave.  Note that the slave will not
        be added to the idle queue until push_idle is called.  Returns None
        during shutdown (when new slaves are no longer accepted).
        """
        with self._lock:
            if not self._accepting_new_slaves:
                return None
            slave_id = self._next_slave_id
            self._next_slave_id += 1
            slave = RemoteSlave(slave_id, host, slave_port, cookie, self)
            self._slaves[slave_id] = slave
            return slave

    def slave_ready(self, slave):
        # A dead slave that reports ready is first resurrected.
        if not slave.alive():
            if not slave.resurrect():
                return
        if slave.busy():
            logger.error('Slave %s reported ready but has an assignment; '
                    'check the slave logs for errors.' % slave.id)
        self._changed_slaves.append(slave)
        self.trigger_sched()

    def slave_result(self, slave, dataset_id, task_index, urls):
        """Called when a slave reports a successfully completed assignment.

        Note that in the case of retried timeouts, this may be called
        multiple times.
        """
        # clear_assignment returns False on a duplicate report.
        success = slave.clear_assignment((dataset_id, task_index))
        if success:
            self._results.append((slave, dataset_id, task_index, urls))
            self._changed_slaves.append(slave)
            self.trigger_sched()
        else:
            logger.error("Ignoring a possibly duplicate slave_result call.")

    def slave_failed(self, slave, dataset_id, task_index):
        """Called when a slave reports a failed assignment.

        Note that in the case of retried timeouts, this may be called
        multiple times.
        """
        success = slave.clear_assignment((dataset_id, task_index))
        if success:
            self._failed_tasks.append((dataset_id, task_index))
            self._changed_slaves.append(slave)
            self.trigger_sched()
        else:
            logger.error("Ignoring a possibly duplicate slave_failed call.")

    def slave_dead(self, slave):
        # Just flag the slave as changed; the runner reacts on wakeup.
        self._changed_slaves.append(slave)
        self.trigger_sched()

    def get_results(self):
        """Return and reset the list of results.

        Each entry is a (slave, dataset_id, task_index, urls) tuple.
        """
        results = []
        while True:
            try:
                results.append(self._results.popleft())
            except IndexError:
                return results

    def get_changed_slaves(self):
        """Return and reset the set of changed slaves."""
        changed = set()
        while True:
            try:
                changed.add(self._changed_slaves.pop())
            except IndexError:
                return changed

    def get_failed_tasks(self):
        """Return and reset the list of failed (dataset_id, task_index) tasks."""
        failed_tasks = []
        while True:
            try:
                failed_tasks.append(self._failed_tasks.popleft())
            except IndexError:
                return failed_tasks

    def disconnect_all(self):
        """Sends an exit request to the slaves and waits for completion."""
        with self._lock:
            self._accepting_new_slaves = False
        # Each slave writes to the write_pipe when the disconnect completes.
        read_pipe, write_pipe = os.pipe()
        for slave in self._slaves.values():
            slave.disconnect(write_pipe)
        keep_going = False
        for slave in self._slaves.values():
            if not slave.exited():
                keep_going = True
                break
        while keep_going:
            # The actual data read is irrelevant--this just lets us block.
            os.read(read_pipe, 4096)
            keep_going = False
            for slave in self._slaves.values():
                if not slave.exited():
                    keep_going = True
                    break

    def __len__(self):
        """Returns the total number of slaves (including dead slaves)."""
        return len(self._slaves)
class IdleSlaves(object):
    """A priority-queue-like container of Slave objects.

    Slaves are grouped by host, and `pop` returns a slave from the host
    with the most idle slaves, spreading work across hosts.

    Attributes:
        _host_map: A map from a host to the corresponding set of slaves.
        _counter: A dictionary that maps a count c to a set of hosts with c
            idle slaves.
    """

    def __init__(self):
        self._host_map = collections.defaultdict(set)
        self._counter = collections.defaultdict(set)
        self._all_slaves = set()
        # Largest number of idle slaves on any single host.
        self._max_count = 0

    def add(self, slave):
        # NOTE(review): assumes `slave` is not already present; adding the
        # same slave twice would not grow slave_set but would still move
        # the host between counter buckets -- confirm callers never do so.
        host = slave.host
        # Remove the host's slave set from the counter.
        slave_set = self._host_map[host]
        self._remove_from_counter(host, len(slave_set))
        # Add the host to the slave_set.
        slave_set.add(slave)
        self._all_slaves.add(slave)
        # Reinsert the slave_set with its new count.
        self._add_to_counter(host, len(slave_set))

    def remove(self, slave):
        """Remove the given slave from the set.

        If it is not a member, raise a KeyError.
        """
        self._all_slaves.remove(slave)
        host = slave.host
        # Find the host's slave set and remove it from the counter.
        slave_set = self._host_map[host]
        self._remove_from_counter(host, len(slave_set))
        # Remove the host from the slave_set.
        slave_set.remove(slave)
        # Reinsert the slave_set with its new count.
        self._add_to_counter(host, len(slave_set))

    def discard(self, slave):
        """Remove the given slave from the set, if present."""
        self._all_slaves.discard(slave)
        host = slave.host
        # Find the host's slave set and remove it from the counter.
        slave_set = self._host_map[host]
        self._remove_from_counter(host, len(slave_set))
        # Discard the host from the slave_set.
        slave_set.discard(slave)
        # Reinsert the slave_set with its new count.
        self._add_to_counter(host, len(slave_set))

    def pop(self):
        """Remove and return a slave from the most idle host.

        Raises KeyError (from set.pop) if the container is empty.
        """
        # Find and remove a host with the maximum number of idle slaves.
        counter_set = self._counter[self._max_count]
        host = counter_set.pop()
        if not counter_set:
            self._max_count -= 1
        # Pop a slave and reinsert the host (with its new count).
        slave_set = self._host_map[host]
        slave = slave_set.pop()
        self._all_slaves.remove(slave)
        self._add_to_counter(host, len(slave_set))
        return slave

    def _add_to_counter(self, host, host_size):
        counter_set = self._counter[host_size]
        counter_set.add(host)
        if host_size > self._max_count:
            self._max_count = host_size

    def _consistency_check(self):
        # Debugging aid: verify that _max_count matches the largest
        # non-empty counter bucket.
        assert self._max_count >= 0
        max_count = 0
        for count in self._counter.keys():
            if self._counter[count] and count > max_count:
                max_count = count
        assert self._max_count == max_count

    def _remove_from_counter(self, host, host_size):
        # Hosts with zero idle slaves are never tracked in the counter.
        if host_size != 0:
            counter_set = self._counter[host_size]
            counter_set.remove(host)
            # Counts only ever change by one, so decrementing is enough to
            # keep _max_count pointing at a non-empty bucket.
            if self._max_count == host_size and not counter_set:
                self._max_count -= 1

    # Python 2 and Python 3 spellings of truthiness: non-empty iff some
    # host has at least one idle slave.
    def __nonzero__(self):
        return self._max_count > 0

    def __bool__(self):
        return self._max_count > 0

    def __contains__(self, slave):
        return slave in self._all_slaves

    def __len__(self):
        return len(self._all_slaves)
# vim: et sw=4 sts=4
| apache-2.0 |
jwlawson/tensorflow | tensorflow/contrib/py2tf/pyct/compiler.py | 3 | 2011 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting AST to code.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Use six for compatibility here.
import imp
import os
import tempfile
import astor
import gast
def ast_to_source(node, indentation):
    """Render *node* (a gast or native AST) back into Python source text."""
    # astor only understands native AST nodes, so convert gast nodes first.
    ast_node = gast.gast_to_ast(node) if isinstance(node, gast.AST) else node
    gen = astor.codegen.SourceGenerator(indentation, False,
                                        astor.string_repr.pretty_string)
    gen.visit(ast_node)
    gen.result.append('\n')
    return astor.source_repr.pretty_source(gen.result).lstrip()
def ast_to_object(node, indentation='  '):
    """Return the Python objects represented by given AST.

    The generated source is written to a named temporary file which is
    deliberately kept on disk (delete=False) so that tools such as `pdb`
    and `inspect` can show readable source for the compiled code.

    Args:
      node: The code to compile, as an AST object.
      indentation: The string to use for indentation.

    Returns:
      A module object containing the compiled source code.
    """
    source = ast_to_source(node, indentation)
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False)
    with tmp:
        tmp.write(source)
    # Module name is the temp file's basename without the '.py' suffix.
    module_name = os.path.basename(tmp.name[:-3])
    return imp.load_source(module_name, tmp.name)
| apache-2.0 |
sbidoul/odoo | openerp/report/common.py | 457 | 3337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Paper formats keyed by ISO name, as (width, height) in millimetres.
pageSize = {
    'A4': (210,297),
    # NOTE(review): ISO 216 A5 is 148 x 210 mm; (148.5,105) does not match
    # any portrait A-series size.  Confirm against the report templates
    # before changing, since existing layouts may depend on this value.
    'A5': (148.5,105)
}

# XML namespace prefixes for OpenDocument (ODT) files, in ElementTree's
# "{uri}tag" Clark-notation form.
odt_namespace = {
    "office":"{urn:oasis:names:tc:opendocument:xmlns:office:1.0}",
    "style":"{urn:oasis:names:tc:opendocument:xmlns:style:1.0}",
    "text":"{urn:oasis:names:tc:opendocument:xmlns:text:1.0}",
    "table":"{urn:oasis:names:tc:opendocument:xmlns:table:1.0}",
    "draw":"{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}",
    "fo":"{urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0}",
    "xlink":"{http://www.w3.org/1999/xlink}",
    "dc":"{http://purl.org/dc/elements/1.1/}",
    "meta":"{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}",
    "number":"{urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0}",
    "svg":"{urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0}",
    "chart":"{urn:oasis:names:tc:opendocument:xmlns:chart:1.0}",
    "dr3d":"{urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0}",
    "math":"{http://www.w3.org/1998/Math/MathML}",
    "form":"{urn:oasis:names:tc:opendocument:xmlns:form:1.0}",
    "script":"{urn:oasis:names:tc:opendocument:xmlns:script:1.0}",
    "ooo":"{http://openoffice.org/2004/office}",
    "ooow":"{http://openoffice.org/2004/writer}",
    "oooc":"{http://openoffice.org/2004/calc}",
    "dom":"{http://www.w3.org/2001/xml-events}" }

# The same prefix mapping for the legacy OpenOffice 1.x (SXW) format.
sxw_namespace = {
    "office":"{http://openoffice.org/2000/office}",
    "style":"{http://openoffice.org/2000/style}",
    "text":"{http://openoffice.org/2000/text}",
    "table":"{http://openoffice.org/2000/table}",
    "draw":"{http://openoffice.org/2000/drawing}",
    "fo":"{http://www.w3.org/1999/XSL/Format}",
    "xlink":"{http://www.w3.org/1999/xlink}",
    "dc":"{http://purl.org/dc/elements/1.1/}",
    "meta":"{http://openoffice.org/2000/meta}",
    "number":"{http://openoffice.org/2000/datastyle}",
    "svg":"{http://www.w3.org/2000/svg}",
    "chart":"{http://openoffice.org/2000/chart}",
    "dr3d":"{http://openoffice.org/2000/dr3d}",
    "math":"{http://www.w3.org/1998/Math/MathML}",
    "form":"{http://openoffice.org/2000/form}",
    "script":"{http://openoffice.org/2000/script}",
    "ooo":"{http://openoffice.org/2004/office}",
    "ooow":"{http://openoffice.org/2004/writer}",
    "oooc":"{http://openoffice.org/2004/calc}",
    "dom":"{http://www.w3.org/2001/xml-events}"}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yeraydiazdiaz/nonrel-blog | django/contrib/gis/tests/geogapp/tests.py | 100 | 4075 | """
Tests for geography support in PostGIS 1.5+
"""
from __future__ import absolute_import
import os
from django.contrib.gis import gdal
from django.contrib.gis.measure import D
from django.test import TestCase
from .models import City, County, Zipcode
class GeographyTest(TestCase):
    """Tests for PostGIS 1.5+ geography-typed columns."""

    def test01_fixture_load(self):
        "Ensure geography features loaded properly."
        self.assertEqual(8, City.objects.count())

    def test02_distance_lookup(self):
        "Testing GeoQuerySet distance lookup support on non-point geography fields."
        z = Zipcode.objects.get(code='77002')
        # Both spellings (distance_lte and dwithin) must select the same
        # cities within 500 miles of the zipcode polygon.
        cities1 = list(City.objects
                       .filter(point__distance_lte=(z.poly, D(mi=500)))
                       .order_by('name')
                       .values_list('name', flat=True))
        cities2 = list(City.objects
                       .filter(point__dwithin=(z.poly, D(mi=500)))
                       .order_by('name')
                       .values_list('name', flat=True))
        for cities in [cities1, cities2]:
            self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)

    def test03_distance_method(self):
        "Testing GeoQuerySet.distance() support on non-point geography fields."
        # `GeoQuerySet.distance` is not allowed on geometry fields.
        htown = City.objects.get(name='Houston')
        # NOTE(review): `qs` is never evaluated or asserted on; querysets
        # are lazy, so this only verifies that building the query does not
        # raise -- confirm that this is the intent.
        qs = Zipcode.objects.distance(htown.point)

    def test04_invalid_operators_functions(self):
        "Ensuring exceptions are raised for operators & functions invalid on geography fields."
        # Only a subset of the geometry functions & operator are available
        # to PostGIS geography types. For more information, visit:
        # http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
        z = Zipcode.objects.get(code='77002')
        # ST_Within not available.
        self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
        # `@` operator not available.
        self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
        # Regression test for #14060, `~=` was never really implemented for PostGIS.
        htown = City.objects.get(name='Houston')
        self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)

    def test05_geography_layermapping(self):
        "Testing LayerMapping support on models with geography fields."
        # There is a similar test in `layermap` that uses the same data set,
        # but the County model here is a bit different.
        # Silently skip when GDAL is unavailable.
        if not gdal.HAS_GDAL: return
        from django.contrib.gis.utils import LayerMapping
        # Getting the shapefile and mapping dictionary.
        shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
        co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
        co_mapping = {'name' : 'Name',
                      'state' : 'State',
                      'mpoly' : 'MULTIPOLYGON',
                      }
        # Reference county names, number of polygons, and state names.
        names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
        num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
        st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
        lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
        lm.save(silent=True, strict=True)
        for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
            # Geographies are always stored as SRID 4326.
            self.assertEqual(4326, c.mpoly.srid)
            self.assertEqual(num_poly, len(c.mpoly))
            self.assertEqual(name, c.name)
            self.assertEqual(state, c.state)

    def test06_geography_area(self):
        "Testing that Area calculations work on geography columns."
        # SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
        ref_area = 5439084.70637573
        tol = 5
        z = Zipcode.objects.area().get(code='77002')
        self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
| bsd-3-clause |
ryanmockabee/golfr | flask/lib/python3.6/site-packages/sqlalchemy/dialects/firebird/base.py | 33 | 28291 | # firebird/base.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: firebird
:name: Firebird
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute("select * from table")
row = result.fetchone()
return
Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\
where(empl.c.sales>100).\
values(dict(salary=empl.c.salary * 1.1))
print raises.fetchall()
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC,
SMALLINT, TEXT, TIME, TIMESTAMP, Integer)
# Firebird reserved words; the identifier preparer quotes any identifier
# found in this set.  Built as a set literal rather than set([...]) so no
# throwaway list is constructed (ruff C405).
RESERVED_WORDS = {
    "active", "add", "admin", "after", "all", "alter", "and", "any", "as",
    "asc", "ascending", "at", "auto", "avg", "before", "begin", "between",
    "bigint", "bit_length", "blob", "both", "by", "case", "cast", "char",
    "character", "character_length", "char_length", "check", "close",
    "collate", "column", "commit", "committed", "computed", "conditional",
    "connect", "constraint", "containing", "count", "create", "cross",
    "cstring", "current", "current_connection", "current_date",
    "current_role", "current_time", "current_timestamp",
    "current_transaction", "current_user", "cursor", "database", "date",
    "day", "dec", "decimal", "declare", "default", "delete", "desc",
    "descending", "disconnect", "distinct", "do", "domain", "double",
    "drop", "else", "end", "entry_point", "escape", "exception",
    "execute", "exists", "exit", "external", "extract", "fetch", "file",
    "filter", "float", "for", "foreign", "from", "full", "function",
    "gdscode", "generator", "gen_id", "global", "grant", "group",
    "having", "hour", "if", "in", "inactive", "index", "inner",
    "input_type", "insensitive", "insert", "int", "integer", "into", "is",
    "isolation", "join", "key", "leading", "left", "length", "level",
    "like", "long", "lower", "manual", "max", "maximum_segment", "merge",
    "min", "minute", "module_name", "month", "names", "national",
    "natural", "nchar", "no", "not", "null", "numeric", "octet_length",
    "of", "on", "only", "open", "option", "or", "order", "outer",
    "output_type", "overflow", "page", "pages", "page_size", "parameter",
    "password", "plan", "position", "post_event", "precision", "primary",
    "privileges", "procedure", "protected", "rdb$db_key", "read", "real",
    "record_version", "recreate", "recursive", "references", "release",
    "reserv", "reserving", "retain", "returning_values", "returns",
    "revoke", "right", "rollback", "rows", "row_count", "savepoint",
    "schema", "second", "segment", "select", "sensitive", "set", "shadow",
    "shared", "singular", "size", "smallint", "snapshot", "some", "sort",
    "sqlcode", "stability", "start", "starting", "starts", "statistics",
    "sub_type", "sum", "suspend", "table", "then", "time", "timestamp",
    "to", "trailing", "transaction", "trigger", "trim", "uncommitted",
    "union", "unique", "update", "upper", "user", "using", "value",
    "values", "varchar", "variable", "varying", "view", "wait", "when",
    "where", "while", "with", "work", "write", "year",
}
class _StringType(sqltypes.String):
    """Base for Firebird string types.

    Adds an optional `charset`, which the type compiler renders as a
    CHARACTER SET clause in DDL.
    """
    def __init__(self, charset=None, **kw):
        self.charset = charset
        super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
    """Firebird VARCHAR type.

    A length is required at DDL time; the type compiler raises
    CompileError when it is missing.
    """
    __visit_name__ = 'VARCHAR'
    def __init__(self, length=None, **kwargs):
        super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
    """Firebird CHAR type (fixed-length, with optional CHARACTER SET)."""
    __visit_name__ = 'CHAR'
    def __init__(self, length=None, **kwargs):
        super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
    """DateTime that coerces bare `date` bind values into `datetime`."""
    def bind_processor(self, dialect):
        def process(value):
            # Exact type check on purpose: datetime.datetime is a subclass
            # of datetime.date and must pass through unchanged; only plain
            # dates are promoted to midnight datetimes.
            if type(value) == datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            else:
                return value
        return process
# Overrides of generic SQLAlchemy types with Firebird-specific behavior.
colspecs = {
    sqltypes.DateTime: _FBDateTime
}

# Map type names as they appear in Firebird's system catalog (RDB$
# metadata) to SQLAlchemy types; used during table reflection.
ischema_names = {
    'SHORT': SMALLINT,
    'LONG': INTEGER,
    'QUAD': FLOAT,
    'FLOAT': FLOAT,
    'DATE': DATE,
    'TIME': TIME,
    'TEXT': TEXT,
    'INT64': BIGINT,
    'DOUBLE': FLOAT,
    'TIMESTAMP': TIMESTAMP,
    'VARYING': VARCHAR,
    'CSTRING': CHAR,
    'BLOB': BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
    """Renders SQLAlchemy type objects as Firebird DDL type strings."""

    def visit_boolean(self, type_, **kw):
        # This dialect has no native BOOLEAN; booleans are stored as
        # SMALLINT.
        return self.visit_SMALLINT(type_, **kw)

    def visit_datetime(self, type_, **kw):
        # Generic DateTime maps to Firebird's TIMESTAMP.
        return self.visit_TIMESTAMP(type_, **kw)

    def visit_TEXT(self, type_, **kw):
        # SUB_TYPE 1 is Firebird's textual BLOB.
        return "BLOB SUB_TYPE 1"

    def visit_BLOB(self, type_, **kw):
        # SUB_TYPE 0 is Firebird's binary BLOB.
        return "BLOB SUB_TYPE 0"

    def _extend_string(self, type_, basic):
        """Append a CHARACTER SET clause when the type carries a charset."""
        charset = getattr(type_, 'charset', None)
        if charset is None:
            return basic
        else:
            return '%s CHARACTER SET %s' % (basic, charset)

    def visit_CHAR(self, type_, **kw):
        basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
        return self._extend_string(type_, basic)

    def visit_VARCHAR(self, type_, **kw):
        # Firebird requires an explicit length for VARCHAR columns.
        if not type_.length:
            raise exc.CompileError(
                "VARCHAR requires a length on dialect %s" %
                self.dialect.name)
        basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
        return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosyncrasies"""
ansi_bind_rules = True
# def visit_contains_op_binary(self, binary, operator, **kw):
# cant use CONTAINING b.c. it's case insensitive.
# def visit_notcontains_op_binary(self, binary, operator, **kw):
# cant use NOT CONTAINING b.c. it's case insensitive.
def visit_now_func(self, fn, **kw):
    # now() is rendered as Firebird's CURRENT_TIMESTAMP pseudo-variable.
    return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
    # startswith() uses Firebird's STARTING WITH operator.
    return '%s STARTING WITH %s' % (
        binary.left._compiler_dispatch(self, **kw),
        binary.right._compiler_dispatch(self, **kw))
def visit_notstartswith_op_binary(self, binary, operator, **kw):
    # Negated form of the STARTING WITH operator.
    return '%s NOT STARTING WITH %s' % (
        binary.left._compiler_dispatch(self, **kw),
        binary.right._compiler_dispatch(self, **kw))
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).\
visit_alias(alias, asfrom=asfrom, **kwargs)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = isinstance(alias.name,
expression._truncated_label) and \
self._truncated_identifier("alias",
alias.name) or alias.name
return self.process(
alias.original, asfrom=asfrom, **kwargs) + \
" " + \
self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0])
start = self.process(func.clauses.clauses[1])
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2])
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def visit_length_func(self, function, **kw):
if self.dialect._version_two:
return "char_length" + self.function_argspec(function)
else:
return "strlen" + self.function_argspec(function)
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr, **kw)
else:
return ""
def default_from(self):
return " FROM rdb$database"
def visit_sequence(self, seq):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit_clause is not None:
result += "FIRST %s " % self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
result += "SKIP %s " % self.process(select._offset_clause, **kw)
if select._distinct:
result += "DISTINCT "
return result
def limit_clause(self, select, **kw):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
    """Firebird syntactic idiosyncrasies"""

    def visit_create_sequence(self, create):
        """Generate a ``CREATE GENERATOR`` statement for the sequence.

        Raises ``NotImplementedError`` for START WITH / INCREMENT BY,
        which Firebird generators do not support.
        """
        # no syntax for these
        # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
        #
        # Bug fix: the original raised ``NotImplemented(...)``.
        # ``NotImplemented`` is a non-callable singleton, so calling it
        # raised a confusing TypeError instead of the intended exception;
        # ``NotImplementedError`` is the correct exception class.
        if create.element.start is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support START WITH")
        if create.element.increment is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support INCREMENT BY")

        if self.dialect._version_two:
            # FB >= 2.0 understands the standard CREATE SEQUENCE spelling.
            return "CREATE SEQUENCE %s" % \
                self.preparer.format_sequence(create.element)
        else:
            return "CREATE GENERATOR %s" % \
                self.preparer.format_sequence(create.element)

    def visit_drop_sequence(self, drop):
        """Generate a ``DROP GENERATOR`` statement for the sequence."""
        if self.dialect._version_two:
            return "DROP SEQUENCE %s" % \
                self.preparer.format_sequence(drop.element)
        else:
            return "DROP GENERATOR %s" % \
                self.preparer.format_sequence(drop.element)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
    """Install Firebird specific reserved words."""

    reserved_words = RESERVED_WORDS
    # Additionally disallow a leading underscore in unquoted identifiers.
    illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
        ['_'])

    def __init__(self, dialect):
        # Schema names are never rendered (omit_schema=True).
        super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
    def fire_sequence(self, seq, type_):
        """Get the next value from the sequence using ``gen_id()``."""
        # Reads the next generator value via a scalar SELECT against the
        # rdb$database dummy table.
        return self._execute_scalar(
            "SELECT gen_id(%s, 1) FROM rdb$database" %
            self.dialect.identifier_preparer.format_sequence(seq),
            type_
        )
class FBDialect(default.DefaultDialect):
    """Firebird dialect"""
    name = 'firebird'

    # Firebird limits identifier names to 31 characters.
    max_identifier_length = 31

    supports_sequences = True
    sequences_optional = False
    supports_default_values = True
    postfetch_lastrowid = False

    supports_native_boolean = False

    # Unquoted names are stored upper-case on the server; SQLAlchemy's
    # case-insensitive convention is lower-case, so names are normalized.
    requires_name_normalize = True
    supports_empty_insert = False

    statement_compiler = FBCompiler
    ddl_compiler = FBDDLCompiler
    preparer = FBIdentifierPreparer
    type_compiler = FBTypeCompiler
    execution_ctx_cls = FBExecutionContext

    colspecs = colspecs
    ischema_names = ischema_names

    construct_arguments = []

    # defaults to dialect ver. 3,
    # will be autodetected off upon
    # first connect
    _version_two = True

    def initialize(self, connection):
        # Detect the engine's feature level: Firebird >= 2 or
        # Interbase >= 6 enable the "version two" code paths.
        super(FBDialect, self).initialize(connection)
        self._version_two = ('firebird' in self.server_version_info and
                             self.server_version_info >= (2, )
                             ) or \
                            ('interbase' in self.server_version_info and
                             self.server_version_info >= (6, )
                             )

        if not self._version_two:
            # TODO: whatever other pre < 2.0 stuff goes here
            self.ischema_names = ischema_names.copy()
            self.ischema_names['TIMESTAMP'] = sqltypes.DATE
            self.colspecs = {
                sqltypes.DateTime: sqltypes.DATE
            }

        # implicit RETURNING requires the version-two feature level;
        # honor an explicit user-supplied setting when present.
        self.implicit_returning = self._version_two and \
            self.__dict__.get('implicit_returning', True)

    def normalize_name(self, name):
        # Remove trailing spaces: FB uses a CHAR() type,
        # that is padded with spaces
        name = name and name.rstrip()
        if name is None:
            return None
        elif name.upper() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()):
            # All-upper, unreserved names are case-insensitive on the
            # server; present them lower-cased per SQLAlchemy convention.
            return name.lower()
        elif name.lower() == name:
            # An all-lower name can only exist if it was created quoted;
            # preserve the quoting requirement.
            return quoted_name(name, quote=True)
        else:
            return name

    def denormalize_name(self, name):
        # Inverse of normalize_name: case-insensitive names go back to
        # the upper-case form the database stores.
        if name is None:
            return None
        elif name.lower() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()):
            return name.upper()
        else:
            return name

    def has_table(self, connection, table_name, schema=None):
        """Return ``True`` if the given table exists, ignoring
        the `schema`."""
        tblqry = """
        SELECT 1 AS has_table FROM rdb$database
        WHERE EXISTS (SELECT rdb$relation_name
                      FROM rdb$relations
                      WHERE rdb$relation_name=?)
        """
        c = connection.execute(tblqry, [self.denormalize_name(table_name)])
        return c.first() is not None

    def has_sequence(self, connection, sequence_name, schema=None):
        """Return ``True`` if the given sequence (generator) exists."""
        genqry = """
        SELECT 1 AS has_sequence FROM rdb$database
        WHERE EXISTS (SELECT rdb$generator_name
                      FROM rdb$generators
                      WHERE rdb$generator_name=?)
        """
        c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
        return c.first() is not None

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return the names of non-system tables (views excluded)."""
        # there are two queries commonly mentioned for this.
        # this one, using view_blr, is at the Firebird FAQ among other places:
        # http://www.firebirdfaq.org/faq174/
        s = """
        select rdb$relation_name
        from rdb$relations
        where rdb$view_blr is null
        and (rdb$system_flag is null or rdb$system_flag = 0);
        """

        # the other query is this one.  It's not clear if there's really
        # any difference between these two.  This link:
        # http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
        # states them as interchangeable.  Some discussion at [ticket:2898]
        # SELECT DISTINCT rdb$relation_name
        # FROM rdb$relation_fields
        # WHERE rdb$system_flag=0 AND rdb$view_context IS NULL

        return [self.normalize_name(row[0]) for row in connection.execute(s)]

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return the names of non-system views."""
        # see http://www.firebirdfaq.org/faq174/
        s = """
        select rdb$relation_name
        from rdb$relations
        where rdb$view_blr is not null
        and (rdb$system_flag is null or rdb$system_flag = 0);
        """
        return [self.normalize_name(row[0]) for row in connection.execute(s)]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the source text of the named view, or None if absent."""
        qry = """
        SELECT rdb$view_source AS view_source
        FROM rdb$relations
        WHERE rdb$relation_name=?
        """
        rp = connection.execute(qry, [self.denormalize_name(view_name)])
        row = rp.first()
        if row:
            return row['view_source']
        else:
            return None

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Reflect the primary-key columns of the given table."""
        # Query to extract the PK/FK constrained fields of the given table
        keyqry = """
        SELECT se.rdb$field_name AS fname
        FROM rdb$relation_constraints rc
             JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
        """
        tablename = self.denormalize_name(table_name)
        # get primary key fields
        c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
        pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
        # NOTE: the PK constraint's own name is not reflected here.
        return {'constrained_columns': pkfields, 'name': None}

    @reflection.cache
    def get_column_sequence(self, connection,
                            table_name, column_name,
                            schema=None, **kw):
        """Heuristically find the generator feeding *column_name* via a
        trigger; returns ``{'name': <generator>}`` or ``None``."""
        tablename = self.denormalize_name(table_name)
        colname = self.denormalize_name(column_name)
        # Heuristic-query to determine the generator associated to a PK field
        # (joins trigger dependencies on the table/column to dependencies
        # on a generator; presumably rdb$trigger_type=1 selects BEFORE
        # INSERT triggers -- confirm against Firebird system-table docs)
        genqry = """
        SELECT trigdep.rdb$depended_on_name AS fgenerator
        FROM rdb$dependencies tabdep
             JOIN rdb$dependencies trigdep
                  ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
                     AND trigdep.rdb$depended_on_type=14
                     AND trigdep.rdb$dependent_type=2
             JOIN rdb$triggers trig ON
                    trig.rdb$trigger_name=tabdep.rdb$dependent_name
        WHERE tabdep.rdb$depended_on_name=?
          AND tabdep.rdb$depended_on_type=0
          AND trig.rdb$trigger_type=1
          AND tabdep.rdb$field_name=?
          AND (SELECT count(*)
               FROM rdb$dependencies trigdep2
               WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
        """
        genr = connection.execute(genqry, [tablename, colname]).first()
        if genr is not None:
            return dict(name=self.normalize_name(genr['fgenerator']))

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect the columns of the given table."""
        # Query to extract the details of all the fields of the given table
        tblqry = """
        SELECT r.rdb$field_name AS fname,
               r.rdb$null_flag AS null_flag,
               t.rdb$type_name AS ftype,
               f.rdb$field_sub_type AS stype,
               f.rdb$field_length/
                    COALESCE(cs.rdb$bytes_per_character,1) AS flen,
               f.rdb$field_precision AS fprec,
               f.rdb$field_scale AS fscale,
               COALESCE(r.rdb$default_source,
                        f.rdb$default_source) AS fdefault
        FROM rdb$relation_fields r
             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
             JOIN rdb$types t
              ON t.rdb$type=f.rdb$field_type AND
                    t.rdb$field_name='RDB$FIELD_TYPE'
             LEFT JOIN rdb$character_sets cs ON
                    f.rdb$character_set_id=cs.rdb$character_set_id
        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
        ORDER BY r.rdb$field_position
        """
        # get the PK, used to determine the eventual associated sequence
        pk_constraint = self.get_pk_constraint(connection, table_name)
        pkey_cols = pk_constraint['constrained_columns']

        tablename = self.denormalize_name(table_name)
        # get all of the fields for this table
        c = connection.execute(tblqry, [tablename])
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            name = self.normalize_name(row['fname'])
            orig_colname = row['fname']

            # get the data type
            colspec = row['ftype'].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (colspec, name))
                coltype = sqltypes.NULLTYPE
            elif issubclass(coltype, Integer) and row['fprec'] != 0:
                # An integer storage type with a precision is really a
                # NUMERIC; the server reports the scale negated.
                coltype = NUMERIC(
                    precision=row['fprec'],
                    scale=row['fscale'] * -1)
            elif colspec in ('VARYING', 'CSTRING'):
                coltype = coltype(row['flen'])
            elif colspec == 'TEXT':
                coltype = TEXT(row['flen'])
            elif colspec == 'BLOB':
                # sub-type 1 is a textual BLOB, anything else is binary
                if row['stype'] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # does it have a default value?
            defvalue = None
            if row['fdefault'] is not None:
                # the value comes down as "DEFAULT 'value'": there may be
                # more than one whitespace around the "DEFAULT" keyword
                # and it may also be lower case
                # (see also http://tracker.firebirdsql.org/browse/CORE-356)
                defexpr = row['fdefault'].lstrip()
                assert defexpr[:8].rstrip().upper() == \
                    'DEFAULT', "Unrecognized default value: %s" % \
                    defexpr
                defvalue = defexpr[8:].strip()
                if defvalue == 'NULL':
                    # Redundant
                    defvalue = None
            col_d = {
                'name': name,
                'type': coltype,
                'nullable': not bool(row['null_flag']),
                'default': defvalue,
                'autoincrement': 'auto',
            }

            # names that are not upper-case must have been created quoted
            if orig_colname.lower() == orig_colname:
                col_d['quote'] = True

            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d['sequence'] = seq_d

            cols.append(col_d)
        return cols

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Reflect the foreign keys of the given table."""
        # Query to extract the details of each UK/FK of the given table
        fkqry = """
        SELECT rc.rdb$constraint_name AS cname,
               cse.rdb$field_name AS fname,
               ix2.rdb$relation_name AS targetrname,
               se.rdb$field_name AS targetfname
        FROM rdb$relation_constraints rc
             JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
             JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
             JOIN rdb$index_segments cse ON
                    cse.rdb$index_name=ix1.rdb$index_name
             JOIN rdb$index_segments se
                  ON se.rdb$index_name=ix2.rdb$index_name
                     AND se.rdb$field_position=cse.rdb$field_position
        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
        ORDER BY se.rdb$index_name, se.rdb$field_position
        """
        tablename = self.denormalize_name(table_name)

        c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
        # accumulate one record per constraint name; columns arrive as
        # one row per (constrained, referred) column pair
        fks = util.defaultdict(lambda: {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        })

        for row in c:
            cname = self.normalize_name(row['cname'])
            fk = fks[cname]
            if not fk['name']:
                fk['name'] = cname
                fk['referred_table'] = self.normalize_name(row['targetrname'])
            fk['constrained_columns'].append(
                self.normalize_name(row['fname']))
            fk['referred_columns'].append(
                self.normalize_name(row['targetfname']))
        return list(fks.values())

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Reflect plain indexes (constraint- and FK-backed ones excluded)."""
        qry = """
        SELECT ix.rdb$index_name AS index_name,
               ix.rdb$unique_flag AS unique_flag,
               ic.rdb$field_name AS field_name
        FROM rdb$indices ix
             JOIN rdb$index_segments ic
                  ON ix.rdb$index_name=ic.rdb$index_name
             LEFT OUTER JOIN rdb$relation_constraints
                  ON rdb$relation_constraints.rdb$index_name =
                        ic.rdb$index_name
        WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
          AND rdb$relation_constraints.rdb$constraint_type IS NULL
        ORDER BY index_name, ic.rdb$field_position
        """
        c = connection.execute(qry, [self.denormalize_name(table_name)])

        indexes = util.defaultdict(dict)
        for row in c:
            indexrec = indexes[row['index_name']]
            if 'name' not in indexrec:
                indexrec['name'] = self.normalize_name(row['index_name'])
                indexrec['column_names'] = []
                indexrec['unique'] = bool(row['unique_flag'])

            indexrec['column_names'].append(
                self.normalize_name(row['field_name']))

        return list(indexes.values())
| mit |
jotes/boto | boto/sts/credentials.py | 153 | 8210 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import datetime
import boto.utils
from boto.compat import json
class Credentials(object):
    """Temporary security credentials returned by AWS STS.

    :ivar access_key: The AccessKeyID.
    :ivar secret_key: The SecretAccessKey.
    :ivar session_token: The session token that must be passed with
        requests to use the temporary credentials
    :ivar expiration: The timestamp for when the credentials will expire
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.access_key = None
        self.secret_key = None
        self.session_token = None
        self.expiration = None
        self.request_id = None

    @classmethod
    def from_json(cls, json_doc):
        """
        Create and return a new Session Token based on the contents
        of a JSON document.

        :type json_doc: str
        :param json_doc: A string containing a JSON document with a
            previously saved Credentials object.
        """
        instance = cls()
        instance.__dict__.update(json.loads(json_doc))
        return instance

    @classmethod
    def load(cls, file_path):
        """
        Create and return a new Session Token based on the contents
        of a previously saved JSON-format file.

        :type file_path: str
        :param file_path: The fully qualified path to the JSON-format
            file containing the previously saved Session Token information.
        """
        with open(file_path) as fp:
            return cls.from_json(fp.read())

    def startElement(self, name, attrs, connection):
        # No nested XML elements to hand off.
        return None

    def endElement(self, name, value, connection):
        # Map XML element names onto the corresponding attributes;
        # unknown elements are silently ignored.
        attr = {
            'AccessKeyId': 'access_key',
            'SecretAccessKey': 'secret_key',
            'SessionToken': 'session_token',
            'Expiration': 'expiration',
            'RequestId': 'request_id',
        }.get(name)
        if attr is not None:
            setattr(self, attr, value)

    def to_dict(self):
        """
        Return a Python dict containing the important information
        about this Session Token.
        """
        return {
            'access_key': self.access_key,
            'secret_key': self.secret_key,
            'session_token': self.session_token,
            'expiration': self.expiration,
            'request_id': self.request_id,
        }

    def save(self, file_path):
        """
        Persist a Session Token to a file in JSON format.

        :type path: str
        :param path: The fully qualified path to the file where the
            the Session Token data should be written.  Any previous
            data in the file will be overwritten.  To help protect
            the credentials contained in the file, the permissions
            of the file will be set to readable/writable by owner only.
        """
        with open(file_path, 'w') as fp:
            json.dump(self.to_dict(), fp)
        # owner-only read/write to protect the secret material
        os.chmod(file_path, 0o600)

    def is_expired(self, time_offset_seconds=0):
        """
        Checks to see if the Session Token is expired or not.  By default
        it will check to see if the Session Token is expired as of the
        moment the method is called.  However, you can supply an
        optional parameter which is the number of seconds of offset
        into the future for the check.  For example, if you supply
        a value of 5, this method will return a True if the Session
        Token will be expired 5 seconds from this moment.

        :type time_offset_seconds: int
        :param time_offset_seconds: The number of seconds into the future
            to test the Session Token for expiration.
        """
        reference = datetime.datetime.utcnow()
        if time_offset_seconds:
            reference += datetime.timedelta(seconds=time_offset_seconds)
        # expired when the expiration timestamp is at or before the
        # (possibly offset) reference time
        return boto.utils.parse_ts(self.expiration) <= reference
class FederationToken(object):
    """Result of an STS GetFederationToken call.

    :ivar credentials: A Credentials object containing the credentials.
    :ivar federated_user_arn: ARN specifying federated user using credentials.
    :ivar federated_user_id: The ID of the federated user using credentials.
    :ivar packed_policy_size: A percentage value indicating the size of
        the policy in packed form
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.credentials = None
        self.federated_user_arn = None
        self.federated_user_id = None
        self.packed_policy_size = None
        self.request_id = None

    def startElement(self, name, attrs, connection):
        # Only the nested <Credentials> element becomes a child object.
        if name != 'Credentials':
            return None
        self.credentials = Credentials()
        return self.credentials

    def endElement(self, name, value, connection):
        string_fields = {
            'Arn': 'federated_user_arn',
            'FederatedUserId': 'federated_user_id',
            'RequestId': 'request_id',
        }
        if name == 'PackedPolicySize':
            # reported as text in the response; store as an int
            self.packed_policy_size = int(value)
        elif name in string_fields:
            setattr(self, string_fields[name], value)
class AssumedRole(object):
    """Result of an STS AssumeRole call.

    :ivar user: The assumed role user.
    :ivar credentials: A Credentials object containing the credentials.
    """

    def __init__(self, connection=None, credentials=None, user=None):
        self._connection = connection
        self.credentials = credentials
        self.user = user

    def startElement(self, name, attrs, connection):
        # Hand off parsing of the two nested complex elements.
        if name == 'Credentials':
            self.credentials = Credentials()
            return self.credentials
        if name == 'AssumedRoleUser':
            self.user = User()
            return self.user

    def endElement(self, name, value, connection):
        # No scalar fields of interest at this level.
        pass
class User(object):
    """The identity assuming a role.

    :ivar arn: The arn of the user assuming the role.
    :ivar assume_role_id: The identifier of the assumed role.
    """

    def __init__(self, arn=None, assume_role_id=None):
        self.arn = arn
        self.assume_role_id = assume_role_id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        field = {'Arn': 'arn', 'AssumedRoleId': 'assume_role_id'}.get(name)
        if field is not None:
            setattr(self, field, value)
class DecodeAuthorizationMessage(object):
    """Result of an STS DecodeAuthorizationMessage call.

    :ivar request_id: The request ID.
    :ivar decoded_message: The decoded authorization message (may be JSON).
    """

    def __init__(self, request_id=None, decoded_message=None):
        self.request_id = request_id
        self.decoded_message = decoded_message

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        # note: the response uses a lower-case 'requestId' element here
        if name == 'requestId':
            self.request_id = value
        elif name == 'DecodedMessage':
            self.decoded_message = value
| mit |
MisterTea/HyperNEAT | boost_1_57_0/libs/python/pyste/tests/GCCXMLParserUT.py | 13 | 11736 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import sys
sys.path.append('../src')
import unittest
import tempfile
import os.path
from Pyste import GCCXMLParser
from Pyste.declarations import *
class Tester(unittest.TestCase):
    """Base test case with shared assertion helpers for the declaration
    objects produced by GCCXMLParser."""

    def TestConstructor(self, class_, method, visib):
        # A constructor is a non-virtual, non-static Constructor instance
        # with no return type and the expected visibility.
        self.assert_(isinstance(method, Constructor))
        self.assertEqual(method.FullName(), class_.FullName() + '::' + method.name)
        self.assertEqual(method.result, None)
        self.assertEqual(method.visibility, visib)
        self.assert_(not method.virtual)
        self.assert_(not method.abstract)
        self.assert_(not method.static)

    def TestDefaultConstructor(self, class_, method, visib):
        # Default constructor: a constructor flagged IsDefault().
        self.TestConstructor(class_, method, visib)
        self.assert_(method.IsDefault())

    def TestCopyConstructor(self, class_, method, visib):
        # Copy constructor: exactly one parameter of type 'const T&'.
        self.TestConstructor(class_, method, visib)
        self.assertEqual(len(method.parameters), 1)
        param = method.parameters[0]
        self.TestType(
            param,
            ReferenceType,
            class_.FullName(),
            'const %s&' % class_.FullName(),
            True)
        self.assert_(method.IsCopy())

    def TestType(self, type_, classtype_, name, fullname, const):
        # Check a type object's class, name, full name and constness.
        self.assert_(isinstance(type_, classtype_))
        self.assertEqual(type_.name, name)
        self.assertEqual(type_.namespace, None)
        self.assertEqual(type_.FullName(), fullname)
        self.assertEqual(type_.const, const)
class ClassBaseTest(Tester):
    """Checks the parsed representation of class test::Base (see cppcode)."""

    def setUp(self):
        self.base = GetDecl('Base')

    def testClass(self):
        'test the properties of the class Base'
        self.assert_(isinstance(self.base, Class))
        # Base declares a pure virtual (foo), so it must be abstract.
        self.assert_(self.base.abstract)

    def testFoo(self):
        'test function foo in class Base'
        foo = GetMember(self.base, 'foo')
        self.assert_(isinstance(foo, Method))
        self.assertEqual(foo.visibility, Scope.public)
        self.assert_(foo.virtual)
        self.assert_(foo.abstract)
        self.failIf(foo.static)
        self.assertEqual(foo.class_, 'test::Base')
        self.failIf(foo.const)
        self.assertEqual(foo.FullName(), 'test::Base::foo')
        self.assertEqual(foo.result.name, 'void')
        self.assertEqual(len(foo.parameters), 1)
        param = foo.parameters[0]
        self.TestType(param, FundamentalType, 'int', 'int', False)
        self.assertEqual(foo.namespace, None)
        self.assertEqual(
            foo.PointerDeclaration(1), '(void (test::Base::*)(int) )&test::Base::foo')

    def testX(self):
        'test the member x in class Base'
        x = GetMember(self.base, 'x')
        self.assertEqual(x.class_, 'test::Base')
        self.assertEqual(x.FullName(), 'test::Base::x')
        self.assertEqual(x.namespace, None)
        self.assertEqual(x.visibility, Scope.private)
        self.TestType(x.type, FundamentalType, 'int', 'int', False)
        self.assertEqual(x.static, False)

    def testConstructors(self):
        'test constructors in class Base'
        constructors = GetMembers(self.base, 'Base')
        # dispatch on arity: 0 -> default, 1 -> copy, 2 -> (int, float)
        for cons in constructors:
            if len(cons.parameters) == 0:
                self.TestDefaultConstructor(self.base, cons, Scope.public)
            elif len(cons.parameters) == 1: # copy constructor
                self.TestCopyConstructor(self.base, cons, Scope.public)
            elif len(cons.parameters) == 2: # other constructor
                intp, floatp = cons.parameters
                self.TestType(intp, FundamentalType, 'int', 'int', False)
                self.TestType(floatp, FundamentalType, 'float', 'float', False)

    def testSimple(self):
        'test function simple in class Base'
        simple = GetMember(self.base, 'simple')
        self.assert_(isinstance(simple, Method))
        self.assertEqual(simple.visibility, Scope.protected)
        self.assertEqual(simple.FullName(), 'test::Base::simple')
        self.assertEqual(len(simple.parameters), 1)
        param = simple.parameters[0]
        self.TestType(param, ReferenceType, 'std::string', 'const std::string&', True)
        self.TestType(simple.result, FundamentalType, 'bool', 'bool', False)
        self.assertEqual(
            simple.PointerDeclaration(1),
            '(bool (test::Base::*)(const std::string&) )&test::Base::simple')

    def testZ(self):
        # static public data member z
        z = GetMember(self.base, 'z')
        self.assert_(isinstance(z, Variable))
        self.assertEqual(z.visibility, Scope.public)
        self.assertEqual(z.FullName(), 'test::Base::z')
        self.assertEqual(z.type.name, 'int')
        self.assertEqual(z.type.const, False)
        self.assert_(z.static)
class ClassTemplateTest(Tester):
    """Checks the parsed representation of the Template<int> instantiation."""

    def setUp(self):
        self.template = GetDecl('Template<int>')

    def testClass(self):
        'test the properties of the Template<int> class'
        self.assert_(isinstance(self.template, Class))
        self.assert_(not self.template.abstract)
        self.assertEqual(self.template.FullName(), 'Template<int>')
        self.assertEqual(self.template.namespace, '')
        self.assertEqual(self.template.name, 'Template<int>')

    def testConstructors(self):
        'test the automatic constructors of the class Template<int>'
        # only compiler-generated default and copy constructors exist
        constructors = GetMembers(self.template, 'Template')
        for cons in constructors:
            if len(cons.parameters) == 0:
                self.TestDefaultConstructor(self.template, cons, Scope.public)
            elif len(cons.parameters) == 1:
                self.TestCopyConstructor(self.template, cons, Scope.public)

    def testValue(self):
        'test the class variable value'
        # 'value' is declared as T, instantiated here as int
        value = GetMember(self.template, 'value')
        self.assert_(isinstance(value, ClassVariable))
        self.assert_(value.name, 'value')
        self.TestType(value.type, FundamentalType, 'int', 'int', False)
        self.assert_(not value.static)
        self.assertEqual(value.visibility, Scope.public)
        self.assertEqual(value.class_, 'Template<int>')
        self.assertEqual(value.FullName(), 'Template<int>::value')

    def testBase(self):
        'test the superclasses of Template<int>'
        # Template inherits 'protected test::Base' (see cppcode)
        bases = self.template.bases
        self.assertEqual(len(bases), 1)
        base = bases[0]
        self.assert_(isinstance(base, Base))
        self.assertEqual(base.name, 'test::Base')
        self.assertEqual(base.visibility, Scope.protected)
class FreeFuncTest(Tester):
    """Checks the parsed representation of the free function test::FreeFunc."""

    def setUp(self):
        self.func = GetDecl('FreeFunc')

    def testFunc(self):
        'test attributes of FreeFunc'
        self.assert_(isinstance(self.func, Function))
        self.assertEqual(self.func.name, 'FreeFunc')
        self.assertEqual(self.func.FullName(), 'test::FreeFunc')
        self.assertEqual(self.func.namespace, 'test')
        self.assertEqual(
            self.func.PointerDeclaration(1),
            '(const test::Base& (*)(const std::string&, int))&test::FreeFunc')

    def testResult(self):
        'test the return value of FreeFunc'
        res = self.func.result
        self.TestType(res, ReferenceType, 'test::Base', 'const test::Base&', True)

    def testParameters(self):
        'test the parameters of FreeFunc'
        self.assertEqual(len(self.func.parameters), 2)
        strp, intp = self.func.parameters
        self.TestType(strp, ReferenceType, 'std::string', 'const std::string&', True)
        self.assertEqual(strp.default, None)
        self.TestType(intp, FundamentalType, 'int', 'int', False)
        # declared as 'int=10' in cppcode
        self.assertEqual(intp.default, '10')
class testFunctionPointers(Tester):
    """Checks parsing of pointer-to-member and pointer-to-function
    parameter types (MethodTester / FunctionTester in cppcode)."""

    def testMethodPointer(self):
        'test declaration of a pointer-to-method'
        meth = GetDecl('MethodTester')
        param = meth.parameters[0]
        fullname = 'void (test::Base::*)(int)'
        self.TestType(param, PointerType, fullname, fullname, False)

    def testFunctionPointer(self):
        'test declaration of a pointer-to-function'
        func = GetDecl('FunctionTester')
        param = func.parameters[0]
        fullname = 'void (*)(int)'
        self.TestType(param, PointerType, fullname, fullname, False)
# =============================================================================
# Support routines
# =============================================================================

# C++ source fed to gccxml by GetXMLFile(); the test cases above assert
# against the declarations parsed from this snippet.
cppcode = '''
namespace std {
class string;
}
namespace test {
class Base
{
public:
Base();
Base(const Base&);
Base(int, float);
virtual void foo(int = 0.0) = 0;
static int z;
protected:
bool simple(const std::string&);
private:
int x;
};
void MethodTester( void (Base::*)(int) );
void FunctionTester( void (*)(int) );
const Base & FreeFunc(const std::string&, int=10);
}
template <class T>
struct Template: protected test::Base
{
T value;
virtual void foo(int);
};
Template<int> __aTemplateInt;
'''
def GetXMLFile():
    '''Generates an gccxml file using the code from the global cppcode.
    Returns the xml's filename.'''
    # NOTE: Python 2 code (file() builtin); tempfile.mktemp is also
    # insecure by modern standards -- acceptable for a test harness.
    # write the code to a header file
    tmpfile = tempfile.mktemp() + '.h'
    f = file(tmpfile, 'w')
    f.write(cppcode)
    f.close()
    # run gccxml
    outfile = tmpfile + '.xml'
    if os.system('gccxml "%s" "-fxml=%s"' % (tmpfile, outfile)) != 0:
        raise RuntimeError, 'Error executing GCCXML.'
    # read the output file into the xmlcode
    f = file(outfile)
    xmlcode = f.read()
    #print xmlcode
    f.close()
    # remove the header
    os.remove(tmpfile)
    return outfile
def GetDeclarations():
    'Uses the GCCXMLParser module to get the declarations.'
    xmlfile = GetXMLFile()
    declarations = GCCXMLParser.ParseDeclarations(xmlfile)
    # the generated xml file is only needed for parsing; clean it up
    os.remove(xmlfile)
    return declarations
# the declarations to be analysed
# (parsed once at import time; all test cases read from this module-level list)
declarations = GetDeclarations()
def GetDecl(name):
    'returns one of the top declarations given its name'
    # for/else: the else clause runs only when the loop finishes without
    # returning, i.e. when no declaration matched
    for decl in declarations:
        if decl.name == name:
            return decl
    else:
        raise RuntimeError, 'Declaration not found: %s' % name
def GetMember(class_, name):
    'gets the member of the given class by its name'
    # exactly one match is required; zero or several is an error
    res = None
    multipleFound = False
    for member in class_:
        if member.name == name:
            if res is not None:
                multipleFound = True
                break
            res = member
    if res is None or multipleFound:
        raise RuntimeError, \
            'No member or more than one member found in class %s: %s' \
            % (class_.name, name)
    return res
def GetMembers(class_, name):
    'gets the members of the given class by its name'
    # used for overload sets: two or more matches are required
    res = []
    for member in class_:
        if member.name == name:
            res.append(member)
    if len(res) in (0, 1):
        raise RuntimeError, \
            'GetMembers: 0 or 1 members found in class %s: %s' \
            % (class_.name, name)
    return res
# allow running this test module directly from the command line
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
Project-Bonfire/KOIT | Scripts/include/Help_note.py | 3 | 5439 | from Scripts.include.helper_func import *
def print_help(argv, program_argv):
"""
Gets the program arguments and prints manual for the program.
"""
print BOLD + "Usage:" + ENDC
print
print BOLD + OKBLUE + " Network parameters:" + ENDC
print
print BOLD + " -plasma_with_fpu:" + ENDC
print "\tUse Plasma with FPU, instead of normal Plasma "# + str(program_argv['plasma_with_fpu']) + "."
print
print BOLD + " -D [size]:" + ENDC
print "\tMake a network of [size]X[size]. Size can be only multiples of two. " \
+ "Default value is " + str(program_argv['network_dime']) + "."
print
print BOLD + " -P:" + ENDC
print "\tAdd parity to each router input. " \
+ "Default is " + str(program_argv['add_parity']) + "."
print
print BOLD + " -checkers:" + ENDC
print "\tAdd control part checkers to the router (for FIFO, Arbiter and LBDR). Checker outputs are not taken to the output interface. " \
+ "Default is " + str(program_argv['add_checkers']) + "."
print
print BOLD + " -NI [depth]:" + ENDC
print "\tAdd a network interface with size [depth] to each router's local port. " \
+ "Default is " + str(program_argv['add_NI']) + "."
print BOLD + " -NI_Test" + ENDC
print "\tAdds a network interface with size to each router's local port and connects a traffic generator to it instead of a PE. " \
+ "Default is " + str(program_argv['NI_Test']) + "."\
+"should be used with -SHMU"
print
print BOLD + " -FI:" + ENDC
print "\tAdd fault injector units to all the links (except the local) in the network. " \
+ "Default is " + str(program_argv['add_FI']) + "."
print
print BOLD + " -FC:" + ENDC
print "\tAdd fault classifier units to all the links (except the local) in the network. " \
+ "Default is " + str(program_argv['add_FC']) + "."
print
print BOLD + " -packet_drop:" + ENDC
print "\tAdd packet dropping capability to FIFO in case of fault injection. " \
+ "Default is " + str(program_argv['packet_drop']) + "."
print BOLD + " -packet_saving:" + ENDC
print "\tAdd advance packet dropping capability to FIFO in case of fault injection." \
+ "Default is " + str(program_argv['packet_drop']) + "."
print
print
print BOLD + OKBLUE + " Simulation parameters:" + ENDC
print BOLD + " -Rand [PIR]:" + ENDC
print "\tUse Random traffic pattern generator with packet injection rate equal to PIR (value between 0 and 1)."
print "\t\t**Cannot be used togeter with -BR.**"
print "\t\tIf neither -Rand or -BR is specified, system defaults to repeatative packets being sent from source to same destination."
print
print BOLD + " -BR [PIR]:" + ENDC
print "\tUse Bit Reversal traffic pattern generator with packet injection rate equal to PIR (value between 0 and 1)."
print "\t\t**Cannot be used togeter with -Rand.**"
print "\t\tIf neither -Rand or -BR is specified, system defaults to repeatative packets being sent from source to same destination."
print
print BOLD + " -PS [min] [max]:" + ENDC
print "\tSpecify packet size range. Default min value is 3 and defualt max value is 8."
print
print BOLD + " -sim [sim_time]:" + ENDC
print "\tSpecifies the length of simulation in clock cycles. which at this time the packet generators will stop sending packets."
print
print BOLD + " -end [end_time]:" + ENDC
print "\tSpecifies the length of simulation in nanoseconds. After this time the simulation will be stopped, even if the packet injection is still active."
print "\tIf this parameter is not specified, but -sim is specified, the default value is 1.5*sim."
print "\tIf this parameter is not specified and -sim is also not specified, it defaults to 15000 ns."
print
print BOLD + " -lat:" + ENDC
print "\tCalculate the average packet latency (in terms of clock cycles). Disables Modelsim GUI."
print
print
print BOLD + OKBLUE + " Other parameters:" + ENDC
print BOLD + " --debug:" + ENDC
print "\tIncrease verbocity of the script. Useful for debugging."
print BOLD + " --trace:" + ENDC
print "\tadds packet tracing."
print
print
print BOLD + OKBLUE + " Examples:" + ENDC
print BOLD + " Example 1:" + ENDC
print "\t " + argv[0] + " -D 4 -credit_based_FC -FC -FI -Rand 0.1 -PS 8 8 -sim 10000 -end 11000"
print "\t\tSimulates a 4X4 network "
print "\t\tflow control mechanism is credit-based with fault classfiers support"
print "\t\twith Fault Injection(40-60 clock cycle injection, i.e approx. 16 million faults per second) "
print "\t\talso generates a testbench which uses random traffic pattern generator with Packet Injection Rate of 0.1"
print "\t\tand fixed packet size of 8 and sends packets until 10000 ns and ends simulation at 11000 ns"
print BOLD + " Example 2:" + ENDC
print "\t " + argv[0] + " -D 2 -credit_based_FC -packet_drop -FC -NI_Test -SHMU -Rand 0.01 -PS 8 8 -sim 10000 -end 11000"
print "\t\tSimulates a 2X2 network "
print "\t\tflow control mechanism is credit-based with packet_drop based FIFO and LBDR and fault classfiers support"
print "\t\tAlso uses NI testers which mimic the behaviour of plasma but are much faster!"
print "\t\tIt will also have SHMU capabilities handled by the NI tester procedure"
| gpl-3.0 |
4eek/edx-platform | lms/djangoapps/discussion_api/tests/test_permissions.py | 81 | 4970 | """
Tests for discussion API permission logic
"""
import itertools
import ddt
from discussion_api.permissions import (
can_delete,
get_editable_fields,
get_initializable_comment_fields,
get_initializable_thread_fields,
)
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from lms.lib.comment_client.user import User
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
def _get_context(requester_id, is_requester_privileged, is_cohorted=False, thread=None):
    """Build a context dict suitable for exercising the permissions module."""
    context = {
        "cc_requester": User(id=requester_id),
        "is_requester_privileged": is_requester_privileged,
        "course": CourseFactory(cohort_config={"cohorted": is_cohorted}),
        "thread": thread,
    }
    return context
@ddt.ddt
class GetInitializableFieldsTest(ModuleStoreTestCase):
    """Tests for get_*_initializable_fields"""

    @ddt.data(*itertools.product([True, False], [True, False]))
    @ddt.unpack
    def test_thread(self, is_privileged, is_cohorted):
        # Privileged users in cohorted courses may additionally set group_id.
        context = _get_context(
            requester_id="5",
            is_requester_privileged=is_privileged,
            is_cohorted=is_cohorted
        )
        actual = get_initializable_thread_fields(context)
        expected = {
            "abuse_flagged", "course_id", "following", "raw_body", "title",
            "topic_id", "type", "voted",
        }
        if is_privileged and is_cohorted:
            expected.add("group_id")
        self.assertEqual(actual, expected)

    @ddt.data(*itertools.product([True, False], ["question", "discussion"], [True, False]))
    @ddt.unpack
    def test_comment(self, is_thread_author, thread_type, is_privileged):
        # Only the question-thread author or a privileged user may endorse.
        context = _get_context(
            requester_id="5",
            is_requester_privileged=is_privileged,
            thread=Thread(user_id="5" if is_thread_author else "6",
                          thread_type=thread_type)
        )
        actual = get_initializable_comment_fields(context)
        expected = {"abuse_flagged", "parent_id", "raw_body", "thread_id", "voted"}
        if (is_thread_author and thread_type == "question") or is_privileged:
            expected.add("endorsed")
        self.assertEqual(actual, expected)
@ddt.ddt
class GetEditableFieldsTest(ModuleStoreTestCase):
    """Tests for get_editable_fields"""

    @ddt.data(*itertools.product([True, False], [True, False], [True, False]))
    @ddt.unpack
    def test_thread(self, is_author, is_privileged, is_cohorted):
        # Authors and privileged users may edit content fields; only
        # privileged users in cohorted courses may edit group_id.
        thread = Thread(user_id="5" if is_author else "6", type="thread")
        context = _get_context(
            requester_id="5",
            is_requester_privileged=is_privileged,
            is_cohorted=is_cohorted
        )
        actual = get_editable_fields(thread, context)
        expected = {"abuse_flagged", "following", "voted"}
        if is_author or is_privileged:
            expected.update({"topic_id", "type", "title", "raw_body"})
        if is_privileged and is_cohorted:
            expected.add("group_id")
        self.assertEqual(actual, expected)

    @ddt.data(*itertools.product([True, False], [True, False], ["question", "discussion"], [True, False]))
    @ddt.unpack
    def test_comment(self, is_author, is_thread_author, thread_type, is_privileged):
        # Endorsement rights follow the enclosing question thread's author.
        comment = Comment(user_id="5" if is_author else "6", type="comment")
        context = _get_context(
            requester_id="5",
            is_requester_privileged=is_privileged,
            thread=Thread(user_id="5" if is_thread_author else "6",
                          thread_type=thread_type)
        )
        actual = get_editable_fields(comment, context)
        expected = {"abuse_flagged", "voted"}
        if is_author or is_privileged:
            expected.add("raw_body")
        if (is_thread_author and thread_type == "question") or is_privileged:
            expected.add("endorsed")
        self.assertEqual(actual, expected)
@ddt.ddt
class CanDeleteTest(ModuleStoreTestCase):
    """Tests for can_delete"""

    @ddt.data(*itertools.product([True, False], [True, False]))
    @ddt.unpack
    def test_thread(self, is_author, is_privileged):
        # A thread is deletable by its author or by a privileged user.
        thread = Thread(user_id="5" if is_author else "6")
        context = _get_context(requester_id="5",
                               is_requester_privileged=is_privileged)
        self.assertEqual(can_delete(thread, context),
                         is_author or is_privileged)

    @ddt.data(*itertools.product([True, False], [True, False], [True, False]))
    @ddt.unpack
    def test_comment(self, is_author, is_thread_author, is_privileged):
        # Authorship of the enclosing thread does not grant deletion rights.
        comment = Comment(user_id="5" if is_author else "6")
        context = _get_context(
            requester_id="5",
            is_requester_privileged=is_privileged,
            thread=Thread(user_id="5" if is_thread_author else "6")
        )
        self.assertEqual(can_delete(comment, context),
                         is_author or is_privileged)
| agpl-3.0 |
fnordahl/nova | nova/tests/unit/test_api_validation.py | 7 | 43908 | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
from nova.api.openstack import api_version_request as api_version
from nova.api import validation
from nova.api.validation import parameter_types
from nova import exception
from nova import test
class FakeRequest(object):
    """Minimal request stand-in used by the validation tests below."""

    # Defaults: v2.1 microversion, empty WSGI environ, not a legacy request.
    api_version_request = api_version.APIVersionRequest("2.1")
    environ = {}
    legacy_v2 = False

    def is_legacy_v2(self):
        # Report whether this request mimics the legacy v2 API.
        return self.legacy_v2
class APIValidationTestCase(test.NoDBTestCase):
    """Shared helper base class for the schema-validation test cases."""

    def check_validation_error(self, method, body, expected_detail, req=None):
        """Assert that calling method raises a 400 ValidationError whose
        detail matches expected_detail (treated as a regex first, then
        compared verbatim)."""
        req = req or FakeRequest()
        try:
            method(body=body, req=req,)
        except exception.ValidationError as err:
            self.assertEqual(400, err.kwargs['code'])
            if re.match(expected_detail, err.kwargs['detail']) is None:
                self.assertEqual(expected_detail, err.kwargs['detail'],
                                 'Exception details did not match expected')
        except Exception as err:
            self.fail('An unexpected exception happens: %s' % err)
        else:
            self.fail('Any exception does not happen.')
class MicroversionsSchemaTestCase(APIValidationTestCase):
    """Check that per-microversion schemas are selected correctly."""

    def setUp(self):
        super(MicroversionsSchemaTestCase, self).setUp()
        # v2.1 expects an integer 'foo'; legacy v2.0 expects a string.
        int_schema = {
            'type': 'object',
            'properties': {'foo': {'type': 'integer'}},
        }
        str_schema = copy.deepcopy(int_schema)
        str_schema['properties']['foo'] = {'type': 'string'}

        @validation.schema(str_schema, '2.0', '2.0')
        @validation.schema(int_schema, '2.1')
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_v2compatible_request(self):
        req = FakeRequest()
        req.legacy_v2 = True
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'bar'}, req=req))
        detail = ("Invalid input for field/attribute foo. Value: 1. "
                  "1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail, req=req)

    def test_validate_v21_request(self):
        req = FakeRequest()
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=req))
        detail = ("Invalid input for field/attribute foo. Value: bar. "
                  "'bar' is not of type 'integer'")
        self.check_validation_error(self.post, body={'foo': 'bar'},
                                    expected_detail=detail, req=req)
class RequiredDisableTestCase(APIValidationTestCase):
    """With no 'required' list, any body should validate."""

    def setUp(self):
        super(RequiredDisableTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': {'type': 'integer'}},
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_required_disable(self):
        # 'foo' is optional, so a body with or without it passes.
        for body in ({'foo': 1}, {'abc': 1}):
            self.assertEqual('Validation succeeded.',
                             self.post(body=body, req=FakeRequest()))
class RequiredEnableTestCase(APIValidationTestCase):
    """With 'foo' listed as required, it must be present."""

    def setUp(self):
        super(RequiredEnableTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': {'type': 'integer'}},
            'required': ['foo'],
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_required_enable(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))

    def test_validate_required_enable_fails(self):
        self.check_validation_error(
            self.post, body={'abc': 1},
            expected_detail="'foo' is a required property")
class AdditionalPropertiesEnableTestCase(APIValidationTestCase):
    """Without 'additionalProperties': False, extra keys are allowed."""

    def setUp(self):
        super(AdditionalPropertiesEnableTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': {'type': 'integer'}},
            'required': ['foo'],
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_additionalProperties_enable(self):
        for body in ({'foo': 1}, {'foo': 1, 'ext': 1}):
            self.assertEqual('Validation succeeded.',
                             self.post(body=body, req=FakeRequest()))
class AdditionalPropertiesDisableTestCase(APIValidationTestCase):
    """With 'additionalProperties': False, unknown keys are rejected."""

    def setUp(self):
        super(AdditionalPropertiesDisableTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': {'type': 'integer'}},
            'required': ['foo'],
            'additionalProperties': False,
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_additionalProperties_disable(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))

    def test_validate_additionalProperties_disable_fails(self):
        self.check_validation_error(
            self.post, body={'foo': 1, 'ext': 1},
            expected_detail="Additional properties are not allowed "
                            "('ext' was unexpected)")
class PatternPropertiesTestCase(APIValidationTestCase):
    """Keys must match the 1-10 alphanumeric character pattern."""

    def setUp(self):
        super(PatternPropertiesTestCase, self).setUp()
        schema = {
            'patternProperties': {
                '^[a-zA-Z0-9]{1,10}$': {'type': 'string'},
            },
            'additionalProperties': False,
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_patternProperties(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'bar'}, req=FakeRequest()))

    def test_validate_patternProperties_fails(self):
        cases = [
            ({'__': 'bar'},
             "Additional properties are not allowed ('__' was unexpected)"),
            ({'': 'bar'},
             "Additional properties are not allowed ('' was unexpected)"),
            ({'0123456789a': 'bar'},
             "Additional properties are not allowed ('0123456789a' was"
             " unexpected)"),
            # Non-string keys cause the pattern match itself to blow up.
            ({None: 'bar'}, "expected string or buffer"),
        ]
        for body, detail in cases:
            self.check_validation_error(self.post, body=body,
                                        expected_detail=detail)
class StringTestCase(APIValidationTestCase):
    """'foo' must be a JSON string; other JSON types are rejected."""

    def setUp(self):
        super(StringTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': {'type': 'string'}},
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_string(self):
        for value in ('abc', '0', ''):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_string_fails(self):
        cases = [
            (1, "Invalid input for field/attribute foo. Value: 1."
                " 1 is not of type 'string'"),
            (1.5, "Invalid input for field/attribute foo. Value: 1.5."
                  " 1.5 is not of type 'string'"),
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'string'"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class StringLengthTestCase(APIValidationTestCase):
    """'foo' must be a string of 1 to 10 characters."""

    def setUp(self):
        super(StringLengthTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'minLength': 1,
                    'maxLength': 10,
                },
            },
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_string_length(self):
        # Both length bounds are inclusive.
        for value in ('0', '0123456789'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_string_length_fails(self):
        cases = [
            ('', "Invalid input for field/attribute foo. Value: ."
                 " '' is too short"),
            ('0123456789a',
             "Invalid input for field/attribute foo. Value: 0123456789a."
             " '0123456789a' is too long"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class IntegerTestCase(APIValidationTestCase):
    """'foo' may be an integer or a digits-only string."""

    def setUp(self):
        super(IntegerTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': ['integer', 'string'],
                    'pattern': '^[0-9]+$',
                },
            },
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_integer(self):
        for value in (1, '1', '0123456789'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_integer_fails(self):
        cases = [
            ('abc', "Invalid input for field/attribute foo. Value: abc."
                    " 'abc' does not match '^[0-9]+$'"),
            # Booleans are not accepted even though bool subclasses int.
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'integer', 'string'"),
            ('0xffff', "Invalid input for field/attribute foo. Value: 0xffff."
                       " '0xffff' does not match '^[0-9]+$'"),
            (1.0, "Invalid input for field/attribute foo. Value: 1.0."
                  " 1.0 is not of type 'integer', 'string'"),
            ('1.0', "Invalid input for field/attribute foo. Value: 1.0."
                    " '1.0' does not match '^[0-9]+$'"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class IntegerRangeTestCase(APIValidationTestCase):
    """'foo' must be an integer (or digit string) between 1 and 10."""

    def setUp(self):
        super(IntegerRangeTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': ['integer', 'string'],
                    'pattern': '^[0-9]+$',
                    'minimum': 1,
                    'maximum': 10,
                },
            },
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_integer_range(self):
        for value in (1, 10, '1'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_integer_range_fails(self):
        # Details use the '(.0)?' regex to tolerate int/float rendering.
        cases = [
            (0, "Invalid input for field/attribute foo. Value: 0."
                " 0(.0)? is less than the minimum of 1"),
            (11, "Invalid input for field/attribute foo. Value: 11."
                 " 11(.0)? is greater than the maximum of 10"),
            ('0', "Invalid input for field/attribute foo. Value: 0."
                  " 0(.0)? is less than the minimum of 1"),
            ('11', "Invalid input for field/attribute foo. Value: 11."
                   " 11(.0)? is greater than the maximum of 10"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class BooleanTestCase(APIValidationTestCase):
    """'foo' must be one of the accepted boolean spellings."""

    def setUp(self):
        super(BooleanTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': parameter_types.boolean},
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_boolean(self):
        for value in (True, False, 'True', 'False', '1', '0'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_boolean_fails(self):
        enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On',"
                        " 'on', 'YES', 'Yes', 'yes',"
                        " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off',"
                        " 'off', 'NO', 'No', 'no']")
        for value in ('bar', '2'):
            detail = ("Invalid input for field/attribute foo. Value: %s."
                      " '%s' is not one of %s") % (value, value, enum_boolean)
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class HostnameTestCase(APIValidationTestCase):
    """'foo' must be a hostname-shaped string."""

    def setUp(self):
        super(HostnameTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': parameter_types.hostname},
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_hostname(self):
        for value in ('localhost', 'localhost.localdomain.com',
                      'my-host', 'my_host'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_hostname_fails(self):
        cases = [
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'string'"),
            (1, "Invalid input for field/attribute foo. Value: 1."
                " 1 is not of type 'string'"),
            ('my$host', "Invalid input for field/attribute foo. Value: my$host."
                        " 'my$host' does not match '^[a-zA-Z0-9-._]*$'"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class HostnameIPaddressTestCase(APIValidationTestCase):
    """'foo' may be a hostname or an IPv4/IPv6 address string."""

    def setUp(self):
        super(HostnameIPaddressTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': parameter_types.hostname_or_ip_address},
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_hostname_or_ip_address(self):
        for value in ('localhost', 'localhost.localdomain.com', 'my-host',
                      'my_host', '192.168.10.100', '2001:db8::9abc'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_hostname_or_ip_address_fails(self):
        cases = [
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'string'"),
            (1, "Invalid input for field/attribute foo. Value: 1."
                " 1 is not of type 'string'"),
            ('my$host', "Invalid input for field/attribute foo. Value: my$host."
                        " 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class NameTestCase(APIValidationTestCase):
    """Validation tests for the 'name' parameter type (no leading or
    trailing whitespace allowed; printable unicode accepted)."""
    def setUp(self):
        super(NameTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': parameter_types.name,
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_name(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'm1.small'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my server'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'a'}, req=FakeRequest()))
        # Printable non-ASCII characters are valid name characters.
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434\u2006\ufffd'},
                                   req=FakeRequest()))
    def test_validate_name_fails(self):
        # Leading/trailing ASCII whitespace is rejected.
        detail = (u"Invalid input for field/attribute foo. Value: ."
                  " ' ' does not match .*")
        self.check_validation_error(self.post, body={'foo': ' '},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: server."
                  " ' server' does not match .*")
        self.check_validation_error(self.post, body={'foo': ' server'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: server ."
                  " 'server ' does not match .*")
        self.check_validation_error(self.post, body={'foo': 'server '},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: a."
                  " ' a' does not match .*")
        self.check_validation_error(self.post, body={'foo': ' a'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: a ."
                  " 'a ' does not match .*")
        self.check_validation_error(self.post, body={'foo': 'a '},
                                    expected_detail=detail)
        # NOTE(stpierre): Quoting for the unicode values in the error
        # messages below gets *really* messy, so we just wildcard it
        # out. (e.g., '.* does not match'). In practice, we don't
        # particularly care about that part of the error message.
        # trailing unicode space
        detail = (u"Invalid input for field/attribute foo. Value: a\xa0."
                  u' .* does not match .*')
        self.check_validation_error(self.post, body={'foo': u'a\xa0'},
                                    expected_detail=detail)
        # non-printable unicode
        detail = (u"Invalid input for field/attribute foo. Value: \uffff."
                  u" .* does not match .*")
        self.check_validation_error(self.post, body={'foo': u'\uffff'},
                                    expected_detail=detail)
        # four-byte unicode, if supported by this python build
        try:
            detail = (u"Invalid input for field/attribute foo. Value: "
                      u"\U00010000. .* does not match .*")
            self.check_validation_error(self.post, body={'foo': u'\U00010000'},
                                        expected_detail=detail)
        except ValueError:
            pass
class NameWithLeadingTrailingSpacesTestCase(APIValidationTestCase):
    """Validation tests for the relaxed name type that additionally
    permits leading/trailing ASCII (but not unicode-only) whitespace."""
    def setUp(self):
        super(NameWithLeadingTrailingSpacesTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': parameter_types.name_with_leading_trailing_spaces,
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_name(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'm1.small'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my server'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'a'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434\u2006\ufffd'},
                                   req=FakeRequest()))
        # Unlike parameter_types.name, surrounding spaces are accepted.
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': ' abc '},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'abc abc abc'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': ' abc abc abc '},
                                   req=FakeRequest()))
        # leading unicode space
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '\xa0abc'},
                                   req=FakeRequest()))
    def test_validate_name_fails(self):
        detail = (u"Invalid input for field/attribute foo. Value: ."
                  u" ' ' does not match .*")
        self.check_validation_error(self.post, body={'foo': ' '},
                                    expected_detail=detail)
        # NOTE(stpierre): Quoting for the unicode values in the error
        # messages below gets *really* messy, so we just wildcard it
        # out. (e.g., '.* does not match'). In practice, we don't
        # particularly care about that part of the error message.
        # unicode space
        detail = (u"Invalid input for field/attribute foo. Value: \xa0."
                  u' .* does not match .*')
        self.check_validation_error(self.post, body={'foo': u'\xa0'},
                                    expected_detail=detail)
        # non-printable unicode
        detail = (u"Invalid input for field/attribute foo. Value: \uffff."
                  u" .* does not match .*")
        self.check_validation_error(self.post, body={'foo': u'\uffff'},
                                    expected_detail=detail)
        # four-byte unicode, if supported by this python build
        try:
            detail = (u"Invalid input for field/attribute foo. Value: "
                      u"\U00010000. .* does not match .*")
            self.check_validation_error(self.post, body={'foo': u'\U00010000'},
                                        expected_detail=detail)
        except ValueError:
            pass
class NoneTypeTestCase(APIValidationTestCase):
    """'foo' must be one of the 'none' sentinels: 'None', None or {}."""

    def setUp(self):
        super(NoneTypeTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': parameter_types.none},
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_none(self):
        for value in ('None', None, {}):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_none_fails(self):
        cases = [
            ('', "Invalid input for field/attribute foo. Value: ."
                 " '' is not one of ['None', None, {}]"),
            ({'key': 'val'},
             "Invalid input for field/attribute foo. Value: "
             "{'key': 'val'}. {'key': 'val'} is not one of "
             "['None', None, {}]"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class TcpUdpPortTestCase(APIValidationTestCase):
    """'foo' must be a valid TCP/UDP port number (0-65535)."""

    def setUp(self):
        super(TcpUdpPortTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {'foo': parameter_types.tcp_udp_port},
        }

        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'

        self.post = post

    def test_validate_tcp_udp_port(self):
        for value in (1024, '1024'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value}, req=FakeRequest()))

    def test_validate_tcp_udp_port_fails(self):
        cases = [
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'integer', 'string'"),
            (65536, "Invalid input for field/attribute foo. Value: 65536."
                    " 65536(.0)? is greater than the maximum of 65535"),
        ]
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class CidrFormatTestCase(APIValidationTestCase):
    """Exercise the custom 'cidr' string format checker."""
    def setUp(self):
        super(CidrFormatTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'cidr',
                },
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_cidr(self):
        # A well-formed IPv4 network in CIDR notation passes.
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '192.168.10.0/24'},
                             req=FakeRequest()
                         ))
    def test_validate_cidr_fails(self):
        # Non-address text fails.
        detail = ("Invalid input for field/attribute foo."
                  " Value: bar."
                  " 'bar' is not a 'cidr'")
        self.check_validation_error(self.post,
                                    body={'foo': 'bar'},
                                    expected_detail=detail)
        # The empty string fails.
        detail = ("Invalid input for field/attribute foo."
                  " Value: . '' is not a 'cidr'")
        self.check_validation_error(self.post, body={'foo': ''},
                                    expected_detail=detail)
        # A bare address with no /prefix fails.
        detail = ("Invalid input for field/attribute foo."
                  " Value: 192.168.1.0. '192.168.1.0' is not a 'cidr'")
        self.check_validation_error(self.post, body={'foo': '192.168.1.0'},
                                    expected_detail=detail)
        # Whitespace before the prefix length fails.
        detail = ("Invalid input for field/attribute foo."
                  " Value: 192.168.1.0 /24."
                  " '192.168.1.0 /24' is not a 'cidr'")
        self.check_validation_error(self.post, body={'foo': '192.168.1.0 /24'},
                                    expected_detail=detail)
class DatetimeTestCase(APIValidationTestCase):
    """Exercise the 'date-time' string format checker."""
    def setUp(self):
        super(DatetimeTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'date-time',
                },
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_datetime(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '2014-01-14T01:00:00Z'},
                             req=FakeRequest()
                         ))
    def test_validate_datetime_fails(self):
        # Month 13 is out of range, so the string is rejected.
        detail = ("Invalid input for field/attribute foo."
                  " Value: 2014-13-14T01:00:00Z."
                  " '2014-13-14T01:00:00Z' is not a 'date-time'")
        self.check_validation_error(self.post,
                                    body={'foo': '2014-13-14T01:00:00Z'},
                                    expected_detail=detail)
        # Arbitrary text and a bare digit are rejected too.
        detail = ("Invalid input for field/attribute foo."
                  " Value: bar. 'bar' is not a 'date-time'")
        self.check_validation_error(self.post, body={'foo': 'bar'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " '1' is not a 'date-time'")
        self.check_validation_error(self.post, body={'foo': '1'},
                                    expected_detail=detail)
class UuidTestCase(APIValidationTestCase):
    """Exercise the 'uuid' string format checker."""
    def setUp(self):
        super(UuidTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'uuid',
                },
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_uuid(self):
        # Canonical hyphenated UUID passes.
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'},
                             req=FakeRequest()
                         ))
    def test_validate_uuid_fails(self):
        # The same UUID without hyphens (and one hex digit short) fails.
        detail = ("Invalid input for field/attribute foo."
                  " Value: 70a599e031e749b7b260868f441e862."
                  " '70a599e031e749b7b260868f441e862' is not a 'uuid'")
        self.check_validation_error(self.post,
                                    body={'foo': '70a599e031e749b7b260868f441e862'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " '1' is not a 'uuid'")
        self.check_validation_error(self.post, body={'foo': '1'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: abc."
                  " 'abc' is not a 'uuid'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)
class UriTestCase(APIValidationTestCase):
    """Exercise the 'uri' string format checker."""
    def setUp(self):
        super(UriTestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'uri',
                },
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_uri(self):
        # Hostname and bracketed IPv6 literal authorities both pass.
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': 'http://localhost:8774/v2/servers'},
                             req=FakeRequest()
                         ))
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': 'http://[::1]:8774/v2/servers'},
                             req=FakeRequest()
                         ))
    def test_validate_uri_fails(self):
        # Template reused for every failure; {0} is the offending value,
        # interpolated twice via str.format.
        base_detail = ("Invalid input for field/attribute foo. Value: {0}. "
                       "'{0}' is not a 'uri'")
        # Double fragment marker.
        invalid_uri = 'http://localhost:8774/v2/servers##'
        self.check_validation_error(self.post,
                                    body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))
        # Malformed IPv6 literal in the authority.
        invalid_uri = 'http://[fdf8:01]:8774/v2/servers'
        self.check_validation_error(self.post,
                                    body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))
        # Plain tokens with no scheme.
        invalid_uri = '1'
        self.check_validation_error(self.post,
                                    body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))
        invalid_uri = 'abc'
        self.check_validation_error(self.post,
                                    body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))
class Ipv4TestCase(APIValidationTestCase):
    """Exercise the 'ipv4' string format checker."""
    def setUp(self):
        super(Ipv4TestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'ipv4',
                },
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_ipv4(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '192.168.0.100'},
                             req=FakeRequest()
                         ))
    def test_validate_ipv4_fails(self):
        # Text, hostnames, and IPv6 addresses are all rejected.
        detail = ("Invalid input for field/attribute foo. Value: abc."
                  " 'abc' is not a 'ipv4'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: localhost."
                  " 'localhost' is not a 'ipv4'")
        self.check_validation_error(self.post, body={'foo': 'localhost'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo."
                  " Value: 2001:db8::1234:0:0:9abc."
                  " '2001:db8::1234:0:0:9abc' is not a 'ipv4'")
        self.check_validation_error(self.post,
                                    body={'foo': '2001:db8::1234:0:0:9abc'},
                                    expected_detail=detail)
class Ipv6TestCase(APIValidationTestCase):
    """Exercise the 'ipv6' string format checker (mirror of Ipv4TestCase)."""
    def setUp(self):
        super(Ipv6TestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'ipv6',
                },
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_ipv6(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '2001:db8::1234:0:0:9abc'},
                             req=FakeRequest()
                         ))
    def test_validate_ipv6_fails(self):
        # Text, hostnames, and IPv4 addresses are all rejected.
        detail = ("Invalid input for field/attribute foo. Value: abc."
                  " 'abc' is not a 'ipv6'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: localhost."
                  " 'localhost' is not a 'ipv6'")
        self.check_validation_error(self.post, body={'foo': 'localhost'},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo."
                  " Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'")
        self.check_validation_error(self.post, body={'foo': '192.168.0.100'},
                                    expected_detail=detail)
class Base64TestCase(APIValidationTestCase):
    """Exercise the 'base64' string format checker."""
    def setUp(self):
        # Bug fix: the original called super(APIValidationTestCase, self),
        # which skips APIValidationTestCase.setUp() in the MRO and runs the
        # grandparent's setUp instead.  Every sibling test case here names
        # its own class; do the same so the full setUp chain runs.
        super(Base64TestCase, self).setUp()
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'base64',
                },
            },
        }
        @validation.schema(request_body_schema=schema)
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_base64(self):
        # 'aGVsbG8gd29ybGQ=' is the base64 encoding of 'hello world'.
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'aGVsbG8gd29ybGQ='},
                                   req=FakeRequest()))
    def test_validate_base64_fails(self):
        # A string with characters outside the base64 alphabet is rejected.
        value = 'A random string'
        detail = ("Invalid input for field/attribute foo. "
                  "Value: %s. '%s' is not a 'base64'") % (value, value)
        self.check_validation_error(self.post, body={'foo': value},
                                    expected_detail=detail)
| apache-2.0 |
matburt/ansible | lib/ansible/utils/shlex.py | 59 | 1275 | # (c) 2015, Marius Gedminas <marius@gedmin.as>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import shlex
from ansible.compat.six import PY3
from ansible.utils.unicode import to_bytes, to_unicode
# Version-portable shlex.split: callers always pass text in and always get
# text tokens back, regardless of interpreter version.
if PY3:
    # shlex.split() wants Unicode (i.e. ``str``) input on Python 3
    shlex_split = shlex.split
else:
    # shlex.split() wants bytes (i.e. ``str``) input on Python 2
    def shlex_split(s, comments=False, posix=True):
        # Encode before splitting, then decode every token back to unicode
        # so both branches present the same text-in/text-out contract.
        return map(to_unicode, shlex.split(to_bytes(s), comments, posix))
    shlex_split.__doc__ = shlex.split.__doc__
| gpl-3.0 |
abomyi/django | tests/admin_views/admin.py | 9 | 30837 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import tempfile
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six import StringIO
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, Person, Persona,
Picture, Pizza, Plot, PlotDetails, PluggableSearchPerson, Podcast, Post,
PrePopulatedPost, PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo,
Question, Recipe, Recommendation, Recommender, ReferencedByGenRel,
ReferencedByInline, ReferencedByParent, RelatedPrepopulated, Report,
Reservation, Restaurant, RowLevelChangePermissionModel, Section,
ShortMessage, Simple, Sketch, State, Story, StumpJoke, Subscriber,
SuperVillain, Telegram, Thing, Topping, UnchangeableObject,
UndeletableObject, UnorderedObject, UserMessenger, Villain, Vodcast,
Whatsit, Widget, Worker, WorkHour,
)
def callable_year(dt_value):
    """Changelist column: return ``dt_value.year``, or None when the value
    has no ``year`` attribute (e.g. a null date)."""
    return getattr(dt_value, 'year', None)
# Let the admin order this computed column by the underlying 'date' field.
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = ('content', 'date', callable_year, 'model_year',
'modeladmin_year', 'model_year_reversed')
list_filter = ('date', 'section')
view_on_site = False
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
def changelist_view(self, request):
"Test that extra_context works"
return super(ArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'from@example.com',
['to@example.com']
).send()
return super(ArticleAdmin, self).delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'from@example.com',
['to@example.com']
).send()
return super(ArticleAdmin, self).save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
def changelist_view(self, request):
"Test that extra_context works"
return super(CustomArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
    """Changelist formset whose clean() vetoes any row that marks the
    person named Grace Hopper as alive."""
    def clean(self):
        for row in self.cleaned_data:
            person = row.get('id')
            alive = row.get('alive')
            if not (person and alive):
                continue
            if person.name == "Grace Hopper":
                raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super(PersonAdmin, self).get_changelist_formset(request,
formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super(PersonAdmin, self).get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'from@example.com',
['to@example.com']
).send()
def external_mail(modeladmin, request, selected):
    """Module-level admin action (as opposed to a ModelAdmin method) that
    sends a fixed test email; used to verify function-based actions."""
    EmailMessage(
        'Greetings from a function action',
        'This is the test email from a function action',
        'from@example.com',
        ['to@example.com']
    ).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
    """Admin action that returns a redirect response instead of None."""
    # Function-local import keeps the dependency with the one action
    # that needs it.
    from django.http import HttpResponseRedirect
    return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
    """Admin action that streams a small in-memory 'file' back to the
    client via StreamingHttpResponse."""
    buf = StringIO('This is the content of the file')
    return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
    """Admin action that always answers HTTP 403 Forbidden."""
    return HttpResponse(content='No permission to perform this action',
                        status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
    """Parent admin with inline children; demonstrates post-save fix-up of
    related objects in save_related()."""
    model = Parent
    inlines = [ChildInline]
    save_as = True
    list_editable = ('name',)
    def save_related(self, request, form, formsets, change):
        """After inlines are saved, append the parent's last name to any
        child whose name is a single word."""
        super(ParentAdmin, self).save_related(request, form, formsets, change)
        # NOTE(review): the two-target unpack assumes the parent's name is
        # exactly two whitespace-separated words; anything else raises
        # ValueError here.
        first_name, last_name = form.instance.name.split()
        for child in form.instance.child_set.all():
            if len(child.name.split()) < 2:
                child.name = child.name + ' ' + last_name
                child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo",
'multiline_html_allow_tags',
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
value.short_description = 'Value in $US'
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
def multiline_html_allow_tags(self, instance):
return "Multiline<br>html<br>content<br>with allow tags"
multiline_html_allow_tags.allow_tags = True
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.filter(pk=9999) # Does not exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(PaperAdmin, self).get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(TelegramAdmin, self).get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ["-pk"]
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ["-pk"]
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super(PluggableSearchPersonAdmin, self).get_search_results(
request, queryset, search_term
)
try:
search_term_as_int = int(search_term)
except ValueError:
pass
else:
queryset |= self.model.objects.filter(age=search_term_as_int)
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
    """Standalone changelist column callable displaying ``obj.order``."""
    return getattr(obj, 'order')
# Sort this computed column by the model's 'order' field.
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [
url(r'^extra/$',
self.extra,
name='cable_extra'),
]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['name']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super(UnchangeableObjectAdmin, self).get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
    # Accesses a nonexistent attribute so that rendering this column raises
    # AttributeError; consumed by AttributeErrorRaisingAdmin's list_display.
    return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super(DependentChildAdminForm, self).clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
    """Asserts get_formsets_with_inlines() receives the correct ``obj``.

    add_view/change_view tag the request with ``is_add_view`` so that
    get_formsets_with_inlines can verify it gets obj=None during add and a
    real instance during change, raising otherwise.
    """
    fields = ['name']

    def add_view(self, request, *args, **kwargs):
        # Mark the request before delegating so the check below can run.
        request.is_add_view = True
        return super(GetFormsetsArgumentCheckingAdmin, self).add_view(request, *args, **kwargs)

    def change_view(self, request, *args, **kwargs):
        request.is_add_view = False
        return super(GetFormsetsArgumentCheckingAdmin, self).change_view(request, *args, **kwargs)

    def get_formsets_with_inlines(self, request, obj=None):
        if request.is_add_view and obj is not None:
            raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
        if not request.is_add_view and obj is None:
            raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
        return super(GetFormsetsArgumentCheckingAdmin, self).get_formsets_with_inlines(request, obj)
# Custom AdminSite instance exercised by the admin_views test suite; every
# fixture model above is registered here with its matching ModelAdmin.
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property'])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)

# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
#     related ForeignKey object registered in admin
#     related ForeignKey object not registered in admin
#     related OneToOne object registered in admin
#     related OneToOne object not registered in admin
# when deleting Book so as exercise all four troublesome (w.r.t escaping
# and calling force_text to avoid problems on Python 2.3) paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer)
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)

# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)

# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)

site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
| bsd-3-clause |
Captnoord/openpli-enigma2 | lib/python/Plugins/SystemPlugins/TempFanControl/plugin.py | 31 | 10584 | from Components.ActionMap import ActionMap
from Components.Sensors import sensors
from Components.Sources.Sensor import SensorSource
from Components.Sources.StaticText import StaticText
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry
from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor
from Components.FanControl import fancontrol
class TempFanControl(Screen, ConfigListScreen):
    """Setup screen listing temperature/fan sensors and editable fan settings.

    Shows up to 8 temperature and 8 fan-RPM sensor readouts plus a config
    list with voltage/PWM values (normal and standby) per detected fan.
    """
    # NOTE(review): block indentation was reconstructed; the skin XML content
    # is unchanged from the original.
    skin = """
<screen position="center,center" size="570,420" title="Fan Control" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="10,50" size="550,120" scrollbarMode="showOnDemand" />
<widget source="SensorTempText0" render="Label" position="10,150" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp0" render="Label" position="100,150" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText1" render="Label" position="10,170" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp1" render="Label" position="100,170" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText2" render="Label" position="10,190" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp2" render="Label" position="100,190" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText3" render="Label" position="10,210" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp3" render="Label" position="100,210" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText4" render="Label" position="10,230" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp4" render="Label" position="100,230" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText5" render="Label" position="10,250" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp5" render="Label" position="100,250" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText6" render="Label" position="10,270" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp6" render="Label" position="100,270" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorTempText7" render="Label" position="10,290" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorTemp7" render="Label" position="100,290" zPosition="1" size="100,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText0" render="Label" position="290,150" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan0" render="Label" position="380,150" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText1" render="Label" position="290,170" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan1" render="Label" position="380,170" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText2" render="Label" position="290,190" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan2" render="Label" position="380,190" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText3" render="Label" position="290,210" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan3" render="Label" position="380,210" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText4" render="Label" position="290,230" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan4" render="Label" position="380,230" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText5" render="Label" position="290,250" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan5" render="Label" position="380,250" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText6" render="Label" position="290,270" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan6" render="Label" position="380,270" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
<widget source="SensorFanText7" render="Label" position="290,290" zPosition="1" size="90,40" font="Regular;20" halign="left" valign="top" backgroundColor="#9f1313" transparent="1" />
<widget source="SensorFan7" render="Label" position="380,290" zPosition="1" size="150,20" font="Regular;19" halign="right">
<convert type="SensorToText"></convert>
</widget>
</screen>"""

    def __init__(self, session, args = None):
        Screen.__init__(self, session)
        # Enumerate available sensors; the skin has 8 slots of each kind.
        templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
        tempcount = len(templist)
        fanlist = sensors.getSensorsList(sensors.TYPE_FAN_RPM)
        fancount = len(fanlist)
        self["red"] = StaticText(_("Cancel"))
        self["green"] = StaticText(_("OK"))
        self["yellow"] = StaticText("")
        self["blue"] = StaticText("")
        # Fill each of the 8 skin slots; unused slots get empty placeholders
        # so the skin's widget sources always resolve.
        for count in range(8):
            if count < tempcount:
                id = templist[count]
                self["SensorTempText%d" % count] = StaticText(sensors.getSensorName(id))
                self["SensorTemp%d" % count] = SensorSource(sensorid = id)
            else:
                self["SensorTempText%d" % count] = StaticText("")
                self["SensorTemp%d" % count] = SensorSource()
            if count < fancount:
                id = fanlist[count]
                self["SensorFanText%d" % count] = StaticText(sensors.getSensorName(id))
                self["SensorFan%d" % count] = SensorSource(sensorid = id)
            else:
                self["SensorFanText%d" % count] = StaticText("")
                self["SensorFan%d" % count] = SensorSource()
        # Four editable entries (voltage/PWM, normal and standby) per fan.
        self.list = []
        for count in range(fancontrol.getFanCount()):
            self.list.append(getConfigListEntry(_("Fan %d voltage") % (count + 1), fancontrol.getConfig(count).vlt))
            self.list.append(getConfigListEntry(_("Fan %d PWM") % (count + 1), fancontrol.getConfig(count).pwm))
            self.list.append(getConfigListEntry(_("Standby fan %d voltage") % (count + 1), fancontrol.getConfig(count).vlt_standby))
            self.list.append(getConfigListEntry(_("Standby fan %d PWM") % (count + 1), fancontrol.getConfig(count).pwm_standby))
        ConfigListScreen.__init__(self, self.list, session = self.session)
        #self["config"].list = self.list
        #self["config"].setList(self.list)
        self["config"].l.setSeperation(300)
        # Green/OK persists the values, red/cancel restores saved ones.
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "MenuActions"],
            {
                "ok": self.save,
                "cancel": self.revert,
                "red": self.revert,
                "green": self.save,
                "menu": self.closeRecursive,
            }, -1)

    def save(self):
        # Persist every fan's config elements, then leave the screen.
        for count in range(fancontrol.getFanCount()):
            fancontrol.getConfig(count).vlt.save()
            fancontrol.getConfig(count).pwm.save()
            fancontrol.getConfig(count).vlt_standby.save()
            fancontrol.getConfig(count).pwm_standby.save()
        self.close()

    def revert(self):
        # Reload the stored values, discarding on-screen edits, then close.
        for count in range(fancontrol.getFanCount()):
            fancontrol.getConfig(count).vlt.load()
            fancontrol.getConfig(count).pwm.load()
            fancontrol.getConfig(count).vlt_standby.load()
            fancontrol.getConfig(count).pwm_standby.load()
        self.close()
def main(session, **kwargs):
    # Plugin entry point: open the fan-control setup screen.
    session.open(TempFanControl)
def startMenu(menuid):
    # Contribute a single entry (weight 80) to the "system" setup menu;
    # every other menu gets an empty contribution.
    if menuid == "system":
        return [(_("Temperature and fan control"), main, "tempfancontrol", 80)]
    return []
def Plugins(**kwargs):
    # Describe this plugin to enigma2: shown in the menu, no restart needed.
    return PluginDescriptor(name = _("Temperature and fan control"), description = _("Temperature and fan control"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = startMenu)
| gpl-2.0 |
sirMackk/ZeroNet | plugins/disabled-Zeroname-local/UiRequestPlugin.py | 6 | 1836 | import re
from Plugin import PluginManager
@PluginManager.registerTo("UiRequest")
class UiRequestPlugin(object):
    """UiRequest extension that resolves domain names in media requests.

    Regex literals are now raw strings: the originals relied on invalid
    escape sequences ("\\.", "\\?") surviving as literal backslashes, which
    emits DeprecationWarning on Python 3.6+. Runtime patterns are identical.
    """

    def __init__(self, *args, **kwargs):
        from Site import SiteManager
        self.site_manager = SiteManager.site_manager
        super(UiRequestPlugin, self).__init__(*args, **kwargs)

    # Media request
    def actionSiteMedia(self, path):
        match = re.match(r"/media/(?P<address>[A-Za-z0-9-]+\.[A-Za-z0-9\.-]+)(?P<inner_path>/.*|$)", path)
        if match:  # Its a valid domain, resolve first
            domain = match.group("address")
            address = self.site_manager.resolveDomain(domain)
            if address:
                path = "/media/" + address + match.group("inner_path")
        return super(UiRequestPlugin, self).actionSiteMedia(path)  # Get the wrapper frame output

    # Is mediarequest allowed from that referer
    def isMediaRequestAllowed(self, site_address, referer):
        referer_path = re.sub(r"http[s]{0,1}://.*?/", "/", referer).replace("/media", "")  # Remove site address
        referer_path = re.sub(r"\?.*", "", referer_path)  # Remove http params

        if self.isProxyRequest():  # Match to site domain
            referer = re.sub(r"^http://zero[/]+", "http://", referer)  # Allow /zero access
            referer_site_address = re.match(r"http[s]{0,1}://(.*?)(/|$)", referer).group(1)
        else:  # Match to request path
            referer_site_address = re.match(r"/(?P<address>[A-Za-z0-9\.-]+)(?P<inner_path>/.*|$)", referer_path).group("address")

        if referer_site_address == site_address:  # Referer site address as simple address
            return True
        elif self.site_manager.resolveDomain(referer_site_address) == site_address:  # Referer site address as dns
            return True
        else:  # Invalid referer
            return False
| gpl-2.0 |
rickerc/cinder_audit | cinder/tests/db/test_finish_migration.py | 1 | 2226 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for finish_volume_migration."""
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests import utils as testutils
class FinishVolumeMigrationTestCase(test.TestCase):
    """Test cases for db.finish_volume_migration()."""
    # NOTE(review): the previous setUp/tearDown overrides only delegated to
    # super() and were removed as redundant no-ops.

    def test_finish_volume_migration(self):
        ctxt = context.RequestContext(user_id='user_id',
                                      project_id='project_id',
                                      is_admin=True)
        src_volume = testutils.create_volume(ctxt, host='src',
                                             migration_status='migrating',
                                             status='available')
        dest_volume = testutils.create_volume(ctxt, host='dest',
                                              migration_status='target:fake',
                                              status='available')
        db.finish_volume_migration(ctxt, src_volume['id'],
                                   dest_volume['id'])

        # After the swap, the source row adopts the destination's identity:
        # its name is derived from the dest id and it lives on the dest host,
        # with the migration marker cleared.
        src_volume = db.volume_get(ctxt, src_volume['id'])
        expected_name = 'volume-%s' % dest_volume['id']
        self.assertEqual(src_volume['_name_id'], dest_volume['id'])
        self.assertEqual(src_volume['name'], expected_name)
        self.assertEqual(src_volume['host'], 'dest')
        self.assertEqual(src_volume['status'], 'available')
        self.assertEqual(src_volume['migration_status'], None)
| apache-2.0 |
DavidNorman/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 2 | 14485 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import hashlib
import math
import os
import random
import sys
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
data_index = 0
def _hash_file(fpath):
hasher = hashlib.sha256()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(65535), b''):
hasher.update(chunk)
return hasher.hexdigest()
def word2vec_basic(log_dir):
  """Example of building, training and visualizing a word2vec model.

  Args:
    log_dir: Directory (created if missing) where TensorBoard summaries,
      the model checkpoint and the embedding metadata TSV are written.
  """
  # Create the directory for TensorBoard variables if there is not.
  if not os.path.exists(log_dir):
    os.makedirs(log_dir)

  # Step 1: Download the data.
  # Note: Source website does not support HTTPS right now.
  url = 'http://mattmahoney.net/dc/'

  # pylint: disable=redefined-outer-name
  def maybe_download(filename, expected_bytes, sha256=None):
    """Download a file if not present, and make sure it's the right size."""
    local_filename = os.path.join(gettempdir(), filename)
    if not os.path.exists(local_filename):
      local_filename, _ = urllib.request.urlretrieve(url + filename,
                                                     local_filename)
    statinfo = os.stat(local_filename)
    if sha256 and _hash_file(local_filename) != sha256:
      raise Exception('Failed to verify ' + local_filename + ' due to hash '
                      'mismatch. Can you get to it with a browser?')
    if statinfo.st_size == expected_bytes:
      print('Found and verified', filename)
    else:
      print(statinfo.st_size)
      raise Exception('Failed to verify ' + local_filename +
                      '. Can you get to it with a browser?')
    return local_filename

  filename = maybe_download(
      'text8.zip',
      31344016,
      sha256='a6640522afe85d1963ad56c05b0ede0a0c000dddc9671758a6cc09b7a38e5232')

  # Read the data into a list of strings.
  def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words."""
    with zipfile.ZipFile(filename) as f:
      data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data

  vocabulary = read_data(filename)
  print('Data size', len(vocabulary))

  # Step 2: Build the dictionary and replace rare words with UNK token.
  vocabulary_size = 50000

  def build_dataset(words, n_words):
    """Process raw inputs into a dataset."""
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = {word: index for index, (word, _) in enumerate(count)}
    data = []
    unk_count = 0
    for word in words:
      index = dictionary.get(word, 0)
      if index == 0:  # dictionary['UNK']
        unk_count += 1
      data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary

  # Filling 4 global variables:
  # data - list of codes (integers from 0 to vocabulary_size-1).
  #   This is the original text but words are replaced by their codes
  # count - map of words(strings) to count of occurrences
  # dictionary - map of words(strings) to their codes(integers)
  # reverse_dictionary - map of codes(integers) to words(strings)
  data, count, unused_dictionary, reverse_dictionary = build_dataset(
      vocabulary, vocabulary_size)
  del vocabulary  # Hint to reduce memory.
  print('Most common words (+UNK)', count[:5])
  print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

  # Step 3: Function to generate a training batch for the skip-gram model.
  def generate_batch(batch_size, num_skips, skip_window):
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)  # pylint: disable=redefined-builtin
    if data_index + span > len(data):
      data_index = 0
    buffer.extend(data[data_index:data_index + span])
    data_index += span
    for i in range(batch_size // num_skips):
      context_words = [w for w in range(span) if w != skip_window]
      words_to_use = random.sample(context_words, num_skips)
      for j, context_word in enumerate(words_to_use):
        batch[i * num_skips + j] = buffer[skip_window]
        labels[i * num_skips + j, 0] = buffer[context_word]
      if data_index == len(data):
        buffer.extend(data[0:span])
        data_index = span
      else:
        buffer.append(data[data_index])
        data_index += 1
    # Backtrack a little bit to avoid skipping words in the end of a batch
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels

  batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
  for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
          reverse_dictionary[labels[i, 0]])

  # Step 4: Build and train a skip-gram model.

  batch_size = 128
  embedding_size = 128  # Dimension of the embedding vector.
  skip_window = 1  # How many words to consider left and right.
  num_skips = 2  # How many times to reuse an input to generate a label.
  num_sampled = 64  # Number of negative examples to sample.

  # We pick a random validation set to sample nearest neighbors. Here we limit
  # the validation samples to the words that have a low numeric ID, which by
  # construction are also the most frequent. These 3 variables are used only for
  # displaying model accuracy, they don't affect calculation.
  valid_size = 16  # Random set of words to evaluate similarity on.
  valid_window = 100  # Only pick dev samples in the head of the distribution.
  valid_examples = np.random.choice(valid_window, valid_size, replace=False)

  graph = tf.Graph()

  with graph.as_default():

    # Input data.
    with tf.name_scope('inputs'):
      train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
      train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
      valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    with tf.device('/cpu:0'):
      # Look up embeddings for inputs.
      with tf.name_scope('embeddings'):
        embeddings = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)

      # Construct the variables for the NCE loss
      with tf.name_scope('weights'):
        nce_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
      with tf.name_scope('biases'):
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    # Explanation of the meaning of NCE loss and why choosing NCE over tf.nn.sampled_softmax_loss:
    # http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
    # http://papers.nips.cc/paper/5165-learning-word-embeddings-efficiently-with-noise-contrastive-estimation.pdf
    with tf.name_scope('loss'):
      loss = tf.reduce_mean(
          tf.nn.nce_loss(
              weights=nce_weights,
              biases=nce_biases,
              labels=train_labels,
              inputs=embed,
              num_sampled=num_sampled,
              num_classes=vocabulary_size))

    # Add the loss value as a scalar to summary.
    tf.summary.scalar('loss', loss)

    # Construct the SGD optimizer using a learning rate of 1.0.
    with tf.name_scope('optimizer'):
      optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # Compute the cosine similarity between minibatch examples and all
    # embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
                                              valid_dataset)
    similarity = tf.matmul(
        valid_embeddings, normalized_embeddings, transpose_b=True)

    # Merge all summaries.
    merged = tf.summary.merge_all()

    # Add variable initializer.
    init = tf.global_variables_initializer()

    # Create a saver.
    saver = tf.train.Saver()

  # Step 5: Begin training.
  num_steps = 100001

  with tf.compat.v1.Session(graph=graph) as session:
    # Open a writer to write summaries.
    writer = tf.summary.FileWriter(log_dir, session.graph)

    # We must initialize all variables before we use them.
    init.run()
    print('Initialized')

    average_loss = 0
    for step in xrange(num_steps):
      batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
                                                  skip_window)
      feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

      # Define metadata variable.
      run_metadata = tf.RunMetadata()

      # We perform one update step by evaluating the optimizer op (including it
      # in the list of returned values for session.run()
      # Also, evaluate the merged op to get all summaries from the returned
      # "summary" variable. Feed metadata variable to session for visualizing
      # the graph in TensorBoard.
      _, summary, loss_val = session.run([optimizer, merged, loss],
                                         feed_dict=feed_dict,
                                         run_metadata=run_metadata)
      average_loss += loss_val

      # Add returned summaries to writer in each step.
      writer.add_summary(summary, step)
      # Add metadata to visualize the graph for the last run.
      if step == (num_steps - 1):
        writer.add_run_metadata(run_metadata, 'step%d' % step)

      if step % 2000 == 0:
        if step > 0:
          average_loss /= 2000
        # The average loss is an estimate of the loss over the last 2000
        # batches.
        print('Average loss at step ', step, ': ', average_loss)
        average_loss = 0

      # Note that this is expensive (~20% slowdown if computed every 500 steps)
      if step % 10000 == 0:
        sim = similarity.eval()
        for i in xrange(valid_size):
          valid_word = reverse_dictionary[valid_examples[i]]
          top_k = 8  # number of nearest neighbors
          nearest = (-sim[i, :]).argsort()[1:top_k + 1]
          log_str = 'Nearest to %s:' % valid_word
          print(
              log_str,
              ', '.join([reverse_dictionary[nearest[k]] for k in range(top_k)]))
    final_embeddings = normalized_embeddings.eval()

    # Write corresponding labels for the embeddings.
    with open(log_dir + '/metadata.tsv', 'w') as f:
      for i in xrange(vocabulary_size):
        f.write(reverse_dictionary[i] + '\n')

    # Save the model for checkpoints.
    saver.save(session, os.path.join(log_dir, 'model.ckpt'))

    # Create a configuration for visualizing embeddings with the labels in
    # TensorBoard.
    config = projector.ProjectorConfig()
    embedding_conf = config.embeddings.add()
    embedding_conf.tensor_name = embeddings.name
    embedding_conf.metadata_path = os.path.join(log_dir, 'metadata.tsv')
    projector.visualize_embeddings(writer, config)

  writer.close()

  # Step 6: Visualize the embeddings.

  # pylint: disable=missing-docstring
  # Function to draw visualization of distance between embeddings.
  def plot_with_labels(low_dim_embs, labels, filename):
    assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
    plt.figure(figsize=(18, 18))  # in inches
    for i, label in enumerate(labels):
      x, y = low_dim_embs[i, :]
      plt.scatter(x, y)
      plt.annotate(
          label,
          xy=(x, y),
          xytext=(5, 2),
          textcoords='offset points',
          ha='right',
          va='bottom')

    plt.savefig(filename)

  try:
    # pylint: disable=g-import-not-at-top
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    tsne = TSNE(
        perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
    plot_only = 500
    low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
    plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(),
                                                        'tsne.png'))

  except ImportError as ex:
    print('Please install sklearn, matplotlib, and scipy to show embeddings.')
    print(ex)
# All functionality is run after tf.compat.v1.app.run() (b/122547914). This
# could be split up but the methods are laid sequentially with their usage for
# clarity.
def main(unused_argv):
  """Parse the --log_dir flag and run the basic word2vec example.

  Give a folder path as an argument with '--log_dir' to save TensorBoard
  summaries. Default is a 'log' folder next to this script.
  """
  script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  default_log_dir = os.path.join(script_dir, 'log')

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--log_dir',
      type=str,
      default=default_log_dir,
      help='The log directory for TensorBoard summaries.')
  flags, _unknown = parser.parse_known_args()

  word2vec_basic(flags.log_dir)
if __name__ == '__main__':
  # tf.app.run() parses flags and dispatches to main().
  tf.app.run()
| apache-2.0 |
nzlosh/st2 | st2api/tests/unit/controllers/v1/test_kvps.py | 3 | 25338 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from st2tests.api import FunctionalTest
from st2common.models.db.auth import UserDB
from six.moves import http_client
__all__ = ["KeyValuePairControllerTestCase"]
# Fixture payloads shared by the test cases below.

# Plain (implicitly system-scoped) key/value pairs.
KVP = {"name": "keystone_endpoint", "value": "http://127.0.0.1:5000/v3"}
KVP_2 = {"name": "keystone_version", "value": "v3"}

# User-scoped variants; "st2kv.user" is the canonical scope name and
# "user" is the legacy alias accepted by the API.
KVP_2_USER = {"name": "keystone_version", "value": "user_v3", "scope": "st2kv.user"}
KVP_2_USER_LEGACY = {"name": "keystone_version", "value": "user_v3", "scope": "user"}
KVP_3_USER = {
    "name": "keystone_endpoint",
    "value": "http://127.0.1.1:5000/v3",
    "scope": "st2kv.user",
}
KVP_4_USER = {
    "name": "customer_ssn",
    "value": "123-456-7890",
    "secret": True,
    "scope": "st2kv.user",
}

# Key that expires 10 seconds after being written.
KVP_WITH_TTL = {
    "name": "keystone_endpoint",
    "value": "http://127.0.0.1:5000/v3",
    "ttl": 10,
}

# Secret submitted as plain text; the API encrypts it server side.
SECRET_KVP = {"name": "secret_key1", "value": "secret_value1", "secret": True}

# value = S3cret!Value
# encrypted with st2tests/conf/st2_kvstore_tests.crypto.key.json
ENCRYPTED_KVP = {
    "name": "secret_key1",
    "value": (
        "3030303030298D848B45A24EDCD1A82FAB4E831E3FCE6E60956817A48A180E4C040801E"
        "B30170DACF79498F30520236A629912C3584847098D"
    ),
    "encrypted": True,
}
# Same pre-encrypted payload under another key name.
# NOTE(review): despite the "_SECRET_FALSE" name, this payload sets
# "secret": True — confirm which flag the test is meant to exercise.
ENCRYPTED_KVP_SECRET_FALSE = {
    "name": "secret_key2",
    "value": (
        "3030303030298D848B45A24EDCD1A82FAB4E831E3FCE6E60956817A48A180E4C040801E"
        "B30170DACF79498F30520236A629912C3584847098D"
    ),
    "secret": True,
    "encrypted": True,
}
class KeyValuePairControllerTestCase(FunctionalTest):
    """Functional tests for the /v1/keys key-value pair API.

    Covers CRUD operations, system vs. user scoping, prefix filtering,
    TTL support, and secret (encrypted) values.

    Note: several assertions previously used ``assertTrue(x, expected)``;
    ``assertTrue`` treats its second positional argument as a failure
    *message*, so those assertions were vacuous. They have been converted
    to ``assertEqual`` so they actually verify the expected values.
    """

    def test_get_all(self):
        """GET /v1/keys succeeds."""
        resp = self.app.get("/v1/keys")
        self.assertEqual(resp.status_int, 200)

    def test_get_one(self):
        """A key that was PUT can be fetched back by its id."""
        put_resp = self.__do_put("key1", KVP)
        kvp_id = self.__get_kvp_id(put_resp)
        get_resp = self.__do_get_one(kvp_id)
        self.assertEqual(get_resp.status_int, 200)
        self.assertEqual(self.__get_kvp_id(get_resp), kvp_id)
        self.__do_delete(kvp_id)

    def test_get_all_all_scope(self):
        """scope=all must only expose system keys plus the caller's own keys."""
        # Test which covers various scenarios which ensure non-admin users can't
        # read / view keys from other users
        user_db_1 = UserDB(name="user1")
        user_db_2 = UserDB(name="user2")
        user_db_3 = UserDB(name="user3")

        # Insert some mock data
        # System scoped keys
        put_resp = self.__do_put(
            "system1", {"name": "system1", "value": "val1", "scope": "st2kv.system"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "system1")
        self.assertEqual(put_resp.json["scope"], "st2kv.system")

        put_resp = self.__do_put(
            "system2", {"name": "system2", "value": "val2", "scope": "st2kv.system"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "system2")
        self.assertEqual(put_resp.json["scope"], "st2kv.system")

        # user1 scoped keys
        self.use_user(user_db_1)

        put_resp = self.__do_put(
            "user1", {"name": "user1", "value": "user1", "scope": "st2kv.user"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "user1")
        self.assertEqual(put_resp.json["scope"], "st2kv.user")
        self.assertEqual(put_resp.json["value"], "user1")

        put_resp = self.__do_put(
            "userkey", {"name": "userkey", "value": "user1", "scope": "st2kv.user"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "userkey")
        self.assertEqual(put_resp.json["scope"], "st2kv.user")
        self.assertEqual(put_resp.json["value"], "user1")

        # user2 scoped keys
        self.use_user(user_db_2)

        put_resp = self.__do_put(
            "user2", {"name": "user2", "value": "user2", "scope": "st2kv.user"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "user2")
        self.assertEqual(put_resp.json["scope"], "st2kv.user")
        self.assertEqual(put_resp.json["value"], "user2")

        put_resp = self.__do_put(
            "userkey", {"name": "userkey", "value": "user2", "scope": "st2kv.user"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "userkey")
        self.assertEqual(put_resp.json["scope"], "st2kv.user")
        self.assertEqual(put_resp.json["value"], "user2")

        # user3 scoped keys
        self.use_user(user_db_3)

        put_resp = self.__do_put(
            "user3", {"name": "user3", "value": "user3", "scope": "st2kv.user"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "user3")
        self.assertEqual(put_resp.json["scope"], "st2kv.user")
        self.assertEqual(put_resp.json["value"], "user3")

        put_resp = self.__do_put(
            "userkey", {"name": "userkey", "value": "user3", "scope": "st2kv.user"}
        )
        self.assertEqual(put_resp.status_int, 200)
        self.assertEqual(put_resp.json["name"], "userkey")
        self.assertEqual(put_resp.json["scope"], "st2kv.user")
        self.assertEqual(put_resp.json["value"], "user3")

        # 1. "all" scope as user1 - should only be able to view system + current user items
        self.use_user(user_db_1)

        resp = self.app.get("/v1/keys?scope=all")
        self.assertEqual(len(resp.json), 2 + 2)  # 2 system, 2 user

        self.assertEqual(resp.json[0]["name"], "system1")
        self.assertEqual(resp.json[0]["scope"], "st2kv.system")

        self.assertEqual(resp.json[1]["name"], "system2")
        self.assertEqual(resp.json[1]["scope"], "st2kv.system")

        self.assertEqual(resp.json[2]["name"], "user1")
        self.assertEqual(resp.json[2]["scope"], "st2kv.user")
        self.assertEqual(resp.json[2]["user"], "user1")

        self.assertEqual(resp.json[3]["name"], "userkey")
        self.assertEqual(resp.json[3]["scope"], "st2kv.user")
        self.assertEqual(resp.json[3]["user"], "user1")

        # Verify user can't retrieve values for other users by manipulating "prefix"
        resp = self.app.get("/v1/keys?scope=all&prefix=user2:")
        self.assertEqual(resp.json, [])

        resp = self.app.get("/v1/keys?scope=all&prefix=user")
        self.assertEqual(len(resp.json), 2)  # 2 user

        self.assertEqual(resp.json[0]["name"], "user1")
        self.assertEqual(resp.json[0]["scope"], "st2kv.user")
        self.assertEqual(resp.json[0]["user"], "user1")

        self.assertEqual(resp.json[1]["name"], "userkey")
        self.assertEqual(resp.json[1]["scope"], "st2kv.user")
        self.assertEqual(resp.json[1]["user"], "user1")

        # 2. "all" scope as user2 - should only be able to view system + current user items
        self.use_user(user_db_2)

        resp = self.app.get("/v1/keys?scope=all")
        self.assertEqual(len(resp.json), 2 + 2)  # 2 system, 2 user

        self.assertEqual(resp.json[0]["name"], "system1")
        self.assertEqual(resp.json[0]["scope"], "st2kv.system")

        self.assertEqual(resp.json[1]["name"], "system2")
        self.assertEqual(resp.json[1]["scope"], "st2kv.system")

        self.assertEqual(resp.json[2]["name"], "user2")
        self.assertEqual(resp.json[2]["scope"], "st2kv.user")
        self.assertEqual(resp.json[2]["user"], "user2")

        self.assertEqual(resp.json[3]["name"], "userkey")
        self.assertEqual(resp.json[3]["scope"], "st2kv.user")
        self.assertEqual(resp.json[3]["user"], "user2")

        # Verify user can't retrieve values for other users by manipulating "prefix"
        resp = self.app.get("/v1/keys?scope=all&prefix=user1:")
        self.assertEqual(resp.json, [])

        resp = self.app.get("/v1/keys?scope=all&prefix=user")
        self.assertEqual(len(resp.json), 2)  # 2 user

        self.assertEqual(resp.json[0]["name"], "user2")
        self.assertEqual(resp.json[0]["scope"], "st2kv.user")
        self.assertEqual(resp.json[0]["user"], "user2")

        self.assertEqual(resp.json[1]["name"], "userkey")
        self.assertEqual(resp.json[1]["scope"], "st2kv.user")
        self.assertEqual(resp.json[1]["user"], "user2")

        # Verify non-admin user can't retrieve keys for arbitrary users
        resp = self.app.get("/v1/keys?scope=user&user=user1", expect_errors=True)
        expected_error = (
            '"user" attribute can only be provided by admins when RBAC is enabled'
        )
        self.assertEqual(resp.status_int, http_client.FORBIDDEN)
        self.assertEqual(resp.json["faultstring"], expected_error)

        # 3. "all" scope as user3 - should only be able to view system + current user items
        self.use_user(user_db_3)

        resp = self.app.get("/v1/keys?scope=all")
        self.assertEqual(len(resp.json), 2 + 2)  # 2 system, 2 user

        self.assertEqual(resp.json[0]["name"], "system1")
        self.assertEqual(resp.json[0]["scope"], "st2kv.system")

        self.assertEqual(resp.json[1]["name"], "system2")
        self.assertEqual(resp.json[1]["scope"], "st2kv.system")

        self.assertEqual(resp.json[2]["name"], "user3")
        self.assertEqual(resp.json[2]["scope"], "st2kv.user")
        self.assertEqual(resp.json[2]["user"], "user3")

        self.assertEqual(resp.json[3]["name"], "userkey")
        self.assertEqual(resp.json[3]["scope"], "st2kv.user")
        self.assertEqual(resp.json[3]["user"], "user3")

        # Verify user can't retrieve values for other users by manipulating "prefix"
        resp = self.app.get("/v1/keys?scope=all&prefix=user1:")
        self.assertEqual(resp.json, [])

        resp = self.app.get("/v1/keys?scope=all&prefix=user")
        self.assertEqual(len(resp.json), 2)  # 2 user

        self.assertEqual(resp.json[0]["name"], "user3")
        self.assertEqual(resp.json[0]["scope"], "st2kv.user")
        self.assertEqual(resp.json[0]["user"], "user3")

        self.assertEqual(resp.json[1]["name"], "userkey")
        self.assertEqual(resp.json[1]["scope"], "st2kv.user")
        self.assertEqual(resp.json[1]["user"], "user3")

        # Clean up
        self.__do_delete("system1")
        self.__do_delete("system2")

        self.use_user(user_db_1)
        self.__do_delete("user1?scope=user")
        self.__do_delete("userkey?scope=user")

        self.use_user(user_db_2)
        self.__do_delete("user2?scope=user")
        self.__do_delete("userkey?scope=user")

        self.use_user(user_db_3)
        self.__do_delete("user3?scope=user")
        self.__do_delete("userkey?scope=user")

    def test_get_all_user_query_param_can_only_be_used_with_rbac(self):
        """?user= on the collection endpoint requires RBAC admin rights."""
        resp = self.app.get("/v1/keys?user=foousera", expect_errors=True)
        expected_error = (
            '"user" attribute can only be provided by admins when RBAC is enabled'
        )
        self.assertEqual(resp.status_int, http_client.FORBIDDEN)
        self.assertEqual(resp.json["faultstring"], expected_error)

    def test_get_one_user_query_param_can_only_be_used_with_rbac(self):
        """?user= on the item endpoint requires RBAC admin rights."""
        resp = self.app.get(
            "/v1/keys/keystone_endpoint?user=foousera", expect_errors=True
        )
        expected_error = (
            '"user" attribute can only be provided by admins when RBAC is enabled'
        )
        self.assertEqual(resp.status_int, http_client.FORBIDDEN)
        self.assertEqual(resp.json["faultstring"], expected_error)

    def test_get_all_prefix_filtering(self):
        """The ?prefix= filter narrows the result set by key-name prefix."""
        put_resp1 = self.__do_put(KVP["name"], KVP)
        put_resp2 = self.__do_put(KVP_2["name"], KVP_2)
        self.assertEqual(put_resp1.status_int, 200)
        self.assertEqual(put_resp2.status_int, 200)

        # No keys with that prefix
        resp = self.app.get("/v1/keys?prefix=something")
        self.assertEqual(resp.json, [])

        # Two keys with the provided prefix
        resp = self.app.get("/v1/keys?prefix=keystone")
        self.assertEqual(len(resp.json), 2)

        # One key with the provided prefix
        resp = self.app.get("/v1/keys?prefix=keystone_endpoint")
        self.assertEqual(len(resp.json), 1)

        self.__do_delete(self.__get_kvp_id(put_resp1))
        self.__do_delete(self.__get_kvp_id(put_resp2))

    def test_get_one_fail(self):
        """Fetching a nonexistent key returns 404."""
        resp = self.app.get("/v1/keys/1", expect_errors=True)
        self.assertEqual(resp.status_int, 404)

    def test_put(self):
        """PUT creates and then updates a key's value."""
        put_resp = self.__do_put("key1", KVP)
        update_input = put_resp.json
        update_input["value"] = "http://127.0.0.1:35357/v3"
        put_resp = self.__do_put(self.__get_kvp_id(put_resp), update_input)
        self.assertEqual(put_resp.status_int, 200)
        self.__do_delete(self.__get_kvp_id(put_resp))

    def test_put_with_scope(self):
        """A key PUT without scope is readable via the default system scope."""
        self.app.put_json("/v1/keys/%s" % "keystone_endpoint", KVP, expect_errors=False)
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.system" % "keystone_version",
            KVP_2,
            expect_errors=False,
        )

        get_resp_1 = self.app.get("/v1/keys/keystone_endpoint")
        self.assertEqual(get_resp_1.status_int, 200)
        self.assertEqual(self.__get_kvp_id(get_resp_1), "keystone_endpoint")
        get_resp_2 = self.app.get("/v1/keys/keystone_version?scope=st2kv.system")
        self.assertEqual(get_resp_2.status_int, 200)
        self.assertEqual(self.__get_kvp_id(get_resp_2), "keystone_version")
        get_resp_3 = self.app.get("/v1/keys/keystone_version")
        self.assertEqual(get_resp_3.status_int, 200)
        self.assertEqual(self.__get_kvp_id(get_resp_3), "keystone_version")
        self.app.delete("/v1/keys/keystone_endpoint?scope=st2kv.system")
        self.app.delete("/v1/keys/keystone_version?scope=st2kv.system")

    def test_put_user_scope_and_system_scope_dont_overlap(self):
        """System and user scopes keep separate values for the same key name."""
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.system" % "keystone_version",
            KVP_2,
            expect_errors=False,
        )
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.user" % "keystone_version",
            KVP_2_USER,
            expect_errors=False,
        )
        get_resp = self.app.get("/v1/keys/keystone_version?scope=st2kv.system")
        self.assertEqual(get_resp.json["value"], KVP_2["value"])

        get_resp = self.app.get("/v1/keys/keystone_version?scope=st2kv.user")
        self.assertEqual(get_resp.json["value"], KVP_2_USER["value"])
        self.app.delete("/v1/keys/keystone_version?scope=st2kv.system")
        self.app.delete("/v1/keys/keystone_version?scope=st2kv.user")

    def test_put_invalid_scope(self):
        """An unknown scope value is rejected with 400."""
        put_resp = self.app.put_json(
            "/v1/keys/keystone_version?scope=st2", KVP_2, expect_errors=True
        )
        self.assertEqual(put_resp.status_int, 400)

    def test_get_all_with_scope(self):
        """Listing honors both canonical and legacy scope names."""
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.system" % "keystone_version",
            KVP_2,
            expect_errors=False,
        )
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.user" % "keystone_version",
            KVP_2_USER,
            expect_errors=False,
        )

        # Note that the following two calls overwrite the st2kv.system and
        # st2kv.user scoped variables with the same name.
        self.app.put_json(
            "/v1/keys/%s?scope=system" % "keystone_version", KVP_2, expect_errors=False
        )
        self.app.put_json(
            "/v1/keys/%s?scope=user" % "keystone_version",
            KVP_2_USER_LEGACY,
            expect_errors=False,
        )

        get_resp_all = self.app.get("/v1/keys?scope=all")
        self.assertEqual(len(get_resp_all.json), 2)

        get_resp_sys = self.app.get("/v1/keys?scope=st2kv.system")
        self.assertEqual(len(get_resp_sys.json), 1)
        self.assertEqual(get_resp_sys.json[0]["value"], KVP_2["value"])

        get_resp_sys = self.app.get("/v1/keys?scope=system")
        self.assertEqual(len(get_resp_sys.json), 1)
        self.assertEqual(get_resp_sys.json[0]["value"], KVP_2["value"])

        get_resp_sys = self.app.get("/v1/keys?scope=st2kv.user")
        self.assertEqual(len(get_resp_sys.json), 1)
        self.assertEqual(get_resp_sys.json[0]["value"], KVP_2_USER["value"])

        get_resp_sys = self.app.get("/v1/keys?scope=user")
        self.assertEqual(len(get_resp_sys.json), 1)
        self.assertEqual(get_resp_sys.json[0]["value"], KVP_2_USER["value"])

        self.app.delete("/v1/keys/keystone_version?scope=st2kv.system")
        self.app.delete("/v1/keys/keystone_version?scope=st2kv.user")

    def test_get_all_with_scope_and_prefix_filtering(self):
        """?prefix= composes with user scoping."""
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.user" % "keystone_version",
            KVP_2_USER,
            expect_errors=False,
        )
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.user" % "keystone_endpoint",
            KVP_3_USER,
            expect_errors=False,
        )
        self.app.put_json(
            "/v1/keys/%s?scope=st2kv.user" % "customer_ssn",
            KVP_4_USER,
            expect_errors=False,
        )
        get_prefix = self.app.get("/v1/keys?scope=st2kv.user&prefix=keystone")
        self.assertEqual(len(get_prefix.json), 2)
        self.app.delete("/v1/keys/keystone_version?scope=st2kv.user")
        self.app.delete("/v1/keys/keystone_endpoint?scope=st2kv.user")
        self.app.delete("/v1/keys/customer_ssn?scope=st2kv.user")

    def test_put_with_ttl(self):
        """A TTL on PUT materializes as an expire_timestamp."""
        put_resp = self.__do_put("key_with_ttl", KVP_WITH_TTL)
        self.assertEqual(put_resp.status_int, 200)
        get_resp = self.app.get("/v1/keys")
        self.assertTrue(get_resp.json[0]["expire_timestamp"])
        self.__do_delete(self.__get_kvp_id(put_resp))

    def test_put_secret(self):
        """Secret values are stored encrypted, not as plain text."""
        put_resp = self.__do_put("secret_key1", SECRET_KVP)
        kvp_id = self.__get_kvp_id(put_resp)
        get_resp = self.__do_get_one(kvp_id)
        self.assertTrue(get_resp.json["encrypted"])
        crypto_val = get_resp.json["value"]
        self.assertNotEqual(SECRET_KVP["value"], crypto_val)
        self.__do_delete(self.__get_kvp_id(put_resp))

    def test_get_one_secret_no_decrypt(self):
        """Without ?decrypt the secret stays encrypted in the response."""
        put_resp = self.__do_put("secret_key1", SECRET_KVP)
        kvp_id = self.__get_kvp_id(put_resp)
        get_resp = self.app.get("/v1/keys/secret_key1")
        self.assertEqual(get_resp.status_int, 200)
        self.assertEqual(self.__get_kvp_id(get_resp), kvp_id)
        self.assertTrue(get_resp.json["secret"])
        self.assertTrue(get_resp.json["encrypted"])
        self.__do_delete(kvp_id)

    def test_get_one_secret_decrypt(self):
        """?decrypt=true returns the original plain-text value."""
        put_resp = self.__do_put("secret_key1", SECRET_KVP)
        kvp_id = self.__get_kvp_id(put_resp)
        get_resp = self.app.get("/v1/keys/secret_key1?decrypt=true")
        self.assertEqual(get_resp.status_int, 200)
        self.assertEqual(self.__get_kvp_id(get_resp), kvp_id)
        self.assertTrue(get_resp.json["secret"])
        self.assertFalse(get_resp.json["encrypted"])
        self.assertEqual(get_resp.json["value"], SECRET_KVP["value"])
        self.__do_delete(kvp_id)

    def test_get_all_decrypt(self):
        """?decrypt=true on the collection decrypts every secret value."""
        put_resp = self.__do_put("secret_key1", SECRET_KVP)
        kvp_id_1 = self.__get_kvp_id(put_resp)
        put_resp = self.__do_put("key1", KVP)
        kvp_id_2 = self.__get_kvp_id(put_resp)
        kvps = {"key1": KVP, "secret_key1": SECRET_KVP}
        stored_kvps = self.app.get("/v1/keys?decrypt=true").json
        self.assertEqual(len(stored_kvps), 2)
        for stored_kvp in stored_kvps:
            self.assertFalse(stored_kvp["encrypted"])
            exp_kvp = kvps.get(stored_kvp["name"])
            self.assertIsNotNone(exp_kvp)
            self.assertEqual(exp_kvp["value"], stored_kvp["value"])
        self.__do_delete(kvp_id_1)
        self.__do_delete(kvp_id_2)

    def test_put_encrypted_value(self):
        """Pre-encrypted values can be written directly and round-trip."""
        # 1. encrypted=True, secret=True
        put_resp = self.__do_put("secret_key1", ENCRYPTED_KVP)
        kvp_id = self.__get_kvp_id(put_resp)

        # Verify there is no secrets leakage
        self.assertEqual(put_resp.status_code, 200)
        self.assertEqual(put_resp.json["name"], "secret_key1")
        self.assertEqual(put_resp.json["scope"], "st2kv.system")
        self.assertEqual(put_resp.json["encrypted"], True)
        self.assertEqual(put_resp.json["secret"], True)
        self.assertEqual(put_resp.json["value"], ENCRYPTED_KVP["value"])
        self.assertTrue(put_resp.json["value"] != "S3cret!Value")
        self.assertTrue(len(put_resp.json["value"]) > len("S3cret!Value") * 2)

        get_resp = self.__do_get_one(kvp_id + "?decrypt=True")
        self.assertEqual(put_resp.json["name"], "secret_key1")
        self.assertEqual(put_resp.json["scope"], "st2kv.system")
        self.assertEqual(put_resp.json["encrypted"], True)
        self.assertEqual(put_resp.json["secret"], True)
        self.assertEqual(put_resp.json["value"], ENCRYPTED_KVP["value"])

        # Verify data integrity post decryption
        get_resp = self.__do_get_one(kvp_id + "?decrypt=True")
        self.assertFalse(get_resp.json["encrypted"])
        self.assertEqual(get_resp.json["value"], "S3cret!Value")
        self.__do_delete(self.__get_kvp_id(put_resp))

        # 2. encrypted=True, secret=False
        # encrypted should always imply secret=True
        put_resp = self.__do_put("secret_key2", ENCRYPTED_KVP_SECRET_FALSE)
        kvp_id = self.__get_kvp_id(put_resp)

        # Verify there is no secrets leakage
        self.assertEqual(put_resp.status_code, 200)
        self.assertEqual(put_resp.json["name"], "secret_key2")
        self.assertEqual(put_resp.json["scope"], "st2kv.system")
        self.assertEqual(put_resp.json["encrypted"], True)
        self.assertEqual(put_resp.json["secret"], True)
        self.assertEqual(put_resp.json["value"], ENCRYPTED_KVP["value"])
        self.assertTrue(put_resp.json["value"] != "S3cret!Value")
        self.assertTrue(len(put_resp.json["value"]) > len("S3cret!Value") * 2)

        get_resp = self.__do_get_one(kvp_id + "?decrypt=True")
        self.assertEqual(put_resp.json["name"], "secret_key2")
        self.assertEqual(put_resp.json["scope"], "st2kv.system")
        self.assertEqual(put_resp.json["encrypted"], True)
        self.assertEqual(put_resp.json["secret"], True)
        self.assertEqual(put_resp.json["value"], ENCRYPTED_KVP["value"])

        # Verify data integrity post decryption
        get_resp = self.__do_get_one(kvp_id + "?decrypt=True")
        self.assertFalse(get_resp.json["encrypted"])
        self.assertEqual(get_resp.json["value"], "S3cret!Value")
        self.__do_delete(self.__get_kvp_id(put_resp))

    def test_put_encrypted_value_integrity_check_failed(self):
        """Corrupted or truncated ciphertext is rejected with 400."""
        data = copy.deepcopy(ENCRYPTED_KVP)
        data["value"] = "corrupted"
        put_resp = self.__do_put("secret_key1", data, expect_errors=True)
        self.assertEqual(put_resp.status_code, 400)

        expected_error = (
            "Failed to verify the integrity of the provided value for key "
            '"secret_key1".'
        )
        self.assertIn(expected_error, put_resp.json["faultstring"])

        data = copy.deepcopy(ENCRYPTED_KVP)
        data["value"] = str(data["value"][:-2])
        put_resp = self.__do_put("secret_key1", data, expect_errors=True)
        self.assertEqual(put_resp.status_code, 400)

        expected_error = (
            "Failed to verify the integrity of the provided value for key "
            '"secret_key1".'
        )
        self.assertIn(expected_error, put_resp.json["faultstring"])

    def test_put_delete(self):
        """PUT followed by DELETE succeeds."""
        put_resp = self.__do_put("key1", KVP)
        self.assertEqual(put_resp.status_int, 200)
        self.__do_delete(self.__get_kvp_id(put_resp))

    def test_delete(self):
        """DELETE on an existing key returns 204."""
        put_resp = self.__do_put("key1", KVP)
        del_resp = self.__do_delete(self.__get_kvp_id(put_resp))
        self.assertEqual(del_resp.status_int, 204)

    def test_delete_fail(self):
        """DELETE on a missing key returns 404."""
        resp = self.__do_delete("inexistentkey", expect_errors=True)
        self.assertEqual(resp.status_int, 404)

    @staticmethod
    def __get_kvp_id(resp):
        # The key's "name" doubles as its API id.
        return resp.json["name"]

    def __do_get_one(self, kvp_id, expect_errors=False):
        return self.app.get("/v1/keys/%s" % kvp_id, expect_errors=expect_errors)

    def __do_put(self, kvp_id, kvp, expect_errors=False):
        return self.app.put_json(
            "/v1/keys/%s" % kvp_id, kvp, expect_errors=expect_errors
        )

    def __do_delete(self, kvp_id, expect_errors=False):
        return self.app.delete("/v1/keys/%s" % kvp_id, expect_errors=expect_errors)
| apache-2.0 |
miguelut/utmbu | mbu/api/scout.py | 1 | 1603 | from django.contrib.admin.views.decorators import staff_member_required
from django.http import JsonResponse
from django.contrib.auth.decorators import permission_required
from rest_framework.decorators import api_view
from mbu.models import Scout, ScoutCourseInstance, ScoutCourseInstanceSerializer, RegistrationStatus
__author__ = 'michael'
@permission_required('mbu.edit_scout_schedule', raise_exception=True)
@api_view(http_method_names=['GET', 'POST'])
def scout_enrollments(request, scout_id):
    """Read (GET) or replace (POST) the authenticated scout's enrollments."""
    current_scout = Scout.objects.get(user=request.user)
    # Guard: the scout id in the URL must belong to the authenticated user.
    assert current_scout == Scout.objects.get(pk=scout_id)

    if request.method == 'POST' and _reg_is_open():
        # Replace the enrollment set with the posted course instance ids.
        selected = [
            ScoutCourseInstance.objects.get(pk=item['id'])
            for item in request.data
        ]
        current_scout.enrollments = selected
        current_scout.save()
        return JsonResponse({'data': request.data})

    # GET (or POST while registration is closed): echo current enrollments.
    serialized = [
        ScoutCourseInstanceSerializer(instance).data
        for instance in current_scout.enrollments.all()
    ]
    return JsonResponse({'enrollments': serialized})
@staff_member_required
@api_view(http_method_names=['POST'])
def check_in_scouts(request, scout_id):
    """Mark the given scout as checked in (staff members only)."""
    attendee = Scout.objects.get(pk=scout_id)
    attendee.checked_in = True
    attendee.save()
    return JsonResponse({"scout": scout_id})
def _reg_is_open():
    """Return True when the singleton RegistrationStatus row reads 'OPEN'."""
    record = RegistrationStatus.objects.first()
    current = record.status if record else None
    return current == 'OPEN'
| mit |
ihsanudin/odoo | addons/purchase/report/__init__.py | 380 | 1070 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Ayrx/cryptography | tests/hazmat/primitives/test_dh.py | 5 | 2669 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography.hazmat.primitives.asymmetric import dh
def test_dh_parameternumbers():
    """DHParameterNumbers exposes p and g and rejects non-integer inputs."""
    numbers = dh.DHParameterNumbers(65537, 3)
    assert numbers.p == 65537
    assert numbers.g == 3

    # Every combination with a None argument must raise TypeError.
    for bad_p, bad_g in ((None, 3), (65537, None), (None, None)):
        with pytest.raises(TypeError):
            dh.DHParameterNumbers(bad_p, bad_g)
def test_dh_numbers():
    """Public/private DH number containers hold their parts and type-check."""
    params = dh.DHParameterNumbers(65537, 3)

    public = dh.DHPublicNumbers(1, params)
    assert public.parameter_numbers is params
    assert public.y == 1
    for y, p in ((1, None), (None, params)):
        with pytest.raises(TypeError):
            dh.DHPublicNumbers(y, p)

    private = dh.DHPrivateNumbers(1, public)
    assert private.public_numbers is public
    assert private.x == 1
    for x, pub in ((1, None), (None, public)):
        with pytest.raises(TypeError):
            dh.DHPrivateNumbers(x, pub)
def test_dh_parameter_numbers_equality():
    """Equality compares both p and g; foreign objects are never equal."""
    reference = dh.DHParameterNumbers(65537, 3)
    assert reference == dh.DHParameterNumbers(65537, 3)
    assert dh.DHParameterNumbers(6, 3) != reference
    assert dh.DHParameterNumbers(65537, 0) != reference
    assert dh.DHParameterNumbers(65537, 0) != object()
def test_dh_private_numbers_equality():
    """Private-number equality recurses through x, y, p and g."""
    params = dh.DHParameterNumbers(65537, 3)
    public = dh.DHPublicNumbers(1, params)
    private = dh.DHPrivateNumbers(2, public)

    assert private == dh.DHPrivateNumbers(2, public)

    different = [
        dh.DHPrivateNumbers(0, public),
        dh.DHPrivateNumbers(2, dh.DHPublicNumbers(0, params)),
        dh.DHPrivateNumbers(2, dh.DHPublicNumbers(1, dh.DHParameterNumbers(65537, 0))),
        object(),
    ]
    for other in different:
        assert private != other
def test_dh_public_numbers_equality():
    """Public-number equality recurses through y, p and g."""
    params = dh.DHParameterNumbers(65537, 3)
    public = dh.DHPublicNumbers(1, params)

    assert public == dh.DHPublicNumbers(1, params)

    different = [
        dh.DHPublicNumbers(0, params),
        dh.DHPublicNumbers(1, dh.DHParameterNumbers(65537, 0)),
        object(),
    ]
    for other in different:
        assert public != other
| bsd-3-clause |
Philippe12/external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/cros_browser_finder.py | 23 | 5002 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds CrOS browsers that can be controlled by telemetry."""
import logging
from telemetry.core import browser
from telemetry.core import possible_browser
from telemetry.core.backends.chrome import cros_browser_with_oobe
from telemetry.core.backends.chrome import cros_browser_backend
from telemetry.core.backends.chrome import cros_interface
from telemetry.core.platform import cros_platform_backend
# Browser types this finder can produce. The 'cros-*' types drive a remote
# device over ssh; the 'system*' types are used when telemetry itself runs
# on the CrOS device. '-guest' variants launch a guest session.
ALL_BROWSER_TYPES = [
    'cros-chrome',
    'cros-chrome-guest',
    'system',
    'system-guest',
]
class PossibleCrOSBrowser(possible_browser.PossibleBrowser):
  """A launchable chromeos browser instance."""

  def __init__(self, browser_type, finder_options, cri, is_guest):
    super(PossibleCrOSBrowser, self).__init__(browser_type, finder_options)
    assert browser_type in ALL_BROWSER_TYPES, \
        'Please add %s to ALL_BROWSER_TYPES' % browser_type
    self._cri = cri
    self._is_guest = is_guest

  def __repr__(self):
    return 'PossibleCrOSBrowser(browser_type=%s)' % self.browser_type

  def Create(self):
    """Build a Browser (or OOBE-capable Browser) backed by this device."""
    if self.finder_options.output_profile_path:
      raise Exception("Profile generation is not currently supported on Chrome"
                      " OS")

    browser_options = self.finder_options.browser_options
    backend = cros_browser_backend.CrOSBrowserBackend(
        self.browser_type, browser_options, self._cri, self._is_guest,
        extensions_to_load=self.finder_options.extensions_to_load)
    platform = cros_platform_backend.CrosPlatformBackend(self._cri)
    if browser_options.create_browser_with_oobe:
      return cros_browser_with_oobe.CrOSBrowserWithOOBE(backend, platform)
    return browser.Browser(backend, platform)

  def SupportsOptions(self, finder_options):
    # Guest sessions cannot load extensions.
    return not (self._is_guest and finder_options.extensions_to_load)

  def UpdateExecutableIfNeeded(self):
    pass
def SelectDefaultBrowser(possible_browsers):
  """Prefer the locally running 'system' browser when on a CrOS device."""
  if not cros_interface.IsRunningOnCrosDevice():
    return None
  for candidate in possible_browsers:
    if candidate.browser_type == 'system':
      return candidate
  return None
def CanFindAvailableBrowsers(finder_options):
  """CrOS browsers are findable locally, via --remote, or over ssh."""
  if cros_interface.IsRunningOnCrosDevice():
    return True
  return finder_options.cros_remote or cros_interface.HasSSH()
def FindAllAvailableBrowsers(finder_options):
  """Finds all available chromeos browsers, locally and remotely.

  Returns a list of PossibleCrOSBrowser instances: 'system'/'system-guest'
  when running on the device itself, 'cros-chrome'/'cros-chrome-guest' when
  a remote device is reachable over ssh, or [] when no device is available.
  """
  # Case 1: telemetry is running on the CrOS device itself.
  if cros_interface.IsRunningOnCrosDevice():
    return [PossibleCrOSBrowser('system', finder_options,
                                cros_interface.CrOSInterface(),
                                is_guest=False),
            PossibleCrOSBrowser('system-guest', finder_options,
                                cros_interface.CrOSInterface(),
                                is_guest=True)]

  # Case 2: remote device; requires --remote and a working ssh client.
  if finder_options.cros_remote is None:
    logging.debug('No --remote specified, will not probe for CrOS.')
    return []

  if not cros_interface.HasSSH():
    logging.debug('ssh not found. Cannot talk to CrOS devices.')
    return []
  cri = cros_interface.CrOSInterface(finder_options.cros_remote,
                                     finder_options.cros_ssh_identity)

  # Check ssh; passwordless root login must be configured on the device.
  try:
    cri.TryLogin()
  except cros_interface.LoginException as ex:
    if isinstance(ex, cros_interface.KeylessLoginRequiredException):
      logging.warn('Could not ssh into %s. Your device must be configured',
                   finder_options.cros_remote)
      logging.warn('to allow passwordless login as root.')
      logging.warn('For a test-build device, pass this to your script:')
      logging.warn('   --identity $(CHROMITE)/ssh_keys/testing_rsa')
      logging.warn('')
      logging.warn('For a developer-mode device, the steps are:')
      logging.warn(' - Ensure you have an id_rsa.pub (etc) on this computer')
      logging.warn(' - On the chromebook:')
      logging.warn('   -  Control-Alt-T; shell; sudo -s')
      logging.warn('   -  openssh-server start')
      logging.warn('   -  scp <this machine>:.ssh/id_rsa.pub /tmp/')
      logging.warn('   -  mkdir /root/.ssh')
      logging.warn('   -  chown go-rx /root/.ssh')
      logging.warn('   -  cat /tmp/id_rsa.pub >> /root/.ssh/authorized_keys')
      logging.warn('   -  chown 0600 /root/.ssh/authorized_keys')
      logging.warn('There, that was easy!')
      logging.warn('')
      logging.warn('P.S. Please, tell your manager how INANE this is.')

    from telemetry.core import browser_finder
    raise browser_finder.BrowserFinderException(str(ex))

  # A missing chrome binary is only a warning; the caller may still want the
  # PossibleCrOSBrowser objects (e.g. to deploy a build).
  # Fix: the original format string had no '%s' conversion, so the '%'
  # operator raised TypeError at runtime.
  if not cri.FileExistsOnDevice('/opt/google/chrome/chrome'):
    logging.warn('Could not find a chrome on %s', cri.hostname)

  return [PossibleCrOSBrowser('cros-chrome', finder_options, cri,
                              is_guest=False),
          PossibleCrOSBrowser('cros-chrome-guest', finder_options, cri,
                              is_guest=True)]
thbuerg/Heidelberg_2017 | DeeProtein/validate.py | 1 | 1040 | """ Invoke the Model in validation mode and perform a run over the valid set."""
import argparse
import json
from DeeProtein import DeeProtein
import helpers
def main():
    """Load the config JSON, build a DeeProtein model, and run validation."""
    with open(FLAGS.config_json) as config_fobj:
        config_dict = json.load(config_fobj)

    # Force-disable the GPU context when --gpu was not requested.
    if not FLAGS.gpu and config_dict["gpu"] == 'True':
        config_dict["gpu"] = "False"

    option_handler = helpers.OptionHandler(config_dict)
    DeeProtein(option_handler).validate()
if __name__ == '__main__':
    # Command-line interface: --config_json is mandatory, --gpu optional.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config_json',
        type=str,
        required=True,
        help='Path to the config.JSON')
    # NOTE(review): default is the bool True although type=str, so the
    # default and an explicitly passed value have different types — confirm.
    parser.add_argument(
        '--gpu',
        type=str,
        default=True,
        help='Whether to train in gpu context or not '
             '(optional). Defaults to True.')
    # parse_known_args() tolerates unknown flags, but any leftovers are
    # treated as a fatal usage error here.
    FLAGS, unparsed = parser.parse_known_args()
    if unparsed:
        print('Error, unrecognized flags:', unparsed)
        exit(-1)
    main()
| mit |
LeahDresd2/Leah | py/openage/convert/drs.py | 46 | 3699 | from . import util
from .util import dbg
from binascii import hexlify
from struct import Struct, unpack_from
# version of the drs file, hardcoded for now
file_version = 57

# The length of the header's copyright string differs between DRS versions.
if file_version == 57:
    copyright_size = 40
elif file_version == 59:
    copyright_size = 60

# little endian byte-order, used by every struct format string below
endianness = "< "
class DRS:
    """Reader for Genie-engine DRS archive files.

    A DRS archive is a flat container: a fixed header, a list of
    per-extension tables, and per-file info records pointing at the
    embedded file data. The Struct class attributes below mirror the
    on-disk C layouts documented in the adjacent comments.
    """

    # struct drs_header {
    #  char copyright[copyright-size];
    #  char version[4];
    #  char ftype[12];
    #  int table_count;
    #  int file_offset; //offset of first file
    # };
    drs_header = Struct(endianness + str(copyright_size) + "s 4s 12s i i")

    # struct table_info {
    #  char file_type;
    #  char file_extension[3]; //reversed extension
    #  int file_info_offset; //table offset
    #  int file_count; //number of files in table
    # };
    drs_table_info = Struct(endianness + "c 3s i i")

    # struct file_info {
    #  int file_id;
    #  int file_data_offset;
    #  int file_size;
    # };
    drs_file_info = Struct(endianness + "i i i")
def __init__(self, fname):
self.files = {} #(extension, id): (data offset, size)
self.fname = fname
fname = util.file_get_path(fname, write = False)
f = util.file_open(fname, binary = True, write = False)
#read header
buf = f.read(DRS.drs_header.size)
self.header = DRS.drs_header.unpack(buf)
dbg("DRS header [%s]" % (fname), 1, push = "drs")
dbg("copyright: %s" % util.zstr(self.header[0]))
dbg("version: %s" % util.zstr(self.header[1]))
dbg("ftype: %s" % util.zstr(self.header[2]))
dbg("table count: %d" % (self.header[3]))
dbg("file offset: %d" % (self.header[4]))
dbg("")
#read table info
table_count = self.header[3]
table_header_buf = f.read(table_count * DRS.drs_table_info.size)
for i in range(table_count):
table_header = DRS.drs_table_info.unpack_from(table_header_buf, i * DRS.drs_table_info.size)
file_type, file_extension, file_info_offset, file_count = table_header
#flip the extension... it's stored that way...
file_extension = file_extension.decode('latin-1').lower()[::-1]
dbg("Table header [%d]" % i, 2, push = "table")
dbg("file type: 0x%s" % hexlify(file_type).decode('utf-8'))
dbg("file extension: %s" % (file_extension))
dbg("file_info_offset: %#08x" % (file_info_offset))
dbg("file_count: %d" % file_count)
dbg("")
f.seek(file_info_offset)
file_info_buf = f.read(file_count * DRS.drs_file_info.size)
for j in range(file_count):
file_header = DRS.drs_file_info.unpack_from(file_info_buf, j * DRS.drs_file_info.size)
file_id, file_data_offset, file_size = file_header
dbg("File info header [%d]" % j, 3, push = "fileinfo")
dbg("file id: %d" % (file_id))
dbg("data offset: %d" % (file_data_offset))
dbg("file size: %d" % (file_size))
dbg("")
self.files[(file_extension, file_id)] = file_data_offset, file_size
dbg(pop = "fileinfo")
dbg(pop = "table")
self.f = f
dbg(pop = "drs")
def get_file_data(self, file_extension, file_id):
file_data_offset, file_size = self.files[(file_extension, file_id)]
self.f.seek(file_data_offset)
return self.f.read(file_size)
def __repr__(self):
return "DRS file (%d tables, %d files)" % (self.header[3], len(self.files))
| gpl-3.0 |
Jorge-Rodriguez/ansible | test/units/modules/storage/netapp/test_netapp_e_ldap.py | 56 | 17659 | # (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import shutil
import tempfile
from ansible.modules.storage.netapp.netapp_e_ldap import Ldap
from units.modules.utils import ModuleTestCase, set_module_args, AnsibleFailJson, AnsibleExitJson
__metaclass__ = type
from units.compat import mock
class LdapTest(ModuleTestCase):
    """Unit tests for the netapp_e_ldap module's Ldap class.

    All REST requests to the Web Services API are mocked out (see REQ_FUNC),
    so these tests only exercise argument handling, request construction and
    result interpretation.
    """

    # Minimal set of module arguments every test starts from.
    REQUIRED_PARAMS = {
        'api_username': 'admin',
        'api_password': 'password',
        'api_url': 'http://localhost',
        'ssid': '1',
        'state': 'absent',
    }

    # Dotted path of the request function that each test patches.
    REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_ldap.request'

    def setUp(self):
        """Create a scratch directory for the module's debug log."""
        super(LdapTest, self).setUp()
        self.temp_dir = tempfile.mkdtemp('ansible-test_netapp_e_ldap-')
        self.REQUIRED_PARAMS['log_path'] = os.path.join(self.temp_dir, 'debug.log')

    def tearDown(self):
        """Remove the scratch directory."""
        super(LdapTest, self).tearDown()
        shutil.rmtree(self.temp_dir)

    def _make_ldap_instance(self):
        """Build an Ldap instance with default args and a stubbed base path."""
        self._set_args()
        ldap = Ldap()
        ldap.base_path = '/'
        return ldap

    def _set_args(self, **kwargs):
        """Install REQUIRED_PARAMS (plus overrides) as the module arguments."""
        module_args = self.REQUIRED_PARAMS.copy()
        module_args.update(kwargs)
        set_module_args(module_args)

    def test_init_defaults(self):
        """Validate a basic run with required arguments set."""
        self._set_args(log_path=None,
                       state='present',
                       username='myBindAcct',
                       password='myBindPass',
                       server='ldap://example.com:384',
                       search_base='OU=Users,DC=example,DC=com',
                       role_mappings={'.*': ['storage.monitor']},
                       )
        ldap = Ldap()

    def test_init(self):
        """Validate a basic run with required arguments set."""
        self._set_args(log_path=None)
        ldap = Ldap()

    def test_is_embedded(self):
        """Ensure we can properly detect the type of Web Services instance we're utilizing."""
        self._set_args()
        result = dict(runningAsProxy=False)
        with mock.patch(self.REQ_FUNC, return_value=(200, result)):
            ldap = Ldap()
            embedded = ldap.is_embedded()
            self.assertTrue(embedded)
        result = dict(runningAsProxy=True)
        with mock.patch(self.REQ_FUNC, return_value=(200, result)):
            ldap = Ldap()
            embedded = ldap.is_embedded()
            self.assertFalse(embedded)

    def test_is_embedded_fail(self):
        """Ensure we fail gracefully when fetching the About data."""
        self._set_args()
        with self.assertRaises(AnsibleFailJson):
            with mock.patch(self.REQ_FUNC, side_effect=Exception):
                ldap = Ldap()
                ldap.is_embedded()

    def test_get_full_configuration(self):
        """A 200 response should be returned verbatim."""
        self._set_args()
        resp = dict(result=None)
        with mock.patch(self.REQ_FUNC, return_value=(200, resp)):
            ldap = self._make_ldap_instance()
            result = ldap.get_full_configuration()
            self.assertEqual(resp, result)

    def test_get_full_configuration_failure(self):
        """A request exception should surface as a module failure."""
        self._set_args()
        resp = dict(result=None)
        with self.assertRaises(AnsibleFailJson):
            with mock.patch(self.REQ_FUNC, side_effect=Exception):
                ldap = self._make_ldap_instance()
                ldap.get_full_configuration()

    def test_get_configuration(self):
        """200 returns the body; 404 (no such domain) returns None."""
        self._set_args()
        resp = dict(result=None)
        with mock.patch(self.REQ_FUNC, return_value=(200, resp)):
            ldap = self._make_ldap_instance()
            result = ldap.get_configuration('')
            self.assertEqual(resp, result)
        with mock.patch(self.REQ_FUNC, return_value=(404, resp)):
            ldap = self._make_ldap_instance()
            result = ldap.get_configuration('')
            self.assertIsNone(result)

    def test_clear_configuration(self):
        """Clearing all domains is a no-op when none exist, honors check_mode."""
        self._set_args()
        # No changes are required if the domains are empty
        config = dict(ldapDomains=[])
        ldap = self._make_ldap_instance()
        with mock.patch.object(ldap, 'get_full_configuration', return_value=config):
            with mock.patch(self.REQ_FUNC, return_value=(204, None)):
                msg, result = ldap.clear_configuration()
                self.assertFalse(result)
        config = dict(ldapDomains=['abc'])
        # When domains exist, we need to clear
        ldap = self._make_ldap_instance()
        with mock.patch.object(ldap, 'get_full_configuration', return_value=config):
            with mock.patch(self.REQ_FUNC, return_value=(204, None)) as req:
                msg, result = ldap.clear_configuration()
                self.assertTrue(result)
                self.assertTrue(req.called)
                # Valid check_mode makes no changes
                req.reset_mock()
                ldap.check_mode = True
                msg, result = ldap.clear_configuration()
                self.assertTrue(result)
                self.assertFalse(req.called)

    def test_clear_single_configuration(self):
        """Clearing one domain issues a request only when it exists."""
        self._set_args()
        # No changes are required if the domains are empty
        config = 'abc'
        ldap = self._make_ldap_instance()
        with mock.patch.object(ldap, 'get_configuration', return_value=config):
            with mock.patch(self.REQ_FUNC, return_value=(204, None)) as req:
                msg, result = ldap.clear_single_configuration()
                self.assertTrue(result)
                # Valid check_mode makes no changes
                req.reset_mock()
                ldap.check_mode = True
                msg, result = ldap.clear_single_configuration()
                self.assertTrue(result)
                self.assertFalse(req.called)
        # When domains exist, we need to clear
        ldap = self._make_ldap_instance()
        with mock.patch.object(ldap, 'get_configuration', return_value=None):
            with mock.patch(self.REQ_FUNC, return_value=(204, None)) as req:
                msg, result = ldap.clear_single_configuration()
                self.assertFalse(result)
                self.assertFalse(req.called)

    def test_update_configuration(self):
        """An out-of-date configuration triggers an update, honors check_mode."""
        self._set_args()
        config = dict(id='abc')
        body = dict(id='xyz')
        ldap = self._make_ldap_instance()
        with mock.patch.object(ldap, 'make_configuration', return_value=body):
            with mock.patch.object(ldap, 'get_configuration', return_value=config):
                with mock.patch(self.REQ_FUNC, return_value=(200, None)) as req:
                    msg, result = ldap.update_configuration()
                    self.assertTrue(result)
                    # Valid check_mode makes no changes
                    req.reset_mock()
                    ldap.check_mode = True
                    msg, result = ldap.update_configuration()
                    self.assertTrue(result)
                    self.assertFalse(req.called)

    def test_update(self):
        """state=present routes through update_configuration."""
        self._set_args()
        ldap = self._make_ldap_instance()
        with self.assertRaises(AnsibleExitJson):
            with mock.patch.object(ldap, 'get_base_path', return_value='/'):
                with mock.patch.object(ldap, 'update_configuration', return_value=('', True)) as update:
                    ldap.ldap = True
                    msg, result = ldap.update()
                    self.assertTrue(result)
                    self.assertTrue(update.called)

    def test_update_disable(self):
        """state=absent with an identifier clears that single domain."""
        self._set_args()
        ldap = self._make_ldap_instance()
        with self.assertRaises(AnsibleExitJson):
            with mock.patch.object(ldap, 'get_base_path', return_value='/'):
                with mock.patch.object(ldap, 'clear_single_configuration', return_value=('', True)) as update:
                    ldap.ldap = False
                    ldap.identifier = 'abc'
                    msg, result = ldap.update()
                    self.assertTrue(result)
                    self.assertTrue(update.called)

    def test_update_disable_all(self):
        """state=absent without an identifier clears every domain."""
        self._set_args()
        ldap = self._make_ldap_instance()
        with self.assertRaises(AnsibleExitJson):
            with mock.patch.object(ldap, 'get_base_path', return_value='/'):
                with mock.patch.object(ldap, 'clear_configuration', return_value=('', True)) as update:
                    ldap.ldap = False
                    msg, result = ldap.update()
                    self.assertTrue(result)
                    self.assertTrue(update.called)

    def test_get_configuration_failure(self):
        """Request exceptions and unexpected status codes both fail the module."""
        self._set_args()
        with self.assertRaises(AnsibleFailJson):
            with mock.patch(self.REQ_FUNC, side_effect=Exception):
                ldap = self._make_ldap_instance()
                ldap.get_configuration('')
        # We expect this for any code not in [200, 404]
        with self.assertRaises(AnsibleFailJson):
            with mock.patch(self.REQ_FUNC, return_value=(401, '')):
                ldap = self._make_ldap_instance()
                result = ldap.get_configuration('')
                self.assertIsNone(result)

    def test_make_configuration(self):
        """Validate the make_configuration method that translates Ansible params to the input body"""
        data = dict(log_path=None,
                    state='present',
                    username='myBindAcct',
                    password='myBindPass',
                    server='ldap://example.com:384',
                    search_base='OU=Users,DC=example,DC=com',
                    role_mappings={'.*': ['storage.monitor']},
                    )
        self._set_args(**data)
        ldap = Ldap()
        expected = dict(id='default',
                        bindLookupUser=dict(user=data['username'],
                                            password=data['password'], ),
                        groupAttributes=['memberOf'],
                        ldapUrl=data['server'],
                        names=['example.com'],
                        searchBase=data['search_base'],
                        roleMapCollection=[{"groupRegex": ".*",
                                            "ignoreCase": True,
                                            "name": "storage.monitor"
                                            }
                                           ],
                        userAttribute='sAMAccountName'
                        )
        actual = ldap.make_configuration()
        self.maxDiff = None
        self.assertEqual(expected, actual)
| gpl-3.0 |
cliqz/socorro | socorro/unittest/external/postgresql/test_graphics_devices.py | 11 | 7594 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from nose.plugins.attrib import attr
from nose.tools import eq_, assert_raises
from socorro.external import MissingArgumentError
from socorro.external.postgresql.graphics_devices import GraphicsDevices
from .unittestbase import PostgreSQLTestCase
#==============================================================================
@attr(integration='postgres')  # for nosetests
class IntegrationTestGraphicsDevices(PostgreSQLTestCase):
    """Integration tests for the GraphicsDevices Postgres service.

    Runs against a real PostgreSQL instance provided by PostgreSQLTestCase;
    each test leaves the graphics_device table empty via tearDown.
    """

    def tearDown(self):
        """ Cleanup the database, delete tables and functions """
        cursor = self.connection.cursor()
        cursor.execute("""
        TRUNCATE graphics_device
        CASCADE
        """)
        self.connection.commit()
        super(IntegrationTestGraphicsDevices, self).tearDown()

    def _insert(self, vendor_hex, adapter_hex,
                vendor_name='', adapter_name=''):
        """Insert one graphics_device row directly (test fixture helper)."""
        assert vendor_hex and adapter_hex
        assert vendor_name or adapter_name
        sql = """
        INSERT INTO graphics_device (
            vendor_hex,
            adapter_hex,
            vendor_name,
            adapter_name
        ) VALUES (%s, %s, %s, %s)
        """
        cursor = self.connection.cursor()
        params = (vendor_hex, adapter_hex, vendor_name, adapter_name)
        cursor.execute(sql, params)
        self.connection.commit()

    def test_get(self):
        """returning rows by matching vendor_hex and adapter_hex"""
        api = GraphicsDevices(config=self.config)
        params = {
            'vendor_hex': '0x1002',
            'adapter_hex': '0x0166',
        }
        res = api.get(**params)
        # empty table: expect no hits
        res_expected = {
            'hits': [],
            'total': 0
        }
        eq_(res, res_expected)
        # insert something similar
        self._insert(
            '0x1002', '0x0166',
            vendor_name='Logitech Inc.',
            adapter_name='Unknown Webcam Pro 9000'
        )
        self._insert(
            '0x1002', '0xc064',
            vendor_name='Logitech Inc.',
            adapter_name='k251d DELL 6-Button mouse'
        )
        self._insert(
            '0x1222', '0x0166',
            vendor_name='Chicony Electronics Co.',
            adapter_name='Unknown Webcam Pro 9000'
        )
        # now we should get something: only the exact (vendor, adapter) pair
        res = api.get(**params)
        res_expected = {
            'hits': [{
                'vendor_hex': '0x1002',
                'adapter_hex': '0x0166',
                'vendor_name': 'Logitech Inc.',
                'adapter_name': 'Unknown Webcam Pro 9000'
            }],
            'total': 1
        }
        eq_(res, res_expected)

    def test_get_missing_arguments(self):
        """on .get() the adapter_hex and the vendor_hex is mandatory"""
        api = GraphicsDevices(config=self.config)
        assert_raises(
            MissingArgumentError,
            api.get
        )
        assert_raises(
            MissingArgumentError,
            api.get,
            adapter_hex='something'
        )
        assert_raises(
            MissingArgumentError,
            api.get,
            vendor_hex='something'
        )
        assert_raises(
            MissingArgumentError,
            api.get,
            vendor_hex='something',
            adapter_hex=''  # empty!
        )
        assert_raises(
            MissingArgumentError,
            api.get,
            vendor_hex='',  # empty!
            adapter_hex='something'
        )

    def test_post_insert(self):
        """on .post() new rows are inserted from the JSON payload"""
        payload = [
            {
                'vendor_hex': '0x1002',
                'adapter_hex': '0x0166',
                'vendor_name': 'Logitech Inc.',
                'adapter_name': 'Unknown Webcam Pro 9000'
            },
        ]
        api = GraphicsDevices(config=self.config)
        res = api.post(data=json.dumps(payload))
        eq_(res, True)
        cursor = self.connection.cursor()
        cursor.execute("""
        select vendor_hex, adapter_hex, vendor_name, adapter_name
        from graphics_device
        order by vendor_hex, adapter_hex
        """)
        expect = []
        keys = 'vendor_hex', 'adapter_hex', 'vendor_name', 'adapter_name'
        for row in cursor.fetchall():
            expect.append(dict(zip(keys, row)))
        eq_(expect, payload)

    def test_post_update(self):
        """on .post() an existing (vendor, adapter) row is updated in place"""
        self._insert(
            '0x1002', '0x0166',
            vendor_name='Logitech Inc.',
            adapter_name='Unknown Webcam Pro 9000'
        )
        payload = [
            {
                'vendor_hex': '0x1002',
                'adapter_hex': '0x0166',
                'vendor_name': 'Logitech Inc.',
                'adapter_name': 'Known Webcam Pro 10000'  # the change
            }
        ]
        api = GraphicsDevices(config=self.config)
        res = api.post(data=json.dumps(payload))
        eq_(res, True)
        cursor = self.connection.cursor()
        cursor.execute("""
        select vendor_hex, adapter_hex, vendor_name, adapter_name
        from graphics_device
        order by vendor_hex, adapter_hex
        """)
        expect = []
        keys = 'vendor_hex', 'adapter_hex', 'vendor_name', 'adapter_name'
        for row in cursor.fetchall():
            expect.append(dict(zip(keys, row)))
        eq_(expect, payload)

    def test_post_upsert(self):
        """on .post() every item you send in the payload causes an upsert"""
        # first, insert something that we're going have to do nothing with
        # or do an "upsert"
        self._insert(
            '0x1002', '0x0166',
            vendor_name='Logitech Inc.',
            adapter_name='Unknown Webcam Pro 9000'
        )
        self._insert(
            '0x1222', '0x0166',
            vendor_name='Chicony Electronics Co.',
            adapter_name='Unknown Webcam Pro 9000'
        )
        # note, this is conveniently sorted by
        # vendor_hex followed by adapter_hex
        payload = [
            {
                'vendor_hex': '0x1002',
                'adapter_hex': '0x0166',
                'vendor_name': 'Logitech Inc.',
                'adapter_name': 'Unknown Webcam Pro 9000'
            },
            {
                'vendor_hex': '0x1222',
                'adapter_hex': '0x0166',
                'vendor_name': 'Chicony Electronics Co.',
                'adapter_name': 'Something else'
            },
            {
                'vendor_hex': '0x1333',
                'adapter_hex': '0x0177',
                'vendor_name': 'IBM',
                'adapter_name': ''
            },
        ]
        api = GraphicsDevices(config=self.config)
        res = api.post(data=json.dumps(payload))
        eq_(res, True)
        cursor = self.connection.cursor()
        cursor.execute("""
        select vendor_hex, adapter_hex, vendor_name, adapter_name
        from graphics_device
        order by vendor_hex, adapter_hex
        """)
        expect = []
        keys = 'vendor_hex', 'adapter_hex', 'vendor_name', 'adapter_name'
        for row in cursor.fetchall():
            expect.append(dict(zip(keys, row)))
        eq_(expect, payload)

    def test_post_fail(self):
        """on .post() a payload missing the required keys returns False"""
        payload = [
            {
                'rubbish': 'Crap'
            },
        ]
        api = GraphicsDevices(config=self.config)
        res = api.post(data=json.dumps(payload))
        eq_(res, False)
| mpl-2.0 |
repotvsupertuga/tvsupertuga.repository | instal/script.module.liveresolver/lib/liveresolver/resolvers/filmon.py | 10 | 1483 | # -*- coding: utf-8 -*-
import re,urlparse,json,xbmcgui
from liveresolver.modules import client
from liveresolver.modules.log_utils import log
def resolve(url):
    """Resolve a filmon.com channel or VOD page URL to a playable m3u8 URL.

    Picks, among the channel's m3u8 streams, the one with the largest
    "watch-timeout".  Returns None on any failure (best effort, same
    contract as before -- callers treat a None result as "no stream").
    """
    try:
        if '/vod/' in url:
            # VOD pages: extract the numeric id and query the info endpoint.
            url = re.compile('/(\d+)').findall(url)[-1]
            url = 'http://www.filmon.com/vod/info/%s' % url
        elif '/tv/' in url:
            url = url.replace('/tv/', '/channel/')
        elif not '/channel/' in url:
            raise Exception()

        headers = {'X-Requested-With': 'XMLHttpRequest'}
        log('Filmon: Getting cookie...')
        cookie = client.request(url, output='cookie')
        log('Filmon: Getting channel id...')
        cid = client.request(url, headers=headers)
        cid = json.loads(cid)['id']
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
        url = 'http://www.filmon.com/ajax/getChannelInfo?channel_id=%s' % cid
        log('Filmon: Getting streams...')
        result = client.request(url, cookie=cookie, headers=headers)
        result = json.loads(result)
        # The response nests streams differently depending on endpoint.
        try:
            result = result['streams']
        except:
            result = result['data']['streams']
        result = [i[1] for i in result.items()]
        log('Filmon: Selecting stream url...')
        # Prefer the m3u8 stream with the longest watch-timeout.
        url = [(i['url'], int(i['watch-timeout'])) for i in result]
        url = [i for i in url if '.m3u8' in i[0]]
        url.sort()
        url = url[-1][0]
        return url
    # Was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception, behavior otherwise identical.
    except Exception:
        return
barbuza/django | django/contrib/gis/db/backends/oracle/operations.py | 307 | 9866 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.db.backends.oracle.base import Database
from django.db.backends.oracle.operations import DatabaseOperations
from django.utils import six
# Tolerance (in SRID units) passed to SDO_GEOM functions; Oracle requires an
# explicit tolerance for most geometry computations.
DEFAULT_TOLERANCE = '0.05'
class SDOOperator(SpatialOperator):
    """Generic Oracle spatial operator: SDO_xxx(lhs, rhs) = 'TRUE'."""
    sql_template = "%(func)s(%(lhs)s, %(rhs)s) = 'TRUE'"
class SDODistance(SpatialOperator):
    """Distance comparison via SDO_GEOM.SDO_DISTANCE with the default tolerance."""
    sql_template = "SDO_GEOM.SDO_DISTANCE(%%(lhs)s, %%(rhs)s, %s) %%(op)s %%%%s" % DEFAULT_TOLERANCE
class SDODWithin(SpatialOperator):
    """dwithin lookup using SDO_WITHIN_DISTANCE (parameter is 'distance=...')."""
    sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
class SDODisjoint(SpatialOperator):
    """Disjoint test expressed through SDO_GEOM.RELATE with the 'DISJOINT' mask."""
    sql_template = "SDO_GEOM.RELATE(%%(lhs)s, 'DISJOINT', %%(rhs)s, %s) = 'DISJOINT'" % DEFAULT_TOLERANCE
class SDORelate(SpatialOperator):
    """relate lookup; Oracle takes a 'mask=...' argument, e.g. 'mask=inside+touch'."""
    sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"

    def check_relate_argument(self, arg):
        """Validate the user-supplied relate mask string, raising ValueError if invalid."""
        masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
        mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
        if not isinstance(arg, six.string_types) or not mask_regex.match(arg):
            raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)

    def as_sql(self, connection, lookup, template_params, sql_params):
        # The mask is passed as the last SQL param; move it into the template.
        template_params['mask'] = sql_params.pop()
        return super(SDORelate, self).as_sql(connection, lookup, template_params, sql_params)
class OracleOperations(BaseSpatialOperations, DatabaseOperations):
    """GeoDjango database operations for the Oracle Spatial backend."""

    name = 'oracle'
    oracle = True
    disallowed_aggregates = (aggregates.Collect, aggregates.Extent3D, aggregates.MakeLine)

    Adapter = OracleSpatialAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    # Names of the Oracle Spatial SQL functions used for each geo operation.
    area = 'SDO_GEOM.SDO_AREA'
    gml = 'SDO_UTIL.TO_GMLGEOMETRY'
    centroid = 'SDO_GEOM.SDO_CENTROID'
    difference = 'SDO_GEOM.SDO_DIFFERENCE'
    distance = 'SDO_GEOM.SDO_DISTANCE'
    extent = 'SDO_AGGR_MBR'
    intersection = 'SDO_GEOM.SDO_INTERSECTION'
    length = 'SDO_GEOM.SDO_LENGTH'
    num_geom = 'SDO_UTIL.GETNUMELEM'
    num_points = 'SDO_UTIL.GETNUMVERTICES'
    perimeter = length
    point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
    reverse = 'SDO_UTIL.REVERSE_LINESTRING'
    sym_difference = 'SDO_GEOM.SDO_XOR'
    transform = 'SDO_CS.TRANSFORM'
    union = 'SDO_GEOM.SDO_UNION'
    unionagg = 'SDO_AGGR_UNION'

    # We want to get SDO Geometries as WKT because it is much easier to
    # instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
    # However, this adversely affects performance (i.e., Java is called
    # to convert to WKT on every query).  If someone wishes to write a
    # SDO_GEOMETRY(...) parser in Python, let me know =)
    select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'

    gis_operators = {
        'contains': SDOOperator(func='SDO_CONTAINS'),
        'coveredby': SDOOperator(func='SDO_COVEREDBY'),
        'covers': SDOOperator(func='SDO_COVERS'),
        'disjoint': SDODisjoint(),
        'intersects': SDOOperator(func='SDO_OVERLAPBDYINTERSECT'),  # TODO: Is this really the same as ST_Intersects()?
        'equals': SDOOperator(func='SDO_EQUAL'),
        'exact': SDOOperator(func='SDO_EQUAL'),
        'overlaps': SDOOperator(func='SDO_OVERLAPS'),
        'same_as': SDOOperator(func='SDO_EQUAL'),
        'relate': SDORelate(),  # Oracle uses a different syntax, e.g., 'mask=inside+touch'
        'touches': SDOOperator(func='SDO_TOUCH'),
        'within': SDOOperator(func='SDO_INSIDE'),
        'distance_gt': SDODistance(op='>'),
        'distance_gte': SDODistance(op='>='),
        'distance_lt': SDODistance(op='<'),
        'distance_lte': SDODistance(op='<='),
        'dwithin': SDODWithin(),
    }

    truncate_params = {'relate': None}

    def geo_quote_name(self, name):
        """Quote a geometry name; Oracle requires the quoted form uppercased."""
        return super(OracleOperations, self).geo_quote_name(name).upper()

    def get_db_converters(self, expression):
        """Append geometry/text converters for geometry-typed expressions."""
        converters = super(OracleOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        geometry_fields = (
            'PointField', 'GeometryField', 'LineStringField',
            'PolygonField', 'MultiPointField', 'MultiLineStringField',
            'MultiPolygonField', 'GeometryCollectionField', 'GeomField',
            'GMLField',
        )
        if internal_type in geometry_fields:
            # Oracle returns geometries as CLOBs; read them as text first.
            converters.append(self.convert_textfield_value)
        if hasattr(expression.output_field, 'geom_type'):
            converters.append(self.convert_geometry)
        return converters

    def convert_geometry(self, value, expression, connection, context):
        """Convert a DB value into a Geometry, applying any transformed SRID."""
        if value:
            value = Geometry(value)
            if 'transformed_srid' in context:
                value.srid = context['transformed_srid']
        return value

    def convert_extent(self, clob, srid):
        """Convert an SDO_AGGR_MBR CLOB into an (xmin, ymin, xmax, ymax) tuple."""
        if clob:
            # Generally, Oracle returns a polygon for the extent -- however,
            # it can return a single point if there's only one Point in the
            # table.
            ext_geom = Geometry(clob.read(), srid)
            gtype = str(ext_geom.geom_type)
            if gtype == 'Polygon':
                # Construct the 4-tuple from the coordinates in the polygon.
                shell = ext_geom.shell
                ll, ur = shell[0][:2], shell[2][:2]
            elif gtype == 'Point':
                ll = ext_geom.coords[:2]
                ur = ll
            else:
                raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
            xmin, ymin = ll
            xmax, ymax = ur
            return (xmin, ymin, xmax, ymax)
        else:
            return None

    def convert_geom(self, value, geo_field):
        """Convert a raw DB value (possibly a LOB) into a Geometry with the field's SRID."""
        if value:
            if isinstance(value, Database.LOB):
                value = value.read()
            return Geometry(value, geo_field.srid)
        else:
            return None

    def geo_db_type(self, f):
        """
        Returns the geometry database type for Oracle. Unlike other spatial
        backends, no stored procedure is necessary and it's the same for all
        geometry types.
        """
        return 'MDSYS.SDO_GEOMETRY'

    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters given the value and the lookup type.
        On Oracle, geometry columns with a geodetic coordinate system behave
        implicitly like a geography column, and thus meters will be used as
        the distance parameter on them.
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                dist_param = value.m
            else:
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            dist_param = value

        # dwithin lookups on Oracle require a special string parameter
        # that starts with "distance=".
        if lookup_type == 'dwithin':
            dist_param = 'distance=%s' % dist_param

        return [dist_param]

    def get_geom_placeholder(self, f, value, compiler):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        SDO_CS.TRANSFORM() function call.
        """
        if value is None:
            return 'NULL'

        def transform_value(val, srid):
            # True when the value's SRID differs from the field's and a
            # transform is therefore required.
            return val.srid != srid

        if hasattr(value, 'as_sql'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            sql, _ = compiler.compile(value)
            return placeholder % sql
        else:
            if transform_value(value, f.srid):
                return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
            else:
                return 'SDO_GEOMETRY(%%s, %s)' % f.srid

    def spatial_aggregate_name(self, agg_name):
        """
        Returns the spatial aggregate SQL name.
        """
        agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
        return getattr(self, agg_name)

    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
        return OracleGeometryColumns

    def spatial_ref_sys(self):
        from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
        return OracleSpatialRefSys

    def modify_insert_params(self, placeholders, params):
        """Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
        backend due to #10888
        """
        # This code doesn't work for bulk insert cases.
        assert len(placeholders) == 1
        return [[param for pholder, param
                 in six.moves.zip(placeholders[0], params[0]) if pholder != 'NULL'], ]
| bsd-3-clause |
jeremiahyan/odoo | addons/sale_timesheet/tests/test_project_overview.py | 1 | 8777 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale_timesheet.tests.test_reporting import TestReporting
from odoo.tools import float_compare
from odoo.tests import tagged
@tagged('-at_install', 'post_install')
class TestSaleProject(TestReporting):
    """Checks the figures shown in the project overview (timesheet plan)
    dashboard against manually recomputed hours, rates and profitability."""

    def test_project_overview_by_project(self):
        """Log billable, non-billable and fixed-price timesheets plus an
        expense and an extra revenue line, then verify every dashboard
        aggregate produced by _qweb_prepare_qcontext."""
        rounding = self.env.company.currency_id.rounding
        so_line_deliver_global_project = self.env['sale.order.line'].create({
            'name': self.product_delivery_timesheet2.name,
            'product_id': self.product_delivery_timesheet2.id,
            'product_uom_qty': 50,
            'product_uom': self.product_delivery_timesheet2.uom_id.id,
            'price_unit': self.product_delivery_timesheet2.list_price,
            'order_id': self.sale_order_2.id,
        })
        self.sale_order_2.action_confirm()
        project_so = self.so_line_order_project.project_id
        # log timesheet for billable time
        timesheet1 = self._log_timesheet_manager(project_so, 10, so_line_deliver_global_project.task_id)
        task_so = self.so_line_order_project.task_id
        # logged some timesheets: on project only, then on tasks with different employees
        timesheet2 = self._log_timesheet_user(project_so, 2)
        timesheet3 = self._log_timesheet_user(project_so, 3, task_so)
        timesheet4 = self._log_timesheet_manager(project_so, 1, task_so)
        # create a task which is not linked to sales order and fill non-billable timesheet
        task = self.env['project.task'].create({
            'name': 'Task',
            'project_id': project_so.id,
            'allow_billable': False,
            'sale_line_id': False
        })
        timesheet5 = self._log_timesheet_user(project_so, 5, task)
        # invoice the Sales Order SO2
        context = {
            "active_model": 'sale.order',
            "active_ids": [self.sale_order_2.id],
            "active_id": self.sale_order_2.id,
            'open_invoices': True,
        }
        payment = self.env['sale.advance.payment.inv'].create({
            'advance_payment_method': 'delivered',
        })
        action_invoice = payment.with_context(context).create_invoices()
        invoice = self.env['account.move'].browse(action_invoice['res_id'])
        invoice.action_post()
        # simulate the auto creation of the SO line for expense, like we confirm a vendor bill.
        so_line_expense = self.env['sale.order.line'].create({
            'name': self.product_expense.name,
            'product_id': self.product_expense.id,
            'product_uom_qty': 0.0,
            'product_uom': self.product_expense.uom_id.id,
            'price_unit': self.product_expense.list_price,  # reinvoice at sales price
            'order_id': self.sale_order_2.id,
            'is_expense': True,
        })
        expense = self.env['account.analytic.line'].create({
            'name': 'expense on project_so',
            'account_id': project_so.analytic_account_id.id,
            'so_line': so_line_expense.id,
            'employee_id': self.employee_user.id,
            'unit_amount': 4,
            # negative amount: an expense is a cost on the analytic account
            'amount': 4 * self.product_expense.list_price * -1,
            'product_id': self.product_expense.id,
            'product_uom_id': self.product_expense.uom_id.id,
        })
        other_revenues = self.env['account.analytic.line'].create({
            # typo fixed: was 'pther revenues on project_so'
            'name': 'other revenues on project_so',
            'account_id': project_so.analytic_account_id.id,
            'employee_id': self.employee_user.id,
            'unit_amount': 1,
            'amount': self.product_expense.list_price,
            'product_id': self.product_expense.id,
            'product_uom_id': self.product_expense.uom_id.id,
        })
        view_id = self.env.ref('sale_timesheet.project_timesheet_action_client_timesheet_plan').id
        vals = self.env['project.project']._qweb_prepare_qcontext(view_id, [['id', '=', project_so.id]])
        # Recompute the expected dashboard aggregates by hand.
        dashboard_value = timesheet2.unit_amount + timesheet3.unit_amount + timesheet4.unit_amount + timesheet5.unit_amount + timesheet1.unit_amount
        project_so_timesheet_sold_unit = timesheet3.unit_amount + timesheet4.unit_amount
        project_rate_non_billable = timesheet5.unit_amount / dashboard_value * 100
        project_rate_non_billable_project = timesheet2.unit_amount / dashboard_value * 100
        project_rate_billable_time = timesheet1.unit_amount / dashboard_value * 100
        project_rate_billable_fixed = project_so_timesheet_sold_unit / dashboard_value * 100
        project_rate_total = project_rate_non_billable + project_rate_non_billable_project + project_rate_billable_time + project_rate_billable_fixed
        project_invoiced = self.so_line_order_project.price_unit * self.so_line_order_project.product_uom_qty * timesheet1.unit_amount
        project_timesheet_cost = timesheet2.amount + timesheet3.amount + timesheet4.amount + timesheet5.amount + timesheet1.amount
        self.assertEqual(float_compare(vals['dashboard']['time']['non_billable'], timesheet5.unit_amount, precision_rounding=rounding), 0, "The hours non-billable should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['time']['non_billable_project'], timesheet2.unit_amount, precision_rounding=rounding), 0, "The hours non-billable-project should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['time']['billable_time'], timesheet1.unit_amount, precision_rounding=rounding), 0, "The hours billable-time should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['time']['billable_fixed'], project_so_timesheet_sold_unit, precision_rounding=rounding), 0, "The hours billable-fixed should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['time']['total'], dashboard_value, precision_rounding=rounding), 0, "The total hours should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['rates']['non_billable'], project_rate_non_billable, precision_rounding=rounding), 0, "The rate non-billable should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['rates']['non_billable_project'], project_rate_non_billable_project, precision_rounding=rounding), 0, "The rate non-billable-project should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['rates']['billable_time'], project_rate_billable_time, precision_rounding=rounding), 0, "The rate billable-time should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['rates']['billable_fixed'], project_rate_billable_fixed, precision_rounding=rounding), 0, "The rate billable-fixed should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['rates']['total'], project_rate_total, precision_rounding=rounding), 0, "The total rates should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['profit']['invoiced'], project_invoiced, precision_rounding=rounding), 0, "The amount invoiced should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['profit']['cost'], project_timesheet_cost, precision_rounding=rounding), 0, "The amount cost should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['profit']['expense_cost'], expense.amount, precision_rounding=rounding), 0, "The amount expense-cost should be the one from the SO2 line, as we are in ordered quantity")
        self.assertEqual(float_compare(vals['dashboard']['profit']['other_revenues'], other_revenues.amount, precision_rounding=rounding), 0, "The amount of the other revenues should be equal to the created other_revenues account analytic line")
        self.assertEqual(float_compare(vals['dashboard']['profit']['total'], project_invoiced + project_timesheet_cost + expense.amount + other_revenues.amount, precision_rounding=rounding), 0, "The total amount should be the sum of the SO2 line and the created other_revenues account analytic line")
        self.assertEqual(float_compare(vals['repartition_employee_max'], 11.0, precision_rounding=rounding), 0, "The amount of repartition-employee-max should be the one from SO2 line")
| gpl-3.0 |
EqAfrica/machinekit | nosetests/test_netcmd.py | 1 | 3441 | #!/usr/bin/env python
from nose import with_setup
from machinekit.nosetests.realtime import setup_module,teardown_module
from machinekit.nosetests.support import fnear
from machinekit import hal
import os
def test_component_creation():
    """Create two HAL components, each exposing s32 and float pins in all
    three directions (out/in/io); out pins get a distinct initial value."""
    global c1, c2

    def build_component(name, init_value):
        comp = hal.Component(name)
        # Same pin order as before: s32out, s32in, s32io, then the float trio.
        for prefix, pin_type in (("s32", hal.HAL_S32), ("float", hal.HAL_FLOAT)):
            comp.newpin(prefix + "out", pin_type, hal.HAL_OUT, init=init_value)
            comp.newpin(prefix + "in", pin_type, hal.HAL_IN)
            comp.newpin(prefix + "io", pin_type, hal.HAL_IO)
        comp.ready()
        return comp

    c1 = build_component("c1", 42)
    c2 = build_component("c2", 4711)
def test_net_existing_signal_with_bad_type():
    """Linking a pin to an existing signal of a mismatched type must fail."""
    hal.new_sig("f", hal.HAL_FLOAT)
    try:
        hal.net("f", "c1.s32out")
        # BUG FIX: 'raise "should not happen"' itself raised TypeError
        # (strings are not exceptions), which the handler below swallowed,
        # so the test passed even when hal.net() did NOT reject the link.
        raise AssertionError("should not happen")
    except TypeError:
        pass
    del hal.signals["f"]
def test_net_match_nonexistant_signals():
    """net() must reject a pin name that does not exist."""
    try:
        hal.net("nosuchsig", "c1.s32out", "c2.s32out")
        # BUG FIX: raising a string raises TypeError itself, which the
        # except clause below masked; AssertionError escapes it properly.
        raise AssertionError("should not happen")
    except TypeError:
        pass
def test_net_pin2pin():
    """net() must refuse a pin name as its first (signal) argument."""
    try:
        hal.net("c1.s32out", "c2.s32out")
        # TypeError: net: 'c1.s32out' is a pin - first argument must be a signal name
        # BUG FIX: the old 'raise "should not happen"' raised TypeError and
        # was swallowed by the handler below, hiding a missing hal error.
        raise AssertionError("should not happen")
    except TypeError:
        pass
def test_net_existing_signal():
    """A pin can be linked once; relinking it to another signal must fail."""
    hal.new_sig("s32", hal.HAL_S32)
    assert hal.pins["c1.s32out"].linked == False
    hal.net("s32", "c1.s32out")
    assert hal.pins["c1.s32out"].linked == True
    hal.new_sig("s32too", hal.HAL_S32)
    try:
        hal.net("s32too", "c1.s32out")
        # BUG FIX: raising a string is invalid in Python; use a real
        # exception so a missing RuntimeError fails the test cleanly.
        raise AssertionError("should not happen")
    except RuntimeError:
        pass
    del hal.signals["s32"]
def test_new_sig():
    """new_sig() must reject duplicate names, non-string names and bad types."""
    floatsig1 = hal.new_sig("floatsig1", hal.HAL_FLOAT)
    try:
        hal.new_sig("floatsig1", hal.HAL_FLOAT)
        # RuntimeError: Failed to create signal floatsig1: HAL: ERROR: duplicate signal 'floatsig1'
        raise AssertionError("should not happen")
    except RuntimeError:
        pass
    # BUG FIX below: 'raise "should not happen"' raised TypeError itself and
    # was masked by the except TypeError handlers, so these three checks
    # could never fail; AssertionError is not caught by them.
    try:
        hal.new_sig(32423 * 32432, hal.HAL_FLOAT)
        raise AssertionError("should not happen")
    except TypeError:
        pass
    try:
        hal.new_sig(None, hal.HAL_FLOAT)
        raise AssertionError("should not happen")
    except TypeError:
        pass
    try:
        hal.new_sig("badtype", 1234)
        raise AssertionError("should not happen")
    except TypeError:
        pass
def test_check_net_args():
    """Exercise net() argument validation and reader/writer bookkeeping."""
    # BUG FIX: the first three try blocks had no failure guard, so they
    # silently passed even if hal.net() accepted the bad arguments; an
    # AssertionError is added after each call (it is not caught by the
    # except TypeError handlers).
    try:
        hal.net()
        raise AssertionError("should not happen")
    except TypeError:
        pass
    try:
        hal.net(None, "c1.s32out")
        raise AssertionError("should not happen")
    except TypeError:
        pass
    try:
        hal.net("c1.s32out")
        # TypeError: net: 'c1.s32out' is a pin - first argument must be a signal name
        raise AssertionError("should not happen")
    except TypeError:
        pass
    assert "noexiste" not in hal.signals
    hal.net("noexiste", "c1.s32out")
    assert "noexiste" in hal.signals
    ne = hal.signals["noexiste"]
    assert ne.writers == 1
    assert ne.readers == 0
    assert ne.bidirs == 0
    try:
        hal.net("floatsig1", "c1.s32out")
        # BUG FIX: was 'raise "should not happen"' (invalid string raise).
        raise AssertionError("should not happen")
    except RuntimeError:
        pass
# Ignore SIGTERM so an external teardown signal does not kill the test
# process mid-run.  (Was an obfuscated one-shot lambda wrapping
# __import__('signal'); a plain import and call is equivalent and readable.)
import signal
signal.signal(signal.SIGTERM, signal.SIG_IGN)
| lgpl-2.1 |
dongjoon-hyun/tensorflow | tensorflow/python/kernel_tests/weights_broadcast_test.py | 8 | 9719 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import test
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape), dtype=np.int32), newshape=shape)
class AssertBroadcastableTest(test.TestCase):
  """Tests for weights_broadcast_ops.assert_broadcastable."""

  def setUp(self):
    # Fresh graph per test so ops/placeholders cannot leak between cases.
    ops.reset_default_graph()

  def _test_valid(self, weights, values):
    """Assert `weights` broadcasts to `values`, both with statically known
    shapes and dynamically via unshaped placeholders fed at run time."""
    static_op = weights_broadcast_ops.assert_broadcastable(
        weights=weights, values=values)
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.assert_broadcastable(
        weights=weights_placeholder, values=values_placeholder)
    with self.cached_session():
      static_op.run()
      dynamic_op.run(feed_dict={
          weights_placeholder: weights,
          values_placeholder: values,
      })

  # Valid cases: every combination of size-1 vs full dimensions against a
  # (3, 2, 4) values tensor, plus a plain scalar.
  def testScalar(self):
    self._test_valid(weights=5, values=_test_values((3, 2, 4)))

  def test1x1x1(self):
    self._test_valid(
        weights=np.asarray((5,)).reshape((1, 1, 1)),
        values=_test_values((3, 2, 4)))

  def test1x1xN(self):
    self._test_valid(
        weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
        values=_test_values((3, 2, 4)))

  def test1xNx1(self):
    self._test_valid(
        weights=np.asarray((5, 11)).reshape((1, 2, 1)),
        values=_test_values((3, 2, 4)))

  def test1xNxN(self):
    self._test_valid(
        weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)),
        values=_test_values((3, 2, 4)))

  def testNx1x1(self):
    self._test_valid(
        weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)),
        values=_test_values((3, 2, 4)))

  def testNx1xN(self):
    self._test_valid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)),
        values=_test_values((3, 2, 4)))

  def testNxNxN(self):
    self._test_valid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)),
        values=_test_values((3, 2, 4)))

  def _test_invalid(self, weights, values):
    """Assert the op rejects `weights`: statically with ValueError at graph
    construction, dynamically with an OpError at run time."""
    error_msg = 'weights can not be broadcast to values'
    with self.assertRaisesRegexp(ValueError, error_msg):
      weights_broadcast_ops.assert_broadcastable(weights=weights, values=values)
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.assert_broadcastable(
        weights=weights_placeholder, values=values_placeholder)
    with self.cached_session():
      with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
        dynamic_op.run(feed_dict={
            weights_placeholder: weights,
            values_placeholder: values,
        })

  # Invalid cases: wrong rank, partial (prefix/suffix-only) matches, and
  # extra dimensions beyond the values rank.
  def testInvalid1(self):
    self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))

  def testInvalid1x1(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1)),
        values=_test_values((3, 2, 4)))

  def testInvalidPrefixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
        values=_test_values((3, 2, 4)))

  def testInvalidSuffixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
        values=_test_values((3, 2, 4)))

  def testInvalidOnesExtraDim(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
        values=_test_values((3, 2, 4)))

  def testInvalidPrefixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
        values=_test_values((3, 2, 4)))

  def testInvalidSuffixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
        values=_test_values((3, 2, 4)))
class BroadcastWeightsTest(test.TestCase):
  """Tests for weights_broadcast_ops.broadcast_weights."""

  def setUp(self):
    # Fresh graph per test so ops/placeholders cannot leak between cases.
    ops.reset_default_graph()

  def _test_valid(self, weights, values, expected):
    """Assert broadcasting `weights` against `values` yields `expected`,
    both with static shapes and via unshaped placeholders at run time."""
    static_op = weights_broadcast_ops.broadcast_weights(
        weights=weights, values=values)
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.broadcast_weights(
        weights=weights_placeholder, values=values_placeholder)
    with self.cached_session():
      self.assertAllEqual(expected, static_op.eval())
      self.assertAllEqual(expected, dynamic_op.eval(feed_dict={
          weights_placeholder: weights,
          values_placeholder: values,
      }))

  # Valid cases: expected results are built with np.tile along the size-1
  # axes, mirroring what broadcasting should produce.
  def testScalar(self):
    self._test_valid(
        weights=5,
        values=_test_values((3, 2, 4)),
        expected=5 * np.ones((3, 2, 4)))

  def test1x1x1(self):
    self._test_valid(
        weights=np.asarray((5,)).reshape((1, 1, 1)),
        values=_test_values((3, 2, 4)),
        expected=5 * np.ones((3, 2, 4)))

  def test1x1xN(self):
    weights = np.asarray((5, 7, 11, 3)).reshape((1, 1, 4))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(3, 2, 1)))

  def test1xNx1(self):
    weights = np.asarray((5, 11)).reshape((1, 2, 1))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(3, 1, 4)))

  def test1xNxN(self):
    weights = np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(3, 1, 1)))

  def testNx1x1(self):
    weights = np.asarray((5, 7, 11)).reshape((3, 1, 1))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(1, 2, 4)))

  def testNx1xN(self):
    weights = np.asarray((
        5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4))
    self._test_valid(
        weights=weights,
        values=_test_values((3, 2, 4)),
        expected=np.tile(weights, reps=(1, 2, 1)))

  def testNxNxN(self):
    # Full-shape weights: broadcasting is the identity.
    weights = np.asarray((
        5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
        2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4))
    self._test_valid(
        weights=weights, values=_test_values((3, 2, 4)), expected=weights)

  def _test_invalid(self, weights, values):
    """Assert the op rejects `weights`: statically with ValueError at graph
    construction, dynamically with an OpError at run time."""
    error_msg = 'weights can not be broadcast to values'
    with self.assertRaisesRegexp(ValueError, error_msg):
      weights_broadcast_ops.broadcast_weights(weights=weights, values=values)
    weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
    values_placeholder = array_ops.placeholder(dtypes_lib.float32)
    dynamic_op = weights_broadcast_ops.broadcast_weights(
        weights=weights_placeholder, values=values_placeholder)
    with self.cached_session():
      with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
        dynamic_op.eval(feed_dict={
            weights_placeholder: weights,
            values_placeholder: values,
        })

  def testInvalid1(self):
    self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))

  def testInvalid1x1(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1)),
        values=_test_values((3, 2, 4)))

  def testInvalidPrefixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
        values=_test_values((3, 2, 4)))

  def testInvalidSuffixMatch(self):
    self._test_invalid(
        weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
        values=_test_values((3, 2, 4)))

  def testInvalidOnesExtraDim(self):
    self._test_invalid(
        weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
        values=_test_values((3, 2, 4)))

  def testInvalidPrefixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
        values=_test_values((3, 2, 4)))

  def testInvalidSuffixMatchExtraDim(self):
    self._test_invalid(
        weights=np.asarray((
            5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
            2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
        values=_test_values((3, 2, 4)))
# Standard TensorFlow test entry point: run all tests when executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
sarvex/tensorflow | tensorflow/python/ops/clip_ops.py | 10 | 16102 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for clipping (gradient, weight) tensors to min/max values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
@tf_export("clip_by_value")
@dispatch.add_dispatch_support
def clip_by_value(t, clip_value_min, clip_value_max,
                  name=None):
  """Clips tensor values to a specified min and max.

  Given a tensor `t`, this operation returns a tensor of the same type and
  shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
  Any values less than `clip_value_min` are set to `clip_value_min`. Any values
  greater than `clip_value_max` are set to `clip_value_max`.

  Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for
  correct results.

  For example:

  Basic usage passes a scalar as the min and max value.

  >>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]])
  >>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1)
  >>> t2.numpy()
  array([[-1., -1.,  0.],
         [ 0.,  1.,  1.]], dtype=float32)

  The min and max can be the same size as `t`, or broadcastable to that size.

  >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])
  >>> clip_min = [[2],[1]]
  >>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)
  >>> t3.numpy()
  array([[ 2.,  2., 10.],
         [ 1.,  1., 10.]], dtype=float32)

  Broadcasting fails, intentionally, if you would expand the dimensions of `t`

  >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])
  >>> clip_min = [[[2, 1]]] # Has a third axis
  >>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)
  Traceback (most recent call last):
  ...
  InvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2]

  It throws a `TypeError` if you try to clip an `int` to a `float` value
  (`tf.cast` the input to `float` first).

  >>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)
  >>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1)
  Traceback (most recent call last):
  ...
  TypeError: Cannot convert ...

  Args:
    t: A `Tensor` or `IndexedSlices`.
    clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that
      is broadcastable to the shape of `t`.
    clip_value_max: The maximum value to clip to. A scalar `Tensor` or one that
      is broadcastable to the shape of `t`.
    name: A name for the operation (optional).

  Returns:
    A clipped `Tensor` or `IndexedSlices`.

  Raises:
    `tf.errors.InvalidArgumentError`: If the clip tensors would trigger array
      broadcasting that would make the returned tensor larger than the input.
    TypeError: If dtype of the input is `int32` and dtype of
      the `clip_value_min` or `clip_value_max` is `float32`
  """
  with ops.name_scope(name, "clip_by_value",
                      [t, clip_value_min, clip_value_max]) as name:
    # For IndexedSlices, clip only the materialized values; the sparse
    # structure (indices/dense_shape) is reattached below.
    values = ops.convert_to_tensor(
        t.values if isinstance(t, ops.IndexedSlices) else t, name="t")

    # Go through list of tensors, for each value in each tensor clip.
    # Note the order: min-against-max first, then max-against-min — so if
    # clip_value_min > clip_value_max the result saturates at clip_value_min.
    t_min = math_ops.minimum(values, clip_value_max)
    # Assert that the shape is compatible with the initial shape,
    # to prevent unintentional broadcasting.
    values.shape.assert_is_compatible_with(t_min.shape)

    t_max = math_ops.maximum(t_min, clip_value_min, name=name)
    values.shape.assert_is_compatible_with(t_max.shape)

    if isinstance(t, ops.IndexedSlices):
      t_max = ops.IndexedSlices(t_max, t.indices, t.dense_shape)

  return t_max
  # TODO(scottzhu): switch to use new implementation in 2 weeks.
  # return gen_math_ops.clip_by_value(
  #     t, clip_value_min, clip_value_max, name=name)
# TODO(scottzhu): switch to use new implementation in 2 weeks.
# @ops.RegisterGradient("ClipByValue")
def _clip_by_value_grad(op, grad):
  """Returns grad of clip_by_value.

  op.inputs are (x, y, z) = (input, clip_value_min, clip_value_max).
  Gradient flows to x only where x was not clipped; where x was clipped
  below (x < y) the gradient goes to y, and where it was clipped above
  (x > z) it goes to z.
  """
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.inputs[2]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  sz = array_ops.shape(z)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  # Masks marking where clipping was active.
  xymask = math_ops.less(x, y)
  xzmask = math_ops.greater(x, z)
  # Axes along which each operand was broadcast; the incoming gradient is
  # reduced over these axes to fold it back to the operand's shape.
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  rx, rz = gen_array_ops.broadcast_gradient_args(sx, sz)
  xgrad = array_ops.where(math_ops.logical_or(xymask, xzmask), zeros, grad)
  ygrad = array_ops.where(xymask, grad, zeros)
  zgrad = array_ops.where(xzmask, grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  gz = array_ops.reshape(math_ops.reduce_sum(zgrad, rz), sz)
  return (gx, gy, gz)
@tf_export("clip_by_norm")
@dispatch.add_dispatch_support
def clip_by_norm(t, clip_norm, axes=None, name=None):
  """Clips tensor values to a maximum L2-norm.

  Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
  normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,
  along the dimensions given in `axes`. Specifically, in the default case
  where all dimensions are used for calculation, if the L2-norm of `t` is
  already less than or equal to `clip_norm`, then `t` is not modified. If
  the L2-norm is greater than `clip_norm`, then this operation returns a
  tensor of the same type and shape as `t` with its values set to:

  `t * clip_norm / l2norm(t)`

  In this case, the L2-norm of the output tensor is `clip_norm`.

  As another example, if `t` is a matrix and `axes == [1]`, then each row
  of the output will have L2-norm less than or equal to `clip_norm`. If
  `axes == [0]` instead, each column of the output will be clipped.

  Code example:

  >>> some_nums = tf.constant([[1, 2, 3, 4, 5]], dtype=tf.float32)
  >>> tf.clip_by_norm(some_nums, 2.0).numpy()
  array([[0.26967996, 0.5393599 , 0.80903983, 1.0787199 , 1.3483998 ]],
        dtype=float32)

  This operation is typically used to clip gradients before applying them with
  an optimizer. Most gradient data is a collection of different shaped tensors
  for different parts of the model. Thus, this is a common usage:

  ```
  # Get your gradients after training
  loss_value, grads = grad(model, features, labels)

  # Apply some clipping
  grads = [tf.clip_by_norm(g, norm)
               for g in grads]

  # Continue on with training
  optimizer.apply_gradients(grads)
  ```

  Args:
    t: A `Tensor` or `IndexedSlices`. This must be a floating point type.
    clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value, also
      floating point
    axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions
      to use for computing the L2-norm. If `None` (the default), uses all
      dimensions.
    name: A name for the operation (optional).

  Returns:
    A clipped `Tensor` or `IndexedSlices`.

  Raises:
    ValueError: If the clip_norm tensor is not a 0-D scalar tensor.
    TypeError: If dtype of the input is not a floating point or
      complex type.
  """
  with ops.name_scope(name, "clip_by_norm", [t, clip_norm]) as name:
    # For IndexedSlices, clip the materialized values and reattach the
    # sparse structure at the end.
    values = ops.convert_to_tensor(
        t.values if isinstance(t, ops.IndexedSlices) else t, name="t")

    # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
    l2sum = math_ops.reduce_sum(values * values, axes, keepdims=True)
    pred = l2sum > 0
    # Two-tap tf.where trick to bypass NaN gradients: sqrt is only taken
    # on a strictly positive surrogate, so d(sqrt)/dx at 0 never appears.
    l2sum_safe = array_ops.where(pred, l2sum, array_ops.ones_like(l2sum))
    l2norm = array_ops.where(pred, math_ops.sqrt(l2sum_safe), l2sum)
    intermediate = values * clip_norm
    # Assert that the shape is compatible with the initial shape,
    # to prevent unintentional broadcasting.
    values.shape.assert_is_compatible_with(intermediate.shape)
    # max(l2norm, clip_norm) in the denominator makes this an identity
    # whenever l2norm <= clip_norm, and a rescale to clip_norm otherwise.
    values_clip = array_ops.identity(
        intermediate / math_ops.maximum(l2norm, clip_norm), name=name)

    if isinstance(t, ops.IndexedSlices):
      return ops.IndexedSlices(values_clip, t.indices, t.dense_shape)

    return values_clip
@tf_export("linalg.global_norm", v1=["linalg.global_norm", "global_norm"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("global_norm")
def global_norm(t_list, name=None):
  """Computes the global norm of multiple tensors.

  Given a tuple or list of tensors `t_list`, this operation returns the
  global norm of the elements in all tensors in `t_list`. The global norm is
  computed as:

  `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`

  Any entries in `t_list` that are of type None are ignored.

  Args:
    t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
    name: A name for the operation (optional).

  Returns:
    A 0-D (scalar) `Tensor` of type `float`.

  Raises:
    TypeError: If `t_list` is not a sequence.
  """
  # Strings are sequences too, so they must be rejected explicitly.
  if (not isinstance(t_list, collections_abc.Sequence) or
      isinstance(t_list, six.string_types)):
    raise TypeError("t_list should be a sequence")
  t_list = list(t_list)
  with ops.name_scope(name, "global_norm", t_list) as name:
    # None entries are kept as None here and skipped below.
    values = [
        ops.convert_to_tensor(
            t.values if isinstance(t, ops.IndexedSlices) else t,
            name="t_%d" % i)
        if t is not None else t
        for i, t in enumerate(t_list)]
    half_squared_norms = []
    for v in values:
      if v is not None:
        # Compute each partial norm on the device that holds the tensor.
        with ops.colocate_with(v):
          half_squared_norms.append(gen_nn_ops.l2_loss(v))

    half_squared_norm = math_ops.reduce_sum(array_ops.stack(half_squared_norms))

    # l2_loss returns sum(v**2) / 2, hence the multiplication by 2 before
    # the square root.
    norm = math_ops.sqrt(
        half_squared_norm *
        constant_op.constant(2.0, dtype=half_squared_norm.dtype),
        name="global_norm")

  return norm
@tf_export("clip_by_global_norm")
@dispatch.add_dispatch_support
def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
  """Clips values of multiple tensors by the ratio of the sum of their norms.

  Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,
  this operation returns a list of clipped tensors `list_clipped`
  and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,
  if you've already computed the global norm for `t_list`, you can specify
  the global norm with `use_norm`.

  To perform the clipping, the values `t_list[i]` are set to:

      t_list[i] * clip_norm / max(global_norm, clip_norm)

  where:

      global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))

  If `clip_norm > global_norm` then the entries in `t_list` remain as they are,
  otherwise they're all shrunk by the global ratio.

  If `global_norm == infinity` then the entries in `t_list` are all set to `NaN`
  to signal that an error occurred.

  Any of the entries of `t_list` that are of type `None` are ignored.

  This is the correct way to perform gradient clipping (Pascanu et al., 2012).

  However, it is slower than `clip_by_norm()` because all the parameters must be
  ready before the clipping operation can be performed.

  Args:
    t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
    clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
    use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global
      norm to use. If not provided, `global_norm()` is used to compute the norm.
    name: A name for the operation (optional).

  Returns:
    list_clipped: A list of `Tensors` of the same type as `list_t`.
    global_norm: A 0-D (scalar) `Tensor` representing the global norm.

  Raises:
    TypeError: If `t_list` is not a sequence.

  References:
    On the difficulty of training Recurrent Neural Networks:
      [Pascanu et al., 2012](http://proceedings.mlr.press/v28/pascanu13.html)
      ([pdf](http://proceedings.mlr.press/v28/pascanu13.pdf))
  """
  # Strings are sequences too, so they must be rejected explicitly.
  if (not isinstance(t_list, collections_abc.Sequence) or
      isinstance(t_list, six.string_types)):
    raise TypeError("t_list should be a sequence")
  t_list = list(t_list)
  if use_norm is None:
    use_norm = global_norm(t_list, name)

  with ops.name_scope(name, "clip_by_global_norm",
                      t_list + [clip_norm]) as name:
    # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm.
    # min(1/use_norm, 1/clip_norm) * clip_norm equals
    # clip_norm / max(use_norm, clip_norm) without dividing by use_norm twice.
    scale_for_finite = clip_norm * math_ops.minimum(
        1.0 / use_norm,
        constant_op.constant(1.0, dtype=use_norm.dtype) / clip_norm)
    # If use_norm is any finite number, this is a no-op. For inf/-inf/NaN,
    # this will make scale NaN.
    scale = scale_for_finite + (use_norm - use_norm)

    # None entries are kept as None here and passed through unclipped.
    values = [
        ops.convert_to_tensor(
            t.values if isinstance(t, ops.IndexedSlices) else t,
            name="t_%d" % i)
        if t is not None else t
        for i, t in enumerate(t_list)]

    values_clipped = []
    for i, v in enumerate(values):
      if v is None:
        values_clipped.append(None)
      else:
        # Scale each tensor on the device that holds it.
        with ops.colocate_with(v):
          values_clipped.append(
              array_ops.identity(v * scale, name="%s_%d" % (name, i)))

    # Re-wrap clipped values of IndexedSlices inputs in their original
    # sparse structure.
    list_clipped = [
        ops.IndexedSlices(c_v, t.indices, t.dense_shape)
        if isinstance(t, ops.IndexedSlices)
        else c_v
        for (c_v, t) in zip(values_clipped, t_list)]

  return list_clipped, use_norm
@deprecation.deprecated(
    date=None,
    instructions="clip_by_average_norm is deprecated in TensorFlow 2.0. Please "
    "use clip_by_norm(t, clip_norm * tf.cast(tf.size(t), tf.float32), name) "
    "instead.")
@tf_export(v1=["clip_by_average_norm"])
@dispatch.add_dispatch_support
def clip_by_average_norm(t, clip_norm, name=None):
  """Clips tensor values to a maximum average L2-norm.
  Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
  normalizes `t` so that its average L2-norm is less than or equal to
  `clip_norm`. Specifically, if the average L2-norm is already less than or
  equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
  greater than `clip_norm`, then this operation returns a tensor of the same
  type and shape as `t` with its values set to:
  `t * clip_norm / l2norm_avg(t)`
  In this case, the average L2-norm of the output tensor is `clip_norm`.
  This operation is typically used to clip gradients before applying them with
  an optimizer.
  Args:
    t: A `Tensor`.
    clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
    name: A name for the operation (optional).
  Returns:
    A clipped `Tensor`.
  """
  with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
    t = ops.convert_to_tensor(t, name="t")
    # Calculate L2-norm per element, clip elements by ratio of clip_norm to
    # L2-norm per element
    # n_element = total number of elements in t, as a float32 scalar.
    n_element = math_ops.cast(array_ops.size(t), dtypes.float32)
    # l2norm_inv = 1 / sqrt(sum(t**2)) reduced over every axis of t.
    l2norm_inv = math_ops.rsqrt(
        math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
    # t * clip_norm * min(n / l2norm, 1 / clip_norm)
    #   == t * min(clip_norm * n / l2norm, 1)
    # i.e. a no-op when the average L2-norm (l2norm / n) <= clip_norm,
    # otherwise a rescale so the average L2-norm equals clip_norm.
    tclip = array_ops.identity(
        t * clip_norm * math_ops.minimum(
            l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),
        name=name)
  return tclip
| apache-2.0 |
joke2k/faker | setup.py | 1 | 2446 | #!/usr/bin/env python
import os
from pathlib import Path
from setuptools import find_packages, setup
# Resolve README/VERSION relative to this file so builds work from any CWD.
here = Path(__file__).resolve().parent
README = (here / 'README.rst').read_text(encoding='utf-8')
VERSION = (here / 'VERSION').read_text(encoding='utf-8').strip()
# The Sphinx helper subpackage is shipped only for the ReadTheDocs build;
# everywhere else it is excluded alongside docs and tests.
excluded_packages = ["docs", "tests", "tests.*"]
if not os.environ.get('READTHEDOCS', False):
    excluded_packages += ["faker.sphinx", "faker.sphinx.*"]
# this module can be zip-safe if the zipimporter implements iter_modules or if
# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
try:
    import pkgutil
    import zipimport
    # On interpreters lacking either hook, the attribute access below raises
    # AttributeError and we conservatively fall back to zip_safe = False.
    zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
        zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
except AttributeError:
    zip_safe = False
# Package metadata; entry_points expose the CLI and the pytest plugin.
setup(
    name='Faker',
    version=VERSION,
    description="Faker is a Python package that generates fake data for you.",
    long_description=README,
    entry_points={
        'console_scripts': ['faker=faker.cli:execute_from_command_line'],
        'pytest11': ['faker = faker.contrib.pytest.plugin'],
    },
    classifiers=[
        # See https://pypi.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Testing',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
    ],
    keywords='faker fixtures data test mock generator',
    author='joke2k',
    author_email='joke2k@gmail.com',
    url='https://github.com/joke2k/faker',
    license='MIT License',
    packages=find_packages(exclude=excluded_packages),
    platforms=["any"],
    zip_safe=zip_safe,
    python_requires=">=3.6",
    install_requires=[
        "python-dateutil>=2.4",
        "text-unidecode==1.3",
    ],
)
| mit |
prakxys/flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/jinja2/tests.py | 638 | 3444 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, mapping_types
# Matches optionally-signed integers and decimals, e.g. "-12.5".
number_re = re.compile(r'^-?\d+(\.\d+)?$')
# The type of a compiled pattern, for isinstance checks against regexes.
regex_type = type(number_re)
# The builtin ``callable`` already implements the "is callable" test.
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
    """Return true when the variable resolved to a real value, i.e. it is
    not Jinja's ``Undefined`` marker:
    .. sourcecode:: jinja
    {% if variable is defined %}
    value of variable: {{ variable }}
    {% else %}
    variable is not defined
    {% endif %}
    See the :func:`default` filter for a simple way to set undefined
    variables.
    """
    if isinstance(value, Undefined):
        return False
    return True
def test_undefined(value):
    """Like :func:`defined` but the other way round."""
    # Undefined is Jinja's marker type for unresolved template variables.
    return isinstance(value, Undefined)
def test_none(value):
    """Return true if the variable is none."""
    # Identity check: only the ``None`` singleton matches.
    return value is None
def test_lower(value):
    """Return true if the variable is lowercased."""
    # Coerce to text first so non-string values are handled uniformly.
    return text_type(value).islower()
def test_upper(value):
    """Return true if the variable is uppercased."""
    # Coerce to text first so non-string values are handled uniformly.
    return text_type(value).isupper()
def test_string(value):
    """Return true if the object is a string."""
    # string_types is the 2/3 compat alias from jinja2._compat.
    return isinstance(value, string_types)
def test_mapping(value):
    """Return true if the object is a mapping (dict etc.).
    .. versionadded:: 2.6
    """
    # mapping_types comes from jinja2._compat; presumably dict and
    # related mapping classes -- confirm against that module.
    return isinstance(value, mapping_types)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
    """Check if an object points to the same memory address than another
    object:
    .. sourcecode:: jinja
    {% if foo.attribute is sameas false %}
    the foo attribute really is the `False` singleton
    {% endif %}
    """
    # Identity, not equality: True only for the very same object.
    return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
    """Check if the value is escaped."""
    # Markup-style objects advertise HTML-safety via an ``__html__`` method.
    return hasattr(value, '__html__')
# Registry of built-in tests, keyed by the name used after ``is`` in
# templates (e.g. ``{{ x is defined }}``).
TESTS = {
    'odd': test_odd,
    'even': test_even,
    'divisibleby': test_divisibleby,
    'defined': test_defined,
    'undefined': test_undefined,
    'none': test_none,
    'lower': test_lower,
    'upper': test_upper,
    'string': test_string,
    'mapping': test_mapping,
    'number': test_number,
    'sequence': test_sequence,
    'iterable': test_iterable,
    'callable': test_callable,
    'sameas': test_sameas,
    'escaped': test_escaped
}
| apache-2.0 |
ar7z1/ansible | contrib/inventory/stacki.py | 27 | 6239 | #!/usr/bin/env python
# Copyright (c) 2016, Hugh Ma <hugh.ma@flextronics.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Stacki inventory script
# Configure stacki.yml with proper auth information and place in the following:
# - ../inventory/stacki.yml
# - /etc/stacki/stacki.yml
# - /etc/ansible/stacki.yml
# The stacki.yml file can contain entries for authentication information
# regarding the Stacki front-end node.
#
# use_hostnames uses hostname rather than interface ip as connection
#
#
"""
Example Usage:
List Stacki Nodes
$ ./stack.py --list
Example Configuration:
---
stacki:
auth:
stacki_user: admin
stacki_password: abc12345678910
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false
"""
import argparse
import os
import sys
import yaml
from distutils.version import StrictVersion
import json
# Fail fast with a helpful message when the one hard dependency is missing.
try:
    import requests
except ImportError:
    # Narrowed from a bare ``except``, which would also have masked
    # SystemExit/KeyboardInterrupt raised during import.
    sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
def stack_auth(params):
    """Authenticate against the Stacki front end.
    Returns a tuple ``(client, auth_creds)``: an authenticated
    requests.Session and a dict holding USERNAME/PASSWORD plus the
    post-login CSRFTOKEN and SESSIONID.
    """
    endpoint = params['stacki_endpoint']
    auth_creds = {'USERNAME': params['stacki_user'],
                  'PASSWORD': params['stacki_password']}
    client = requests.session()
    # The initial GET exists only to obtain the first CSRF cookie.
    client.get(endpoint)
    init_csrf = client.cookies['csrftoken']
    header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf,
              'Content-type': 'application/x-www-form-urlencoded'}
    login_endpoint = endpoint + "/login"
    login_req = client.post(login_endpoint, data=auth_creds, headers=header)
    # The login response sets fresh csrftoken/sessionid cookies; keep those.
    csrftoken = login_req.cookies['csrftoken']
    sessionid = login_req.cookies['sessionid']
    auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid)
    return client, auth_creds
def stack_build_header(auth_creds):
    """Build the HTTP headers for an authenticated Stacki API request."""
    token = auth_creds['CSRFTOKEN']
    return {
        'csrftoken': token,
        'X-CSRFToken': token,
        'sessionid': auth_creds['SESSIONID'],
        'Content-type': 'application/json',
    }
def stack_host_list(endpoint, header, client):
    """Return the parsed ``list host`` result from the Stacki API."""
    stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
                          headers=header)
    # The API returns a JSON-encoded string, hence the second decode.
    return json.loads(stack_r.json())
def stack_net_list(endpoint, header, client):
    """Return the parsed ``list host interface`` result from the Stacki API."""
    stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
                          headers=header)
    # The API returns a JSON-encoded string, hence the second decode.
    return json.loads(stack_r.json())
def format_meta(hostdata, intfdata, config):
    """Assemble the Ansible dynamic-inventory structure.
    ``hostdata`` is the ``list host`` output and ``intfdata`` the
    ``list host interface`` output. The connection address for each host is
    taken from its default interface: the IP by default, or the hostname
    when ``config['use_hostnames']`` is true. Host records are mutated in
    place (their ``environment`` key is removed).
    """
    use_hostnames = config['use_hostnames']
    meta = {
        'all': {'hosts': []},
        'frontends': {'hosts': []},
        'backends': {'hosts': []},
        '_meta': {'hostvars': {}},
    }
    hostvars = meta['_meta']['hostvars']
    # Index every host record by name. The 'environment' key is dropped
    # because it conflicts with Ansible's variable of the same name.
    for host in hostdata:
        del host['environment']
        host['interfaces'] = []
        hostvars[host['host']] = host
    for intf in intfdata:
        name = intf['host']
        if name not in hostvars:
            continue
        hostvars[name]['interfaces'].append(intf)
        if intf['default'] is True:
            hostvars[name]['ansible_host'] = intf['ip']
            connect = intf['host'] if use_hostnames else intf['ip']
            meta['all']['hosts'].append(connect)
            if hostvars[name]['appliance'] != 'frontend':
                meta['backends']['hosts'].append(connect)
            else:
                meta['frontends']['hosts'].append(connect)
    return meta
def parse_args():
    """Parse the CLI; ``--list`` and ``--host`` are mutually exclusive and
    exactly one of them is required."""
    parser = argparse.ArgumentParser(description='Stacki Inventory Module')
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument('--list', action='store_true',
                      help='List active hosts')
    mode.add_argument('--host', help='List details about the specific host')
    return parser.parse_args()
def main():
    """Entry point: load config, query the Stacki API and print the
    Ansible inventory as JSON. Exits non-zero on any failure."""
    args = parse_args()  # validates the CLI; exits if neither flag given
    if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
        sys.exit('requests>=2.4.3 is required for this inventory script')
    try:
        # Build the candidate list without mutating the module-level
        # CONFIG_FILES constant (the original ``append`` leaked state
        # across calls).
        config_files = CONFIG_FILES + [
            os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml']
        config = None
        for cfg_file in config_files:
            if os.path.isfile(cfg_file):
                # Context manager closes the handle (was leaked before).
                with open(cfg_file, 'r') as stream:
                    config = yaml.safe_load(stream)
                break
        if not config:
            sys.stderr.write("No config file found at {0}\n".format(config_files))
            sys.exit(1)
        client, auth_creds = stack_auth(config['stacki']['auth'])
        header = stack_build_header(auth_creds)
        host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
        intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
        final_meta = format_meta(host_list, intf_list, config)
        print(json.dumps(final_meta, indent=4))
    except Exception as e:
        # str(e) works everywhere; ``e.message`` was removed in Python 3
        # and is unset for most exceptions even on Python 2.
        sys.stderr.write('%s\n' % e)
        sys.exit(1)
    sys.exit(0)
| gpl-3.0 |
daanwierstra/pybrain | pybrain/rl/learners/search/incrementalcomplexity/incrementalcomplexity.py | 1 | 1767 | __author__ = 'Tom Schaul, tom@idsia.ch'
# TODO: inheritance!
class IncrementalComplexitySearch(object):
""" Draft of an OOPS-inspired search that incrementally expands the search space
and the allocated time (to a population of search processes). """
def __init__(self, initSearchProcess, maxPhases = 10, searchSteps = 50, desiredFitness = None):
self.maxPhases = maxPhases
self.searchSteps = searchSteps
self.desiredFitness = desiredFitness
self.processes = [initSearchProcess]
self.phase = 0
def optimize(self, **args):
while self.phase <= self.maxPhases and not self.problemSolved():
self._onePhase(**args)
# increase the number of processes
for p in self.processes[:]:
self.processes.append(p.newSimilarInstance())
self.increaseSearchSpace()
self.phase += 1
# return best evolvable
best = -1e100
for p in self.processes:
if p.bestFitness > best:
best = p.bestFitness
res = p.evolvable
return res
def _onePhase(self, verbose = True, **args):
if verbose:
print 'Phase', self.phase
for p in self.processes:
p.search(self.searchSteps, **args)
if verbose:
print '', p.bestFitness, p.evolvable.weightLengths
def increaseSearchSpace(self):
for p in self.processes:
p.increaseMaxComplexity()
def problemSolved(self):
if self.desiredFitness != None:
for p in self.processes:
if p.bestFitness > self.desiredFitness:
return True
return False | bsd-3-clause |
leonhong/thrift | lib/py/src/protocol/TBinaryProtocol.py | 62 | 6475 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from TProtocol import *
from struct import pack, unpack
class TBinaryProtocol(TProtocolBase):
  """Binary implementation of the Thrift protocol driver."""
  # NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
  # positive, converting this into a long. If we hardcode the int value
  # instead it'll stay in 32 bit-land.
  # VERSION_MASK = 0xffff0000
  VERSION_MASK = -65536
  # VERSION_1 = 0x80010000
  VERSION_1 = -2147418112
  TYPE_MASK = 0x000000ff
  def __init__(self, trans, strictRead=False, strictWrite=True):
    # strictRead/strictWrite toggle the versioned message framing used in
    # writeMessageBegin/readMessageBegin below.
    TProtocolBase.__init__(self, trans)
    self.strictRead = strictRead
    self.strictWrite = strictWrite
  # ---- write path: every scalar is big-endian packed via struct ----
  def writeMessageBegin(self, name, type, seqid):
    if self.strictWrite:
      # Strict framing: one I32 carrying VERSION_1 OR'd with the message
      # type, then the name and the sequence id.
      self.writeI32(TBinaryProtocol.VERSION_1 | type)
      self.writeString(name)
      self.writeI32(seqid)
    else:
      # Legacy framing: name first, then a bare type byte and sequence id.
      self.writeString(name)
      self.writeByte(type)
      self.writeI32(seqid)
  def writeMessageEnd(self):
    pass
  def writeStructBegin(self, name):
    pass
  def writeStructEnd(self):
    pass
  def writeFieldBegin(self, name, type, id):
    # Field names are not transmitted in the binary protocol; only the
    # type byte and numeric field id go on the wire.
    self.writeByte(type)
    self.writeI16(id)
  def writeFieldEnd(self):
    pass
  def writeFieldStop(self):
    self.writeByte(TType.STOP);
  def writeMapBegin(self, ktype, vtype, size):
    self.writeByte(ktype)
    self.writeByte(vtype)
    self.writeI32(size)
  def writeMapEnd(self):
    pass
  def writeListBegin(self, etype, size):
    self.writeByte(etype)
    self.writeI32(size)
  def writeListEnd(self):
    pass
  def writeSetBegin(self, etype, size):
    self.writeByte(etype)
    self.writeI32(size)
  def writeSetEnd(self):
    pass
  def writeBool(self, bool):
    # Booleans travel as a single byte, 1 or 0.
    if bool:
      self.writeByte(1)
    else:
      self.writeByte(0)
  def writeByte(self, byte):
    buff = pack("!b", byte)
    self.trans.write(buff)
  def writeI16(self, i16):
    buff = pack("!h", i16)
    self.trans.write(buff)
  def writeI32(self, i32):
    buff = pack("!i", i32)
    self.trans.write(buff)
  def writeI64(self, i64):
    buff = pack("!q", i64)
    self.trans.write(buff)
  def writeDouble(self, dub):
    buff = pack("!d", dub)
    self.trans.write(buff)
  def writeString(self, str):
    # Length-prefixed: I32 byte count followed by the raw bytes.
    self.writeI32(len(str))
    self.trans.write(str)
  # ---- read path: mirrors the write path above ----
  def readMessageBegin(self):
    sz = self.readI32()
    if sz < 0:
      # Negative size means a strict (versioned) header: the high bits
      # carry the protocol version, the low byte the message type.
      version = sz & TBinaryProtocol.VERSION_MASK
      if version != TBinaryProtocol.VERSION_1:
        raise TProtocolException(TProtocolException.BAD_VERSION, 'Bad version in readMessageBegin: %d' % (sz))
      type = sz & TBinaryProtocol.TYPE_MASK
      name = self.readString()
      seqid = self.readI32()
    else:
      # Non-negative size is a legacy header where sz is the name length.
      if self.strictRead:
        raise TProtocolException(TProtocolException.BAD_VERSION, 'No protocol version header')
      name = self.trans.readAll(sz)
      type = self.readByte()
      seqid = self.readI32()
    return (name, type, seqid)
  def readMessageEnd(self):
    pass
  def readStructBegin(self):
    pass
  def readStructEnd(self):
    pass
  def readFieldBegin(self):
    type = self.readByte()
    if type == TType.STOP:
      # STOP terminates the field list; no id follows it on the wire.
      return (None, type, 0)
    id = self.readI16()
    return (None, type, id)
  def readFieldEnd(self):
    pass
  def readMapBegin(self):
    ktype = self.readByte()
    vtype = self.readByte()
    size = self.readI32()
    return (ktype, vtype, size)
  def readMapEnd(self):
    pass
  def readListBegin(self):
    etype = self.readByte()
    size = self.readI32()
    return (etype, size)
  def readListEnd(self):
    pass
  def readSetBegin(self):
    etype = self.readByte()
    size = self.readI32()
    return (etype, size)
  def readSetEnd(self):
    pass
  def readBool(self):
    byte = self.readByte()
    if byte == 0:
      return False
    return True
  def readByte(self):
    buff = self.trans.readAll(1)
    val, = unpack('!b', buff)
    return val
  def readI16(self):
    buff = self.trans.readAll(2)
    val, = unpack('!h', buff)
    return val
  def readI32(self):
    buff = self.trans.readAll(4)
    val, = unpack('!i', buff)
    return val
  def readI64(self):
    buff = self.trans.readAll(8)
    val, = unpack('!q', buff)
    return val
  def readDouble(self):
    buff = self.trans.readAll(8)
    val, = unpack('!d', buff)
    return val
  def readString(self):
    # NOTE: ``len`` and ``str`` shadow the builtins here; kept as-is for
    # historical compatibility.
    len = self.readI32()
    str = self.trans.readAll(len)
    return str
class TBinaryProtocolFactory:
  """Factory producing TBinaryProtocol instances for a given transport."""
  def __init__(self, strictRead=False, strictWrite=True):
    # Stored flags are forwarded to every protocol this factory creates.
    self.strictRead = strictRead
    self.strictWrite = strictWrite
  def getProtocol(self, trans):
    return TBinaryProtocol(trans, self.strictRead, self.strictWrite)
class TBinaryProtocolAccelerated(TBinaryProtocol):
  """C-Accelerated version of TBinaryProtocol.
  This class does not override any of TBinaryProtocol's methods,
  but the generated code recognizes it directly and will call into
  our C module to do the encoding, bypassing this object entirely.
  We inherit from TBinaryProtocol so that the normal TBinaryProtocol
  encoding can happen if the fastbinary module doesn't work for some
  reason. (TODO(dreiss): Make this happen sanely in more cases.)
  In order to take advantage of the C module, just use
  TBinaryProtocolAccelerated instead of TBinaryProtocol.
  NOTE: This code was contributed by an external developer.
  The internal Thrift team has reviewed and tested it,
  but we cannot guarantee that it is production-ready.
  Please feel free to report bugs and/or success stories
  to the public mailing list.
  """
  # Intentionally empty: the class identity alone triggers the fast path.
  pass
class TBinaryProtocolAcceleratedFactory:
  """Factory producing TBinaryProtocolAccelerated instances."""
  def getProtocol(self, trans):
    return TBinaryProtocolAccelerated(trans)
| apache-2.0 |
arunhotra/tensorflow | tensorflow/python/summary/impl/directory_watcher_test.py | 5 | 3056 | """Tests for directory_watcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.summary.impl import directory_watcher
class _ByteLoader(object):
"""A loader that loads individual bytes from a file."""
def __init__(self, path):
self._f = open(path)
def Load(self):
while True:
byte = self._f.read(1)
if byte:
yield byte
else:
return
class DirectoryWatcherTest(test_util.TensorFlowTestCase):
  """Tests DirectoryWatcher by streaming bytes out of files in a temp dir."""
  def setUp(self):
    # Put everything in a directory so it's easier to delete.
    self._directory = os.path.join(self.get_temp_dir(), 'monitor_dir')
    os.mkdir(self._directory)
    self._watcher = directory_watcher.DirectoryWatcher(
        self._directory, _ByteLoader)
  def tearDown(self):
    shutil.rmtree(self._directory)
  def _WriteToFile(self, filename, data):
    # Appends (mode 'a') so repeated calls extend the same file.
    path = os.path.join(self._directory, filename)
    with open(path, 'a') as f:
      f.write(data)
  def assertWatcherYields(self, values):
    # Helper: drains the watcher and compares against the expected bytes.
    self.assertEqual(list(self._watcher.Load()), values)
  def testRaisesWithBadArguments(self):
    with self.assertRaises(ValueError):
      directory_watcher.DirectoryWatcher(None, lambda x: [])
    with self.assertRaises(ValueError):
      directory_watcher.DirectoryWatcher('asdf', None)
  def testEmptyDirectory(self):
    self.assertWatcherYields([])
  def testSingleWrite(self):
    self._WriteToFile('a', 'abc')
    self.assertWatcherYields(['a', 'b', 'c'])
  def testMultipleWrites(self):
    self._WriteToFile('a', 'abc')
    self.assertWatcherYields(['a', 'b', 'c'])
    # A second Load() should pick up only the newly appended data.
    self._WriteToFile('a', 'xyz')
    self.assertWatcherYields(['x', 'y', 'z'])
  def testMultipleLoads(self):
    self._WriteToFile('a', 'a')
    self._watcher.Load()
    self._watcher.Load()
    self.assertWatcherYields(['a'])
  def testMultipleFilesAtOnce(self):
    # Files are consumed in lexicographic order regardless of write order.
    self._WriteToFile('b', 'b')
    self._WriteToFile('a', 'a')
    self.assertWatcherYields(['a', 'b'])
  def testFinishesLoadingFileWhenSwitchingToNewFile(self):
    self._WriteToFile('a', 'a')
    # Empty the iterator.
    self.assertEquals(['a'], list(self._watcher.Load()))
    self._WriteToFile('a', 'b')
    self._WriteToFile('b', 'c')
    # The watcher should finish its current file before starting a new one.
    self.assertWatcherYields(['b', 'c'])
  def testIntermediateEmptyFiles(self):
    self._WriteToFile('a', 'a')
    self._WriteToFile('b', '')
    self._WriteToFile('c', 'c')
    self.assertWatcherYields(['a', 'c'])
  def testFileFilter(self):
    # Paths rejected by the filter must be skipped entirely.
    self._watcher = directory_watcher.DirectoryWatcher(
        self._directory, _ByteLoader,
        path_filter=lambda path: 'do_not_watch_me' not in path)
    self._WriteToFile('a', 'a')
    self._WriteToFile('do_not_watch_me', 'b')
    self._WriteToFile('c', 'c')
    self.assertWatcherYields(['a', 'c'])
# Run the TensorFlow test runner when executed directly.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
larsmans/numpy | numpy/matlib.py | 68 | 9569 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
from numpy import *
__version__ = np.__version__
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
    """
    Return a new matrix of given shape and type, without initializing entries.
    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty matrix.
    dtype : data-type, optional
        Desired output data-type.
    order : {'C', 'F'}, optional
        Whether to store multi-dimensional data in C (row-major) or
        Fortran (column-major) order in memory.
    See Also
    --------
    empty_like, zeros
    Notes
    -----
    `empty`, unlike `zeros`, does not set the matrix values to zero,
    and may therefore be marginally faster. On the other hand, it requires
    the user to manually set all the values in the array, and should be
    used with caution.
    Examples
    --------
    >>> import numpy.matlib
    >>> np.matlib.empty((2, 2)) # filled with random data
    matrix([[ 6.76425276e-320, 9.79033856e-307],
    [ 7.39337286e-309, 3.22135945e-309]]) #random
    >>> np.matlib.empty((2, 2), dtype=int)
    matrix([[ 6600475, 0],
    [ 6586976, 22740995]]) #random
    """
    # ndarray.__new__ allocates matrix-typed storage without initializing
    # the data, which is why the contents above are shown as random.
    return ndarray.__new__(matrix, shape, dtype, order=order)
def ones(shape, dtype=None, order='C'):
    """
    Return a matrix of the given shape and type, filled with ones.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar ``N`` yields a ``(1, N)`` row matrix.
    dtype : data-type, optional
        Desired data-type; default is ``np.float64``.
    order : {'C', 'F'}, optional
        Memory layout of the result, default 'C'.

    Returns
    -------
    out : matrix
        Matrix of ones with the given shape, dtype and order.

    See Also
    --------
    ones : Array of ones.
    matlib.zeros : Zero matrix.
    """
    # Allocate uninitialized matrix storage, then fill it in place.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(1)
    return out
def zeros(shape, dtype=None, order='C'):
    """
    Return a matrix of the given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar ``N`` yields a ``(1, N)`` row matrix.
    dtype : data-type, optional
        Desired data-type; default is float.
    order : {'C', 'F'}, optional
        Memory layout of the result, default 'C'.

    Returns
    -------
    out : matrix
        Zero matrix with the given shape, dtype and order.

    See Also
    --------
    numpy.zeros : Equivalent array function.
    matlib.ones : Return a matrix of ones.
    """
    # Allocate uninitialized matrix storage, then zero it in place.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
def identity(n, dtype=None):
    """
    Return the square identity matrix of the given size.

    Parameters
    ----------
    n : int
        Number of rows and columns of the returned matrix.
    dtype : data-type, optional
        Data-type of the output; defaults to ``float``.

    Returns
    -------
    out : matrix
        `n` x `n` matrix with ones on the main diagonal and zeros
        everywhere else.

    See Also
    --------
    numpy.identity : Equivalent array function.
    matlib.eye : More general matrix identity function.
    """
    # Delegate to numpy's identity and re-wrap the result as a matrix
    # (np.identity also treats dtype=None as float).
    return asmatrix(np.identity(n, dtype=dtype))
def eye(n, M=None, k=0, dtype=float):
    """
    Return a matrix with ones on the `k`-th diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    M : int, optional
        Number of columns; defaults to `n`.
    k : int, optional
        Diagonal index: 0 is the main diagonal, positive selects an upper
        diagonal, negative a lower one.
    dtype : dtype, optional
        Data-type of the returned matrix.

    Returns
    -------
    I : matrix
        `n` x `M` matrix whose `k`-th diagonal is all ones.

    See Also
    --------
    numpy.eye : Equivalent array function.
    identity : Square identity matrix.
    """
    # np.eye builds the ndarray; asmatrix re-wraps it without copying.
    return asmatrix(np.eye(n, M=M, k=k, dtype=dtype))
def rand(*args):
    """
    Return a matrix of random values with the given shape.

    Samples are drawn from a uniform distribution over ``[0, 1)``.

    Parameters
    ----------
    \\*args : int or tuple of ints
        Output shape. If the first argument is a tuple it is taken as the
        complete shape and any further arguments are ignored.

    Returns
    -------
    out : matrix
        Matrix of uniform random samples.

    See Also
    --------
    randn, numpy.random.rand
    """
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.rand(*shape))
def randn(*args):
    """
    Return a matrix of samples from the standard normal distribution.

    Samples are drawn from a univariate Gaussian with mean 0 and
    variance 1. For ``N(mu, sigma**2)`` use
    ``sigma * np.matlib.randn(...) + mu``.

    Parameters
    ----------
    \\*args : int or tuple of ints
        Output shape. If the first argument is a tuple it is taken as the
        complete shape and any further arguments are ignored.

    Returns
    -------
    Z : matrix of floats
        Matrix of standard-normal samples.

    See Also
    --------
    rand, random.randn
    """
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.randn(*shape))
def repmat(a, m, n):
    """
    Repeat a 0-D to 2-D array or matrix `m` x `n` times.

    Parameters
    ----------
    a : array_like
        The array or matrix to be repeated.
    m, n : int
        How many times `a` is tiled along the first and second axes.

    Returns
    -------
    out : ndarray
        Array of shape ``(rows * m, cols * n)`` tiling `a`, where
        ``(rows, cols)`` is `a`'s shape viewed as 2-D.
    """
    a = asanyarray(a)
    # View the input as 2-D: scalars become 1x1, 1-D arrays single rows.
    if a.ndim == 0:
        rows0, cols0 = 1, 1
    elif a.ndim == 1:
        rows0, cols0 = 1, a.shape[0]
    else:
        rows0, cols0 = a.shape
    # Duplicate the flattened input m times, restore row structure, then
    # duplicate each row n times and fold back into the final 2-D shape.
    stacked = a.reshape(1, a.size).repeat(m, 0)
    stacked = stacked.reshape(rows0 * m, cols0).repeat(n, 0)
    return stacked.reshape(rows0 * m, cols0 * n)
| bsd-3-clause |
shanemcd/ansible | lib/ansible/modules/cloud/amazon/elb_target_group_facts.py | 8 | 9841 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_target_group_facts
short_description: Gather facts about ELB target groups in AWS
description:
- Gather facts about ELB target groups in AWS
version_added: "2.4"
author: Rob White (@wimnat)
options:
load_balancer_arn:
description:
- The Amazon Resource Name (ARN) of the load balancer.
required: false
target_group_arns:
description:
- The Amazon Resource Names (ARN) of the target groups.
required: false
names:
description:
- The names of the target groups.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all target groups
- elb_target_group_facts:
# Gather facts about the target group attached to a particular ELB
- elb_target_group_facts:
load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about a target groups named 'tg1' and 'tg2'
- elb_target_group_facts:
names:
- tg1
- tg2
'''
RETURN = '''
target_groups:
description: a list of target groups
returned: always
type: complex
contains:
deregistration_delay_timeout_seconds:
description: The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
returned: always
type: int
sample: 300
health_check_interval_seconds:
description: The approximate amount of time, in seconds, between health checks of an individual target.
returned: always
type: int
sample: 30
health_check_path:
description: The destination for the health check request.
returned: always
type: string
sample: /index.html
health_check_port:
description: The port to use to connect with the target.
returned: always
type: string
sample: traffic-port
health_check_protocol:
description: The protocol to use to connect with the target.
returned: always
type: string
sample: HTTP
health_check_timeout_seconds:
description: The amount of time, in seconds, during which no response means a failed health check.
returned: always
type: int
sample: 5
healthy_threshold_count:
description: The number of consecutive health checks successes required before considering an unhealthy target healthy.
returned: always
type: int
sample: 5
load_balancer_arns:
description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
returned: always
type: list
sample: []
matcher:
description: The HTTP codes to use when checking for a successful response from a target.
returned: always
type: dict
sample: {
"http_code": "200"
}
port:
description: The port on which the targets are listening.
returned: always
type: int
sample: 80
protocol:
description: The protocol to use for routing traffic to the targets.
returned: always
type: string
sample: HTTP
stickiness_enabled:
description: Indicates whether sticky sessions are enabled.
returned: always
type: bool
sample: true
stickiness_lb_cookie_duration_seconds:
      description: The time period, in seconds, during which requests from a client should be routed to the same target.
returned: always
type: int
sample: 86400
stickiness_type:
description: The type of sticky sessions.
returned: always
type: string
sample: lb_cookie
tags:
description: The tags attached to the target group.
returned: always
type: dict
sample: "{
'Tag': 'Example'
}"
target_group_arn:
description: The Amazon Resource Name (ARN) of the target group.
returned: always
type: string
sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
target_group_name:
description: The name of the target group.
returned: always
type: string
sample: mytargetgroup
unhealthy_threshold_count:
description: The number of consecutive health check failures required before considering the target unhealthy.
returned: always
type: int
sample: 2
vpc_id:
description: The ID of the VPC for the targets.
returned: always
type: string
sample: vpc-0123456
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_target_group_attributes(connection, module, target_group_arn):
    """Return the attributes of one target group as an Ansible-friendly dict.

    Dots in attribute key names (e.g. 'deregistration_delay.timeout_seconds')
    are replaced with underscores. Fails the module on a boto3 ClientError.
    """
    try:
        target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
    except ClientError as e:
        module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    # Replace '.' with '_' in attribute key names to make it more Ansibley.
    # Build a new dict rather than mutating while iterating: the previous
    # loop deleted each key right after (re)inserting it, which silently
    # dropped any attribute whose name contained no '.' and raises
    # "dictionary changed size during iteration" on Python 3.
    return dict((k.replace('.', '_'), v)
                for k, v in target_group_attributes.items())
def get_target_group_tags(connection, module, target_group_arn):
    """Return the tags attached to one target group as a plain dict.

    Fails the module on a boto3 ClientError.
    """
    try:
        tag_descriptions = connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions']
        return boto3_tag_list_to_ansible_dict(tag_descriptions[0]['Tags'])
    except ClientError as e:
        module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_target_groups(connection, module):
    """Query ELBv2 target groups and exit the module with the results.

    The three filters (load_balancer_arn, target_group_arns, names) are
    declared mutually exclusive in main(); with none set, all target groups
    are returned. Exits via module.exit_json / fail_json and does not return.
    """
    load_balancer_arn = module.params.get("load_balancer_arn")
    target_group_arns = module.params.get("target_group_arns")
    names = module.params.get("names")
    try:
        target_group_paginator = connection.get_paginator('describe_target_groups')
        # Exactly one of the branches below runs (filters are mutually
        # exclusive); the first covers the "no filter" case.
        if not load_balancer_arn and not target_group_arns and not names:
            target_groups = target_group_paginator.paginate().build_full_result()
        if load_balancer_arn:
            target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
        if target_group_arns:
            target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
        if names:
            target_groups = target_group_paginator.paginate(Names=names).build_full_result()
    except ClientError as e:
        # A missing target group is not an error: report an empty list.
        if e.response['Error']['Code'] == 'TargetGroupNotFound':
            module.exit_json(target_groups=[])
        else:
            module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except NoCredentialsError as e:
        module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
    # Get the attributes and tags for each target group
    for target_group in target_groups['TargetGroups']:
        target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
    # Get tags for each target group (after snake-casing, keyed by ARN)
    for snaked_target_group in snaked_target_groups:
        snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
    module.exit_json(target_groups=snaked_target_groups)
def main():
    """Module entry point: build the argument spec, connect, list target groups."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            load_balancer_arn=dict(type='str'),
            target_group_arns=dict(type='list'),
            names=dict(type='list')
        )
    )
    # mutually_exclusive must be a list of sequences of parameter names.
    # The previous flat list of strings made each name its own one-element
    # group, so the exclusivity of the three filters was never enforced.
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
                           supports_check_mode=True
                           )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if region:
        connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        # fail_json raises SystemExit, so `connection` is always bound below.
        module.fail_json(msg="region must be specified")
    list_target_groups(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
marteinn/wagtail-alt-generator | wagtailaltgenerator/tests/demosite/settings.py | 1 | 2042 | #!/usr/bin/env python
import os
DEBUG = False
TIME_ZONE = "Europe/Stockholm"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3"}}
SECRET_KEY = "not needed"
USE_TZ = True
LANGUAGE_CODE = "en"
INSTALLED_APPS = [
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.sites",
"django.contrib.admin",
"django.contrib.messages",
"wagtail.core",
"wagtail.sites",
"wagtail.users",
"wagtail.images",
"wagtail.documents",
"taggit",
"wagtailaltgenerator",
"wagtailaltgenerator.tests.demopages",
"wagtailaltgenerator.tests.demosite",
]
ROOT_URLCONF = "wagtailaltgenerator.tests.demosite.urls"
MIDDLEWARE = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"wagtail.core.middleware.SiteMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
)
ALT_GENERATOR_MIN_CONFIDENCE = 0
# os.environ is a mapping, not an object with attributes: getattr() on it
# always returned the default (None). Use .get() to actually read the
# environment variable.
COMPUTER_VISION_API_KEY = os.environ.get("COMPUTER_VISION_API_KEY", None)
COMPUTER_VISION_REGION = "canada"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
}
}
]
| mit |
gkarlin/django-jenkins | build/Django/django/middleware/gzip.py | 225 | 2140 | import re
from django.utils.text import compress_sequence, compress_string
from django.utils.cache import patch_vary_headers
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
    """
    This middleware compresses content if the browser allows gzip compression.
    It sets the Vary header accordingly, so that caches will base their storage
    on the Accept-Encoding header.
    """
    def process_response(self, request, response):
        # Returns the response compressed in place, or unmodified when any
        # of the guard conditions below applies.
        # It's not worth attempting to compress really short responses.
        # (len() is only safe on non-streaming responses, hence the check.)
        if not response.streaming and len(response.content) < 200:
            return response
        patch_vary_headers(response, ('Accept-Encoding',))
        # Avoid gzipping if we've already got a content-encoding.
        if response.has_header('Content-Encoding'):
            return response
        # MSIE have issues with gzipped response of various content types.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            ctype = response.get('Content-Type', '').lower()
            if not ctype.startswith("text/") or "javascript" in ctype:
                return response
        # Only compress when the client explicitly advertises gzip support.
        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response
        if response.streaming:
            # Delete the `Content-Length` header for streaming content, because
            # we won't know the compressed size until we stream it.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            # Return the compressed content only if it's actually shorter.
            compressed_content = compress_string(response.content)
            if len(compressed_content) >= len(response.content):
                return response
            response.content = compressed_content
            response['Content-Length'] = str(len(response.content))
        # The compressed body is a different representation, so the ETag is
        # rewritten to a gzip-specific variant.
        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response['Content-Encoding'] = 'gzip'
        return response
| lgpl-3.0 |
txemi/ansible | lib/ansible/modules/cloud/ovirt/ovirt_networks.py | 25 | 9576 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_networks
short_description: Module to manage logical networks in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage logical networks in oVirt"
options:
name:
description:
- "Name of the the network to manage."
required: true
state:
description:
- "Should the network be present or absent"
choices: ['present', 'absent']
default: present
data_center:
description:
- "Datacenter name where network reside."
description:
description:
- "Description of the network."
comment:
description:
- "Comment of the network."
vlan_tag:
description:
- "Specify VLAN tag."
vm_network:
description:
- "If I(True) network will be marked as network for VM."
- "VM network carries traffic relevant to the virtual machine."
mtu:
description:
- "Maximum transmission unit (MTU) of the network."
clusters:
description:
- "List of dictionaries describing how the network is managed in specific cluster."
- "C(name) - Cluster name."
- "C(assigned) - I(true) if the network should be assigned to cluster. Default is I(true)."
- "C(required) - I(true) if the network must remain operational for all hosts associated with this network."
- "C(display) - I(true) if the network should marked as display network."
- "C(migration) - I(true) if the network should marked as migration network."
- "C(gluster) - I(true) if the network should marked as gluster network."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create network
- ovirt_networks:
data_center: mydatacenter
name: mynetwork
vlan_tag: 1
vm_network: true
# Remove network
- ovirt_networks:
state: absent
name: mynetwork
'''
RETURN = '''
id:
description: "ID of the managed network"
returned: "On success if network is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
network:
description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/network."
returned: "On success if network is found."
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class NetworksModule(BaseModule):
    """Maps the module parameters onto an oVirt SDK Network entity."""
    def build_entity(self):
        # Build the otypes.Network to create/update. Optional parameters
        # are passed as None so the SDK leaves them untouched.
        return otypes.Network(
            name=self._module.params['name'],
            comment=self._module.params['comment'],
            description=self._module.params['description'],
            data_center=otypes.DataCenter(
                name=self._module.params['data_center'],
            ) if self._module.params['data_center'] else None,
            vlan=otypes.Vlan(
                self._module.params['vlan_tag'],
            ) if self._module.params['vlan_tag'] else None,
            usages=[
                otypes.NetworkUsage.VM if self._module.params['vm_network'] else None
            ] if self._module.params['vm_network'] is not None else None,
            mtu=self._module.params['mtu'],
        )
    def update_check(self, entity):
        # Return True when the existing entity already matches the requested
        # parameters (i.e. no update is needed).
        return (
            equal(self._module.params.get('comment'), entity.comment) and
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None)) and
            equal(self._module.params.get('vm_network'), True if entity.usages else False) and
            equal(self._module.params.get('mtu'), entity.mtu)
        )
class ClusterNetworksModule(BaseModule):
    """Manages the assignment of an existing network to a specific cluster."""
    def __init__(self, network_id, cluster_network, *args, **kwargs):
        # network_id: id of the already-created data-center network.
        # cluster_network: one dict from the module's `clusters` parameter
        # (keys: name, assigned, required, display, migration, gluster).
        super(ClusterNetworksModule, self).__init__(*args, **kwargs)
        self._network_id = network_id
        self._cluster_network = cluster_network
    def build_entity(self):
        return otypes.Network(
            id=self._network_id,
            name=self._module.params['name'],
            required=self._cluster_network.get('required'),
            display=self._cluster_network.get('display'),
            # Only send usages when at least one usage flag was given;
            # otherwise None leaves the server-side value untouched.
            usages=[
                otypes.NetworkUsage(usage)
                for usage in ['display', 'gluster', 'migration']
                if self._cluster_network.get(usage, False)
            ] if (
                self._cluster_network.get('display') is not None or
                self._cluster_network.get('gluster') is not None or
                self._cluster_network.get('migration') is not None
            ) else None,
        )
    def update_check(self, entity):
        # True when the cluster-level attributes already match the request.
        return (
            equal(self._cluster_network.get('required'), entity.required) and
            equal(self._cluster_network.get('display'), entity.display) and
            equal(
                sorted([
                    usage
                    for usage in ['display', 'gluster', 'migration']
                    if self._cluster_network.get(usage, False)
                ]),
                sorted([
                    str(usage)
                    for usage in getattr(entity, 'usages', [])
                    # VM + MANAGEMENT is part of root network
                    if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT
                ]),
            )
        )
def main():
    """Module entry point: create/update or remove the network and its
    cluster assignments, then exit via module.exit_json/fail_json."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        data_center=dict(default=None, required=True),
        name=dict(default=None, required=True),
        description=dict(default=None),
        comment=dict(default=None),
        vlan_tag=dict(default=None, type='int'),
        vm_network=dict(default=None, type='bool'),
        mtu=dict(default=None, type='int'),
        clusters=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)
    # Initialise before the try block: if create_connection() raises, the
    # finally clause below would otherwise hit a NameError on `connection`
    # and mask the real failure.
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        clusters_service = connection.system_service().clusters_service()
        networks_service = connection.system_service().networks_service()
        networks_module = NetworksModule(
            connection=connection,
            module=module,
            service=networks_service,
        )
        state = module.params['state']
        network = networks_module.search_entity(
            search_params={
                'name': module.params['name'],
                'datacenter': module.params['data_center'],
            },
        )
        if state == 'present':
            ret = networks_module.create(entity=network)
            # Update clusters networks:
            if module.params.get('clusters') is not None:
                for param_cluster in module.params.get('clusters'):
                    cluster = search_by_name(clusters_service, param_cluster.get('name'))
                    if cluster is None:
                        raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
                    cluster_networks_service = clusters_service.service(cluster.id).networks_service()
                    cluster_networks_module = ClusterNetworksModule(
                        network_id=ret['id'],
                        cluster_network=param_cluster,
                        connection=connection,
                        module=module,
                        service=cluster_networks_service,
                    )
                    if param_cluster.get('assigned', True):
                        ret = cluster_networks_module.create()
                    else:
                        ret = cluster_networks_module.remove()
        elif state == 'absent':
            ret = networks_module.remove(entity=network)
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close when a connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
jparicka/twitter-tools | profiles/models.py | 1 | 1328 | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
class Profile(models.Model):
    """Per-user profile holding contact details, locale and Twitter OAuth tokens."""
    user = models.OneToOneField(User)
    name = models.CharField(max_length=100, blank=True, verbose_name="name", db_index=True)
    # verbose_name was copy-pasted as "name"; label the field correctly.
    headline = models.CharField(max_length=512, blank=True, verbose_name="headline", db_index=True)
    secret = models.CharField(max_length=100, blank=True, verbose_name="secret_key", db_index=True)
    country = models.CharField(max_length=10, blank=True, verbose_name="country", db_index=True)
    language = models.CharField(max_length=10, blank=True, verbose_name="language", db_index=True)
    mobile = models.CharField(max_length=20, blank=True, verbose_name="mobile_number_1")
    picture = models.URLField(blank=True, verbose_name="picture")
    # OAuth credentials for the Twitter API.
    oauth_token = models.CharField(max_length=200, blank=True)
    oauth_secret = models.CharField(max_length=200, blank=True)
    street_address_1 = models.CharField(max_length=100, blank=True, verbose_name="street_address_1")
    street_address_2 = models.CharField(max_length=100, blank=True, verbose_name="street_address_2")
    street_address_3 = models.CharField(max_length=100, blank=True, verbose_name="street_address_3")
    initial_assessment = models.BooleanField(default=False)
| mit |
ankurankan/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
    """The function to predict: x * sin(x)."""
    return np.sin(x) * x
#----------------------------------------------------------------------
# First the noiseless case: interpolate exact observations of f.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instanciate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                     random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE. (1.9600 is the ~97.5% quantile of the standard normal, so
# +/- 1.96*sigma gives a pointwise 95% interval.)
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case: observations of f corrupted by heteroscedastic noise.
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instanciate a Gaussian Process model.
# nugget=(dy/y)**2 is the per-point normalized noise variance (Tikhonov
# regularization of the training covariance, see the module docstring).
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=(dy / y) ** 2,
                     random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
giggsey/SickRage | lib/hachoir_core/field/basic_field_set.py | 74 | 4776 | from hachoir_core.field import Field, FieldError
from hachoir_core.stream import InputStream
from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN
from hachoir_core.event_handler import EventHandler
class ParserError(FieldError):
    """
    Error raised by a field set when parsing fails.
    @see: L{FieldError}
    """
    pass
class MatchError(FieldError):
    """
    Error raised by a field set when the stream content doesn't
    match the expected file format.
    @see: L{FieldError}
    """
    pass
class BasicFieldSet(Field):
    """Abstract base for field sets: a Field that itself contains fields.

    Holds the parent/root links, the input stream, endianness and the
    event-dispatch machinery; the iteration/lookup API declared at the
    bottom of the class must be implemented by concrete subclasses.
    Written for Python 2 (uses the `long` builtin).
    """
    _event_handler = None  # lazily-created per-instance EventHandler (see connectEvent)
    is_field_set = True
    endian = None  # BIG_ENDIAN/LITTLE_ENDIAN/MIDDLE_ENDIAN; inherited from parent if unset
    def __init__(self, parent, name, stream, description, size):
        # parent: enclosing BasicFieldSet, or None when this is the root.
        # size: size in bits, or None to use the class's static_size.
        # Sanity checks (preconditions)
        assert not parent or issubclass(parent.__class__, BasicFieldSet)
        assert issubclass(stream.__class__, InputStream)
        # Set field set size
        if size is None and self.static_size:
            assert isinstance(self.static_size, (int, long))
            size = self.static_size
        # Set Field attributes
        self._parent = parent
        self._name = name
        self._size = size
        self._description = description
        self.stream = stream
        self._field_array_count = {}
        # Set endian
        if not self.endian:
            assert parent and parent.endian
            self.endian = parent.endian
        if parent:
            # This field set is one of the root leafs
            self._address = parent.nextFieldAddress()
            self.root = parent.root
            assert id(self.stream) == id(parent.stream)
        else:
            # This field set is the root
            self._address = 0
            self.root = self
            # Global handlers only live on the root (see connectEvent).
            self._global_event_handler = None
        # Sanity checks (post-conditions)
        assert self.endian in (BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN)
        if (self._size is not None) and (self._size <= 0):
            raise ParserError("Invalid parser '%s' size: %s" % (self.path, self._size))
    def reset(self):
        # Forget the per-name array counters used by setUniqueFieldName().
        self._field_array_count = {}
    def createValue(self):
        # A field set has no scalar value of its own.
        return None
    def connectEvent(self, event_name, handler, local=True):
        # Register `handler` for `event_name`; with local=False the handler
        # is attached to the root and sees events from the whole tree.
        assert event_name in (
            # Callback prototype: def f(field)
            # Called when new value is already set
            "field-value-changed",
            # Callback prototype: def f(field)
            # Called when field size is already set
            "field-resized",
            # A new field has been inserted in the field set
            # Callback prototype: def f(index, new_field)
            "field-inserted",
            # Callback prototype: def f(old_field, new_field)
            # Called when new field is already in field set
            "field-replaced",
            # Callback prototype: def f(field, new_value)
            # Called to ask to set new value
            "set-field-value"
        ), "Event name %r is invalid" % event_name
        if local:
            if self._event_handler is None:
                self._event_handler = EventHandler()
            self._event_handler.connect(event_name, handler)
        else:
            if self.root._global_event_handler is None:
                self.root._global_event_handler = EventHandler()
            self.root._global_event_handler.connect(event_name, handler)
    def raiseEvent(self, event_name, *args):
        # Transfer event to local listeners
        if self._event_handler is not None:
            self._event_handler.raiseEvent(event_name, *args)
        # Transfer event to global listeners
        if self.root._global_event_handler is not None:
            self.root._global_event_handler.raiseEvent(event_name, *args)
    def setUniqueFieldName(self, field):
        # Rewrite a name ending in "[]" to "name[0]", "name[1]", ... using
        # a per-base-name counter.
        key = field._name[:-2]
        try:
            self._field_array_count[key] += 1
        except KeyError:
            self._field_array_count[key] = 0
        field._name = key + "[%u]" % self._field_array_count[key]
    def readFirstFields(self, number):
        """
        Read first number fields if they are not read yet.
        Returns number of new added fields.
        """
        number = number - self.current_length
        if 0 < number:
            return self.readMoreFields(number)
        else:
            return 0
    # --- Abstract API: concrete field sets must implement the following. ---
    def createFields(self):
        raise NotImplementedError()
    def __iter__(self):
        raise NotImplementedError()
    def __len__(self):
        raise NotImplementedError()
    def getField(self, key, const=True):
        raise NotImplementedError()
    def nextFieldAddress(self):
        raise NotImplementedError()
    def getFieldIndex(self, field):
        raise NotImplementedError()
    def readMoreFields(self, number):
        raise NotImplementedError()
| gpl-3.0 |
pradhanta/drunken-robot | tools/profilegen/sforce/tooling.py | 4 | 1605 | from base import SforceBaseClient
import re
class SforceToolingClient(SforceBaseClient):
def __init__(self, wsdl, *args, **kwargs):
super(SforceToolingClient, self).__init__(wsdl, *args, **kwargs)
header = self.generateHeader('SessionHeader')
header.sessionId = kwargs['sid']
self.setSessionHeader(header)
msurl = kwargs['metadata_server_url']
msurl = re.sub('/m/', '/T/', msurl)
self._setEndpoint(msurl)
self._setHeaders('')
def addOverlayAction(self, payload, **kwargs):
result = self._handleResultTyping(self._sforce.service.compileClasses(payload))
return result
def getOverlayActions(self, class_or_trigger_id):
query_string = "Select Id, Line, Iteration, ExpirationDate, IsDumpingHeap from ApexExecutionOverlayAction Where ExecutableEntityId = '{0}'".format(class_or_trigger_id)
result = self._handleResultTyping(self._sforce.service.query(query_string))
return result
def createOverlayAction(self, payload):
#print self._sforce
# result = self._sforce.service.create(payload)
# return result
payload = {'key1': 'value1', 'key2': 'value2'}
r = requests.post("http://na1.salesforce.com/services/data/v26.0/tooling/sobjects/TraceFlag/.", data=payload)
def describe(self):
result = None
try:
result = self._handleResultTyping(self.describeGlobal())
print 'result is: ', result
except Exception, e:
print e.message
print self.getLastRequest()
return result | apache-2.0 |
rcatwood/Savu | savu/data/data_structures/data_type.py | 1 | 4089 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: data_type
:platform: Unix
:synopsis: A module containing classes for different input data types other
than hdf5.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import os
import numpy as np
import fabio
class DataTypes(object):
    """Abstract interface for non-hdf5 data sources: subclasses provide
    numpy-style slicing and the combined shape of a stack of files."""
    def __getitem__(self, index):
        """ Override __getitem__ and map to the relevant files """
        raise NotImplementedError("__getitem__ must be implemented.")
    def get_shape(self):
        """ Get full stitched shape of a stack of files """
        raise NotImplementedError("get_shape must be implemented.")
class FabIO(DataTypes):
    """ This class loads any of the FabIO python module supported image
    formats. """
    def __init__(self, folder, Data, dim, shape=None):
        # folder: directory containing one image file per frame.
        # Data: owning data object (stored, not otherwise used here).
        # dim: axes of the slicing tuple that index frames (not image axes).
        # shape: optional outer shape; defaults to a flat (nFrames,) stack.
        self._data_obj = Data
        self.nFrames = None
        # __get_file_name also sets self.nFrames and self.start_no.
        self.start_file = fabio.open(self.__get_file_name(folder))
        self.frame_dim = dim
        self.image_shape = (self.start_file.dim2, self.start_file.dim1)
        if shape is None:
            self.shape = (self.nFrames,)
        else:
            self.shape = shape
    def __getitem__(self, index):
        # Assumes every element of `index` is a slice with explicit
        # start/stop/step (no ints or None) - TODO confirm against callers.
        size = [len(np.arange(i.start, i.stop, i.step)) for i in index]
        data = np.empty(size)
        # Axes of `index` that address pixels within one image.
        tiffidx = [i for i in range(len(index)) if i not in self.frame_dim]
        # print "original = ", index
        index, frameidx = self.__get_indices(index, size)
        for i in range(len(frameidx)):
            # print "amended = ", index[i]
            # print frameidx[i], [index[i][n] for n in tiffidx]
            data[index[i]] = \
                self.start_file.getframe(self.start_no + frameidx[i])\
                .data[[index[i][n] for n in tiffidx]]
        return data
    def __get_file_name(self, folder):
        # Pick the alphabetically first file; its trailing number becomes
        # the starting frame number.
        import re
        files = os.listdir(folder)
        self.nFrames = len(files)
        fname = sorted(files)[0]
        self.start_no = [int(s) for s in re.findall(r'\d+', fname)][-1]
        print "start number is", self.start_no
        return folder + "/" + fname
    def get_shape(self):
        # Outer (stack) shape followed by the per-image shape.
        return self.shape + self.image_shape
    def __get_idx(self, dim, sl, shape):
        # Expand one frame-axis slice into the flattened frame ordering:
        # each value is repeated for the product of the other axes' sizes.
        c = int(np.prod(shape[0:dim]))
        r = int(np.prod(shape[dim+1:]))
        values = np.arange(sl.start, sl.stop, sl.step)
        return np.ravel(np.kron(values, np.ones((r, c))))
    def __get_indices(self, index, size):
        """ Get the indices for the new data array and the file numbers. """
        sub_idx = np.array(index)[np.array(self.frame_dim)]
        sub_size = [size[i] for i in self.frame_dim]
        idx_list = []
        for dim in range(len(sub_idx)):
            idx = self.__get_idx(dim, sub_idx[dim], sub_size)
            idx_list.append(idx.astype(int))
        lshape = idx_list[0].shape[0]
        # One output row per frame; frame axes become one-element slices.
        index = np.tile(index, (lshape, 1))
        frameidx = np.zeros(lshape)
        for dim in range(len(sub_idx)):
            start = index[0][self.frame_dim[dim]].start
            index[:, self.frame_dim[dim]] = \
                [slice(i-start, i-start+1, 1) for i in idx_list[dim]]
            frameidx[:] += idx_list[dim]*np.prod(self.shape[dim+1:])
        return index.tolist(), frameidx.astype(int)
class Map_3d_to_4d_h5(DataTypes):
    """ This class converts a 3D dataset to a 4D dataset. """
    def __init__(self, backing_file, shape):
        # NOTE(review): `backing_file` is currently unused; it is accepted to
        # keep the constructor signature stable for callers.
        self.shape = shape
    def __getitem__(self, index):
        # Debug trace only -- the actual 3D->4D mapping is not implemented yet.
        # print() with a single argument behaves the same on Python 2 and 3,
        # unlike the py2-only `print index` statement this replaces.
        print(index)
    def get_shape(self):
        """ Return the (4D) shape of the mapped dataset. """
        return self.shape
| gpl-3.0 |
pablolizardo/dotfiles | inkscape/symbols/generate.py | 1 | 1934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
inkscapeSymbolGenerator: A inkscape symbol library generator
Copyright (C) 2015 Xavi Julián Olmos
See the file LICENSE for copying permission.
"""
import sys, os
import logging
from optparse import OptionParser
#### Goal
# If a folder is selected, merge all of its files into a single SVG, then
# if a single file is selected, optimise it with SVGO,
# remove the <style> tags, and
# save the result.
#### TODO
# Tidy up the script's if/else structure; check whether simpler alternatives exist.
# Use pythonic path construction (os.path / pathlib) instead of string concatenation.
# Use the input file name when deriving the output name.
# Find a regex to strip style tags in cleanSVGStyles().
# Append content to an existing file.
# Migrate away from OptionParser (see https://docs.python.org/3/library/optparse.html).
def cleanSVGStyles(path):
    """Strip style information from the SVG file at *path*.

    Currently a stub: it only reports what it would do; the regex-based
    tag removal is still a TODO (see the notes above).  The parameter was
    renamed from ``file`` so it no longer shadows the builtin; callers
    invoke it positionally, so this is backward compatible.
    """
    print('Cleaning SVG....')
if __name__ == '__main__':
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options.
    optp.add_option('--folder', help='Folder to generate icons from', dest='folder')
    optp.add_option('--file', help='File to generate icon from', dest='file')
    optp.add_option('-o', '--output', help='Output file', dest='output')
    opts, args = optp.parse_args()
    # NOTE(review): paths are interpolated into shell commands unquoted below;
    # paths containing spaces or shell metacharacters will break or execute.
    if opts.folder is None:
        if opts.file is None:
            # NOTE(review): "At list" should read "At least" -- runtime string,
            # deliberately left unchanged here.
            optp.error('At list one value for file or folder is needed')
        else:
            # Single-file mode: optimise in place, or into --output if given.
            if opts.output is None:
                os.system('svgo ' + opts.file)
            else:
                os.system('svgo ' + opts.file + ' -o ' + opts.output)
    else:
        if opts.file is None:
            # Folder mode: optimise the folder in place, or merge every SVG
            # into one output file, optimise it, then strip the style tags.
            if opts.output is None:
                os.system('svgo ' + opts.folder)
            else:
                os.system('cat ' + opts.folder + '/*.svg > ' + opts.output)
                os.system('svgo ' + opts.output + ' -o ' + opts.output)
                cleanSVGStyles(opts.output)
        else:
            # Both --file and --folder given: ambiguous, refuse.
            optp.error('File and folder cannot exist')
| gpl-2.0 |
andrewcbennett/iris | lib/iris/tests/unit/fileformats/pp/test_load.py | 5 | 1935 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.pp.load` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import iris.fileformats.pp as pp
from iris.tests import mock
class Test_load(tests.IrisTest):
    # Verifies the wiring of `pp.load`, not the PP parsing itself: the two
    # helpers it delegates to are patched out and only the call chain and
    # argument forwarding are asserted.
    def test_call_structure(self):
        # Check that the load function calls the two necessary utility
        # functions.
        extract_result = mock.Mock()
        interpret_patch = mock.patch('iris.fileformats.pp._interpret_fields',
                                     autospec=True, return_value=iter([]))
        field_gen_patch = mock.patch('iris.fileformats.pp._field_gen',
                                     autospec=True,
                                     return_value=extract_result)
        with interpret_patch as interpret, field_gen_patch as field_gen:
            pp.load('mock', read_data=True)
        # `read_data` must be forwarded to _field_gen under its internal
        # keyword name, and _interpret_fields must receive _field_gen's result.
        interpret.assert_called_once_with(extract_result)
        field_gen.assert_called_once_with('mock', read_data_bytes=True)
if __name__ == "__main__":
    # Allow running this test module directly.
    tests.main()
| gpl-3.0 |
willemneal/Docky | lib/werkzeug/http.py | 317 | 33404 | # -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
# NOTE(review): latin1 is incorrect for cookies in general (browsers send
# them in the page encoding), but it is kept for backwards compatibility;
# see parse_cookie() below.
_cookie_charset = 'latin1'
# One element of an Accept-* header: a value plus an optional ";q=<quality>".
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
# Characters that may appear in an unquoted HTTP token (RFC 2616, 2.2).
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                         '^_`abcdefghijklmnopqrstuvwxyz|~')
# A single (optionally weak "W/"-prefixed, optionally quoted) entity tag.
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
# Characters that force quoting when they appear in a header value.
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
# An HTTP quoted-string with backslash escapes.
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
# One ";key[=value]" piece of a parameterized header such as Content-Type.
_option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' %
                                     (_quoted_string_re, _quoted_string_re))
# Entity headers as defined by RFC 2616, section 7.1.
_entity_headers = frozenset([
    'allow', 'content-encoding', 'content-language', 'content-length',
    'content-location', 'content-md5', 'content-range', 'content-type',
    'expires', 'last-modified'
])
# Hop-by-hop headers (RFC 2616, 13.5.1) that proxies must not forward.
_hop_by_hop_headers = frozenset([
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
    'upgrade'
])
# Canonical reason phrases keyed by HTTP status code.
HTTP_STATUS_CODES = {
    100:    'Continue',
    101:    'Switching Protocols',
    102:    'Processing',
    200:    'OK',
    201:    'Created',
    202:    'Accepted',
    203:    'Non Authoritative Information',
    204:    'No Content',
    205:    'Reset Content',
    206:    'Partial Content',
    207:    'Multi Status',
    226:    'IM Used',              # see RFC 3229
    300:    'Multiple Choices',
    301:    'Moved Permanently',
    302:    'Found',
    303:    'See Other',
    304:    'Not Modified',
    305:    'Use Proxy',
    307:    'Temporary Redirect',
    400:    'Bad Request',
    401:    'Unauthorized',
    402:    'Payment Required',     # unused
    403:    'Forbidden',
    404:    'Not Found',
    405:    'Method Not Allowed',
    406:    'Not Acceptable',
    407:    'Proxy Authentication Required',
    408:    'Request Timeout',
    409:    'Conflict',
    410:    'Gone',
    411:    'Length Required',
    412:    'Precondition Failed',
    413:    'Request Entity Too Large',
    414:    'Request URI Too Long',
    415:    'Unsupported Media Type',
    416:    'Requested Range Not Satisfiable',
    417:    'Expectation Failed',
    418:    'I\'m a teapot',        # see RFC 2324
    422:    'Unprocessable Entity',
    423:    'Locked',
    424:    'Failed Dependency',
    426:    'Upgrade Required',
    428:    'Precondition Required', # see RFC 6585
    429:    'Too Many Requests',
    431:    'Request Header Fields Too Large',
    449:    'Retry With',           # proprietary MS extension
    500:    'Internal Server Error',
    501:    'Not Implemented',
    502:    'Bad Gateway',
    503:    'Service Unavailable',
    504:    'Gateway Timeout',
    505:    'HTTP Version Not Supported',
    507:    'Insufficient Storage',
    510:    'Not Extended'
}
def wsgi_to_bytes(data):
    """Coerce WSGI's unicode-represented bytes back into real bytes."""
    # Bytes pass through untouched; native strings are tunnelled through
    # latin1 as mandated by PEP 3333.  #XXX: utf8 fallback?
    return data if isinstance(data, bytes) else data.encode('latin1')
def bytes_to_wsgi(data):
    """Coerce real bytes into WSGI's native-string representation."""
    assert isinstance(data, bytes), 'data must be bytes'
    if isinstance(data, str):
        # Python 2: `bytes is str`, so the data already is a native string.
        return data
    else:
        # Python 3: tunnel the bytes through latin1 per PEP 3333.
        return data.decode('latin1')
def quote_header_value(value, extra_chars='', allow_token=True):
    """Wrap a header value in double quotes unless it is a plain token.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
                        unchanged.
    """
    if isinstance(value, bytes):
        value = bytes_to_wsgi(value)
    value = str(value)
    if allow_token:
        # A value made up entirely of token characters needs no quoting.
        allowed = _token_chars.union(extra_chars)
        if all(ch in allowed for ch in value):
            return value
    escaped = value.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).

    This mirrors what browsers actually do rather than the strict RFC
    unquoting rules.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    """
    # Anything that is not wrapped in double quotes is returned untouched.
    if not value or value[0] != '"' or value[-1] != '"':
        return value
    inner = value[1:-1]
    # Leave UNC filenames (\\server\share) untouched: collapsing the leading
    # double backslash would corrupt the path.  IE uploads files with a full
    # "C:\foo\bar.txt" style filename, hence the browser-style unescaping
    # below.  See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.
    :param header: the header to dump
    :param options: a dict of options to append.
    """
    segments = []
    if header is not None:
        segments.append(header)
    for key, value in iteritems(options):
        if value is None:
            # Options with a None value are emitted as bare keys (no "=").
            segments.append(key)
        else:
            segments.append('%s=%s' % (key, quote_header_value(value)))
    return '; '.join(segments)
def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again. This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`. This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.
    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'
    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    """
    if isinstance(iterable, dict):
        # Dict input produces "key=value" pairs; a None value emits a bare key.
        items = []
        for key, value in iteritems(iterable):
            if value is None:
                items.append(key)
            else:
                items.append('%s=%s' % (
                    key,
                    quote_header_value(value, allow_token=allow_token)
                ))
    else:
        items = [quote_header_value(x, allow_token=allow_token)
                 for x in iterable]
    return ', '.join(items)
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    # Quoted items are stripped of their surrounding quotes and unescaped;
    # plain tokens pass through unchanged.
    return [unquote_header_value(item[1:-1])
            if item[:1] == item[-1:] == '"' else item
            for item in _parse_list_header(value)]
def parse_dict_header(value, cls=dict):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict (or any other mapping object created from
    the type with a dict like interface provided by the `cls` arugment):
    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]
    If there is no value for a key it will be `None`:
    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}
    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.
    .. versionchanged:: 0.9
       Added support for `cls` argument.
    :param value: a string with a dict header.
    :param cls: callable to use for storage of parsed results.
    :return: an instance of `cls`
    """
    result = cls()
    if not isinstance(value, text_type):
        #XXX: validate
        # Bytes input is tunnelled through latin1 into a native string first.
        value = bytes_to_wsgi(value)
    for item in _parse_list_header(value):
        if '=' not in item:
            # Bare keys map to None (see docstring example above).
            result[item] = None
            continue
        name, value = item.split('=', 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result
def parse_options_header(value):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:
    >>> parse_options_header('text/html; charset=utf8')
    ('text/html', {'charset': 'utf8'})
    This should not be used to parse ``Cache-Control`` like headers that use
    a slightly different format.  For these headers use the
    :func:`parse_dict_header` function.
    .. versionadded:: 0.5
    :param value: the header to parse.
    :return: (str, options)
    """
    def _tokenize(string):
        # Yields (key, value) for every ";key[=value]" piece; value is None
        # for flag-style parameters.
        for match in _option_header_piece_re.finditer(string):
            key, value = match.groups()
            key = unquote_header_value(key)
            if value is not None:
                # filename values get the browser-compatible UNC handling.
                value = unquote_header_value(value, key == 'filename')
            yield key, value
    if not value:
        return '', {}
    # A leading ';' lets the piece-regex match the main value as well, so the
    # first token is the content type and the rest are its parameters.
    parts = _tokenize(';' + value)
    name = next(parts)[0]
    extra = dict(parts)
    return name, extra
def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header.  This does not implement a complete
    valid algorithm but one that supports at least value and quality
    extraction.
    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
    tuples sorted by the quality with some additional accessor methods).
    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.
    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                         :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = Accept
    if not value:
        return cls(None)
    result = []
    for match in _accept_re.finditer(value):
        quality = match.group(2)
        if not quality:
            # A missing quality parameter defaults to 1.
            quality = 1
        else:
            # Clamp the parsed quality into the valid [0, 1] interval.
            quality = max(min(float(quality), 1), 0)
        result.append((match.group(1), quality))
    return cls(result)
def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header.  The RFC differs between response and
    request cache control, this method does not.  It's your responsibility
    to not use the wrong control statements.
    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.
    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = RequestCacheControl
    if not value:
        return cls(None, on_update)
    # The header body is a comma separated list of key[=value] statements.
    return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:
    >>> hs = parse_set_header('token, "quoted value"')
    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:
    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])
    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.
    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    if not value:
        return HeaderSet(None, on_update)
    # Delegates element splitting/unquoting to parse_list_header.
    return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.  The return value is either `None` if the header was invalid or
    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
    object.

    :param value: the authorization header to parse.
    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
    """
    if not value:
        return
    value = wsgi_to_bytes(value)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        # Header did not contain both a scheme and a payload.
        return
    if auth_type == b'basic':
        try:
            username, password = base64.b64decode(auth_info).split(b':', 1)
        except Exception:
            # Undecodable base64 or missing ':' separator -> invalid header.
            return
        return Authorization('basic', {'username': bytes_to_wsgi(username),
                                       'password': bytes_to_wsgi(password)})
    elif auth_type == b'digest':
        auth_map = parse_dict_header(auth_info)
        # These fields are mandatory for a digest challenge response.
        for key in 'username', 'realm', 'nonce', 'uri', 'response':
            if key not in auth_map:
                return
        if 'qop' in auth_map:
            # qop implies the client must also send a nonce count and cnonce.
            if not auth_map.get('nc') or not auth_map.get('cnonce'):
                return
        return Authorization('digest', auth_map)
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a
    :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    """
    if not value:
        return WWWAuthenticate(on_update=on_update)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except (ValueError, AttributeError):
        # No auth info present (e.g. just "Basic"): keep the scheme only.
        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
    return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
                           on_update)
def parse_if_range_header(value):
    """Parses an if-range header which can be an etag or a date.  Returns
    a :class:`~werkzeug.datastructures.IfRange` object.
    .. versionadded:: 0.7
    """
    if not value:
        return IfRange()
    # A date is tried first; anything unparsable is treated as an etag.
    date = parse_date(value)
    if date is not None:
        return IfRange(date=date)
    # drop weakness information
    return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
    object.  If the header is missing or malformed `None` is returned.
    `ranges` is a list of ``(start, stop)`` tuples where the ranges are
    non-inclusive.
    .. versionadded:: 0.7
    """
    # NOTE(review): `make_inclusive` is never read; ranges are always
    # returned as non-inclusive (start, stop) tuples.
    if not value or '=' not in value:
        return None
    ranges = []
    # last_end tracks ordering; -1 marks a suffix range, after which no
    # further ranges are accepted.
    last_end = 0
    units, rng = value.split('=', 1)
    units = units.strip().lower()
    for item in rng.split(','):
        item = item.strip()
        if '-' not in item:
            return None
        if item.startswith('-'):
            # Suffix range ("-500"): begin is negative, end open.
            if last_end < 0:
                return None
            begin = int(item)
            end = None
            last_end = -1
        elif '-' in item:
            # Always true here ('-' presence was checked above).
            begin, end = item.split('-', 1)
            begin = int(begin)
            if begin < last_end or last_end < 0:
                return None
            if end:
                # Header bounds are inclusive; stop is stored exclusive.
                end = int(end) + 1
                if begin >= end:
                    return None
            else:
                end = None
            last_end = end
        ranges.append((begin, end))
    return Range(units, ranges)
def parse_content_range_header(value, on_update=None):
    """Parses a range header into a
    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
    parsing is not possible.
    .. versionadded:: 0.7
    :param value: a content range header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.ContentRange`
                      object is changed.
    """
    if value is None:
        return None
    try:
        units, rangedef = (value or '').strip().split(None, 1)
    except ValueError:
        return None
    if '/' not in rangedef:
        return None
    rng, length = rangedef.split('/', 1)
    if length == '*':
        # "*" means the total length is unknown.
        length = None
    elif length.isdigit():
        length = int(length)
    else:
        return None
    if rng == '*':
        # Unsatisfied range: no start/stop, only the total length is known.
        return ContentRange(units, None, None, length, on_update=on_update)
    elif '-' not in rng:
        return None
    start, stop = rng.split('-', 1)
    try:
        start = int(start)
        # Header bounds are inclusive; stop is stored exclusive.
        stop = int(stop) + 1
    except ValueError:
        return None
    if is_byte_range_valid(start, stop, length):
        return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
    """Quote an etag.

    :param etag: the etag to quote.
    :param weak: set to `True` to tag it "weak".
    """
    # An etag must not itself contain a double quote.
    if '"' in etag:
        raise ValueError('invalid etag')
    quoted = '"%s"' % etag
    return 'w/' + quoted if weak else quoted
def unquote_etag(etag):
    """Unquote a single etag:

    >>> unquote_etag('w/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None
    tag = etag.strip()
    # Both "w/" and "W/" mark a weak validator.
    weak = tag[:2] in ('w/', 'W/')
    if weak:
        tag = tag[2:]
    if tag.startswith('"') and tag.endswith('"'):
        tag = tag[1:-1]
    return tag, weak
def parse_etags(value):
    """Parse an etag header.
    :param value: the tag header to parse
    :return: an :class:`~werkzeug.datastructures.ETags` object.
    """
    if not value:
        return ETags()
    strong = []
    weak = []
    end = len(value)
    pos = 0
    # Walk the comma separated list one tag at a time.
    while pos < end:
        match = _etag_re.match(value, pos)
        if match is None:
            break
        is_weak, quoted, raw = match.groups()
        if raw == '*':
            # A bare "*" matches anything and short-circuits the parse.
            return ETags(star_tag=True)
        elif quoted:
            raw = quoted
        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)
        pos = match.end()
    return ETags(strong, weak)
def generate_etag(data):
    """Generate an etag for some data."""
    # md5 is used as a fast, stable fingerprint here, not for security.
    return md5(data).hexdigest()
def parse_date(value):
    """Parse one of the following date formats into a datetime object:

    .. sourcecode:: text

        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

    If parsing fails the return value is `None`.

    :param value: a string with a supported date format.
    :return: a :class:`datetime.datetime` object.
    """
    if not value:
        return None
    parsed = parsedate_tz(value.strip())
    if parsed is None:
        return None
    try:
        year = parsed[0]
        # parsedate_tz does not tell us whether a two digit year was part of
        # the string or prefixed with two zeroes, so assume 69-99 refer to
        # 1900 and everything below to 2000.
        if 0 <= year <= 68:
            year += 2000
        elif 69 <= year <= 99:
            year += 1900
        # Normalize to UTC by subtracting the parsed timezone offset.
        offset = timedelta(seconds=parsed[-1] or 0)
        return datetime(year, *parsed[1:7]) - offset
    except (ValueError, OverflowError):
        return None
def _dump_date(d, delim):
    """Shared formatter for :func:`http_date` and :func:`cookie_date`."""
    # Normalize every accepted input (None, datetime, number) to a UTC
    # struct_time before formatting.
    if d is None:
        d = gmtime()
    elif isinstance(d, datetime):
        d = d.utctimetuple()
    elif isinstance(d, (integer_types, float)):
        d = gmtime(d)
    weekday = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday]
    month = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
             'Oct', 'Nov', 'Dec')[d.tm_mon - 1]
    return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
        weekday, d.tm_mday, delim, month, delim, str(d.tm_year),
        d.tm_hour, d.tm_min, d.tm_sec
    )
def cookie_date(expires=None):
    """Formats the time to ensure compatibility with Netscape's cookie
    standard.
    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.
    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
    :param expires: If provided that date is used, otherwise the current.
    """
    # Netscape's cookie format separates date components with dashes.
    return _dump_date(expires, '-')
def http_date(timestamp=None):
    """Formats the time to match the RFC1123 date format.
    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.
    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
    :param timestamp: If provided that date is used, otherwise the current.
    """
    # RFC 1123 separates date components with spaces.
    return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
    """Convenience method for conditional requests.
    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :return: `True` if the resource was modified, otherwise `False`.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError('both data and etag given')
    # Conditional requests only make sense for safe methods.
    if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
        return False
    unmodified = False
    if isinstance(last_modified, string_types):
        last_modified = parse_date(last_modified)
    # ensure that microsecond is zero because the HTTP spec does not transmit
    # that either and we might have some false positives. See issue #39
    if last_modified is not None:
        last_modified = last_modified.replace(microsecond=0)
    modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True
    if etag:
        if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
        if if_none_match:
            # An If-None-Match hit overrides the date based comparison.
            unmodified = if_none_match.contains_raw(etag)
    return not unmodified
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
    """Remove all entity headers from a list or :class:`Headers` object.  This
    operation works in-place.  `Expires` and `Content-Location` headers are
    by default not removed.  The reason for this is :rfc:`2616` section
    10.3.5 which specifies some entity headers that should be sent.
    .. versionchanged:: 0.5
       added `allowed` parameter.
    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    allowed = set(x.lower() for x in allowed)
    # Slice-assign so the caller's list/Headers object is mutated in place.
    headers[:] = [(key, value) for key, value in headers if
                  not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object.  This operation works in-place.
    .. versionadded:: 0.5
    :param headers: a list or :class:`Headers` object.
    """
    # Slice-assign so the caller's list/Headers object is mutated in place.
    headers[:] = [(key, value) for key, value in headers if
                  not is_hop_by_hop_header(key)]
def is_entity_header(header):
    """Check if a header is an entity header.
    .. versionadded:: 0.5
    :param header: the header to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    # Header name comparison is case-insensitive.
    return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
    .. versionadded:: 0.5
    :param header: the header to test.
    :return: `True` if it's a hop-by-hop header, `False` otherwise.
    """
    # Header name comparison is case-insensitive.
    return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
    """Parse a cookie.  Either from a string or WSGI environ.
    Per default encoding errors are ignored.  If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
    :exc:`HTTPUnicodeError` is raised.
    .. versionchanged:: 0.5
       This function now returns a :class:`TypeConversionDict` instead of a
       regular dict.  The `cls` parameter was added.
    :param header: the header to be used to parse the cookie.  Alternatively
                   this can be a WSGI environment.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: an optional dict class to use.  If this is not specified
                       or `None` the default :class:`TypeConversionDict` is
                       used.
    """
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    elif header is None:
        header = ''
    # If the value is an unicode string it's mangled through latin1. This
    # is done because on PEP 3333 on Python 3 all headers are assumed latin1
    # which however is incorrect for cookies, which are sent in page encoding.
    # As a result we tunnel the header back through latin1 and decode it
    # again below with the charset the application actually expects.
    if isinstance(header, text_type):
        header = header.encode('latin1', 'replace')
    if cls is None:
        cls = TypeConversionDict
    def _parse_pairs():
        for key, val in _cookie_parse_impl(header):
            key = to_unicode(key, charset, errors, allow_none_charset=True)
            val = to_unicode(val, charset, errors, allow_none_charset=True)
            yield try_coerce_native(key), val
    return cls(_parse_pairs())
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.
    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.
    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.
    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie.  For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc.  Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)
    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        # Derive Expires from Max-Age for old clients that ignore Max-Age.
        expires = to_bytes(cookie_date(time() + max_age))
    buf = [key + b'=' + _cookie_quote(value)]
    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted. Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False)):
        if q is None:
            # Boolean attributes (Secure, HttpOnly) are emitted bare if truthy.
            if v:
                buf.append(k)
            continue
        if v is None:
            continue
        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))
    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')
    return rv
def is_byte_range_valid(start, stop, length):
    """Checks if a given byte content range is valid for the given length.
    .. versionadded:: 0.7
    """
    # A byte range needs either both endpoints or neither of them.
    has_start = start is not None
    has_stop = stop is not None
    if has_start is not has_stop:
        return False
    if not has_start:
        # No range at all: acceptable for an unknown or non-negative length.
        return length is None or length >= 0
    if length is None:
        # Length unknown: only require a well-formed, non-negative range.
        return 0 <= start < stop
    if start >= stop:
        return False
    # The range must begin inside the resource.
    return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
LanguageAccept, Headers
from werkzeug.urls import iri_to_uri
| mit |
fxfitz/ansible | test/units/modules/network/dellos10/dellos10_module.py | 60 | 2530 | # (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Return the contents of fixture file *name*, caching after first read.

    The raw file text is returned as-is unless it parses as JSON, in
    which case the decoded object is cached and returned instead.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON -- keep the raw string. (A bare ``except:`` here would
        # also swallow KeyboardInterrupt/SystemExit, so catch only the
        # decode failure that json.loads actually raises.)
        pass
    fixture_data[path] = data
    return data
class TestDellos10Module(ModuleTestCase):
    """Shared assertions for dellos10 module unit tests.

    Subclasses set ``self.module`` and override :meth:`load_fixtures`
    to stub out device I/O before :meth:`execute_module` drives the
    module's ``main()``.
    """
    def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        """Run the module once and verify failed/changed state and updates."""
        self.load_fixtures(commands)
        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)
        if commands is not None:
            expected = sorted(commands) if sort else commands
            actual = sorted(result['updates']) if sort else result['updates']
            self.assertEqual(expected, actual, result['updates'])
        return result
    def failed(self):
        """Invoke main() and assert it exits via fail_json."""
        with self.assertRaises(AnsibleFailJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertTrue(result['failed'], result)
        return result
    def changed(self, changed=False):
        """Invoke main() and assert it exits via exit_json with *changed*."""
        with self.assertRaises(AnsibleExitJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertEqual(result['changed'], changed, result)
        return result
    def load_fixtures(self, commands=None):
        """Hook for subclasses to install fixture data; no-op by default."""
        pass
| gpl-3.0 |
foodszhang/kbengine | kbe/src/lib/python/Lib/test/test_textwrap.py | 80 | 36255 | #
# Test suite for the textwrap module.
#
# Original tests written by Greg Ward <gward@python.net>.
# Converted to PyUnit by Peter Hansen <peter@engcorp.com>.
# Currently maintained by Greg Ward.
#
# $Id$
#
import unittest
from textwrap import TextWrapper, wrap, fill, dedent, indent, shorten
class BaseTestCase(unittest.TestCase):
'''Parent class with utility methods for textwrap tests.'''
def show(self, textin):
if isinstance(textin, list):
result = []
for i in range(len(textin)):
result.append(" %d: %r" % (i, textin[i]))
result = "\n".join(result) if result else " no lines"
elif isinstance(textin, str):
result = " %s\n" % repr(textin)
return result
def check(self, result, expect):
self.assertEqual(result, expect,
'expected:\n%s\nbut got:\n%s' % (
self.show(expect), self.show(result)))
def check_wrap(self, text, width, expect, **kwargs):
result = wrap(text, width, **kwargs)
self.check(result, expect)
def check_split(self, text, expect):
result = self.wrapper._split(text)
self.assertEqual(result, expect,
"\nexpected %r\n"
"but got %r" % (expect, result))
class WrapTestCase(BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper(width=45)
def test_simple(self):
# Simple case: just words, spaces, and a bit of punctuation
text = "Hello there, how are you this fine day? I'm glad to hear it!"
self.check_wrap(text, 12,
["Hello there,",
"how are you",
"this fine",
"day? I'm",
"glad to hear",
"it!"])
self.check_wrap(text, 42,
["Hello there, how are you this fine day?",
"I'm glad to hear it!"])
self.check_wrap(text, 80, [text])
def test_empty_string(self):
# Check that wrapping the empty string returns an empty list.
self.check_wrap("", 6, [])
self.check_wrap("", 6, [], drop_whitespace=False)
def test_empty_string_with_initial_indent(self):
# Check that the empty string is not indented.
self.check_wrap("", 6, [], initial_indent="++")
self.check_wrap("", 6, [], initial_indent="++", drop_whitespace=False)
def test_whitespace(self):
# Whitespace munging and end-of-sentence detection
text = """\
This is a paragraph that already has
line breaks. But some of its lines are much longer than the others,
so it needs to be wrapped.
Some lines are \ttabbed too.
What a mess!
"""
expect = ["This is a paragraph that already has line",
"breaks. But some of its lines are much",
"longer than the others, so it needs to be",
"wrapped. Some lines are tabbed too. What a",
"mess!"]
wrapper = TextWrapper(45, fix_sentence_endings=True)
result = wrapper.wrap(text)
self.check(result, expect)
result = wrapper.fill(text)
self.check(result, '\n'.join(expect))
text = "\tTest\tdefault\t\ttabsize."
expect = [" Test default tabsize."]
self.check_wrap(text, 80, expect)
text = "\tTest\tcustom\t\ttabsize."
expect = [" Test custom tabsize."]
self.check_wrap(text, 80, expect, tabsize=4)
def test_fix_sentence_endings(self):
wrapper = TextWrapper(60, fix_sentence_endings=True)
# SF #847346: ensure that fix_sentence_endings=True does the
# right thing even on input short enough that it doesn't need to
# be wrapped.
text = "A short line. Note the single space."
expect = ["A short line. Note the single space."]
self.check(wrapper.wrap(text), expect)
# Test some of the hairy end cases that _fix_sentence_endings()
# is supposed to handle (the easy stuff is tested in
# test_whitespace() above).
text = "Well, Doctor? What do you think?"
expect = ["Well, Doctor? What do you think?"]
self.check(wrapper.wrap(text), expect)
text = "Well, Doctor?\nWhat do you think?"
self.check(wrapper.wrap(text), expect)
text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
expect = ['I say, chaps! Anyone for "tennis?" Hmmph!']
self.check(wrapper.wrap(text), expect)
wrapper.width = 20
expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
self.check(wrapper.wrap(text), expect)
text = 'And she said, "Go to hell!"\nCan you believe that?'
expect = ['And she said, "Go to',
'hell!" Can you',
'believe that?']
self.check(wrapper.wrap(text), expect)
wrapper.width = 60
expect = ['And she said, "Go to hell!" Can you believe that?']
self.check(wrapper.wrap(text), expect)
text = 'File stdio.h is nice.'
expect = ['File stdio.h is nice.']
self.check(wrapper.wrap(text), expect)
def test_wrap_short(self):
# Wrapping to make short lines longer
text = "This is a\nshort paragraph."
self.check_wrap(text, 20, ["This is a short",
"paragraph."])
self.check_wrap(text, 40, ["This is a short paragraph."])
def test_wrap_short_1line(self):
# Test endcases
text = "This is a short line."
self.check_wrap(text, 30, ["This is a short line."])
self.check_wrap(text, 30, ["(1) This is a short line."],
initial_indent="(1) ")
def test_hyphenated(self):
# Test breaking hyphenated words
text = ("this-is-a-useful-feature-for-"
"reformatting-posts-from-tim-peters'ly")
self.check_wrap(text, 40,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 41,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 42,
["this-is-a-useful-feature-for-reformatting-",
"posts-from-tim-peters'ly"])
def test_hyphenated_numbers(self):
# Test that hyphenated numbers (eg. dates) are not broken like words.
text = ("Python 1.0.0 was released on 1994-01-26. Python 1.0.1 was\n"
"released on 1994-02-15.")
self.check_wrap(text, 30, ['Python 1.0.0 was released on',
'1994-01-26. Python 1.0.1 was',
'released on 1994-02-15.'])
self.check_wrap(text, 40, ['Python 1.0.0 was released on 1994-01-26.',
'Python 1.0.1 was released on 1994-02-15.'])
text = "I do all my shopping at 7-11."
self.check_wrap(text, 25, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 27, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 29, ["I do all my shopping at 7-11."])
def test_em_dash(self):
# Test text with em-dashes
text = "Em-dashes should be written -- thus."
self.check_wrap(text, 25,
["Em-dashes should be",
"written -- thus."])
# Probe the boundaries of the properly written em-dash,
# ie. " -- ".
self.check_wrap(text, 29,
["Em-dashes should be written",
"-- thus."])
expect = ["Em-dashes should be written --",
"thus."]
self.check_wrap(text, 30, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 36,
["Em-dashes should be written -- thus."])
# The improperly written em-dash is handled too, because
# it's adjacent to non-whitespace on both sides.
text = "You can also do--this or even---this."
expect = ["You can also do",
"--this or even",
"---this."]
self.check_wrap(text, 15, expect)
self.check_wrap(text, 16, expect)
expect = ["You can also do--",
"this or even---",
"this."]
self.check_wrap(text, 17, expect)
self.check_wrap(text, 19, expect)
expect = ["You can also do--this or even",
"---this."]
self.check_wrap(text, 29, expect)
self.check_wrap(text, 31, expect)
expect = ["You can also do--this or even---",
"this."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 35, expect)
# All of the above behaviour could be deduced by probing the
# _split() method.
text = "Here's an -- em-dash and--here's another---and another!"
expect = ["Here's", " ", "an", " ", "--", " ", "em-", "dash", " ",
"and", "--", "here's", " ", "another", "---",
"and", " ", "another!"]
self.check_split(text, expect)
text = "and then--bam!--he was gone"
expect = ["and", " ", "then", "--", "bam!", "--",
"he", " ", "was", " ", "gone"]
self.check_split(text, expect)
def test_unix_options (self):
# Test that Unix-style command-line options are wrapped correctly.
# Both Optik (OptionParser) and Docutils rely on this behaviour!
text = "You should use the -n option, or --dry-run in its long form."
self.check_wrap(text, 20,
["You should use the",
"-n option, or --dry-",
"run in its long",
"form."])
self.check_wrap(text, 21,
["You should use the -n",
"option, or --dry-run",
"in its long form."])
expect = ["You should use the -n option, or",
"--dry-run in its long form."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 34, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 38, expect)
expect = ["You should use the -n option, or --dry-",
"run in its long form."]
self.check_wrap(text, 39, expect)
self.check_wrap(text, 41, expect)
expect = ["You should use the -n option, or --dry-run",
"in its long form."]
self.check_wrap(text, 42, expect)
# Again, all of the above can be deduced from _split().
text = "the -n option, or --dry-run or --dryrun"
expect = ["the", " ", "-n", " ", "option,", " ", "or", " ",
"--dry-", "run", " ", "or", " ", "--dryrun"]
self.check_split(text, expect)
def test_funky_hyphens (self):
# Screwy edge cases cooked up by David Goodger. All reported
# in SF bug #596434.
self.check_split("what the--hey!", ["what", " ", "the", "--", "hey!"])
self.check_split("what the--", ["what", " ", "the--"])
self.check_split("what the--.", ["what", " ", "the--."])
self.check_split("--text--.", ["--text--."])
# When I first read bug #596434, this is what I thought David
# was talking about. I was wrong; these have always worked
# fine. The real problem is tested in test_funky_parens()
# below...
self.check_split("--option", ["--option"])
self.check_split("--option-opt", ["--option-", "opt"])
self.check_split("foo --option-opt bar",
["foo", " ", "--option-", "opt", " ", "bar"])
def test_punct_hyphens(self):
# Oh bother, SF #965425 found another problem with hyphens --
# hyphenated words in single quotes weren't handled correctly.
# In fact, the bug is that *any* punctuation around a hyphenated
# word was handled incorrectly, except for a leading "--", which
# was special-cased for Optik and Docutils. So test a variety
# of styles of punctuation around a hyphenated word.
# (Actually this is based on an Optik bug report, #813077).
self.check_split("the 'wibble-wobble' widget",
['the', ' ', "'wibble-", "wobble'", ' ', 'widget'])
self.check_split('the "wibble-wobble" widget',
['the', ' ', '"wibble-', 'wobble"', ' ', 'widget'])
self.check_split("the (wibble-wobble) widget",
['the', ' ', "(wibble-", "wobble)", ' ', 'widget'])
self.check_split("the ['wibble-wobble'] widget",
['the', ' ', "['wibble-", "wobble']", ' ', 'widget'])
def test_funky_parens (self):
# Second part of SF bug #596434: long option strings inside
# parentheses.
self.check_split("foo (--option) bar",
["foo", " ", "(--option)", " ", "bar"])
# Related stuff -- make sure parens work in simpler contexts.
self.check_split("foo (bar) baz",
["foo", " ", "(bar)", " ", "baz"])
self.check_split("blah (ding dong), wubba",
["blah", " ", "(ding", " ", "dong),",
" ", "wubba"])
def test_drop_whitespace_false(self):
# Check that drop_whitespace=False preserves whitespace.
# SF patch #1581073
text = " This is a sentence with much whitespace."
self.check_wrap(text, 10,
[" This is a", " ", "sentence ",
"with ", "much white", "space."],
drop_whitespace=False)
def test_drop_whitespace_false_whitespace_only(self):
# Check that drop_whitespace=False preserves a whitespace-only string.
self.check_wrap(" ", 6, [" "], drop_whitespace=False)
def test_drop_whitespace_false_whitespace_only_with_indent(self):
# Check that a whitespace-only string gets indented (when
# drop_whitespace is False).
self.check_wrap(" ", 6, [" "], drop_whitespace=False,
initial_indent=" ")
def test_drop_whitespace_whitespace_only(self):
# Check drop_whitespace on a whitespace-only string.
self.check_wrap(" ", 6, [])
def test_drop_whitespace_leading_whitespace(self):
# Check that drop_whitespace does not drop leading whitespace (if
# followed by non-whitespace).
# SF bug #622849 reported inconsistent handling of leading
# whitespace; let's test that a bit, shall we?
text = " This is a sentence with leading whitespace."
self.check_wrap(text, 50,
[" This is a sentence with leading whitespace."])
self.check_wrap(text, 30,
[" This is a sentence with", "leading whitespace."])
def test_drop_whitespace_whitespace_line(self):
# Check that drop_whitespace skips the whole line if a non-leading
# line consists only of whitespace.
text = "abcd efgh"
# Include the result for drop_whitespace=False for comparison.
self.check_wrap(text, 6, ["abcd", " ", "efgh"],
drop_whitespace=False)
self.check_wrap(text, 6, ["abcd", "efgh"])
def test_drop_whitespace_whitespace_only_with_indent(self):
# Check that initial_indent is not applied to a whitespace-only
# string. This checks a special case of the fact that dropping
# whitespace occurs before indenting.
self.check_wrap(" ", 6, [], initial_indent="++")
def test_drop_whitespace_whitespace_indent(self):
# Check that drop_whitespace does not drop whitespace indents.
# This checks a special case of the fact that dropping whitespace
# occurs before indenting.
self.check_wrap("abcd efgh", 6, [" abcd", " efgh"],
initial_indent=" ", subsequent_indent=" ")
def test_split(self):
# Ensure that the standard _split() method works as advertised
# in the comments
text = "Hello there -- you goof-ball, use the -b option!"
result = self.wrapper._split(text)
self.check(result,
["Hello", " ", "there", " ", "--", " ", "you", " ", "goof-",
"ball,", " ", "use", " ", "the", " ", "-b", " ", "option!"])
def test_break_on_hyphens(self):
# Ensure that the break_on_hyphens attributes work
text = "yaba daba-doo"
self.check_wrap(text, 10, ["yaba daba-", "doo"],
break_on_hyphens=True)
self.check_wrap(text, 10, ["yaba", "daba-doo"],
break_on_hyphens=False)
def test_bad_width(self):
# Ensure that width <= 0 is caught.
text = "Whatever, it doesn't matter."
self.assertRaises(ValueError, wrap, text, 0)
self.assertRaises(ValueError, wrap, text, -1)
def test_no_split_at_umlaut(self):
text = "Die Empf\xe4nger-Auswahl"
self.check_wrap(text, 13, ["Die", "Empf\xe4nger-", "Auswahl"])
def test_umlaut_followed_by_dash(self):
text = "aa \xe4\xe4-\xe4\xe4"
self.check_wrap(text, 7, ["aa \xe4\xe4-", "\xe4\xe4"])
class MaxLinesTestCase(BaseTestCase):
text = "Hello there, how are you this fine day? I'm glad to hear it!"
def test_simple(self):
self.check_wrap(self.text, 12,
["Hello [...]"],
max_lines=0)
self.check_wrap(self.text, 12,
["Hello [...]"],
max_lines=1)
self.check_wrap(self.text, 12,
["Hello there,",
"how [...]"],
max_lines=2)
self.check_wrap(self.text, 13,
["Hello there,",
"how are [...]"],
max_lines=2)
self.check_wrap(self.text, 80, [self.text], max_lines=1)
self.check_wrap(self.text, 12,
["Hello there,",
"how are you",
"this fine",
"day? I'm",
"glad to hear",
"it!"],
max_lines=6)
def test_spaces(self):
# strip spaces before placeholder
self.check_wrap(self.text, 12,
["Hello there,",
"how are you",
"this fine",
"day? [...]"],
max_lines=4)
# placeholder at the start of line
self.check_wrap(self.text, 6,
["Hello",
"[...]"],
max_lines=2)
# final spaces
self.check_wrap(self.text + ' ' * 10, 12,
["Hello there,",
"how are you",
"this fine",
"day? I'm",
"glad to hear",
"it!"],
max_lines=6)
def test_placeholder(self):
self.check_wrap(self.text, 12,
["Hello..."],
max_lines=1,
placeholder='...')
self.check_wrap(self.text, 12,
["Hello there,",
"how are..."],
max_lines=2,
placeholder='...')
# long placeholder and indentation
with self.assertRaises(ValueError):
wrap(self.text, 16, initial_indent=' ',
max_lines=1, placeholder=' [truncated]...')
with self.assertRaises(ValueError):
wrap(self.text, 16, subsequent_indent=' ',
max_lines=2, placeholder=' [truncated]...')
self.check_wrap(self.text, 16,
[" Hello there,",
" [truncated]..."],
max_lines=2,
initial_indent=' ',
subsequent_indent=' ',
placeholder=' [truncated]...')
self.check_wrap(self.text, 16,
[" [truncated]..."],
max_lines=1,
initial_indent=' ',
subsequent_indent=' ',
placeholder=' [truncated]...')
self.check_wrap(self.text, 80, [self.text], placeholder='.' * 1000)
class LongWordTestCase (BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper()
self.text = '''\
Did you say "supercalifragilisticexpialidocious?"
How *do* you spell that odd word, anyways?
'''
def test_break_long(self):
# Wrap text with long words and lots of punctuation
self.check_wrap(self.text, 30,
['Did you say "supercalifragilis',
'ticexpialidocious?" How *do*',
'you spell that odd word,',
'anyways?'])
self.check_wrap(self.text, 50,
['Did you say "supercalifragilisticexpialidocious?"',
'How *do* you spell that odd word, anyways?'])
# SF bug 797650. Prevent an infinite loop by making sure that at
# least one character gets split off on every pass.
self.check_wrap('-'*10+'hello', 10,
['----------',
' h',
' e',
' l',
' l',
' o'],
subsequent_indent = ' '*15)
# bug 1146. Prevent a long word to be wrongly wrapped when the
# preceding word is exactly one character shorter than the width
self.check_wrap(self.text, 12,
['Did you say ',
'"supercalifr',
'agilisticexp',
'ialidocious?',
'" How *do*',
'you spell',
'that odd',
'word,',
'anyways?'])
def test_nobreak_long(self):
# Test with break_long_words disabled
self.wrapper.break_long_words = 0
self.wrapper.width = 30
expect = ['Did you say',
'"supercalifragilisticexpialidocious?"',
'How *do* you spell that odd',
'word, anyways?'
]
result = self.wrapper.wrap(self.text)
self.check(result, expect)
# Same thing with kwargs passed to standalone wrap() function.
result = wrap(self.text, width=30, break_long_words=0)
self.check(result, expect)
def test_max_lines_long(self):
self.check_wrap(self.text, 12,
['Did you say ',
'"supercalifr',
'agilisticexp',
'[...]'],
max_lines=4)
class IndentTestCases(BaseTestCase):
# called before each test method
def setUp(self):
self.text = '''\
This paragraph will be filled, first without any indentation,
and then with some (including a hanging indent).'''
def test_fill(self):
# Test the fill() method
expect = '''\
This paragraph will be filled, first
without any indentation, and then with
some (including a hanging indent).'''
result = fill(self.text, 40)
self.check(result, expect)
def test_initial_indent(self):
# Test initial_indent parameter
expect = [" This paragraph will be filled,",
"first without any indentation, and then",
"with some (including a hanging indent)."]
result = wrap(self.text, 40, initial_indent=" ")
self.check(result, expect)
expect = "\n".join(expect)
result = fill(self.text, 40, initial_indent=" ")
self.check(result, expect)
def test_subsequent_indent(self):
# Test subsequent_indent parameter
expect = '''\
* This paragraph will be filled, first
without any indentation, and then
with some (including a hanging
indent).'''
result = fill(self.text, 40,
initial_indent=" * ", subsequent_indent=" ")
self.check(result, expect)
# Despite the similar names, DedentTestCase is *not* the inverse
# of IndentTestCase!
class DedentTestCase(unittest.TestCase):
def assertUnchanged(self, text):
"""assert that dedent() has no effect on 'text'"""
self.assertEqual(text, dedent(text))
def test_dedent_nomargin(self):
# No lines indented.
text = "Hello there.\nHow are you?\nOh good, I'm glad."
self.assertUnchanged(text)
# Similar, with a blank line.
text = "Hello there.\n\nBoo!"
self.assertUnchanged(text)
# Some lines indented, but overall margin is still zero.
text = "Hello there.\n This is indented."
self.assertUnchanged(text)
# Again, add a blank line.
text = "Hello there.\n\n Boo!\n"
self.assertUnchanged(text)
def test_dedent_even(self):
# All lines indented by two spaces.
text = " Hello there.\n How are ya?\n Oh good."
expect = "Hello there.\nHow are ya?\nOh good."
self.assertEqual(expect, dedent(text))
# Same, with blank lines.
text = " Hello there.\n\n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEqual(expect, dedent(text))
# Now indent one of the blank lines.
text = " Hello there.\n \n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEqual(expect, dedent(text))
def test_dedent_uneven(self):
# Lines indented unevenly.
text = '''\
def foo():
while 1:
return foo
'''
expect = '''\
def foo():
while 1:
return foo
'''
self.assertEqual(expect, dedent(text))
# Uneven indentation with a blank line.
text = " Foo\n Bar\n\n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEqual(expect, dedent(text))
# Uneven indentation with a whitespace-only line.
text = " Foo\n Bar\n \n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEqual(expect, dedent(text))
# dedent() should not mangle internal tabs
def test_dedent_preserve_internal_tabs(self):
text = " hello\tthere\n how are\tyou?"
expect = "hello\tthere\nhow are\tyou?"
self.assertEqual(expect, dedent(text))
# make sure that it preserves tabs when it's not making any
# changes at all
self.assertEqual(expect, dedent(expect))
# dedent() should not mangle tabs in the margin (i.e.
# tabs and spaces both count as margin, but are *not*
# considered equivalent)
def test_dedent_preserve_margin_tabs(self):
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# same effect even if we have 8 spaces
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# dedent() only removes whitespace that can be uniformly removed!
text = "\thello there\n\thow are you?"
expect = "hello there\nhow are you?"
self.assertEqual(expect, dedent(text))
text = " \thello there\n \thow are you?"
self.assertEqual(expect, dedent(text))
text = " \t hello there\n \t how are you?"
self.assertEqual(expect, dedent(text))
text = " \thello there\n \t how are you?"
expect = "hello there\n how are you?"
self.assertEqual(expect, dedent(text))
# Test textwrap.indent
class IndentTestCase(unittest.TestCase):
# The examples used for tests. If any of these change, the expected
# results in the various test cases must also be updated.
# The roundtrip cases are separate, because textwrap.dedent doesn't
# handle Windows line endings
ROUNDTRIP_CASES = (
# Basic test case
"Hi.\nThis is a test.\nTesting.",
# Include a blank line
"Hi.\nThis is a test.\n\nTesting.",
# Include leading and trailing blank lines
"\nHi.\nThis is a test.\nTesting.\n",
)
CASES = ROUNDTRIP_CASES + (
# Use Windows line endings
"Hi.\r\nThis is a test.\r\nTesting.\r\n",
# Pathological case
"\nHi.\r\nThis is a test.\n\r\nTesting.\r\n\n",
)
def test_indent_nomargin_default(self):
# indent should do nothing if 'prefix' is empty.
for text in self.CASES:
self.assertEqual(indent(text, ''), text)
def test_indent_nomargin_explicit_default(self):
# The same as test_indent_nomargin, but explicitly requesting
# the default behaviour by passing None as the predicate
for text in self.CASES:
self.assertEqual(indent(text, '', None), text)
def test_indent_nomargin_all_lines(self):
# The same as test_indent_nomargin, but using the optional
# predicate argument
predicate = lambda line: True
for text in self.CASES:
self.assertEqual(indent(text, '', predicate), text)
def test_indent_no_lines(self):
# Explicitly skip indenting any lines
predicate = lambda line: False
for text in self.CASES:
self.assertEqual(indent(text, ' ', predicate), text)
def test_roundtrip_spaces(self):
# A whitespace prefix should roundtrip with dedent
for text in self.ROUNDTRIP_CASES:
self.assertEqual(dedent(indent(text, ' ')), text)
def test_roundtrip_tabs(self):
# A whitespace prefix should roundtrip with dedent
for text in self.ROUNDTRIP_CASES:
self.assertEqual(dedent(indent(text, '\t\t')), text)
def test_roundtrip_mixed(self):
# A whitespace prefix should roundtrip with dedent
for text in self.ROUNDTRIP_CASES:
self.assertEqual(dedent(indent(text, ' \t \t ')), text)
def test_indent_default(self):
# Test default indenting of lines that are not whitespace only
prefix = ' '
expected = (
# Basic test case
" Hi.\n This is a test.\n Testing.",
# Include a blank line
" Hi.\n This is a test.\n\n Testing.",
# Include leading and trailing blank lines
"\n Hi.\n This is a test.\n Testing.\n",
# Use Windows line endings
" Hi.\r\n This is a test.\r\n Testing.\r\n",
# Pathological case
"\n Hi.\r\n This is a test.\n\r\n Testing.\r\n\n",
)
for text, expect in zip(self.CASES, expected):
self.assertEqual(indent(text, prefix), expect)
def test_indent_explicit_default(self):
# Test default indenting of lines that are not whitespace only
prefix = ' '
expected = (
# Basic test case
" Hi.\n This is a test.\n Testing.",
# Include a blank line
" Hi.\n This is a test.\n\n Testing.",
# Include leading and trailing blank lines
"\n Hi.\n This is a test.\n Testing.\n",
# Use Windows line endings
" Hi.\r\n This is a test.\r\n Testing.\r\n",
# Pathological case
"\n Hi.\r\n This is a test.\n\r\n Testing.\r\n\n",
)
for text, expect in zip(self.CASES, expected):
self.assertEqual(indent(text, prefix, None), expect)
def test_indent_all_lines(self):
# Add 'prefix' to all lines, including whitespace-only ones.
prefix = ' '
expected = (
# Basic test case
" Hi.\n This is a test.\n Testing.",
# Include a blank line
" Hi.\n This is a test.\n \n Testing.",
# Include leading and trailing blank lines
" \n Hi.\n This is a test.\n Testing.\n",
# Use Windows line endings
" Hi.\r\n This is a test.\r\n Testing.\r\n",
# Pathological case
" \n Hi.\r\n This is a test.\n \r\n Testing.\r\n \n",
)
predicate = lambda line: True
for text, expect in zip(self.CASES, expected):
self.assertEqual(indent(text, prefix, predicate), expect)
def test_indent_empty_lines(self):
# Add 'prefix' solely to whitespace-only lines.
prefix = ' '
expected = (
# Basic test case
"Hi.\nThis is a test.\nTesting.",
# Include a blank line
"Hi.\nThis is a test.\n \nTesting.",
# Include leading and trailing blank lines
" \nHi.\nThis is a test.\nTesting.\n",
# Use Windows line endings
"Hi.\r\nThis is a test.\r\nTesting.\r\n",
# Pathological case
" \nHi.\r\nThis is a test.\n \r\nTesting.\r\n \n",
)
predicate = lambda line: not line.strip()
for text, expect in zip(self.CASES, expected):
self.assertEqual(indent(text, prefix, predicate), expect)
class ShortenTestCase(BaseTestCase):
def check_shorten(self, text, width, expect, **kwargs):
result = shorten(text, width, **kwargs)
self.check(result, expect)
def test_simple(self):
# Simple case: just words, spaces, and a bit of punctuation
text = "Hello there, how are you this fine day? I'm glad to hear it!"
self.check_shorten(text, 18, "Hello there, [...]")
self.check_shorten(text, len(text), text)
self.check_shorten(text, len(text) - 1,
"Hello there, how are you this fine day? "
"I'm glad to [...]")
def test_placeholder(self):
text = "Hello there, how are you this fine day? I'm glad to hear it!"
self.check_shorten(text, 17, "Hello there,$$", placeholder='$$')
self.check_shorten(text, 18, "Hello there, how$$", placeholder='$$')
self.check_shorten(text, 18, "Hello there, $$", placeholder=' $$')
self.check_shorten(text, len(text), text, placeholder='$$')
self.check_shorten(text, len(text) - 1,
"Hello there, how are you this fine day? "
"I'm glad to hear$$", placeholder='$$')
def test_empty_string(self):
self.check_shorten("", 6, "")
def test_whitespace(self):
# Whitespace collapsing
text = """
This is a paragraph that already has
line breaks and \t tabs too."""
self.check_shorten(text, 62,
"This is a paragraph that already has line "
"breaks and tabs too.")
self.check_shorten(text, 61,
"This is a paragraph that already has line "
"breaks and [...]")
self.check_shorten("hello world! ", 12, "hello world!")
self.check_shorten("hello world! ", 11, "hello [...]")
# The leading space is trimmed from the placeholder
# (it would be ugly otherwise).
self.check_shorten("hello world! ", 10, "[...]")
def test_width_too_small_for_placeholder(self):
shorten("x" * 20, width=8, placeholder="(......)")
with self.assertRaises(ValueError):
shorten("x" * 20, width=8, placeholder="(.......)")
def test_first_word_too_long_but_placeholder_fits(self):
self.check_shorten("Helloo", 5, "[...]")
# Allow running this test file directly: ``python test_textwrap.py``.
if __name__ == '__main__':
    unittest.main()
| lgpl-3.0 |
xindus40223115/2015cda_g1 | man3.py | 15 | 11740 |
import cherrypy
# 這是 MAN 類別的定義
'''
# 在 application 中導入子模組
import programs.cdag30.man as cdag30_man
# 加入 cdag30 模組下的 man.py 且以子模組 man 對應其 MAN() 類別
root.cdag30.man = cdag30_man.MAN()
# 完成設定後, 可以利用
/cdag30/man/assembly
# 呼叫 man.py 中 MAN 類別的 assembly 方法
'''
class MAN(object):
    """CherryPy handler for the cdag30 lego-man assembly pages.

    Each exposed method returns an HTML page as a string.  The pages embed
    Pro/Web.Link JavaScript that drives a Creo session to assemble the
    lego-man parts located in v:/home/lego/man.
    """

    # Each group uses index() as the entry page that leads to the rest of
    # the program flow.  (Original comment translated from Chinese.)
    @cherrypy.expose
    def index(self, *args, **kwargs):
        """Landing page: brief description plus a link to assembly()."""
        # NOTE: the page text below is user-facing content and is kept
        # verbatim (it is part of the program's output, not a comment).
        outstring = '''
這是 2014CDA 協同專案下的 cdag30 模組下的 MAN 類別.<br /><br />
<!-- 這裡採用相對連結, 而非網址的絕對連結 (這一段為 html 註解) -->
<a href="assembly">執行 MAN 類別中的 assembly 方法</a><br /><br />
請確定下列零件於 V:/home/lego/man 目錄中, 且開啟空白 Creo 組立檔案.<br />
<a href="/static/lego_man.7z">lego_man.7z</a>(滑鼠右鍵存成 .7z 檔案)<br />
'''
        return outstring

    @cherrypy.expose
    def assembly(self, *args, **kwargs):
        """Return an HTML page whose embedded JavaScript assembles the
        lego man (waist + both legs) in the currently open Creo assembly.

        The JavaScript below is emitted verbatim into the page; it defines
        helper functions for axis/plane and three-plane constraint
        assembly and then calls them.  It must be kept byte-identical.
        """
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
</head>
<body>
</script><script language="JavaScript">
/*man2.py 完全利用函式呼叫進行組立*/
/*設計一個零件組立函式*/
// featID 為組立件第一個組立零件的編號
// inc 則為 part1 的組立順序編號, 第一個入組立檔編號為 featID+0
// part2 為外加的零件名稱
////////////////////////////////////////////////
// axis_plane_assembly 組立函式
////////////////////////////////////////////////
function axis_plane_assembly(session, assembly, transf, featID, inc, part2, axis1, plane1, axis2, plane2){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
var asmDatums = new Array(axis1, plane1);
var compDatums = new Array(axis2, plane2);
var relation = new Array (pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
}
// 以上為 axis_plane_assembly() 函式
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly 採 align 組立, 若 featID 為 0 表示為空組立檔案
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
// 若 featID 為 0 表示為空組立檔案
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
// 設法取得第一個組立零件 first_featID
// 取得 assembly 項下的元件 id, 因為只有一個零件, 採用 index 0 取出其 featID
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
// 此一 featID 為組立件中的第一個零件編號, 也就是樂高人偶的 body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
// 若 featID = 0 則傳回 first_featID
if (featID == 0)
return first_featID;
}
// 以上為 three_plane_assembly() 函式
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly2 採 mate 組立, 若 featID 為 0 表示為空組立檔案
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly2(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
// 若 featID 為 0 表示為空組立檔案
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
// 設法取得第一個組立零件 first_featID
// 取得 assembly 項下的元件 id, 因為只有一個零件, 採用 index 0 取出其 featID
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
// 此一 featID 為組立件中的第一個零件編號, 也就是樂高人偶的 body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
// 若 featID = 0 則傳回 first_featID
if (featID == 0)
return first_featID;
}
// 以上為 three_plane_assembly2() 函式, 主要採三面 MATE 組立
//
// 假如 Creo 所在的操作系統不是 Windows 環境
if (!pfcIsWindows())
// 則啟動對應的 UniversalXPConnect 執行權限 (等同 Windows 下的 ActiveX)
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// pfcGetProESession() 是位於 pfcUtils.js 中的函式, 確定此 JavaScript 是在嵌入式瀏覽器中執行
var session = pfcGetProESession();
// 設定 config option, 不要使用元件組立流程中內建的假設約束條件
session.SetConfigOption("comp_placement_assumptions","no");
// 建立擺放零件的位置矩陣, Pro/Web.Link 中的變數無法直接建立, 必須透過 pfcCreate() 建立
var identityMatrix = pfcCreate("pfcMatrix3D");
// 建立 identity 位置矩陣
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set(x, y, 1.0);
else
identityMatrix.Set(x, y, 0.0);
}
// 利用 identityMatrix 建立 transf 座標轉換矩陣
var transf = pfcCreate("pfcTransform3D").Create(identityMatrix);
// 取得目前的工作目錄
var currentDir = session.getCurrentDirectory();
// 以目前已開檔的空白組立檔案, 作為 model
var model = session.CurrentModel;
// 查驗有無 model, 或 model 類別是否為組立件, 若不符合條件則丟出錯誤訊息
if (model == void null || model.Type != pfcCreate("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
// 將此模型設為組立物件
var assembly = model;
/////////////////////////////////////////////////////////////////
// 開始執行組立, 全部採函式呼叫組立
/////////////////////////////////////////////////////////////////
// Body 與 WAIST 採三個平面約束組立
// Body 組立面為 DTM4, DTM5, DTM6
// WAIST 組立面為 DTM1, DTM2, DTM3, 組立增量次序為 6, 與 body 採三面 mate 組立
three_plane_assembly2(session, assembly, transf, 40, 0, "LEGO_WAIST.prt", "DTM4", "DTM5", "DTM6", "DTM1", "DTM2", "DTM3");
// 右腳
axis_plane_assembly(session, assembly, transf, 40, 5,
"LEGO_LEG_RT.prt", "A_8", "DTM4", "A_10", "DTM1");
// 左腳
axis_plane_assembly(session, assembly, transf, 40, 5,
"LEGO_LEG_LT.prt", "A_8", "DTM5", "A_10", "DTM1");
// regenerate 並且 repaint 組立檔案
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
        return outstring
Deepakkothandan/ansible | lib/ansible/modules/remote_management/oneview/oneview_ethernet_network.py | 147 | 8911 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network
short_description: Manage OneView Ethernet Network resources
description:
- Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
version_added: "2.4"
requirements:
- hpOneView >= 3.1.0
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
state:
description:
- Indicates the desired state for the Ethernet Network resource.
- C(present) will ensure data properties are compliant with OneView.
- C(absent) will remove the resource from OneView, if it exists.
- C(default_bandwidth_reset) will reset the network connection template to the default.
default: present
choices: [present, absent, default_bandwidth_reset]
data:
description:
- List with Ethernet Network properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Ensure that the Ethernet Network is present using the default configuration
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
vlanId: '201'
delegate_to: localhost
- name: Update the Ethernet Network changing bandwidth and purpose
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
purpose: Management
bandwidth:
maximumBandwidth: 3000
typicalBandwidth: 2000
delegate_to: localhost
- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
newName: 'Renamed Ethernet Network'
delegate_to: localhost
- name: Ensure that the Ethernet Network is absent
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: absent
data:
name: 'New Ethernet Network'
delegate_to: localhost
- name: Create Ethernet networks in bulk
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
vlanIdRange: '1-10,15,17'
purpose: General
namePrefix: TestNetwork
smartLink: false
privateNetwork: false
bandwidth:
maximumBandwidth: 10000
typicalBandwidth: 2000
delegate_to: localhost
- name: Reset to the default network connection template
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: default_bandwidth_reset
data:
name: 'Test Ethernet Network'
delegate_to: localhost
'''
RETURN = '''
ethernet_network:
description: Has the facts about the Ethernet Networks.
returned: On state 'present'. Can be null.
type: dict
ethernet_network_bulk:
description: Has the facts about the Ethernet Networks affected by the bulk insert.
returned: When 'vlanIdRange' attribute is in data argument. Can be null.
type: dict
ethernet_network_connection_template:
description: Has the facts about the Ethernet Network Connection Template.
returned: On state 'default_bandwidth_reset'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
class EthernetNetworkModule(OneViewModuleBase):
    """Ansible module implementation for managing OneView Ethernet Networks.

    Supports three states: ``present`` (create/update, including bulk
    creation via ``vlanIdRange``), ``absent`` (delete), and
    ``default_bandwidth_reset`` (restore the default connection template).
    """

    # User-facing result messages; also used by the module's tests.
    MSG_CREATED = 'Ethernet Network created successfully.'
    MSG_UPDATED = 'Ethernet Network updated successfully.'
    MSG_DELETED = 'Ethernet Network deleted successfully.'
    MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
    MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
    MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
    MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
    MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
    MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
    MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
    # Key under which the resource is reported in ansible_facts.
    RESOURCE_FACT_NAME = 'ethernet_network'

    def __init__(self):
        """Declare module-specific arguments and select the SDK client."""
        argument_spec = dict(
            state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
            data=dict(type='dict', required=True),
        )
        super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
        self.resource_client = self.oneview_client.ethernet_networks

    def execute_module(self):
        """Dispatch on the requested state and return the Ansible result dict."""
        changed, msg, ansible_facts, resource = False, '', {}, None
        # 'name' is absent for bulk creation requests (vlanIdRange form).
        if self.data.get('name'):
            resource = self.get_by_name(self.data['name'])
        if self.state == 'present':
            if self.data.get('vlanIdRange'):
                return self._bulk_present()
            else:
                return self._present(resource)
        elif self.state == 'absent':
            return self.resource_absent(resource)
        elif self.state == 'default_bandwidth_reset':
            changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
        return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)

    def _present(self, resource):
        """Ensure a single Ethernet Network exists and matches ``data``.

        ``bandwidth`` and ``scopeUris`` are popped out first because they
        are applied through separate API calls, not the network payload.
        """
        bandwidth = self.data.pop('bandwidth', None)
        scope_uris = self.data.pop('scopeUris', None)
        result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
        if bandwidth:
            # [0] is the 'changed' flag returned by _update_connection_template.
            if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
                result['changed'] = True
                result['msg'] = self.MSG_UPDATED
        if scope_uris is not None:
            result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
        return result

    def _bulk_present(self):
        """Ensure every network in ``vlanIdRange`` exists, creating only the
        missing VLAN ids when some networks are already present."""
        vlan_id_range = self.data['vlanIdRange']
        result = dict(ansible_facts={})
        ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
        if not ethernet_networks:
            # Nothing in the range exists yet: create the whole batch.
            self.resource_client.create_bulk(self.data)
            result['changed'] = True
            result['msg'] = self.MSG_BULK_CREATED
        else:
            # Expand the range, then subtract the VLAN ids already present.
            vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
            for net in ethernet_networks[:]:
                vlan_ids.remove(net['vlanId'])
            if len(vlan_ids) == 0:
                result['msg'] = self.MSG_BULK_ALREADY_EXIST
                result['changed'] = False
            else:
                # Rewrite vlanIdRange to cover only the missing ids.
                if len(vlan_ids) == 1:
                    self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
                else:
                    self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
                self.resource_client.create_bulk(self.data)
                result['changed'] = True
                result['msg'] = self.MSG_MISSING_BULK_CREATED
        # Report the final state of the whole requested range.
        result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
        return result

    def _update_connection_template(self, ethernet_network, bandwidth):
        """Apply ``bandwidth`` to the network's connection template.

        Returns (changed, connection_template) where connection_template is
        None when nothing was updated.
        """
        if 'connectionTemplateUri' not in ethernet_network:
            return False, None
        connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
        merged_data = connection_template.copy()
        merged_data.update({'bandwidth': bandwidth})
        # Only push an update when the merged template actually differs.
        if not self.compare(connection_template, merged_data):
            connection_template = self.oneview_client.connection_templates.update(merged_data)
            return True, connection_template
        else:
            return False, None

    def _default_bandwidth_reset(self, resource):
        """Reset the network's connection template to the appliance default.

        Raises OneViewModuleResourceNotFound when the network is missing.
        Returns the (changed, msg, ansible_facts) triple for execute_module.
        """
        if not resource:
            raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
        default_connection_template = self.oneview_client.connection_templates.get_default()
        changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
        return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
            ethernet_network_connection_template=connection_template)
def main():
    # Entry point: build the module wrapper and delegate to the base
    # class run loop (argument parsing, execution, exit_json/fail_json).
    EthernetNetworkModule().run()


if __name__ == '__main__':
    main()
| gpl-3.0 |
haad/ansible | lib/ansible/modules/network/netscaler/netscaler_gslb_vserver.py | 124 | 33875 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_gslb_vserver
short_description: Configure gslb vserver entities in Netscaler.
description:
- Configure gslb vserver entities in Netscaler.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the GSLB virtual server. Must begin with an ASCII alphanumeric or underscore C(_) character,
and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, colon C(:), at C(@),
equals C(=), and hyphen C(-) characters. Can be changed after the virtual server is created.
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'NNTP'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'RADIUS'
- 'RDP'
- 'RTSP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
description:
- "Protocol used by services bound to the virtual server."
- >-
dnsrecordtype:
choices:
- 'A'
- 'AAAA'
- 'CNAME'
- 'NAPTR'
description:
- "DNS record type to associate with the GSLB virtual server's domain name."
- "Default value: A"
- "Possible values = A, AAAA, CNAME, NAPTR"
lbmethod:
choices:
- 'ROUNDROBIN'
- 'LEASTCONNECTION'
- 'LEASTRESPONSETIME'
- 'SOURCEIPHASH'
- 'LEASTBANDWIDTH'
- 'LEASTPACKETS'
- 'STATICPROXIMITY'
- 'RTT'
- 'CUSTOMLOAD'
description:
- "Load balancing method for the GSLB virtual server."
- "Default value: LEASTCONNECTION"
- >-
Possible values = ROUNDROBIN, LEASTCONNECTION, LEASTRESPONSETIME, SOURCEIPHASH, LEASTBANDWIDTH,
LEASTPACKETS, STATICPROXIMITY, RTT, CUSTOMLOAD
backuplbmethod:
choices:
- 'ROUNDROBIN'
- 'LEASTCONNECTION'
- 'LEASTRESPONSETIME'
- 'SOURCEIPHASH'
- 'LEASTBANDWIDTH'
- 'LEASTPACKETS'
- 'STATICPROXIMITY'
- 'RTT'
- 'CUSTOMLOAD'
description:
- >-
Backup load balancing method. Becomes operational if the primary load balancing method fails or
cannot be used. Valid only if the primary method is based on either round-trip time (RTT) or static
proximity.
netmask:
description:
- "IPv4 network mask for use in the SOURCEIPHASH load balancing method."
- "Minimum length = 1"
v6netmasklen:
description:
- >-
Number of bits to consider, in an IPv6 source IP address, for creating the hash that is required by
the C(SOURCEIPHASH) load balancing method.
- "Default value: C(128)"
- "Minimum value = C(1)"
- "Maximum value = C(128)"
tolerance:
description:
- >-
Site selection tolerance, in milliseconds, for implementing the RTT load balancing method. If a
site's RTT deviates from the lowest RTT by more than the specified tolerance, the site is not
considered when the NetScaler appliance makes a GSLB decision. The appliance implements the round
robin method of global server load balancing between sites whose RTT values are within the specified
tolerance. If the tolerance is 0 (zero), the appliance always sends clients the IP address of the
site with the lowest RTT.
- "Minimum value = C(0)"
- "Maximum value = C(100)"
persistencetype:
choices:
- 'SOURCEIP'
- 'NONE'
description:
- "Use source IP address based persistence for the virtual server."
- >-
After the load balancing method selects a service for the first packet, the IP address received in
response to the DNS query is used for subsequent requests from the same client.
persistenceid:
description:
- >-
The persistence ID for the GSLB virtual server. The ID is a positive integer that enables GSLB sites
to identify the GSLB virtual server, and is required if source IP address based or spill over based
persistence is enabled on the virtual server.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
persistmask:
description:
- >-
The optional IPv4 network mask applied to IPv4 addresses to establish source IP address based
persistence.
- "Minimum length = 1"
v6persistmasklen:
description:
- >-
Number of bits to consider in an IPv6 source IP address when creating source IP address based
persistence sessions.
- "Default value: C(128)"
- "Minimum value = C(1)"
- "Maximum value = C(128)"
timeout:
description:
- "Idle time, in minutes, after which a persistence entry is cleared."
- "Default value: C(2)"
- "Minimum value = C(2)"
- "Maximum value = C(1440)"
mir:
choices:
- 'enabled'
- 'disabled'
description:
- "Include multiple IP addresses in the DNS responses sent to clients."
disableprimaryondown:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Continue to direct traffic to the backup chain even after the primary GSLB virtual server returns to
the UP state. Used when spillover is configured for the virtual server.
dynamicweight:
choices:
- 'SERVICECOUNT'
- 'SERVICEWEIGHT'
- 'DISABLED'
description:
- >-
Specify if the appliance should consider the service count, service weights, or ignore both when
using weight-based load balancing methods. The state of the number of services bound to the virtual
server help the appliance to select the service.
considereffectivestate:
choices:
- 'NONE'
- 'STATE_ONLY'
description:
- >-
If the primary state of all bound GSLB services is DOWN, consider the effective states of all the
GSLB services, obtained through the Metrics Exchange Protocol (MEP), when determining the state of
the GSLB virtual server. To consider the effective state, set the parameter to STATE_ONLY. To
disregard the effective state, set the parameter to NONE.
- >-
The effective state of a GSLB service is the ability of the corresponding virtual server to serve
traffic. The effective state of the load balancing virtual server, which is transferred to the GSLB
service, is UP even if only one virtual server in the backup chain of virtual servers is in the UP
state.
comment:
description:
- "Any comments that you might want to associate with the GSLB virtual server."
somethod:
choices:
- 'CONNECTION'
- 'DYNAMICCONNECTION'
- 'BANDWIDTH'
- 'HEALTH'
- 'NONE'
description:
- "Type of threshold that, when exceeded, triggers spillover. Available settings function as follows:"
- "* C(CONNECTION) - Spillover occurs when the number of client connections exceeds the threshold."
- >-
* C(DYNAMICCONNECTION) - Spillover occurs when the number of client connections at the GSLB virtual
server exceeds the sum of the maximum client (Max Clients) settings for bound GSLB services. Do not
specify a spillover threshold for this setting, because the threshold is implied by the Max Clients
settings of the bound GSLB services.
- >-
* C(BANDWIDTH) - Spillover occurs when the bandwidth consumed by the GSLB virtual server's incoming and
outgoing traffic exceeds the threshold.
- >-
* C(HEALTH) - Spillover occurs when the percentage of weights of the GSLB services that are UP drops
below the threshold. For example, if services gslbSvc1, gslbSvc2, and gslbSvc3 are bound to a virtual
server, with weights 1, 2, and 3, and the spillover threshold is 50%, spillover occurs if gslbSvc1
and gslbSvc3 or gslbSvc2 and gslbSvc3 transition to DOWN.
- "* C(NONE) - Spillover does not occur."
sopersistence:
choices:
- 'enabled'
- 'disabled'
description:
- >-
If spillover occurs, maintain source IP address based persistence for both primary and backup GSLB
virtual servers.
sopersistencetimeout:
description:
- "Timeout for spillover persistence, in minutes."
- "Default value: C(2)"
- "Minimum value = C(2)"
- "Maximum value = C(1440)"
sothreshold:
description:
- >-
Threshold at which spillover occurs. Specify an integer for the CONNECTION spillover method, a
bandwidth value in kilobits per second for the BANDWIDTH method (do not enter the units), or a
percentage for the HEALTH method (do not enter the percentage symbol).
- "Minimum value = C(1)"
- "Maximum value = C(4294967287)"
sobackupaction:
choices:
- 'DROP'
- 'ACCEPT'
- 'REDIRECT'
description:
- >-
Action to be performed if spillover is to take effect, but no backup chain to spillover is usable or
exists.
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging appflow flow information."
domain_bindings:
description:
- >-
List of bindings for domains for this glsb vserver.
suboptions:
cookietimeout:
description:
- Timeout, in minutes, for the GSLB site cookie.
domainname:
description:
- Domain name for which to change the time to live (TTL) and/or backup service IP address.
ttl:
description:
- Time to live (TTL) for the domain.
sitedomainttl:
description:
- >-
TTL, in seconds, for all internally created site domains (created when a site prefix is
configured on a GSLB service) that are associated with this virtual server.
- Minimum value = C(1)
service_bindings:
description:
- List of bindings for gslb services bound to this gslb virtual server.
suboptions:
servicename:
description:
- Name of the GSLB service for which to change the weight.
weight:
description:
- Weight to assign to the GSLB service.
disabled:
description:
- When set to C(yes) the GSLB Vserver state will be set to C(disabled).
- When set to C(no) the GSLB Vserver state will be set to C(enabled).
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
'''
RETURN = '''
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver import gslbvserver
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver_gslbservice_binding import gslbvserver_gslbservice_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver_domain_binding import gslbvserver_domain_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
get_immutables_intersection,
complete_missing_attributes
)
# NITRO read/write attributes of a gslbvserver <-> domain binding.
# Used when building ConfigProxy objects for configured bindings and when
# filling in attributes missing from bindings fetched off the appliance.
gslbvserver_domain_binding_rw_attrs = [
    'name',
    'domainname',
    'backupipflag',
    'cookietimeout',
    'backupip',
    'ttl',
    'sitedomainttl',
    'cookie_domainflag',
]

# NITRO read/write attributes of a gslbvserver <-> gslb service binding.
gslbvserver_gslbservice_binding_rw_attrs = [
    'name',
    'servicename',
    'weight',
]
def get_actual_domain_bindings(client, module):
    """Fetch the domain bindings currently present on the appliance for the
    configured gslb vserver, indexed by domainname."""
    log('get_actual_domain_bindings')
    vserver_name = module.params['name']
    if gslbvserver_domain_binding.count(client, name=vserver_name) == 0:
        return {}
    fetched = gslbvserver_domain_binding.get(client, name=vserver_name)
    # The NITRO objects may omit attributes; normalize them to None so that
    # later attribute comparisons are well defined.
    for item in fetched:
        complete_missing_attributes(item, gslbvserver_domain_binding_rw_attrs, fill_value=None)
    return {item.domainname: item for item in fetched}
def get_configured_domain_bindings_proxys(client, module):
    """Build ConfigProxy objects for the domain bindings declared in the
    playbook, indexed by domainname."""
    log('get_configured_domain_bindings_proxys')
    proxys_by_domain = {}
    domain_bindings = module.params['domain_bindings']
    if domain_bindings is None:
        return proxys_by_domain
    for binding_params in domain_bindings:
        # Copy so that injecting the vserver name does not mutate the
        # user-supplied parameters.
        values = copy.deepcopy(binding_params)
        values['name'] = module.params['name']
        proxys_by_domain[binding_params['domainname']] = ConfigProxy(
            actual=gslbvserver_domain_binding(),
            client=client,
            attribute_values_dict=values,
            readwrite_attrs=gslbvserver_domain_binding_rw_attrs,
            readonly_attrs=[],
        )
    return proxys_by_domain
def sync_domain_bindings(client, module):
    """Make the appliance's domain bindings match the configured ones:
    delete absent bindings, recreate differing ones, add missing ones."""
    log('sync_domain_bindings')
    actual_bindings = get_actual_domain_bindings(client, module)
    configured_proxys = get_configured_domain_bindings_proxys(client, module)

    # Remove bindings present on the appliance but not in the playbook.
    for domainname, actual_binding in actual_bindings.items():
        if domainname not in configured_proxys:
            log('Deleting absent binding for domain %s' % domainname)
            gslbvserver_domain_binding.delete(client, actual_binding)

    # Recreate bindings whose attributes differ from the configuration.
    for domainname, proxy in configured_proxys.items():
        if domainname in actual_bindings:
            existing = actual_bindings[domainname]
            if not proxy.has_equal_attributes(existing):
                log('Deleting differing binding for domain %s' % proxy.domainname)
                gslbvserver_domain_binding.delete(client, existing)
                log('Adding anew binding for domain %s' % proxy.domainname)
                proxy.add()

    # Add configured bindings that do not exist on the appliance yet.
    for domainname, proxy in configured_proxys.items():
        if domainname not in actual_bindings:
            log('Adding domain binding for domain %s' % proxy.domainname)
            proxy.add()
def domain_bindings_identical(client, module):
    """Return True when the appliance's domain bindings match the
    configured ones exactly (ignoring backupipflag)."""
    log('domain_bindings_identical')
    actual = get_actual_domain_bindings(client, module)
    configured = get_configured_domain_bindings_proxys(client, module)
    symmetric_difference = set(actual.keys()) ^ set(configured.keys())
    log('symmetric difference %s' % symmetric_difference)
    # Any domain present on only one side means the sets differ.
    if symmetric_difference:
        return False
    # Compare attributes pairwise for the common domains.
    for domainname, proxy in configured.items():
        diff = proxy.diff_object(actual[domainname])
        # backupipflag is not reliably reported by the appliance; ignore it.
        diff.pop('backupipflag', None)
        if diff:
            return False
    return True
def get_actual_service_bindings(client, module):
    """Fetch the gslb service bindings currently present on the appliance
    for the configured gslb vserver, indexed by servicename."""
    log('get_actual_service_bindings')
    vserver_name = module.params['name']
    if gslbvserver_gslbservice_binding.count(client, name=vserver_name) == 0:
        return {}
    fetched = gslbvserver_gslbservice_binding.get(client, name=vserver_name)
    # Normalize missing NITRO attributes to None for later comparisons.
    for item in fetched:
        complete_missing_attributes(item, gslbvserver_gslbservice_binding_rw_attrs, fill_value=None)
    return {item.servicename: item for item in fetched}
def get_configured_service_bindings(client, module):
    """Build ConfigProxy objects for the service bindings declared in the
    playbook, indexed by servicename."""
    log('get_configured_service_bindings_proxys')
    proxys_by_service = {}
    service_bindings = module.params['service_bindings']
    if service_bindings is None:
        return proxys_by_service
    for binding_params in service_bindings:
        # Copy so the vserver name injection does not mutate user input.
        values = copy.deepcopy(binding_params)
        values['name'] = module.params['name']
        proxys_by_service[binding_params['servicename']] = ConfigProxy(
            actual=gslbvserver_gslbservice_binding(),
            client=client,
            attribute_values_dict=values,
            readwrite_attrs=gslbvserver_gslbservice_binding_rw_attrs,
            readonly_attrs=[],
        )
    return proxys_by_service
def sync_service_bindings(client, module):
    """Make the target's service bindings match the configured ones.

    Deletes bindings not present in the configuration, recreates bindings
    whose attributes differ, and adds bindings that are missing.
    """
    actual = get_actual_service_bindings(client, module)
    configured = get_configured_service_bindings(client, module)

    # Delete bindings present on the target but absent from the configuration
    extraneous_service_bindings = list(set(actual.keys()) - set(configured.keys()))
    for servicename in extraneous_service_bindings:
        log('Deleting extraneous binding from service %s' % servicename)
        binding = actual[servicename]
        binding.name = module.params['name']
        gslbvserver_gslbservice_binding.delete(client, binding)

    # Recreate bindings present on both sides whose attributes differ
    common_service_bindings = list(set(actual.keys()) & set(configured.keys()))
    for servicename in common_service_bindings:
        proxy = configured[servicename]
        binding = actual[servicename]
        # BUGFIX: compare against the matching binding, not the whole dict.
        # The previous code passed `actual` (the dict), so the equality test
        # always failed and every common binding was needlessly recreated.
        if not proxy.has_equal_attributes(binding):
            log('Recreating differing service binding %s' % servicename)
            gslbvserver_gslbservice_binding.delete(client, binding)
            proxy.add()

    # Add configured bindings that do not yet exist on the target
    missing_service_bindings = list(set(configured.keys()) - set(actual.keys()))
    for servicename in missing_service_bindings:
        proxy = configured[servicename]
        log('Adding missing service binding %s' % servicename)
        proxy.add()
def service_bindings_identical(client, module):
    """Return True when actual service bindings exactly match the configured ones."""
    actual_bindings = get_actual_service_bindings(client, module)
    configured_proxys = get_configured_service_bindings(client, module)
    # Both sides must reference exactly the same set of service names.
    if set(actual_bindings.keys()) != set(configured_proxys.keys()):
        return False
    # Each configured binding must match its actual counterpart attribute-for-attribute.
    for servicename, proxy in configured_proxys.items():
        if not proxy.has_equal_attributes(actual_bindings[servicename]):
            return False
    return True
def gslb_vserver_exists(client, module):
    """Return True when a gslb vserver with the configured name exists on the target."""
    # Return the comparison directly instead of the if/else True/False anti-idiom.
    return gslbvserver.count_filtered(client, 'name:%s' % module.params['name']) > 0
def gslb_vserver_identical(client, module, gslb_vserver_proxy):
    """Return True when the existing gslb vserver's attributes match the configured proxy."""
    gslb_vserver_list = gslbvserver.get_filtered(client, 'name:%s' % module.params['name'])
    diff_dict = gslb_vserver_proxy.diff_object(gslb_vserver_list[0])
    # Identical when there are no differing attributes (replaces if/else True/False).
    return len(diff_dict) == 0
def all_identical(client, module, gslb_vserver_proxy):
    """Return True when the vserver, its domain bindings and its service bindings all match.

    Checks short-circuit in the same order as before: vserver attributes,
    then domain bindings, then service bindings.
    """
    if not gslb_vserver_identical(client, module, gslb_vserver_proxy):
        return False
    if not domain_bindings_identical(client, module):
        return False
    return service_bindings_identical(client, module)
def diff_list(client, module, gslb_vserver_proxy):
    """Return the dict of attribute differences between the configured proxy and the existing vserver."""
    existing = gslbvserver.get_filtered(client, 'name:%s' % module.params['name'])
    return gslb_vserver_proxy.diff_object(existing[0])
def do_state_change(client, module, gslb_vserver_proxy):
    """Enable or disable the gslb vserver according to the 'disabled' parameter.

    Returns the nitro result object of the enable/disable call so the
    caller can inspect its errorcode.
    """
    if module.params['disabled']:
        # Fixed log message typo ('glsb_vserver') and made wording consistent.
        log('Disabling gslb vserver')
        result = gslbvserver.disable(client, gslb_vserver_proxy.actual)
    else:
        log('Enabling gslb vserver')
        result = gslbvserver.enable(client, gslb_vserver_proxy.actual)
    return result
def main():
    """Entry point: build the argument spec, log in to the NetScaler, and
    drive the gslb vserver (plus domain and service bindings) to the
    requested state ('present' or 'absent'), honoring check mode."""
    # Arguments mirroring rw attributes of the nitro gslbvserver resource.
    module_specific_arguments = dict(
        name=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'NNTP',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'RADIUS',
                'RDP',
                'RTSP',
                'MYSQL',
                'MSSQL',
                'ORACLE',
            ]
        ),
        dnsrecordtype=dict(
            type='str',
            choices=[
                'A',
                'AAAA',
                'CNAME',
                'NAPTR',
            ]
        ),
        lbmethod=dict(
            type='str',
            choices=[
                'ROUNDROBIN',
                'LEASTCONNECTION',
                'LEASTRESPONSETIME',
                'SOURCEIPHASH',
                'LEASTBANDWIDTH',
                'LEASTPACKETS',
                'STATICPROXIMITY',
                'RTT',
                'CUSTOMLOAD',
            ]
        ),
        backuplbmethod=dict(
            type='str',
            choices=[
                'ROUNDROBIN',
                'LEASTCONNECTION',
                'LEASTRESPONSETIME',
                'SOURCEIPHASH',
                'LEASTBANDWIDTH',
                'LEASTPACKETS',
                'STATICPROXIMITY',
                'RTT',
                'CUSTOMLOAD',
            ]
        ),
        netmask=dict(type='str'),
        v6netmasklen=dict(type='float'),
        tolerance=dict(type='float'),
        persistencetype=dict(
            type='str',
            choices=[
                'SOURCEIP',
                'NONE',
            ]
        ),
        persistenceid=dict(type='float'),
        persistmask=dict(type='str'),
        v6persistmasklen=dict(type='float'),
        timeout=dict(type='float'),
        mir=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        disableprimaryondown=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        dynamicweight=dict(
            type='str',
            choices=[
                'SERVICECOUNT',
                'SERVICEWEIGHT',
                'DISABLED',
            ]
        ),
        considereffectivestate=dict(
            type='str',
            choices=[
                'NONE',
                'STATE_ONLY',
            ]
        ),
        comment=dict(type='str'),
        somethod=dict(
            type='str',
            choices=[
                'CONNECTION',
                'DYNAMICCONNECTION',
                'BANDWIDTH',
                'HEALTH',
                'NONE',
            ]
        ),
        sopersistence=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        sopersistencetimeout=dict(type='float'),
        sothreshold=dict(type='float'),
        sobackupaction=dict(
            type='str',
            choices=[
                'DROP',
                'ACCEPT',
                'REDIRECT',
            ]
        ),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        domainname=dict(type='str'),
        cookie_domain=dict(type='str'),
    )
    # Arguments handled by this module itself rather than passed to nitro.
    hand_inserted_arguments = dict(
        domain_bindings=dict(type='list'),
        service_bindings=dict(type='list'),
        disabled=dict(
            type='bool',
            default=False,
        ),
    )
    # Merge common netscaler args, resource args and module-level args.
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    # Result skeleton returned via exit_json/fail_json.
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # String comparison on the exception type avoids a hard dependency
        # on the requests package being importable here.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Attributes this module may read and write on the gslbvserver resource.
    readwrite_attrs = [
        'name',
        'servicetype',
        'dnsrecordtype',
        'lbmethod',
        'backuplbmethod',
        'netmask',
        'v6netmasklen',
        'tolerance',
        'persistencetype',
        'persistenceid',
        'persistmask',
        'v6persistmasklen',
        'timeout',
        'mir',
        'disableprimaryondown',
        'dynamicweight',
        'considereffectivestate',
        'comment',
        'somethod',
        'sopersistence',
        'sopersistencetimeout',
        'sothreshold',
        'sobackupaction',
        'appflowlog',
        'cookie_domain',
    ]
    # Attributes reported by nitro but never written by this module.
    readonly_attrs = [
        'curstate',
        'status',
        'lbrrreason',
        'iscname',
        'sitepersistence',
        'totalservices',
        'activeservices',
        'statechangetimesec',
        'statechangetimemsec',
        'tickssincelaststatechange',
        'health',
        'policyname',
        'priority',
        'gotopriorityexpression',
        'type',
        'vsvrbindsvcip',
        'vsvrbindsvcport',
        '__count',
    ]
    # Attributes that cannot be changed after creation; changing them fails the task.
    immutable_attrs = [
        'name',
        'servicetype',
    ]
    # Case transforms: module accepts lowercase, nitro expects uppercase.
    transforms = {
        'mir': [lambda v: v.upper()],
        'disableprimaryondown': [lambda v: v.upper()],
        'sopersistence': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
    }
    # Instantiate config proxy
    gslb_vserver_proxy = ConfigProxy(
        actual=gslbvserver(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        ensure_feature_is_enabled(client, 'GSLB')
        # Apply appropriate state
        if module.params['state'] == 'present':
            log('Applying state present')
            if not gslb_vserver_exists(client, module):
                # Vserver is missing entirely: create it plus all bindings.
                log('Creating object')
                if not module.check_mode:
                    gslb_vserver_proxy.add()
                    sync_domain_bindings(client, module)
                    sync_service_bindings(client, module)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not all_identical(client, module, gslb_vserver_proxy):
                # Vserver exists but differs somewhere: update only what differs.
                log('Entering update actions')
                # Check if we try to change value of immutable attributes
                if not gslb_vserver_identical(client, module, gslb_vserver_proxy):
                    log('Updating gslb vserver')
                    immutables_changed = get_immutables_intersection(gslb_vserver_proxy, diff_list(client, module, gslb_vserver_proxy).keys())
                    if immutables_changed != []:
                        module.fail_json(
                            msg='Cannot update immutable attributes %s' % (immutables_changed,),
                            diff=diff_list(client, module, gslb_vserver_proxy),
                            **module_result
                        )
                    if not module.check_mode:
                        gslb_vserver_proxy.update()
                # Update domain bindings
                if not domain_bindings_identical(client, module):
                    if not module.check_mode:
                        sync_domain_bindings(client, module)
                # Update service bindings
                if not service_bindings_identical(client, module):
                    if not module.check_mode:
                        sync_service_bindings(client, module)
                module_result['changed'] = True
                if not module.check_mode:
                    if module.params['save_config']:
                        client.save_config()
            else:
                # Everything already matches: nothing to do.
                module_result['changed'] = False
            # Apply the enabled/disabled operational state.
            if not module.check_mode:
                res = do_state_change(client, module, gslb_vserver_proxy)
                if res.errorcode != 0:
                    msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
                    module.fail_json(msg=msg, **module_result)
            # Sanity check for state
            if not module.check_mode:
                if not gslb_vserver_exists(client, module):
                    module.fail_json(msg='GSLB Vserver does not exist', **module_result)
                if not gslb_vserver_identical(client, module, gslb_vserver_proxy):
                    module.fail_json(msg='GSLB Vserver differs from configured', diff=diff_list(client, module, gslb_vserver_proxy), **module_result)
                if not domain_bindings_identical(client, module):
                    module.fail_json(msg='Domain bindings differ from configured', diff=diff_list(client, module, gslb_vserver_proxy), **module_result)
                if not service_bindings_identical(client, module):
                    module.fail_json(msg='Service bindings differ from configured', diff=diff_list(client, module, gslb_vserver_proxy), **module_result)
        elif module.params['state'] == 'absent':
            # Remove the vserver (bindings are presumably removed with it —
            # no explicit binding cleanup is performed here).
            if gslb_vserver_exists(client, module):
                if not module.check_mode:
                    gslb_vserver_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                if gslb_vserver_exists(client, module):
                    module.fail_json(msg='GSLB Vserver still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
if __name__ == "__main__":
    # Ansible executes the module file directly; run it only in that case.
    main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.