repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
lakshayg/tensorflow | tensorflow/python/training/gradient_descent.py | 99 | 2907 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class GradientDescentOptimizer(optimizer.Optimizer):
  """Optimizer that implements the gradient descent algorithm.

  Applies the plain SGD update `var -= learning_rate * grad` via the
  fused TensorFlow training ops; no momentum, decay, or per-slot state.
  """

  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    """Construct a new gradient descent optimizer.

    Args:
      learning_rate: A Tensor or a floating point value. The learning
        rate to use.
      use_locking: If True use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "GradientDescent".
    """
    super(GradientDescentOptimizer, self).__init__(use_locking, name)
    # Raw value; converted to a tensor once per apply_gradients() call in
    # _prepare(), which the base class guarantees runs before the
    # _apply_*/_resource_apply_* hooks below.
    self._learning_rate = learning_rate

  def _apply_dense(self, grad, var):
    # Dense update for ref variables. Returns the .op of the fused kernel.
    return training_ops.apply_gradient_descent(
        var,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking).op

  def _resource_apply_dense(self, grad, handle):
    # Dense update for resource variables. NOTE(review): the lr is cast to
    # grad's dtype here but to var's dtype in _apply_dense — presumably
    # equivalent in practice since grad and var dtypes match; confirm.
    return training_ops.resource_apply_gradient_descent(
        handle.handle, math_ops.cast(self._learning_rate_tensor,
                                     grad.dtype.base_dtype),
        grad, use_locking=self._use_locking)

  def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
    # Sparse update expressed as a scatter-add of -lr * grad; scatter_add
    # accumulates correctly when `indices` contains duplicates. Uses the
    # raw self._learning_rate (not the prepared tensor) — relies on
    # implicit dtype conversion in the multiply.
    return resource_variable_ops.resource_scatter_add(
        handle.handle, indices, -grad * self._learning_rate)

  def _apply_sparse_duplicate_indices(self, grad, var):
    # Sparse update for ref variables: build the scaled IndexedSlices delta
    # and subtract it in place.
    delta = ops.IndexedSlices(
        grad.values *
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad.indices, grad.dense_shape)
    return var.scatter_sub(delta, use_locking=self._use_locking)

  def _prepare(self):
    # Called once per apply_gradients(); materializes the learning rate as
    # a tensor shared by all per-variable updates in that call.
    self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                       name="learning_rate")
| apache-2.0 |
dhruvsrivastava/OJ | python/lib/python2.7/site-packages/wheel/signatures/__init__.py | 565 | 3779 | """
Create and verify jws-js format Ed25519 signatures.
"""
__all__ = [ 'sign', 'verify' ]
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
    """Import the Ed25519 backend on first use, self-test it, and return it.

    Prefers the C-accelerated ``ed25519ll`` package and falls back to the
    bundled pure-Python implementation when it is unavailable. The result
    is cached in the module-level ``ed25519ll`` global.
    """
    global ed25519ll
    if ed25519ll:
        return ed25519ll
    try:
        import ed25519ll  # fast (thousands / s)
    except (ImportError, OSError):  # pragma nocover
        from . import ed25519py as ed25519ll  # pure Python (hundreds / s)
    # Sanity-check the freshly selected backend before handing it out.
    test()
    return ed25519ll
def sign(payload, keypair):
    """Return a JWS-JS format signature given a JSON-serializable payload and
    an Ed25519 keypair."""
    get_ed25519ll()

    # Protected header carrying the algorithm and the public verifying key.
    protected = {
        "alg": ALG,
        "jwk": {
            "kty": ALG,  # alg -> kty in jwk-08.
            "vk": native(urlsafe_b64encode(keypair.vk)),
        },
    }

    # Canonical (sorted-key) JSON, base64url-encoded, then signed as
    # "<header>.<payload>" per JWS.
    header_b64 = urlsafe_b64encode(binary(json.dumps(protected, sort_keys=True)))
    payload_b64 = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
    signed_message = ed25519ll.crypto_sign(
        b".".join((header_b64, payload_b64)), keypair.sk)
    # crypto_sign prepends the detached signature to the message.
    signature_b64 = urlsafe_b64encode(signed_message[:ed25519ll.SIGNATUREBYTES])

    return {
        "recipients": [
            {"header": native(header_b64),
             "signature": native(signature_b64)},
        ],
        "payload": native(payload_b64),
    }
def assertTrue(condition, message=""):
    """Raise ``ValueError(message)`` unless *condition* is truthy."""
    if condition:
        return
    raise ValueError(message)
def verify(jwsjs):
    """Return (decoded headers, payload) if all signatures in jwsjs are
    consistent, else raise ValueError.

    Caller must decide whether the keys are actually trusted."""
    get_ed25519ll()
    # XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
    recipients = jwsjs["recipients"]
    encoded_payload = binary(jwsjs["payload"])
    headers = []
    for recipient in recipients:
        # Each recipient must carry exactly "header" and "signature".
        assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
        h = binary(recipient["header"])
        s = binary(recipient["signature"])
        header = json.loads(native(urlsafe_b64decode(h)))
        assertTrue(header["alg"] == ALG,
                   "Unexpected algorithm {0}".format(header["alg"]))
        if "alg" in header["jwk"] and not "kty" in header["jwk"]:
            header["jwk"]["kty"] = header["jwk"]["alg"]  # b/w for JWK < -08
        assertTrue(header["jwk"]["kty"] == ALG,  # true for Ed25519
                   "Unexpected key type {0}".format(header["jwk"]["kty"]))
        vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
        # Reconstruct the signed input "<header>.<payload>" and verify it by
        # opening signature+message with the recipient's own embedded key.
        secured_input = b".".join((h, encoded_payload))
        sig = urlsafe_b64decode(s)
        sig_msg = sig+secured_input
        verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
        # Safe split: base64url text never contains '.', so exactly one dot.
        verified_header, verified_payload = verified_input.split('.')
        verified_header = binary(verified_header)
        decoded_header = native(urlsafe_b64decode(verified_header))
        headers.append(json.loads(decoded_header))

    # NOTE: verified_payload deliberately comes from the last loop iteration —
    # every recipient signed the same encoded_payload, so any verified copy
    # will do. (Raises NameError if `recipients` is empty — TODO confirm
    # that is the intended failure mode for zero recipients.)
    verified_payload = binary(verified_payload)

    # only return header, payload that have passed through the crypto library.
    payload = json.loads(native(urlsafe_b64decode(verified_payload)))
    return headers, payload
def test():
    """Round-trip self-test run by get_ed25519ll() after backend selection.

    Signs a payload with a fresh keypair, verifies it, then confirms that a
    tampered payload is rejected with ValueError.
    """
    kp = ed25519ll.crypto_sign_keypair()
    payload = {'test': 'onstartup'}
    # json round-trip mimics a signature that was serialized and re-parsed.
    jwsjs = json.loads(json.dumps(sign(payload, kp)))
    verify(jwsjs)
    # Corrupt the (base64url) payload; verification must now fail.
    jwsjs['payload'] += 'x'
    try:
        verify(jwsjs)
    except ValueError:
        pass
    else:  # pragma no cover
        raise RuntimeError("No error from bad wheel.signatures payload.")
| bsd-3-clause |
Dev4X/oppia | scripts/install_third_party.py | 4 | 21784 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installation script for Oppia third-party libraries."""
import itertools
import os
import shutil
import StringIO
import tarfile
import urllib
import urllib2
import zipfile
import common
TOOLS_DIR = os.path.join('..', 'oppia_tools')
THIRD_PARTY_DIR = os.path.join('.', 'third_party')
THIRD_PARTY_STATIC_DIR = os.path.join(THIRD_PARTY_DIR, 'static')
# Place to download zip files for temporary storage.
TMP_UNZIP_PATH = os.path.join('.', 'tmp_unzip.zip')
# Check that the current directory is correct.
common.require_cwd_to_be_oppia()
def download_files(source_url_root, target_dir, source_filenames):
    """Downloads a group of files and saves them to a given directory.

    Each file is downloaded only if it does not already exist.

    Args:
        source_url_root: the URL to prepend to all the filenames.
        target_dir: the directory to save the files to.
        source_filenames: a list of filenames. Each filename is appended to the
            end of the source_url_root in order to give the URL from which to
            download the file. The downloaded file is then placed in target_dir,
            and retains the same filename.
    """
    assert isinstance(source_filenames, list)
    common.ensure_directory_exists(target_dir)
    for filename in source_filenames:
        # Existence check only — an already-present (possibly stale or
        # truncated) file is never re-downloaded.
        if not os.path.exists(os.path.join(target_dir, filename)):
            print 'Downloading file %s to %s' % (filename, target_dir)
            urllib.urlretrieve(
                '%s/%s' % (source_url_root, filename),
                os.path.join(target_dir, filename))
def download_and_unzip_files(
source_url, target_parent_dir, zip_root_name, target_root_name):
"""Downloads a zip file, unzips it, and saves the result in a given dir.
The download occurs only if the target directory that the zip file unzips
to does not exist.
NB: This function assumes that the root level of the zip file has exactly
one folder.
Args:
source_url: the URL from which to download the zip file.
target_parent_dir: the directory to save the contents of the zip file to.
zip_root_name: the name of the top-level folder in the zip directory.
target_root_name: the name that the top-level folder should be renamed to
in the local directory.
"""
if not os.path.exists(os.path.join(target_parent_dir, target_root_name)):
print 'Downloading and unzipping file %s to %s' % (
zip_root_name, target_parent_dir)
common.ensure_directory_exists(target_parent_dir)
urllib.urlretrieve(source_url, TMP_UNZIP_PATH)
try:
with zipfile.ZipFile(TMP_UNZIP_PATH, 'r') as z:
z.extractall(target_parent_dir)
os.remove(TMP_UNZIP_PATH)
except:
if os.path.exists(TMP_UNZIP_PATH):
os.remove(TMP_UNZIP_PATH)
# Some downloads (like jqueryui-themes) may require a user-agent.
req = urllib2.Request(source_url)
req.add_header('User-agent', 'python')
# This is needed to get a seekable filestream that can be used
# by zipfile.ZipFile.
file_stream = StringIO.StringIO(urllib2.urlopen(req).read())
with zipfile.ZipFile(file_stream, 'r') as z:
z.extractall(target_parent_dir)
# Rename the target directory.
os.rename(
os.path.join(target_parent_dir, zip_root_name),
os.path.join(target_parent_dir, target_root_name))
def download_and_untar_files(
        source_url, target_parent_dir, tar_root_name, target_root_name):
    """Downloads a tar file, untars it, and saves the result in a given dir.

    The download occurs only if the target directory that the tar file untars
    to does not exist.

    NB: This function assumes that the root level of the tar file has exactly
    one folder.

    Args:
        source_url: the URL from which to download the tar file.
        target_parent_dir: the directory to save the contents of the tar file
            to.
        tar_root_name: the name of the top-level folder in the tar directory.
        target_root_name: the name that the top-level folder should be renamed
            to in the local directory.
    """
    if not os.path.exists(os.path.join(target_parent_dir, target_root_name)):
        print 'Downloading and untarring file %s to %s' % (
            tar_root_name, target_parent_dir)
        common.ensure_directory_exists(target_parent_dir)

        urllib.urlretrieve(source_url, TMP_UNZIP_PATH)
        # NOTE(review): extractall() trusts archive member paths; a malicious
        # tarball could escape target_parent_dir. Acceptable here only because
        # the URLs are pinned in this script — revisit if URLs become dynamic.
        with tarfile.open(TMP_UNZIP_PATH, 'r:gz') as t:
            t.extractall(target_parent_dir)
        os.remove(TMP_UNZIP_PATH)

        # Rename the target directory.
        os.rename(
            os.path.join(target_parent_dir, tar_root_name),
            os.path.join(target_parent_dir, target_root_name))
# This is a temporary modified version of UI Bootstrap used for displaying
# HTML in popovers. It should be replaced with UI Bootstrap when version
# 0.13.0 is released. See https://github.com/angular-ui/bootstrap/issues/220
# TODO(sll): Delete this snippet of code after v0.13.0 is released.
UI_BOOTSTRAP_JBRUNI_URL = (
'https://raw.githubusercontent.com/jbruni/jbruni.github.io/master/javascripts')
UI_BOOTSTRAP_JBRUNI_DST = os.path.join(
THIRD_PARTY_STATIC_DIR, 'ui-bootstrap-jbruni-0.13.0')
UI_BOOTSTRAP_JBRUNI_FILES = ['ui-bootstrap-tpls-0.13.0-jbruni.min.js']
download_files(
UI_BOOTSTRAP_JBRUNI_URL, UI_BOOTSTRAP_JBRUNI_DST, UI_BOOTSTRAP_JBRUNI_FILES)
# Download all the standalone files.
YUICOMPRESSOR_REV = '2.4.8'
YUICOMPRESSOR_FILENAME = 'yuicompressor-%s' % YUICOMPRESSOR_REV
YUICOMPRESSOR_URL = (
'https://github.com/yui/yuicompressor/releases/download/v%s'
% YUICOMPRESSOR_REV)
YUICOMPRESSOR_DST = os.path.join(TOOLS_DIR, YUICOMPRESSOR_FILENAME)
YUICOMPRESSOR_FILES = ['%s.jar' % YUICOMPRESSOR_FILENAME]
UI_BOOTSTRAP_REV = '0.12.0'
UI_BOOTSTRAP_URL = (
'https://raw.githubusercontent.com/angular-ui/bootstrap/gh-pages')
UI_BOOTSTRAP_DST = os.path.join(
THIRD_PARTY_STATIC_DIR, 'ui-bootstrap-%s' % UI_BOOTSTRAP_REV)
UI_BOOTSTRAP_FILES = [
'ui-bootstrap-tpls-%s.%s' % (UI_BOOTSTRAP_REV, suffix)
for suffix in ['js', 'min.js']]
MATERIAL_DESIGN_ICONS_REV = '1.0.1'
MATERIAL_DESIGN_ICONS_URL_PREFIX = (
'https://raw.githubusercontent.com/google/material-design-icons/%s' %
MATERIAL_DESIGN_ICONS_REV)
MATERIAL_DESIGN_ICONS_ACTION_URL = (
'%s/action/drawable-xxxhdpi' % MATERIAL_DESIGN_ICONS_URL_PREFIX)
MATERIAL_DESIGN_ICONS_COMMUNICATION_URL = (
'%s/communication/drawable-xxxhdpi' % MATERIAL_DESIGN_ICONS_URL_PREFIX)
MATERIAL_DESIGN_ICONS_CONTENT_URL = (
'%s/content/drawable-xxxhdpi' % MATERIAL_DESIGN_ICONS_URL_PREFIX)
MATERIAL_DESIGN_ICONS_FILE_URL = (
'%s/file/drawable-xxxhdpi' % MATERIAL_DESIGN_ICONS_URL_PREFIX)
MATERIAL_DESIGN_ICONS_NAVIGATION_URL = (
'%s/navigation/drawable-xxxhdpi' % MATERIAL_DESIGN_ICONS_URL_PREFIX)
MATERIAL_DESIGN_ICONS_SOCIAL_URL = (
'%s/social/drawable-xxxhdpi' % MATERIAL_DESIGN_ICONS_URL_PREFIX)
MATERIAL_DESIGN_ICONS_DST = os.path.join(
THIRD_PARTY_STATIC_DIR,
'material-design-icons-%s' % MATERIAL_DESIGN_ICONS_REV)
MATERIAL_DESIGN_ICON_ACTION_FILES = [
'ic_info_black_48dp.png', 'ic_help_black_48dp.png',
'ic_home_black_48dp.png']
MATERIAL_DESIGN_ICON_COMMUNICATION_FILES = ['ic_forum_black_48dp.png']
MATERIAL_DESIGN_ICON_CONTENT_FILES = [
'ic_link_black_48dp.png', 'ic_save_black_48dp.png']
MATERIAL_DESIGN_ICON_FILE_FILES = ['ic_cloud_upload_black_48dp.png']
MATERIAL_DESIGN_ICON_NAVIGATION_FILES = [
'ic_more_vert_black_48dp.png', 'ic_menu_black_48dp.png',
'ic_close_black_48dp.png']
MATERIAL_DESIGN_ICON_SOCIAL_FILES = ['ic_group_black_48dp.png']
# Note that Angular 1.3 requires a jQuery version that is >= 2.1.1.
JQUERY_REV = '2.1.1'
JQUERY_URL = 'https://ajax.googleapis.com/ajax/libs/jquery/%s' % JQUERY_REV
JQUERY_DST = os.path.join(THIRD_PARTY_STATIC_DIR, 'jquery-%s' % JQUERY_REV)
JQUERY_FILES = ['jquery.%s' % suffix for suffix in ['js', 'min.js', 'min.map']]
JQUERYUI_REV = '1.10.3'
JQUERYUI_URL = (
'https://ajax.googleapis.com/ajax/libs/jqueryui/%s' % JQUERYUI_REV)
JQUERYUI_DST = os.path.join(
THIRD_PARTY_STATIC_DIR, 'jqueryui-%s' % JQUERYUI_REV)
JQUERYUI_FILES = ['jquery-ui.min.js']
ANGULAR_REV = '1.3.13'
ANGULAR_URL = (
'https://ajax.googleapis.com/ajax/libs/angularjs/%s' % ANGULAR_REV)
ANGULAR_TEST_URL = 'https://code.angularjs.org/%s' % ANGULAR_REV
ANGULAR_DST = os.path.join(
THIRD_PARTY_STATIC_DIR, 'angularjs-%s' % ANGULAR_REV)
ANGULAR_FILES = [
'angular%s.%s' % (part1, part2) for (part1, part2) in itertools.product(
['', '-animate', '-resource', '-route', '-sanitize', '-aria'],
['js', 'min.js', 'min.js.map'])]
ANGULAR_TEST_FILES = ['angular-mocks.js', 'angular-scenario.js']
D3_REV = '3.4.11'
D3_URL = 'https://raw.github.com/mbostock/d3/v%s' % D3_REV
D3_DST = os.path.join(THIRD_PARTY_STATIC_DIR, 'd3js-%s' % D3_REV)
D3_FILES = ['d3.min.js']
NG_INFINITE_SCROLL_REV = '1.0.0'
NG_INFINITE_SCROLL_URL = (
'https://raw.github.com/BinaryMuse/ngInfiniteScroll/%s/build/'
% NG_INFINITE_SCROLL_REV)
NG_INFINITE_SCROLL_DST = os.path.join(
THIRD_PARTY_STATIC_DIR, 'nginfinitescroll-%s' % NG_INFINITE_SCROLL_REV)
NG_INFINITE_SCROLL_FILES = ['ng-infinite-scroll.min.js']
download_files(YUICOMPRESSOR_URL, YUICOMPRESSOR_DST, YUICOMPRESSOR_FILES)
download_files(UI_BOOTSTRAP_URL, UI_BOOTSTRAP_DST, UI_BOOTSTRAP_FILES)
download_files(JQUERY_URL, JQUERY_DST, JQUERY_FILES)
download_files(JQUERYUI_URL, JQUERYUI_DST, JQUERYUI_FILES)
download_files(ANGULAR_URL, ANGULAR_DST, ANGULAR_FILES)
download_files(ANGULAR_TEST_URL, ANGULAR_DST, ANGULAR_TEST_FILES)
download_files(D3_URL, D3_DST, D3_FILES)
download_files(
MATERIAL_DESIGN_ICONS_ACTION_URL, MATERIAL_DESIGN_ICONS_DST,
MATERIAL_DESIGN_ICON_ACTION_FILES)
download_files(
MATERIAL_DESIGN_ICONS_COMMUNICATION_URL, MATERIAL_DESIGN_ICONS_DST,
MATERIAL_DESIGN_ICON_COMMUNICATION_FILES)
download_files(
MATERIAL_DESIGN_ICONS_CONTENT_URL, MATERIAL_DESIGN_ICONS_DST,
MATERIAL_DESIGN_ICON_CONTENT_FILES)
download_files(
MATERIAL_DESIGN_ICONS_FILE_URL, MATERIAL_DESIGN_ICONS_DST,
MATERIAL_DESIGN_ICON_FILE_FILES)
download_files(
MATERIAL_DESIGN_ICONS_NAVIGATION_URL, MATERIAL_DESIGN_ICONS_DST,
MATERIAL_DESIGN_ICON_NAVIGATION_FILES)
download_files(
MATERIAL_DESIGN_ICONS_SOCIAL_URL, MATERIAL_DESIGN_ICONS_DST,
MATERIAL_DESIGN_ICON_SOCIAL_FILES)
download_files(
NG_INFINITE_SCROLL_URL, NG_INFINITE_SCROLL_DST, NG_INFINITE_SCROLL_FILES)
# Download all the frontend library zip files.
BOWER_MATERIAL_REV = '0.6.0-rc1'
BOWER_MATERIAL_ROOT_NAME = 'bower-material-%s' % BOWER_MATERIAL_REV
BOWER_MATERIAL_ZIP_URL = (
'https://github.com/angular/bower-material/archive/v%s.zip'
% BOWER_MATERIAL_REV)
BOWER_MATERIAL_ZIP_ROOT_NAME = BOWER_MATERIAL_ROOT_NAME
BOWER_MATERIAL_TARGET_ROOT_NAME = BOWER_MATERIAL_ROOT_NAME
HAMMER_JS_REV = '2.0.4'
HAMMER_JS_ROOT_NAME = 'hammer.js-%s' % HAMMER_JS_REV
HAMMER_JS_ZIP_URL = (
'https://github.com/hammerjs/hammer.js/archive/%s.zip' % HAMMER_JS_REV)
HAMMER_JS_ZIP_ROOT_NAME = HAMMER_JS_ROOT_NAME
HAMMER_JS_TARGET_ROOT_NAME = 'hammer-js-%s' % HAMMER_JS_REV
SELECT2_REV = '3.5.1'
SELECT2_ZIP_URL = (
'https://github.com/ivaynberg/select2/archive/%s.zip' % SELECT2_REV)
SELECT2_ZIP_ROOT_NAME = 'select2-%s' % SELECT2_REV
SELECT2_TARGET_ROOT_NAME = 'select2-%s' % SELECT2_REV
FONTAWESOME_REV='4.3.0'
FONTAWESOME_ZIP_URL = (
'http://fortawesome.github.io/Font-Awesome/assets/font-awesome-%s.zip' % FONTAWESOME_REV)
FONTAWESOME_ZIP_ROOT_NAME = 'font-awesome-%s' % FONTAWESOME_REV
FONTAWESOME_TARGET_ROOT_NAME = 'font-awesome-%s' % FONTAWESOME_REV
TEXTANGULAR_REV = '1.3.7'
TEXTANGULAR_ZIP_URL = (
'https://github.com/fraywing/textAngular/archive/v%s.zip' % TEXTANGULAR_REV)
TEXTANGULAR_ZIP_ROOT_NAME = 'textAngular-%s' % TEXTANGULAR_REV
TEXTANGULAR_TARGET_ROOT_NAME = 'textAngular-%s' % TEXTANGULAR_REV
JQUERYUI_FILENAME = 'jquery-ui-themes-%s' % JQUERYUI_REV
JQUERYUI_THEMES_SRC = (
'http://jqueryui.com/resources/download/%s.zip' % JQUERYUI_FILENAME)
JQUERYUI_THEMES_ZIP_ROOT_NAME = JQUERYUI_FILENAME
JQUERYUI_THEMES_TARGET_ROOT_NAME = JQUERYUI_FILENAME
CODEMIRROR_REV = '3.19.0'
CODEMIRROR_ZIP_URL = 'https://github.com/marijnh/CodeMirror/archive/3.19.0.zip'
CODEMIRROR_ZIP_ROOT_NAME = 'CodeMirror-%s' % CODEMIRROR_REV
CODEMIRROR_TARGET_ROOT_NAME = 'code-mirror-%s' % CODEMIRROR_REV
UI_CODEMIRROR_REV = '0.1.2'
UI_CODEMIRROR_ZIP_URL = (
'https://github.com/angular-ui/ui-codemirror/archive/src%s.zip'
% UI_CODEMIRROR_REV)
UI_CODEMIRROR_ZIP_ROOT_NAME = 'ui-codemirror-src%s' % UI_CODEMIRROR_REV
UI_CODEMIRROR_TARGET_ROOT_NAME = 'ui-codemirror-%s' % UI_CODEMIRROR_REV
UI_MAP_REV = '0.5.0'
UI_MAP_ROOT_NAME = 'ui-map-%s' % UI_MAP_REV
UI_MAP_ZIP_URL = (
'https://github.com/angular-ui/ui-map/archive/v%s.zip' % UI_MAP_REV)
UI_MAP_ZIP_ROOT_NAME = UI_MAP_ROOT_NAME
UI_MAP_TARGET_ROOT_NAME = UI_MAP_ROOT_NAME
# ui-utils contains ui-event, which is needed for ui-map.
UI_UTILS_REV = '0.1.1'
UI_UTILS_ROOT_NAME = 'ui-utils-%s' % UI_UTILS_REV
UI_UTILS_ZIP_URL = (
'https://github.com/angular-ui/ui-utils/archive/v%s.zip' % UI_UTILS_REV)
UI_UTILS_ZIP_ROOT_NAME = UI_UTILS_ROOT_NAME
UI_UTILS_TARGET_ROOT_NAME = UI_UTILS_ROOT_NAME
UI_SORTABLE_REV = '0.12.6'
UI_SORTABLE_ZIP_URL = (
'https://github.com/angular-ui/ui-sortable/archive/src%s.zip'
% UI_SORTABLE_REV)
UI_SORTABLE_ZIP_ROOT_NAME = 'ui-sortable-src%s' % UI_SORTABLE_REV
UI_SORTABLE_TARGET_ROOT_NAME = 'ui-sortable-%s' % UI_SORTABLE_REV
NG_JOYRIDE_REV = '0.1.11'
NG_JOYRIDE_ZIP_URL = (
'https://github.com/abhikmitra/ng-joyride/archive/%s.zip' % NG_JOYRIDE_REV)
NG_JOYRIDE_ZIP_ROOT_NAME = 'ng-joyride-%s' % NG_JOYRIDE_REV
NG_JOYRIDE_TARGET_ROOT_NAME = 'ng-joyride-%s' % NG_JOYRIDE_REV
BOOTSTRAP_REV = '3.3.4'
BOOTSTRAP_ROOT_NAME = 'bootstrap-%s-dist' % BOOTSTRAP_REV
BOOTSTRAP_ZIP_URL = (
'https://github.com/twbs/bootstrap/releases/download/v3.3.4/%s.zip'
% BOOTSTRAP_ROOT_NAME)
BOOTSTRAP_ZIP_ROOT_NAME = BOOTSTRAP_ROOT_NAME
BOOTSTRAP_TARGET_ROOT_NAME = 'bootstrap-%s' % BOOTSTRAP_REV
MATHJAX_REV = '2.4-latest'
MATHJAX_ROOT_NAME = 'MathJax-%s' % MATHJAX_REV
MATHJAX_ZIP_URL = (
'https://github.com/mathjax/MathJax/archive/v%s.zip' % MATHJAX_REV)
MATHJAX_ZIP_ROOT_NAME = MATHJAX_ROOT_NAME
MATHJAX_TARGET_ROOT_NAME = MATHJAX_ROOT_NAME
NG_IMG_CROP_REV = '0.3.2'
NG_IMG_CROP_ZIP_URL = (
'https://github.com/alexk111/ngImgCrop/archive/v%s.zip' % NG_IMG_CROP_REV)
NG_IMG_CROP_ZIP_ROOT_NAME = 'ngImgCrop-%s' % NG_IMG_CROP_REV
NG_IMG_CROP_TARGET_ROOT_NAME = 'ng-img-crop-%s' % NG_IMG_CROP_REV
download_and_unzip_files(
BOWER_MATERIAL_ZIP_URL, THIRD_PARTY_STATIC_DIR,
BOWER_MATERIAL_ZIP_ROOT_NAME, BOWER_MATERIAL_TARGET_ROOT_NAME)
download_and_unzip_files(
HAMMER_JS_ZIP_URL, THIRD_PARTY_STATIC_DIR,
HAMMER_JS_ZIP_ROOT_NAME, HAMMER_JS_TARGET_ROOT_NAME)
download_and_unzip_files(
SELECT2_ZIP_URL, THIRD_PARTY_STATIC_DIR,
SELECT2_ZIP_ROOT_NAME, SELECT2_TARGET_ROOT_NAME)
download_and_unzip_files(
FONTAWESOME_ZIP_URL, THIRD_PARTY_STATIC_DIR,
FONTAWESOME_ZIP_ROOT_NAME, FONTAWESOME_TARGET_ROOT_NAME)
download_and_unzip_files(
TEXTANGULAR_ZIP_URL, THIRD_PARTY_STATIC_DIR,
TEXTANGULAR_ZIP_ROOT_NAME, TEXTANGULAR_TARGET_ROOT_NAME)
download_and_unzip_files(
JQUERYUI_THEMES_SRC,
os.path.join(THIRD_PARTY_STATIC_DIR, 'jqueryui-%s' % JQUERYUI_REV),
JQUERYUI_THEMES_ZIP_ROOT_NAME, JQUERYUI_THEMES_TARGET_ROOT_NAME)
download_and_unzip_files(
CODEMIRROR_ZIP_URL, THIRD_PARTY_STATIC_DIR,
CODEMIRROR_ZIP_ROOT_NAME, CODEMIRROR_TARGET_ROOT_NAME)
download_and_unzip_files(
UI_CODEMIRROR_ZIP_URL, THIRD_PARTY_STATIC_DIR,
UI_CODEMIRROR_ZIP_ROOT_NAME, UI_CODEMIRROR_TARGET_ROOT_NAME)
download_and_unzip_files(
UI_MAP_ZIP_URL, THIRD_PARTY_STATIC_DIR,
UI_MAP_ZIP_ROOT_NAME, UI_MAP_TARGET_ROOT_NAME)
download_and_unzip_files(
UI_UTILS_ZIP_URL, THIRD_PARTY_STATIC_DIR,
UI_UTILS_ZIP_ROOT_NAME, UI_UTILS_TARGET_ROOT_NAME)
download_and_unzip_files(
UI_SORTABLE_ZIP_URL, THIRD_PARTY_STATIC_DIR,
UI_SORTABLE_ZIP_ROOT_NAME, UI_SORTABLE_TARGET_ROOT_NAME)
download_and_unzip_files(
NG_JOYRIDE_ZIP_URL, THIRD_PARTY_STATIC_DIR,
NG_JOYRIDE_ZIP_ROOT_NAME, NG_JOYRIDE_TARGET_ROOT_NAME)
download_and_unzip_files(
BOOTSTRAP_ZIP_URL, THIRD_PARTY_STATIC_DIR,
BOOTSTRAP_ZIP_ROOT_NAME, BOOTSTRAP_TARGET_ROOT_NAME)
download_and_unzip_files(
MATHJAX_ZIP_URL, THIRD_PARTY_STATIC_DIR,
MATHJAX_ZIP_ROOT_NAME, MATHJAX_TARGET_ROOT_NAME)
download_and_unzip_files(
NG_IMG_CROP_ZIP_URL, THIRD_PARTY_STATIC_DIR,
NG_IMG_CROP_ZIP_ROOT_NAME, NG_IMG_CROP_TARGET_ROOT_NAME)
# MathJax is too big. Remove many unneeded files by following these
# instructions:
# https://github.com/mathjax/MathJax/wiki/Shrinking-MathJax-for-%22local%22-installation
MATHJAX_DIR_PREFIX = os.path.join(
    THIRD_PARTY_STATIC_DIR, MATHJAX_TARGET_ROOT_NAME)
# Subdirectories of the unpacked MathJax distribution that are not needed at
# runtime: the unminified sources and the PNG fallback fonts.
MATHJAX_SUBDIRS_TO_REMOVE = [
    'unpacked', os.path.join('fonts', 'HTML-CSS', 'TeX', 'png')]
for subdir in MATHJAX_SUBDIRS_TO_REMOVE:
    full_dir = os.path.join(MATHJAX_DIR_PREFIX, subdir)
    # isdir guard makes this cleanup idempotent across repeated runs.
    if os.path.isdir(full_dir):
        print 'Removing unnecessary MathJax directory \'%s\'' % subdir
        shutil.rmtree(full_dir)
# Download all the backend (Python) library zip files.
BLEACH_REV = '1.2.2'
BLEACH_ROOT_NAME = 'bleach-%s' % BLEACH_REV
BLEACH_ZIP_URL = (
'https://github.com/jsocol/bleach/archive/v%s.zip' % BLEACH_REV)
BLEACH_ZIP_ROOT_NAME = BLEACH_ROOT_NAME
BLEACH_TARGET_ROOT_NAME = BLEACH_ROOT_NAME
HTML5LIB_REV = '0.95'
HTML5LIB_ROOT_NAME = 'html5lib-python-%s' % HTML5LIB_REV
HTML5LIB_ZIP_URL = (
'https://github.com/html5lib/html5lib-python/archive/%s.zip'
% HTML5LIB_REV)
HTML5LIB_ZIP_ROOT_NAME = HTML5LIB_ROOT_NAME
HTML5LIB_TARGET_ROOT_NAME = HTML5LIB_ROOT_NAME
download_and_unzip_files(
BLEACH_ZIP_URL, THIRD_PARTY_DIR,
BLEACH_ZIP_ROOT_NAME, BLEACH_TARGET_ROOT_NAME)
download_and_unzip_files(
HTML5LIB_ZIP_URL, THIRD_PARTY_DIR,
HTML5LIB_ZIP_ROOT_NAME, HTML5LIB_TARGET_ROOT_NAME)
# Download all the tar files.
GAE_MAPREDUCE_REV = '1.9.17.0'
GAE_MAPREDUCE_ROOT_NAME = 'gae-mapreduce-%s' % GAE_MAPREDUCE_REV
GAE_MAPREDUCE_TAR_URL = (
'https://pypi.python.org/packages/source/G/GoogleAppEngineMapReduce/'
'GoogleAppEngineMapReduce-%s.tar.gz' % GAE_MAPREDUCE_REV)
GAE_MAPREDUCE_TAR_ROOT_NAME = 'GoogleAppEngineMapReduce-%s' % GAE_MAPREDUCE_REV
GAE_MAPREDUCE_TARGET_ROOT_NAME = GAE_MAPREDUCE_ROOT_NAME
GAE_CLOUD_STORAGE_REV = '1.9.15.0'
GAE_CLOUD_STORAGE_ROOT_NAME = 'gae-cloud-storage-%s' % GAE_CLOUD_STORAGE_REV
GAE_CLOUD_STORAGE_TAR_URL = (
'https://pypi.python.org/packages/source/G/'
'GoogleAppEngineCloudStorageClient/'
'GoogleAppEngineCloudStorageClient-%s.tar.gz' % GAE_CLOUD_STORAGE_REV)
GAE_CLOUD_STORAGE_TAR_ROOT_NAME = (
'GoogleAppEngineCloudStorageClient-%s' % GAE_CLOUD_STORAGE_REV)
GAE_CLOUD_STORAGE_TARGET_ROOT_NAME = GAE_CLOUD_STORAGE_ROOT_NAME
GAE_PIPELINE_REV = '1.9.17.0'
GAE_PIPELINE_ROOT_NAME = 'gae-pipeline-%s' % GAE_PIPELINE_REV
GAE_PIPELINE_TAR_URL = (
'https://pypi.python.org/packages/source/G/'
'GoogleAppEnginePipeline/GoogleAppEnginePipeline-%s.tar.gz'
'#md5=9fe87b281f4b0a7c110534df4e61b6ec' % GAE_PIPELINE_REV)
GAE_PIPELINE_TAR_ROOT_NAME = (
'GoogleAppEnginePipeline-%s' % GAE_PIPELINE_REV)
GAE_PIPELINE_TARGET_ROOT_NAME = GAE_PIPELINE_ROOT_NAME
GRAPHY_REV = '1.0.0'
GRAPHY_ROOT_NAME = 'graphy-%s' % GRAPHY_REV
GRAPHY_TAR_URL = (
'https://pypi.python.org/packages/source/G/'
'Graphy/Graphy-%s.tar.gz#md5=390b4f9194d81d0590abac90c8b717e0'
% GRAPHY_REV)
GRAPHY_TAR_ROOT_NAME = 'Graphy-%s' % GRAPHY_REV
GRAPHY_TARGET_ROOT_NAME = GRAPHY_ROOT_NAME
SIMPLEJSON_REV = '3.7.1'
SIMPLEJSON_ROOT_NAME = 'simplejson-%s' % SIMPLEJSON_REV
SIMPLEJSON_TAR_URL = (
'https://pypi.python.org/packages/source/s/'
'simplejson/simplejson-%s.tar.gz#md5=c76c2d11b87e9fb501bd0b2b72091653'
% SIMPLEJSON_REV)
SIMPLEJSON_TAR_ROOT_NAME = 'simplejson-%s' % SIMPLEJSON_REV
SIMPLEJSON_TARGET_ROOT_NAME = SIMPLEJSON_ROOT_NAME
download_and_untar_files(
GAE_MAPREDUCE_TAR_URL, THIRD_PARTY_DIR,
GAE_MAPREDUCE_TAR_ROOT_NAME, GAE_MAPREDUCE_TARGET_ROOT_NAME)
download_and_untar_files(
GAE_CLOUD_STORAGE_TAR_URL, THIRD_PARTY_DIR,
GAE_CLOUD_STORAGE_TAR_ROOT_NAME, GAE_CLOUD_STORAGE_TARGET_ROOT_NAME)
download_and_untar_files(
GAE_PIPELINE_TAR_URL, THIRD_PARTY_DIR,
GAE_PIPELINE_TAR_ROOT_NAME, GAE_PIPELINE_TARGET_ROOT_NAME)
download_and_untar_files(
GRAPHY_TAR_URL, THIRD_PARTY_DIR,
GRAPHY_TAR_ROOT_NAME, GRAPHY_TARGET_ROOT_NAME)
download_and_untar_files(
SIMPLEJSON_TAR_URL, THIRD_PARTY_DIR,
SIMPLEJSON_TAR_ROOT_NAME, SIMPLEJSON_TARGET_ROOT_NAME)
MIDI_JS_REV = '2ef687b47e5f478f1506b47238f3785d9ea8bd25'
MIDI_JS_ZIP_URL = (
'https://github.com/mudcube/MIDI.js/archive/%s.zip' % MIDI_JS_REV)
MIDI_JS_ZIP_ROOT_NAME = 'MIDI.js-%s' % MIDI_JS_REV
MIDI_JS_TARGET_ROOT_NAME = 'midi-js-2ef687'
download_and_unzip_files(
MIDI_JS_ZIP_URL, THIRD_PARTY_STATIC_DIR,
MIDI_JS_ZIP_ROOT_NAME, MIDI_JS_TARGET_ROOT_NAME)
| apache-2.0 |
googleapis/googleapis-gen | google/cloud/documentai/v1beta3/documentai-v1beta3-py/scripts/fixup_documentai_v1beta3_keywords.py | 1 | 6599 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
        predicate: Callable[[Any], bool],
        iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Args:
        predicate: called once per item; its truthiness selects the bucket.
        iterator: the items to split.

    Returns:
        (true_list, false_list): items for which the predicate was truthy,
        then items for which it was falsy, each in original order.
    """
    results = ([], [])
    for i in iterator:
        # bool() normalizes arbitrary truthy/falsy return values to 0/1;
        # indexing with a raw int(predicate(i)) would raise IndexError for a
        # predicate that returns e.g. 2 (callers pass lambdas whose results
        # are not guaranteed to be bools).
        results[int(bool(predicate(i)))].append(i)
    # Returns trueList, falseList
    return results[1], results[0]
class documentaiCallTransformer(cst.CSTTransformer):
    """Rewrites flattened Document AI client calls into request-object form.

    For each recognized API method call, positional and keyword arguments are
    folded into a single ``request={...}`` dict argument, while the control
    parameters (retry/timeout/metadata) are kept as separate keyword args.
    """

    # Keyword parameters that stay outside the request dict.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Maps each API method name to its flattened parameter order; positional
    # args in existing calls are matched against this order.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'batch_process_documents': ('name', 'input_configs', 'output_config', 'input_documents', 'document_output_config', 'skip_human_review', ),
        'create_processor': ('parent', 'processor', ),
        'delete_processor': ('name', ),
        'disable_processor': ('name', ),
        'enable_processor': ('name', ),
        'fetch_processor_types': ('parent', ),
        'list_processors': ('parent', 'page_size', 'page_token', ),
        'process_document': ('name', 'inline_document', 'raw_document', 'document', 'skip_human_review', ),
        'review_document': ('human_review_config', 'inline_document', 'document', 'enable_schema_validation', 'priority', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        # Only attribute-style calls (obj.method(...)) whose method name is in
        # the table are transformed; everything else passes through untouched.
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate the control keywords (retry/timeout/metadata) from the
        # request-payload keywords.
        kwargs, ctrl_kwargs = partition(
            lambda a: not a.keyword.value in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args beyond the flattened parameter list can only be the
        # control params, passed positionally; convert them to keywords.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        # Build the single request={...} dict from the remaining args, paired
        # with their parameter names in declaration order.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
        in_dir: pathlib.Path,
        out_dir: pathlib.Path,
        *,
        # Default transformer is instantiated once at def time; safe because
        # the transformer holds no per-file mutable state.
        transformer=documentaiCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    # Lazily walk in_dir and yield every *.py file as a pathlib.Path.
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )
    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the documentai client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| apache-2.0 |
diagramsoftware/odoomrp-utils | delivery_partner_properties/models/res_partner.py | 12 | 1832 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class ResPartner(models.Model):
    """Partner extension: per-company mandatory and banned delivery carriers."""
    _inherit = 'res.partner'

    # company_dependent: each company can enforce its own delivery method.
    property_mandatory_carrier = fields.Many2one(
        'delivery.carrier', string='Mandatory delivery method',
        company_dependent=True)
    banned_carrier_ids = fields.Many2many(
        comodel_name='delivery.carrier', relation='rel_partner_banned_carrier',
        column1='partner_id', column2='carrier_id',
        string='Banned delivery carrier')

    @api.constrains('property_mandatory_carrier', 'banned_carrier_ids')
    def mandatory_no_banned(self):
        """Forbid choosing a banned carrier as the mandatory one.

        :raises exceptions.Warning: if a record's mandatory carrier is also
            in its banned carrier list.
        """
        # BUG FIX: new-API constraint methods receive a *recordset*; the
        # original read self.property_mandatory_carrier directly and would
        # misbehave on multi-record writes.  Check each record explicitly.
        for partner in self:
            if partner.property_mandatory_carrier in partner.banned_carrier_ids:
                raise exceptions.Warning(_('It is not possible to have the'
                                           ' mandatory carrier as banned one and'
                                           ' viceversa, please check carrier: %s')
                                         % partner.property_mandatory_carrier.name)
| agpl-3.0 |
abantam/pmtud | .waf-1.7.13-5a064c2686fe54de4e11018d22148cfc/waflib/ansiterm.py | 149 | 7136 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys,os
# Probe whether we are attached to a real Windows console that can be driven
# through ctypes/kernel32.  Any failure (not a tty, no ctypes/windll, console
# too small) falls through to the bare except and leaves stdout untouched.
try:
	if not(sys.stderr.isatty()and sys.stdout.isatty()):
		raise ValueError('not a tty')
	from ctypes import*
	# ctypes mirrors of the Win32 console structures.
	class COORD(Structure):
		# X/Y cell coordinates in the console screen buffer.
		_fields_=[("X",c_short),("Y",c_short)]
	class SMALL_RECT(Structure):
		_fields_=[("Left",c_short),("Top",c_short),("Right",c_short),("Bottom",c_short)]
	class CONSOLE_SCREEN_BUFFER_INFO(Structure):
		_fields_=[("Size",COORD),("CursorPosition",COORD),("Attributes",c_short),("Window",SMALL_RECT),("MaximumWindowSize",COORD)]
	class CONSOLE_CURSOR_INFO(Structure):
		_fields_=[('dwSize',c_ulong),('bVisible',c_int)]
	sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
	csinfo=CONSOLE_CURSOR_INFO()
	hconsole=windll.kernel32.GetStdHandle(-11)  # -11 == STD_OUTPUT_HANDLE
	windll.kernel32.GetConsoleScreenBufferInfo(hconsole,byref(sbinfo))
	# Reject degenerate consoles (e.g. redirected/pseudo consoles).
	if sbinfo.Size.X<9 or sbinfo.Size.Y<9:raise ValueError('small console')
	windll.kernel32.GetConsoleCursorInfo(hconsole,byref(csinfo))
except Exception:
	pass
else:
	import re,threading
	is_vista=getattr(sys,"getwindowsversion",None)and sys.getwindowsversion()[0]>=6
	# Py2 has unicode; on py3 fall back to str for the WriteConsoleW check.
	try:
		_type=unicode
	except NameError:
		_type=str
	# Parse an optional numeric ANSI parameter, with a default.
	to_int=lambda number,default:number and int(number)or default
	wlock=threading.Lock()
	STD_OUTPUT_HANDLE=-11
	STD_ERROR_HANDLE=-12
	class AnsiTerm(object):
		"""File-like stdout replacement that interprets a subset of ANSI
		escape sequences and replays them via the Win32 console API."""
		def __init__(self):
			self.encoding=sys.stdout.encoding
			self.hconsole=windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
			self.cursor_history=[]
			self.orig_sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
			self.orig_csinfo=CONSOLE_CURSOR_INFO()
			windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(self.orig_sbinfo))
			# NOTE(review): uses the module-global `hconsole` from the probe
			# above rather than self.hconsole — same handle here, but verify.
			windll.kernel32.GetConsoleCursorInfo(hconsole,byref(self.orig_csinfo))
		def screen_buffer_info(self):
			# Fresh snapshot of size, cursor position and attributes.
			sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
			windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(sbinfo))
			return sbinfo
		def clear_line(self,param):
			# ANSI EL: 1 = clear whole line, 2 = cursor to end, else to end.
			mode=param and int(param)or 0
			sbinfo=self.screen_buffer_info()
			if mode==1:
				line_start=COORD(0,sbinfo.CursorPosition.Y)
				line_length=sbinfo.Size.X
			elif mode==2:
				line_start=COORD(sbinfo.CursorPosition.X,sbinfo.CursorPosition.Y)
				line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
			else:
				line_start=sbinfo.CursorPosition
				line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
			chars_written=c_int()
			# Blank the cells, then restore the current attributes on them.
			windll.kernel32.FillConsoleOutputCharacterA(self.hconsole,c_wchar(' '),line_length,line_start,byref(chars_written))
			windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,line_length,line_start,byref(chars_written))
		def clear_screen(self,param):
			# ANSI ED: 1 = start to cursor, 2 = whole screen (and home), else
			# cursor to end of screen.
			mode=to_int(param,0)
			sbinfo=self.screen_buffer_info()
			if mode==1:
				clear_start=COORD(0,0)
				clear_length=sbinfo.CursorPosition.X*sbinfo.CursorPosition.Y
			elif mode==2:
				clear_start=COORD(0,0)
				clear_length=sbinfo.Size.X*sbinfo.Size.Y
				windll.kernel32.SetConsoleCursorPosition(self.hconsole,clear_start)
			else:
				clear_start=sbinfo.CursorPosition
				clear_length=((sbinfo.Size.X-sbinfo.CursorPosition.X)+sbinfo.Size.X*(sbinfo.Size.Y-sbinfo.CursorPosition.Y))
			chars_written=c_int()
			windll.kernel32.FillConsoleOutputCharacterA(self.hconsole,c_wchar(' '),clear_length,clear_start,byref(chars_written))
			windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,clear_length,clear_start,byref(chars_written))
		def push_cursor(self,param):
			# ANSI 's': save the current cursor position.
			sbinfo=self.screen_buffer_info()
			self.cursor_history.append(sbinfo.CursorPosition)
		def pop_cursor(self,param):
			# ANSI 'u': restore the last saved cursor position, if any.
			if self.cursor_history:
				old_pos=self.cursor_history.pop()
				windll.kernel32.SetConsoleCursorPosition(self.hconsole,old_pos)
		def set_cursor(self,param):
			# ANSI 'H'/'f': param is "row;col", 1-based; clamp to the buffer.
			y,sep,x=param.partition(';')
			x=to_int(x,1)-1
			y=to_int(y,1)-1
			sbinfo=self.screen_buffer_info()
			new_pos=COORD(min(max(0,x),sbinfo.Size.X),min(max(0,y),sbinfo.Size.Y))
			windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
		def set_column(self,param):
			# ANSI 'G': move to a 1-based column on the current row.
			x=to_int(param,1)-1
			sbinfo=self.screen_buffer_info()
			new_pos=COORD(min(max(0,x),sbinfo.Size.X),sbinfo.CursorPosition.Y)
			windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
		def move_cursor(self,x_offset=0,y_offset=0):
			# Relative cursor move, clamped to the buffer bounds.
			sbinfo=self.screen_buffer_info()
			new_pos=COORD(min(max(0,sbinfo.CursorPosition.X+x_offset),sbinfo.Size.X),min(max(0,sbinfo.CursorPosition.Y+y_offset),sbinfo.Size.Y))
			windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
		def move_up(self,param):
			self.move_cursor(y_offset=-to_int(param,1))
		def move_down(self,param):
			self.move_cursor(y_offset=to_int(param,1))
		def move_left(self,param):
			self.move_cursor(x_offset=-to_int(param,1))
		def move_right(self,param):
			self.move_cursor(x_offset=to_int(param,1))
		def next_line(self,param):
			# ANSI 'E': down N lines, column 0.
			sbinfo=self.screen_buffer_info()
			self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=to_int(param,1))
		def prev_line(self,param):
			# ANSI 'F': up N lines, column 0.
			sbinfo=self.screen_buffer_info()
			self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=-to_int(param,1))
		def rgb2bgr(self,c):
			# ANSI colors index bits as RGB; Win32 attributes use BGR order,
			# so swap bit 0 and bit 2.
			return((c&1)<<2)|(c&2)|((c&4)>>2)
		def set_color(self,param):
			# ANSI SGR 'm': apply each ';'-separated code to the attributes.
			cols=param.split(';')
			sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
			windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(sbinfo))
			attr=sbinfo.Attributes
			for c in cols:
				if is_vista:
					c=int(c)
				else:
					c=to_int(c,0)
				if c in range(30,38):
					# Foreground color: low nibble.
					attr=(attr&0xfff0)|self.rgb2bgr(c-30)
				elif c in range(40,48):
					# Background color: second nibble.
					attr=(attr&0xff0f)|(self.rgb2bgr(c-40)<<4)
				elif c==0:
					# Reset to the attributes captured at startup.
					attr=self.orig_sbinfo.Attributes
				elif c==1:
					attr|=0x08  # bold -> foreground intensity bit
				elif c==4:
					attr|=0x80  # underline -> background intensity bit
				elif c==7:
					# Reverse video: swap fg/bg nibbles.
					attr=(attr&0xff88)|((attr&0x70)>>4)|((attr&0x07)<<4)
			windll.kernel32.SetConsoleTextAttribute(self.hconsole,attr)
		def show_cursor(self,param):
			# NOTE(review): mutates the module-global csinfo, not a copy.
			csinfo.bVisible=1
			windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(csinfo))
		def hide_cursor(self,param):
			csinfo.bVisible=0
			windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(csinfo))
		# Dispatch table: final CSI byte -> handler method (unbound).
		ansi_command_table={'A':move_up,'B':move_down,'C':move_right,'D':move_left,'E':next_line,'F':prev_line,'G':set_column,'H':set_cursor,'f':set_cursor,'J':clear_screen,'K':clear_line,'h':show_cursor,'l':hide_cursor,'m':set_color,'s':push_cursor,'u':pop_cursor,}
		# Tokenizer: either an ESC[<params><cmd> sequence or a run of plain text.
		ansi_tokens=re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
		def write(self,text):
			# Serialize writes: handlers read-modify-write shared console state.
			try:
				wlock.acquire()
				for param,cmd,txt in self.ansi_tokens.findall(text):
					if cmd:
						cmd_func=self.ansi_command_table.get(cmd)
						if cmd_func:
							cmd_func(self,param)
					else:
						self.writeconsole(txt)
			finally:
				wlock.release()
		def writeconsole(self,txt):
			chars_written=c_int()
			# Unicode text goes through WriteConsoleW, bytes through WriteConsoleA.
			writeconsole=windll.kernel32.WriteConsoleA
			if isinstance(txt,_type):
				writeconsole=windll.kernel32.WriteConsoleW
			# Write in small chunks; presumably works around console API
			# limits on large writes — TODO confirm.
			TINY_STEP=3000
			for x in range(0,len(txt),TINY_STEP):
				tiny=txt[x:x+TINY_STEP]
				writeconsole(self.hconsole,tiny,len(tiny),byref(chars_written),None)
		def flush(self):
			pass
		def isatty(self):
			return True
	# Replace both std streams and advertise vt100 so tools emit ANSI codes.
	sys.stderr=sys.stdout=AnsiTerm()
	os.environ['TERM']='vt100'
| gpl-2.0 |
jcai19/smm_gem5 | src/arch/x86/isa/insts/general_purpose/string/move_string.py | 91 | 3109 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# gem5 micro-assembler source (plain data, *not* Python) implementing the
# x86 MOVS string-move instruction and its REP-prefixed variant (_E_).
# `ruflag t0, 10` reads the direction flag; t3 ends up +dsz or -dsz so
# rsi/rdi walk forward or backward.  The REP form loops on rcx using
# EZF-conditional branches.  Keep the string content byte-for-byte: it is
# parsed by gem5's assembler, not by Python.
microcode = '''
def macroop MOVS_M_M {
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
ld t1, seg, [1, t0, rsi]
st t1, es, [1, t0, rdi]
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
};
def macroop MOVS_E_M_M {
and t0, rcx, rcx, flags=(EZF,), dataSize=asz
br label("end"), flags=(CEZF,)
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
topOfLoop:
ld t1, seg, [1, t0, rsi]
st t1, es, [1, t0, rdi]
subi rcx, rcx, 1, flags=(EZF,), dataSize=asz
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
br label("topOfLoop"), flags=(nCEZF,)
end:
fault "NoFault"
};
'''
| bsd-3-clause |
estaban/pyload | module/plugins/hoster/VeohCom.py | 1 | 2386 | # -*- coding: utf-8 -*-
############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
############################################################################
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class VeohCom(SimpleHoster):
    """Veoh.com hoster plugin: resolves a watch URL to a direct mp4 link."""
    __name__ = "VeohCom"
    __type__ = "hoster"
    __pattern__ = r'http://(?:www\.)?veoh\.com/(tv/)?(watch|videos)/(?P<ID>v\w+)'
    __version__ = "0.2"

    __config__ = [("quality", "Low;High;Auto", "Quality", "Auto")]

    __description__ = """Veoh.com hoster plugin"""
    __author_name__ = "Walter Purcaro"
    __author_mail__ = "vuolter@gmail.com"

    FILE_NAME_PATTERN = r'<meta name="title" content="(?P<N>.*?)"'
    OFFLINE_PATTERN = r'>Sorry, we couldn\'t find the video you were looking for'

    FILE_URL_REPLACEMENTS = [(__pattern__, r'http://www.veoh.com/watch/\g<ID>')]

    SH_COOKIES = [(".veoh.com", "lassieLocale", "en")]

    def setup(self):
        # Resumable, parallel downloads; -1 = unlimited chunks.
        self.resumeDownload = self.multiDL = True
        self.chunkLimit = -1

    def handleFree(self):
        """Try the configured qualities in order and download the first hit.

        Fails the download when no quality yields a preview link.
        """
        quality = self.getConfig("quality")
        if quality == "Auto":
            quality = ("High", "Low")
        else:
            # BUG FIX: iterating a plain string like "High" loops over its
            # characters ('H', 'i', ...), producing bogus patterns such as
            # "fullPreviewHashHPath".  Wrap the single choice in a tuple.
            quality = (quality,)
        for q in quality:
            pattern = r'"fullPreviewHash%sPath":"(.+?)"' % q
            m = re.search(pattern, self.html)
            if m:
                self.pyfile.name += ".mp4"
                # The JSON-embedded URL escapes slashes with backslashes.
                link = m.group(1).replace("\\", "")
                self.logDebug("Download link: " + link)
                self.download(link)
                return
            else:
                self.logInfo("No %s quality video found" % q.upper())
        else:
            # for/else: reached only when every quality failed to match.
            self.fail("No video found!")
getInfo = create_getInfo(VeohCom)
| gpl-3.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/jinja2-2.6/examples/bench.py | 75 | 10922 | """\
This benchmark compares some python templating engines with Jinja 2 so
that we get a picture of how fast Jinja 2 is for a semi real world
template. If a template engine is not installed the test is skipped.\
"""
import sys
import cgi
from timeit import Timer
from jinja2 import Environment as JinjaEnvironment
# Shared render context: one title plus a 1000-row "big table" of dicts.
context = {
    'page_title': 'mitsuhiko\'s benchmark',
    'table': [dict(a=1,b=2,c=3,d=4,e=5,f=6,g=7,h=8,i=9,j=10) for x in range(1000)]
}

# Jinja2 is configured with % line statements and ${...} variables so the
# template source stays visually comparable to the other engines below.
jinja_template = JinjaEnvironment(
    line_statement_prefix='%',
    variable_start_string="${",
    variable_end_string="}"
).from_string("""\
<!doctype html>
<html>
<head>
<title>${page_title|e}</title>
</head>
<body>
<div class="header">
<h1>${page_title|e}</h1>
</div>
<ul class="navigation">
% for href, caption in [
('index.html', 'Index'),
('downloads.html', 'Downloads'),
('products.html', 'Products')
]
<li><a href="${href|e}">${caption|e}</a></li>
% endfor
</ul>
<div class="table">
<table>
% for row in table
<tr>
% for cell in row
<td>${cell}</td>
% endfor
</tr>
% endfor
</table>
</div>
</body>
</html>\
""")


def test_jinja():
    # Benchmark body: one full render of the big-table context.
    jinja_template.render(context)
# Each engine below follows the same pattern: try to import it; on failure
# set test_<engine> = None so the driver loop can report "*not installed*";
# otherwise compile the equivalent template once and define a test_<engine>
# function that renders it with the shared context.

# --- Tornado ---------------------------------------------------------------
try:
    from tornado.template import Template
except ImportError:
    test_tornado = None
else:
    tornado_template = Template("""\
<!doctype html>
<html>
<head>
<title>{{ page_title }}</title>
</head>
<body>
<div class="header">
<h1>{{ page_title }}</h1>
</div>
<ul class="navigation">
{% for href, caption in [ \
('index.html', 'Index'), \
('downloads.html', 'Downloads'), \
('products.html', 'Products') \
] %}
<li><a href="{{ href }}">{{ caption }}</a></li>
{% end %}
</ul>
<div class="table">
<table>
{% for row in table %}
<tr>
{% for cell in row %}
<td>{{ cell }}</td>
{% end %}
</tr>
{% end %}
</table>
</div>
</body>
</html>\
""")

    def test_tornado():
        tornado_template.generate(**context)

# --- Django ----------------------------------------------------------------
try:
    from django.conf import settings
    # Django refuses to render templates without configured settings.
    settings.configure()
    from django.template import Template as DjangoTemplate, Context as DjangoContext
except ImportError:
    test_django = None
else:
    django_template = DjangoTemplate("""\
<!doctype html>
<html>
<head>
<title>{{ page_title }}</title>
</head>
<body>
<div class="header">
<h1>{{ page_title }}</h1>
</div>
<ul class="navigation">
{% for href, caption in navigation %}
<li><a href="{{ href }}">{{ caption }}</a></li>
{% endfor %}
</ul>
<div class="table">
<table>
{% for row in table %}
<tr>
{% for cell in row %}
<td>{{ cell }}</td>
{% endfor %}
</tr>
{% endfor %}
</table>
</div>
</body>
</html>\
""")

    def test_django():
        # Django templates cannot iterate inline literals, so the navigation
        # list is supplied through the context instead.
        c = DjangoContext(context)
        c['navigation'] = [('index.html', 'Index'), ('downloads.html', 'Downloads'),
                           ('products.html', 'Products')]
        django_template.render(c)

# --- Mako ------------------------------------------------------------------
try:
    from mako.template import Template as MakoTemplate
except ImportError:
    test_mako = None
else:
    mako_template = MakoTemplate("""\
<!doctype html>
<html>
<head>
<title>${page_title|h}</title>
</head>
<body>
<div class="header">
<h1>${page_title|h}</h1>
</div>
<ul class="navigation">
% for href, caption in [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]:
<li><a href="${href|h}">${caption|h}</a></li>
% endfor
</ul>
<div class="table">
<table>
% for row in table:
<tr>
% for cell in row:
<td>${cell}</td>
% endfor
</tr>
% endfor
</table>
</div>
</body>
</html>\
""")

    def test_mako():
        mako_template.render(**context)

# --- Genshi ----------------------------------------------------------------
try:
    from genshi.template import MarkupTemplate as GenshiTemplate
except ImportError:
    test_genshi = None
else:
    genshi_template = GenshiTemplate("""\
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://genshi.edgewall.org/">
<head>
<title>${page_title}</title>
</head>
<body>
<div class="header">
<h1>${page_title}</h1>
</div>
<ul class="navigation">
<li py:for="href, caption in [
('index.html', 'Index'),
('downloads.html', 'Downloads'),
('products.html', 'Products')]"><a href="${href}">${caption}</a></li>
</ul>
<div class="table">
<table>
<tr py:for="row in table">
<td py:for="cell in row">${cell}</td>
</tr>
</table>
</div>
</body>
</html>\
""")

    def test_genshi():
        genshi_template.generate(**context).render('html', strip_whitespace=False)

# --- Cheetah ---------------------------------------------------------------
try:
    from Cheetah.Template import Template as CheetahTemplate
except ImportError:
    test_cheetah = None
else:
    cheetah_template = CheetahTemplate("""\
#import cgi
<!doctype html>
<html>
<head>
<title>$cgi.escape($page_title)</title>
</head>
<body>
<div class="header">
<h1>$cgi.escape($page_title)</h1>
</div>
<ul class="navigation">
#for $href, $caption in [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]:
<li><a href="$cgi.escape($href)">$cgi.escape($caption)</a></li>
#end for
</ul>
<div class="table">
<table>
#for $row in $table:
<tr>
#for $cell in $row:
<td>$cell</td>
#end for
</tr>
#end for
</table>
</div>
</body>
</html>\
""", searchList=[dict(context)])

    def test_cheetah():
        # Rendering happens on string conversion of the template object.
        unicode(cheetah_template)

# --- Tenjin ----------------------------------------------------------------
try:
    import tenjin
except ImportError:
    test_tenjin = None
else:
    tenjin_template = tenjin.Template()
    tenjin_template.convert("""\
<!doctype html>
<html>
<head>
<title>${page_title}</title>
</head>
<body>
<div class="header">
<h1>${page_title}</h1>
</div>
<ul class="navigation">
<?py for href, caption in [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]: ?>
<li><a href="${href}">${caption}</a></li>
<?py #end ?>
</ul>
<div class="table">
<table>
<?py for row in table: ?>
<tr>
<?py for cell in row: ?>
<td>#{cell}</td>
<?py #end ?>
</tr>
<?py #end ?>
</table>
</div>
</body>
</html>\
""")

    def test_tenjin():
        # Tenjin expects its helpers to be visible in the calling namespace.
        from tenjin.helpers import escape, to_str
        tenjin_template.render(context, locals())

# --- Spitfire --------------------------------------------------------------
try:
    from spitfire.compiler import util as SpitfireTemplate
    from spitfire.compiler.analyzer import o2_options as spitfire_optimizer
except ImportError:
    test_spitfire = None
else:
    spitfire_template = SpitfireTemplate.load_template("""\
<!doctype html>
<html>
<head>
<title>$cgi.escape($page_title)</title>
</head>
<body>
<div class="header">
<h1>$cgi.escape($page_title)</h1>
</div>
<ul class="navigation">
#for $href, $caption in [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]
<li><a href="$cgi.escape($href)">$cgi.escape($caption)</a></li>
#end for
</ul>
<div class="table">
<table>
#for $row in $table
<tr>
#for $cell in $row
<td>$cell</td>
#end for
</tr>
#end for
</table>
</div>
</body>
</html>\
""", 'spitfire_tmpl', spitfire_optimizer, {'enable_filters': False})
    # Spitfire escapes via the cgi module, so expose it in the search list.
    spitfire_context = dict(context, **{'cgi': cgi})

    def test_spitfire():
        spitfire_template(search_list=[spitfire_context]).main()

# --- Chameleon (ZPT) -------------------------------------------------------
try:
    from chameleon.zpt.template import PageTemplate
except ImportError:
    test_chameleon = None
else:
    chameleon_template = PageTemplate("""\
<html xmlns:tal="http://xml.zope.org/namespaces/tal">
<head>
<title tal:content="page_title">Page Title</title>
</head>
<body>
<div class="header">
<h1 tal:content="page_title">Page Title</h1>
</div>
<ul class="navigation">
<li tal:repeat="item sections"><a tal:attributes="href item[0]" tal:content="item[1]">caption</a></li>
</ul>
<div class="table">
<table>
<tr tal:repeat="row table">
<td tal:repeat="cell row" tal:content="row[cell]">cell</td>
</tr>
</table>
</div>
</body>
</html>\
""")
    chameleon_context = dict(context)
    chameleon_context['sections'] = [
        ('index.html', 'Index'),
        ('downloads.html', 'Downloads'),
        ('products.html', 'Products')
    ]

    def test_chameleon():
        chameleon_template.render(**chameleon_context)

# --- Chameleon (Genshi syntax) ---------------------------------------------
try:
    from chameleon.zpt.template import PageTemplate
    from chameleon.genshi import language
except ImportError:
    test_chameleon_genshi = None
else:
    chameleon_genshi_template = PageTemplate("""\
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://genshi.edgewall.org/">
<head>
<title>${page_title}</title>
</head>
<body>
<div class="header">
<h1>${page_title}</h1>
</div>
<ul class="navigation">
<li py:for="info in sections"><a href="${info[0]}">${info[1]}</a></li>
</ul>
<div class="table">
<table>
<tr py:for="row in table">
<td py:for="cell in row">${row[cell]}</td>
</tr>
</table>
</div>
</body>
</html>\
""", parser=language.Parser())
    chameleon_genshi_context = dict(context)
    chameleon_genshi_context['sections'] = [
        ('index.html', 'Index'),
        ('downloads.html', 'Downloads'),
        ('products.html', 'Products')
    ]

    def test_chameleon_genshi():
        chameleon_genshi_template.render(**chameleon_genshi_context)
# Banner: '\r' clears any partial line, then a framed header plus this
# module's docstring.
sys.stdout.write('\r' + '\n'.join((
    '=' * 80,
    'Template Engine BigTable Benchmark'.center(80),
    '=' * 80,
    __doc__,
    '-' * 80
)) + '\n')


# NOTE: at module level locals() is globals(), so this looks up the
# test_<name> functions defined (or set to None) in the sections above.
for test in 'jinja', 'mako', 'tornado', 'tenjin', 'spitfire', 'django', 'genshi', 'cheetah', 'chameleon', 'chameleon_genshi':
    if locals()['test_' + test] is None:
        sys.stdout.write(' %-20s*not installed*\n' % test)
        continue
    # Timer imports the test function from __main__ so it runs in a clean
    # namespace; 50 renders, mean time reported.
    t = Timer(setup='from __main__ import test_%s as bench' % test,
              stmt='bench()')
    sys.stdout.write(' >> %-20s<running>' % test)
    sys.stdout.flush()
    sys.stdout.write('\r %-20s%.4f seconds\n' % (test, t.timeit(number=50) / 50))
sys.stdout.write('-' * 80 + '\n')
sys.stdout.write('''\
WARNING: The results of this benchmark are useless to compare the
performance of template engines and should not be taken seriously in any
way. It's testing the performance of simple loops and has no real-world
usefulnes. It only used to check if changes on the Jinja code affect
performance in a good or bad way and how it roughly compares to others.
''' + '=' * 80 + '\n')
| lgpl-3.0 |
damdam-s/OpenUpgrade | addons/account_analytic_plans/wizard/account_crossovered_analytic.py | 341 | 2972 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_crossovered_analytic(osv.osv_memory):
    """Transient wizard collecting parameters for the crossovered analytic
    plans report and launching it."""
    _name = "account.crossovered.analytic"
    _description = "Print Crossovered Analytic"
    _columns = {
        'date1': fields.date('Start Date', required=True),
        'date2': fields.date('End Date', required=True),
        'journal_ids': fields.many2many('account.analytic.journal', 'crossovered_journal_rel', 'crossover_id', 'journal_id', 'Analytic Journal'),
        'ref': fields.many2one('account.analytic.account', 'Analytic Account Reference', required=True),
        'empty_line': fields.boolean('Dont show empty lines'),
    }
    _defaults = {
        # Default period: start of the current year through today.
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """Validate that the selected account subtree has analytic lines,
        then return the report action.

        :raises osv.except_osv: when no analytic line belongs to the chosen
            account or any of its children.
        """
        cr.execute('SELECT account_id FROM account_analytic_line')
        # Use a set for O(1) membership tests instead of scanning a list
        # once per child account.
        acc_ids = set(row[0] for row in cr.fetchall())
        data = self.read(cr, uid, ids, context=context)[0]
        data['ref'] = data['ref'][0]
        obj_acc = self.pool.get('account.analytic.account').browse(cr, uid, data['ref'], context=context)
        name = obj_acc.name
        account_ids = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', [data['ref']])], context=context)
        if not any(acc in acc_ids for acc in account_ids):
            # BUG FIX: interpolate *after* the translation lookup.  The
            # original wrote _('... %s.' % name), which passes an already
            # interpolated string to gettext and can never match a msgid.
            raise osv.except_osv(_('User Error!'),
                                 _('There are no analytic lines related to account %s.') % name)
        datas = {
            'ids': [],
            'model': 'account.analytic.account',
            'form': data
        }
        return self.pool['report'].get_action(cr, uid, [], 'account_analytic_plans.report_crossoveredanalyticplans', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/hotshot/log.py | 175 | 6239 | import _hotshot
import os.path
import parser
import symbol
from _hotshot import \
WHAT_ENTER, \
WHAT_EXIT, \
WHAT_LINENO, \
WHAT_DEFINE_FILE, \
WHAT_DEFINE_FUNC, \
WHAT_ADD_INFO
__all__ = ["LogReader", "ENTER", "EXIT", "LINE"]
ENTER = WHAT_ENTER
EXIT = WHAT_EXIT
LINE = WHAT_LINENO
class LogReader:
    """Iterate over a _hotshot profiler log, yielding (what, place, tdelta)
    event tuples and resolving file/function names from DEFINE records as
    they are encountered.  (Python 2 code: string exceptions syntax,
    `print` statement, `parser`/`symbol` stdlib modules.)
    """

    def __init__(self, logfn):
        # fileno -> filename
        self._filemap = {}
        # (fileno, lineno) -> filename, funcname
        self._funcmap = {}

        self._reader = _hotshot.logreader(logfn)
        self._nextitem = self._reader.next
        self._info = self._reader.info
        if 'current-directory' in self._info:
            self.cwd = self._info['current-directory']
        else:
            self.cwd = None

        # This mirrors the call stack of the profiled code as the log
        # is read back in. It contains tuples of the form:
        #
        # (file name, line number of function def, function name)
        #
        self._stack = []
        # Bound-method aliases: avoids an attribute lookup per event.
        self._append = self._stack.append
        self._pop = self._stack.pop

    def close(self):
        self._reader.close()

    def fileno(self):
        """Return the file descriptor of the log reader's log file."""
        return self._reader.fileno()

    def addinfo(self, key, value):
        """This method is called for each additional ADD_INFO record.

        This can be overridden by applications that want to receive
        these events. The default implementation does not need to be
        called by alternate implementations.

        The initial set of ADD_INFO records do not pass through this
        mechanism; this is only needed to receive notification when
        new values are added. Subclasses can inspect self._info after
        calling LogReader.__init__().
        """
        pass

    def get_filename(self, fileno):
        """Map a file number to its recorded filename."""
        try:
            return self._filemap[fileno]
        except KeyError:
            raise ValueError, "unknown fileno"

    def get_filenames(self):
        return self._filemap.values()

    def get_fileno(self, filename):
        """Reverse lookup: normalized filename -> file number."""
        filename = os.path.normcase(os.path.normpath(filename))
        for fileno, name in self._filemap.items():
            if name == filename:
                return fileno
        raise ValueError, "unknown filename"

    def get_funcname(self, fileno, lineno):
        try:
            return self._funcmap[(fileno, lineno)]
        except KeyError:
            raise ValueError, "unknown function location"

    # Iteration support:
    # This adds an optional (& ignored) parameter to next() so that the
    # same bound method can be used as the __getitem__() method -- this
    # avoids using an additional method call which kills the performance.

    def next(self, index=0):
        """Return the next (what, (filename, lineno, funcname), tdelta)
        event, consuming DEFINE_*/ADD_INFO bookkeeping records silently."""
        while 1:
            # This call may raise StopIteration:
            what, tdelta, fileno, lineno = self._nextitem()

            # handle the most common cases first

            if what == WHAT_ENTER:
                filename, funcname = self._decode_location(fileno, lineno)
                t = (filename, lineno, funcname)
                self._append(t)
                return what, t, tdelta

            if what == WHAT_EXIT:
                try:
                    return what, self._pop(), tdelta
                except IndexError:
                    # More EXITs than ENTERs: treat as end of log.
                    raise StopIteration

            if what == WHAT_LINENO:
                filename, firstlineno, funcname = self._stack[-1]
                return what, (filename, lineno, funcname), tdelta

            if what == WHAT_DEFINE_FILE:
                filename = os.path.normcase(os.path.normpath(tdelta))
                self._filemap[fileno] = filename
            elif what == WHAT_DEFINE_FUNC:
                filename = self._filemap[fileno]
                self._funcmap[(fileno, lineno)] = (filename, tdelta)
            elif what == WHAT_ADD_INFO:
                # value already loaded into self.info; call the
                # overridable addinfo() handler so higher-level code
                # can pick up the new value
                if tdelta == 'current-directory':
                    self.cwd = lineno
                self.addinfo(tdelta, lineno)
            else:
                raise ValueError, "unknown event type"

    def __iter__(self):
        return self

    #
    # helpers
    #

    def _decode_location(self, fileno, lineno):
        """Resolve (fileno, lineno) to (filename, funcname), parsing the
        source file as a fallback when DEFINE_FUNC records are missing."""
        try:
            return self._funcmap[(fileno, lineno)]
        except KeyError:
            #
            # This should only be needed when the log file does not
            # contain all the DEFINE_FUNC records needed to allow the
            # function name to be retrieved from the log file.
            #
            if self._loadfile(fileno):
                filename = funcname = None
            try:
                filename, funcname = self._funcmap[(fileno, lineno)]
            except KeyError:
                filename = self._filemap.get(fileno)
                funcname = None
                self._funcmap[(fileno, lineno)] = (filename, funcname)
        return filename, funcname

    def _loadfile(self, fileno):
        """Parse the source of `fileno` and fill _funcmap from its def and
        lambda nodes.  Returns a true value when the file cannot be used."""
        try:
            filename = self._filemap[fileno]
        except KeyError:
            print "Could not identify fileId", fileno
            return 1
        if filename is None:
            return 1
        absname = os.path.normcase(os.path.join(self.cwd, filename))

        try:
            fp = open(absname)
        except IOError:
            return
        st = parser.suite(fp.read())
        fp.close()

        # Scan the tree looking for def and lambda nodes, filling in
        # self._funcmap with all the available information.
        funcdef = symbol.funcdef
        lambdef = symbol.lambdef

        stack = [st.totuple(1)]

        while stack:
            tree = stack.pop()
            try:
                sym = tree[0]
            except (IndexError, TypeError):
                continue
            if sym == funcdef:
                self._funcmap[(fileno, tree[2][2])] = filename, tree[2][1]
            elif sym == lambdef:
                self._funcmap[(fileno, tree[1][2])] = filename, "<lambda>"
            stack.extend(list(tree[1:]))
| mit |
nikolay-fedotov/tempest | tempest/api/object_storage/test_container_acl_negative.py | 2 | 10157 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Joe H. Rahme <joe.hakim.rahme@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class ObjectACLsNegativeTest(base.BaseObjectTest):
    @classmethod
    def resource_setup(cls):
        """Create the secondary test user and capture its auth data, used by
        the tests to issue requests with a non-authorized token."""
        super(ObjectACLsNegativeTest, cls).resource_setup()
        cls.data.setup_test_user()
        test_os = clients.Manager(cls.data.test_credentials)
        cls.test_auth_data = test_os.auth_provider.auth_data
    def setUp(self):
        """Create a fresh, randomly named container for each test."""
        super(ObjectACLsNegativeTest, self).setUp()
        self.container_name = data_utils.rand_name(name='TestContainer')
        self.container_client.create_container(self.container_name)
    def tearDown(self):
        # Delete the per-test container created in setUp (and its objects).
        self.delete_containers([self.container_name])
        super(ObjectACLsNegativeTest, self).tearDown()
    @test.attr(type=['negative', 'gate'])
    def test_write_object_without_using_creds(self):
        """Object PUT without an X-Auth-Token must raise Unauthorized."""
        # trying to create object with empty headers
        # X-Auth-Token is not provided
        object_name = data_utils.rand_name(name='Object')
        self.custom_object_client.auth_provider.set_alt_auth_data(
            request_part='headers',
            auth_data=None
        )
        self.assertRaises(exceptions.Unauthorized,
                          self.custom_object_client.create_object,
                          self.container_name, object_name, 'data')
    @test.attr(type=['negative', 'gate'])
    def test_delete_object_without_using_creds(self):
        """Object DELETE without an X-Auth-Token must raise Unauthorized."""
        # create object
        object_name = data_utils.rand_name(name='Object')
        resp, _ = self.object_client.create_object(self.container_name,
                                                   object_name, 'data')
        # trying to delete object with empty headers
        # X-Auth-Token is not provided
        self.custom_object_client.auth_provider.set_alt_auth_data(
            request_part='headers',
            auth_data=None
        )
        self.assertRaises(exceptions.Unauthorized,
                          self.custom_object_client.delete_object,
                          self.container_name, object_name)
@test.attr(type=['negative', 'gate'])
def test_write_object_with_non_authorized_user(self):
# attempt to upload another file using non-authorized user
# User provided token is forbidden. ACL are not set
object_name = data_utils.rand_name(name='Object')
# trying to create object with non-authorized user
self.custom_object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.create_object,
self.container_name, object_name, 'data')
@test.attr(type=['negative', 'gate'])
def test_read_object_with_non_authorized_user(self):
# attempt to read object using non-authorized user
# User provided token is forbidden. ACL are not set
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(
self.container_name, object_name, 'data')
self.assertEqual(resp['status'], '201')
self.assertHeaders(resp, 'Object', 'PUT')
# trying to get object with non authorized user token
self.custom_object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.get_object,
self.container_name, object_name)
@test.attr(type=['negative', 'gate'])
def test_delete_object_with_non_authorized_user(self):
# attempt to delete object using non-authorized user
# User provided token is forbidden. ACL are not set
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(
self.container_name, object_name, 'data')
self.assertEqual(resp['status'], '201')
self.assertHeaders(resp, 'Object', 'PUT')
# trying to delete object with non-authorized user token
self.custom_object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.delete_object,
self.container_name, object_name)
@test.attr(type=['negative', 'smoke'])
def test_read_object_without_rights(self):
# attempt to read object using non-authorized user
# update X-Container-Read metadata ACL
cont_headers = {'X-Container-Read': 'badtenant:baduser'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertIn(int(resp_meta['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
self.assertEqual(resp['status'], '201')
self.assertHeaders(resp, 'Object', 'PUT')
# Trying to read the object without rights
self.custom_object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.get_object,
self.container_name, object_name)
@test.attr(type=['negative', 'smoke'])
def test_write_object_without_rights(self):
# attempt to write object using non-authorized user
# update X-Container-Write metadata ACL
cont_headers = {'X-Container-Write': 'badtenant:baduser'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertIn(int(resp_meta['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without rights
self.custom_object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
object_name = data_utils.rand_name(name='Object')
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.create_object,
self.container_name,
object_name, 'data')
@test.attr(type=['negative', 'smoke'])
def test_write_object_without_write_rights(self):
# attempt to write object using non-authorized user
# update X-Container-Read and X-Container-Write metadata ACL
cont_headers = {'X-Container-Read':
self.data.test_tenant + ':' + self.data.test_user,
'X-Container-Write': ''}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertIn(int(resp_meta['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without write rights
self.custom_object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
object_name = data_utils.rand_name(name='Object')
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.create_object,
self.container_name,
object_name, 'data')
@test.attr(type=['negative', 'smoke'])
def test_delete_object_without_write_rights(self):
# attempt to delete object using non-authorized user
# update X-Container-Read and X-Container-Write metadata ACL
cont_headers = {'X-Container-Read':
self.data.test_tenant + ':' + self.data.test_user,
'X-Container-Write': ''}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertIn(int(resp_meta['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
self.assertEqual(resp['status'], '201')
self.assertHeaders(resp, 'Object', 'PUT')
# Trying to delete the object without write rights
self.custom_object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.delete_object,
self.container_name,
object_name)
| apache-2.0 |
egabancho/invenio | invenio/modules/workflows/tasks/marcxml_tasks.py | 1 | 9255 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Set of function for harvesting."""
import os
import glob
import traceback
from functools import wraps
from six import callable
def approve_record(obj, eng):
    """Halt the workflow so the record can be approved in the holdingpen.

    The engine is halted with the "approval" action attached; if the
    action cannot be assigned, the failure is recorded on the object's
    extra data instead of propagating.

    :param obj: Bibworkflow Object to process
    :param eng: BibWorkflowEngine processing the object
    """
    try:
        eng.halt(action="approval", msg='Record needs approval')
    except KeyError:
        # Record the failure on the object for later inspection.
        obj.extra_data["_error_msg"] = 'Could not assign action'
def was_approved(obj, eng):
    """Return True if the record was approved in the holdingpen.

    :param obj: Bibworkflow Object to process
    :param eng: BibWorkflowEngine processing the object (unused)
    """
    return obj.get_extra_data().get("approved", False)
def convert_record_to_bibfield(obj, eng):
    """Convert the object's MARCXML payload into a SmartJSON record.

    Expecting MARCXML in ``obj.data``, this task converts it using the
    current configuration and replaces ``obj.data`` in place.

    :param obj: Bibworkflow Object to process
    :param eng: BibWorkflowEngine processing the object
    """
    # Imported inside the task, presumably to avoid an import cycle at
    # module load time -- TODO confirm.
    from invenio.modules.workflows.utils import convert_marcxml_to_bibfield
    obj.data = convert_marcxml_to_bibfield(obj.data)
    eng.log.info("Field conversion succeeded")
# NOTE(review): looks like ``description`` is used by the workflow
# machinery as a human-readable task title -- confirm.
convert_record_to_bibfield.description = 'Get Record from MARCXML'
def get_files_list(path, parameter):
    """Return a task listing the files of *path* that match *parameter*.

    :param path: directory to scan.
    :param parameter: a glob pattern, or a callable (possibly returning
        further callables) taking ``(obj, eng)`` that eventually resolves
        to such a pattern.
    :return: workflow task returning the list of matching file paths.
    """
    @wraps(get_files_list)
    def _get_files_list(obj, eng):
        # Resolve the pattern: unwrap nested callables until a plain
        # value remains.  (The loop condition already covers the
        # non-callable case, so no extra branch is needed.)
        pattern = parameter
        while callable(pattern):
            pattern = pattern(obj, eng)
        # glob1 yields bare file names; join them back onto the directory.
        return [os.path.join(path, filename)
                for filename in glob.glob1(path, pattern)]
    return _get_files_list
def set_obj_extra_data_key(key, value):
    """Return a task that stores *value* under *key* in ``obj.extra_data``.

    Both *key* and *value* may be callables (possibly returning further
    callables) taking ``(obj, eng)``; they are unwrapped until plain
    values remain.  The resolved key is stringified before use.

    :param key: extra-data key, or callable resolving to it.
    :param value: value to store, or callable resolving to it.
    """
    @wraps(set_obj_extra_data_key)
    def _set_obj_extra_data_key(obj, eng):
        my_value = value
        my_key = key
        # Unwrap nested callables; the ``while`` condition alone is
        # sufficient (no leading ``if callable`` guard needed).
        while callable(my_value):
            my_value = my_value(obj, eng)
        while callable(my_key):
            my_key = my_key(obj, eng)
        obj.extra_data[str(my_key)] = my_value
    return _set_obj_extra_data_key
def get_obj_extra_data_key(name):
    """Build a task reading ``obj.extra_data[name]``.

    :param name: key to look up in the object's extra data.
    :return: workflow task returning the stored value.
    """
    @wraps(get_obj_extra_data_key)
    def _getter(obj, eng):
        return obj.extra_data[name]
    return _getter
def get_eng_extra_data_key(name):
    """Build a task reading ``eng.extra_data[name]``.

    :param name: key to look up in the engine's extra data.
    :return: workflow task returning the stored value.
    """
    @wraps(get_eng_extra_data_key)
    def _getter(obj, eng):
        return eng.extra_data[name]
    return _getter
def get_data(obj, eng):
    """Task returning the data payload of the object.

    :param obj: Bibworkflow Object to process
    :param eng: BibWorkflowEngine processing the object (unused)
    """
    return obj.data
def convert_record(stylesheet="oaidc2marcxml.xsl"):
    """Build a task converting the object data to MARCXML via *stylesheet*.

    :param stylesheet: which XSLT stylesheet to use
    :return: workflow task performing the conversion in place
    :raise WorkflowError: when the data is missing or the conversion fails
    """
    @wraps(convert_record)
    def _convert_record(obj, eng):
        # Imported inside the task, presumably to avoid import cycles at
        # module load time -- TODO confirm.
        from invenio.modules.workflows.errors import WorkflowError
        from invenio.legacy.bibconvert.xslt_engine import convert
        eng.log.info("Starting conversion using %s stylesheet" %
                     (stylesheet,))
        # Empty/None data cannot be converted; fail loudly for the engine.
        if not obj.data:
            obj.log.error("Not valid conversion data!")
            raise WorkflowError("Error: conversion data missing",
                                id_workflow=eng.uuid,
                                id_object=obj.id)
        try:
            obj.data = convert(obj.data, stylesheet)
        except Exception as e:
            # Preserve the original traceback in the workflow error message.
            msg = "Could not convert record: %s\n%s" % \
                  (str(e), traceback.format_exc())
            raise WorkflowError("Error: %s" % (msg,),
                                id_workflow=eng.uuid,
                                id_object=obj.id)
    _convert_record.description = 'Convert record'
    return _convert_record
def update_last_update(repository_list):
    """Build a task updating the last-run date of harvested repositories.

    :param repository_list: a list of repositories (mappings with an
        ``"id"`` key), a single repository, or a callable resolving to
        either.
    """
    from invenio.legacy.oaiharvest.dblayer import update_lastrun
    @wraps(update_last_update)
    def _update_last_update(obj, eng):
        # Only update when an earlier task explicitly requested it.
        if "_should_last_run_be_update" in obj.extra_data:
            if obj.extra_data["_should_last_run_be_update"]:
                repository_list_to_process = repository_list
                if not isinstance(repository_list_to_process, list):
                    if callable(repository_list_to_process):
                        # Unwrap nested callables until a value remains.
                        while callable(repository_list_to_process):
                            repository_list_to_process = repository_list_to_process(
                                obj, eng)
                    else:
                        # Single repository: normalize to a one-item list.
                        repository_list_to_process = [
                            repository_list_to_process]
                # NOTE(review): a callable that resolves to a single
                # non-list repository is NOT re-wrapped in a list here --
                # confirm callers always resolve to lists.
                for repository in repository_list_to_process:
                    update_lastrun(repository["id"])
    return _update_last_update
def quick_match_record(obj, eng):
    """Retrieve the record Id from a record.

    Tries, in order: an explicit ``recid``, then each known stable
    identifier (sysno, OAI id, external OAI id, DOI) until an existing
    record is found.  A match is stored under
    ``obj.extra_data["persistent_ids"]["recid"]``.

    :param obj: Bibworkflow Object to process
    :param eng: BibWorkflowEngine processing the object
    :return: True when a matching record id was found, False otherwise.
    """
    from invenio.legacy.bibupload.engine import (find_record_from_recid,
                                                 find_record_from_sysno,
                                                 find_records_from_extoaiid,
                                                 find_record_from_oaiid,
                                                 find_record_from_doi)
    from invenio.modules.records.api import Record
    identifier_function_to_check = {'recid': find_record_from_recid,
                                    'system_number': find_record_from_sysno,
                                    'oaiid': find_record_from_oaiid,
                                    'system_control_number': find_records_from_extoaiid,
                                    'doi': find_record_from_doi}
    record = Record(obj.data.dumps())
    try:
        identifiers = record.persistent_identifiers
    except Exception as e:
        # if anything goes wrong, assume we need to get it manually.
        eng.log.error("Problem with getting identifiers: %s\n%s"
                      % (str(e), traceback.format_exc()))
        identifiers = []
    # NOTE(review): "persistent_ids" is later indexed with a string key,
    # yet the fallback above is a plain list -- confirm the expected type
    # of ``record.persistent_identifiers``.
    obj.extra_data["persistent_ids"] = identifiers
    identifier_dict = {}
    for name, value in identifiers:
        value_dict = {}
        for dic in value:
            value_dict.update(dic)
        identifier_dict[name] = value_dict
    if "recid" in identifier_dict:
        # An explicit recid wins outright.
        obj.extra_data["persistent_ids"]["recid"] = identifier_dict["recid"]
        return True
    # No explicit recid: try to locate the record via any of the other
    # stable identifiers defined above.
    found_recid = False
    # ``items()`` keeps this compatible with both Python 2 and 3;
    # ``iteritems()`` does not exist on Python 3 dictionaries.
    for name, func in identifier_function_to_check.items():
        if name in identifier_dict:
            if name in identifier_dict[name]:
                # To get {"doi": {"doi": val}}
                found_recid = func(identifier_dict[name][name])
            elif "value" in identifier_dict[name]:
                # To get {"doi": {"value": val}}
                found_recid = func(identifier_dict[name]["value"])
            if found_recid:
                break
    if found_recid:
        obj.extra_data["persistent_ids"]["recid"] = found_recid
        return True
    return False
def upload_record(mode="ir"):
    """Build a task submitting the object's data to bibupload.

    :param mode: bibupload mode flag (passed as ``-<mode>``), e.g. "ir".
    :return: workflow task performing the low-level task submission.
    """
    @wraps(upload_record)
    def _upload_record(obj, eng):
        from invenio.legacy.bibsched.bibtask import task_low_level_submission
        eng.log_info("Saving data to temporary file for upload")
        # The object serializes itself to a file that bibupload can read.
        filename = obj.save_to_file()
        params = ["-%s" % (mode,), filename]
        task_id = task_low_level_submission("bibupload", "bibworkflow",
                                            *tuple(params))
        eng.log_info("Submitted task #%s" % (task_id,))
    return _upload_record
| gpl-2.0 |
Just-D/chromium-1 | components/test/data/password_manager/automated_tests/tests.py | 12 | 19494 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automated tests for many websites"""
import argparse
from environment import Environment
from websitetest import WebsiteTest
# Test-case names understood by RunTest; used as the default set when
# --test-cases-to-run is not passed on the command line.
TEST_CASES = ("PromptFailTest", "PromptSuccessTest", "SaveAndAutofillTest")
# Per-site login drivers.  Each subclass implements Login() as the exact
# UI sequence (navigate, fill credentials, submit) for one website.
class Alexa(WebsiteTest):
  def Login(self):
    self.GoTo("https://www.alexa.com/secure/login")
    self.FillUsernameInto("#email")
    self.FillPasswordInto("#pwd")
    self.Submit("#pwd")
class Dropbox(WebsiteTest):
  def Login(self):
    self.GoTo("https://www.dropbox.com/login")
    self.FillUsernameInto(".text-input-input[name='login_email']")
    self.FillPasswordInto(".text-input-input[name='login_password']")
    self.Submit(".text-input-input[name='login_password']")
class Facebook(WebsiteTest):
  def Login(self):
    self.GoTo("https://www.facebook.com")
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='pass']")
    self.Submit("[name='pass']")
class Github(WebsiteTest):
  def Login(self):
    self.GoTo("https://github.com/login")
    self.FillUsernameInto("[name='login']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='commit']")
class Google(WebsiteTest):
  def Login(self):
    self.GoTo("https://accounts.google.com/ServiceLogin?sacu=1&continue=")
    self.FillUsernameInto("#Email")
    self.FillPasswordInto("#Passwd")
    self.Submit("#Passwd")
class Imgur(WebsiteTest):
  def Login(self):
    self.GoTo("https://imgur.com/signin")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")
class Liveinternet(WebsiteTest):
  def Login(self):
    self.GoTo("http://liveinternet.ru/journals.php?s=&action1=login")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")
class Linkedin(WebsiteTest):
  def Login(self):
    self.GoTo("https://www.linkedin.com")
    self.FillUsernameInto("#session_key-login")
    self.FillPasswordInto("#session_password-login")
    self.Submit("#session_password-login")
class Mailru(WebsiteTest):
  def Login(self):
    self.GoTo("https://mail.ru")
    self.FillUsernameInto("#mailbox__login")
    self.FillPasswordInto("#mailbox__password")
    self.Submit("#mailbox__password")
class Nytimes(WebsiteTest):
  def Login(self):
    self.GoTo("https://myaccount.nytimes.com/auth/login")
    self.FillUsernameInto("#userid")
    self.FillPasswordInto("#password")
    self.Submit("#password")
class Odnoklassniki(WebsiteTest):
  def Login(self):
    self.GoTo("https://ok.ru")
    self.FillUsernameInto("#field_email")
    self.FillPasswordInto("#field_password")
    self.Submit("#field_password")
class Pinterest(WebsiteTest):
  def Login(self):
    self.GoTo("https://www.pinterest.com/login/")
    self.FillUsernameInto("[name='username_or_email']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")
class Reddit(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.reddit.com")
    self.Click(".user .login-required")
    self.FillUsernameInto("#user_login")
    self.FillPasswordInto("#passwd_login")
    # Let the login form settle before submitting.
    self.Wait(2)
    self.Submit("#passwd_login")
class Tumblr(WebsiteTest):
  def Login(self):
    self.GoTo("https://www.tumblr.com/login")
    self.FillUsernameInto("#signup_email")
    self.FillPasswordInto("#signup_password")
    self.Submit("#signup_password")
class Twitter(WebsiteTest):
  def Login(self):
    """Log into Twitter via the sign-in form on the home page."""
    # URL normalized: it previously contained a stray third slash.
    self.GoTo("https://twitter.com")
    self.FillUsernameInto("#signin-email")
    self.FillPasswordInto("#signin-password")
    self.Submit("#signin-password")
class Vkontakte(WebsiteTest):
  def Login(self):
    """Log into VKontakte via the home-page login form."""
    # URL normalized: it previously contained a stray third slash.
    self.GoTo("https://vk.com")
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='pass']")
    self.Submit("[name='pass']")
class Wikia(WebsiteTest):
  def Login(self):
    """Log into Wikia via the account-navigation drop-down."""
    # Trailing semicolons removed; Python statements need none.
    self.GoTo("https://wikia.com")
    self.Click("#AccountNavigation")
    self.FillUsernameInto("#usernameInput")
    self.FillPasswordInto("#passwordInput")
    self.Submit("input.login-button")
# More per-site login drivers (see the classes above).
class Wikipedia(WebsiteTest):
  def Login(self):
    self.GoTo("https://en.wikipedia.org/w/index.php?title=Special:UserLogin")
    self.FillUsernameInto("#wpName1")
    self.FillPasswordInto("#wpPassword1")
    self.Submit("#wpPassword1")
class Wordpress(WebsiteTest):
  def Login(self):
    self.GoTo("https://de.wordpress.com/wp-login.php")
    self.FillUsernameInto("[name='log']")
    self.FillPasswordInto("[name='pwd']")
    self.Submit("[name='pwd']")
class Yahoo(WebsiteTest):
  def Login(self):
    self.GoTo("https://login.yahoo.com")
    self.FillUsernameInto("#login-username")
    self.FillPasswordInto("#login-passwd")
    self.Click("#login-signin")
class Yandex(WebsiteTest):
  def Login(self):
    self.GoTo("https://mail.yandex.com")
    self.FillUsernameInto("[name='login']")
    self.FillPasswordInto("[name='passwd']")
    self.Submit("[name='passwd']")
# Fails due to test framework issue(?).
class Aliexpress(WebsiteTest):
  def Login(self):
    self.GoTo("https://login.aliexpress.com/buyer.htm?return=http%3A%2F%2Fwww.aliexpress.com%2F")
    # The login form lives inside an iframe; switch into it first.
    self.WaitUntilDisplayed("iframe#alibaba-login-box")
    frame = self.driver.find_element_by_css_selector("iframe#alibaba-login-box")
    self.driver.switch_to_frame(frame)
    self.FillUsernameInto("#fm-login-id")
    self.FillPasswordInto("#fm-login-password")
    self.Click("#fm-login-submit")
# Fails to save password.
class Adobe(WebsiteTest):
  def Login(self):
    # Full IMS login URL with redirect/denied callbacks pre-encoded.
    self.GoTo("https://adobeid-na1.services.adobe.com/renga-idprovider/pages/l"
              "ogin?callback=https%3A%2F%2Fims-na1.adobelogin.com%2Fims%2Fadob"
              "eid%2Fadobedotcom2%2FAdobeID%2Ftoken%3Fredirect_uri%3Dhttps%253"
              "A%252F%252Fwww.adobe.com%252F%2523from_ims%253Dtrue%2526old_has"
              "h%253D%2526client_id%253Dadobedotcom2%2526scope%253Dcreative_cl"
              "oud%25252CAdobeID%25252Copenid%25252Cgnav%25252Cread_organizati"
              "ons%25252Cadditional_info.projectedProductContext%2526api%253Da"
              "uthorize&client_id=adobedotcom2&scope=creative_cloud%2CAdobeID%"
              "2Copenid%2Cgnav%2Cread_organizations%2Cadditional_info.projecte"
              "dProductContext&display=web_v2&denied_callback=https%3A%2F%2Fim"
              "s-na1.adobelogin.com%2Fims%2Fdenied%2Fadobedotcom2%3Fredirect_u"
              "ri%3Dhttps%253A%252F%252Fwww.adobe.com%252F%2523from_ims%253Dtr"
              "ue%2526old_hash%253D%2526client_id%253Dadobedotcom2%2526scope%2"
              "53Dcreative_cloud%25252CAdobeID%25252Copenid%25252Cgnav%25252Cr"
              "ead_organizations%25252Cadditional_info.projectedProductContext"
              "%2526api%253Dauthorize%26response_type%3Dtoken&relay=afebfef8-e"
              "2b3-4c0e-9c94-07baf205bae8&locale=en_US&flow_type=token&dc=fals"
              "e&client_redirect=https%3A%2F%2Fims-na1.adobelogin.com%2Fims%2F"
              "redirect%2Fadobedotcom2%3Fclient_redirect%3Dhttps%253A%252F%252"
              "Fwww.adobe.com%252F%2523from_ims%253Dtrue%2526old_hash%253D%252"
              "6client_id%253Dadobedotcom2%2526scope%253Dcreative_cloud%25252C"
              "AdobeID%25252Copenid%25252Cgnav%25252Cread_organizations%25252C"
              "additional_info.projectedProductContext%2526api%253Dauthorize&i"
              "dp_flow_type=login")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("#sign_in")
# Bug not reproducible without test.
class Amazon(WebsiteTest):
  def Login(self):
    self.GoTo(
        "https://www.amazon.com/ap/signin?openid.assoc_handle=usflex"
        "&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net"
        "%2Fauth%2F2.0")
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='password']")
    self.Click("#signInSubmit-input")
# Password not saved.
class Ask(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.ask.com/answers/browse?qsrc=321&q=&o=0&l=dir#")
    # Keep clicking the sign-in toggle until the form actually appears.
    while not self.IsDisplayed("[name='username']"):
      self.Click("#a16CnbSignInText")
      self.Wait(1)
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Click(".signin_show.signin_submit")
# Password not saved.
class Baidu(WebsiteTest):
  def Login(self):
    self.GoTo("https://passport.baidu.com")
    self.FillUsernameInto("[name='userName']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")
# Chrome crashes.
class Buzzfeed(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.buzzfeed.com/signin")
    self.FillUsernameInto("#login-username")
    self.FillPasswordInto("#login-password")
    self.Submit("#login-password")
# http://crbug.com/368690
class Cnn(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.cnn.com")
    self.Wait(5)
    # Open the login overlay; retry until the button shows up.
    while not self.IsDisplayed(".cnnOvrlyBtn.cnnBtnLogIn"):
      self.ClickIfClickable("#hdr-auth .no-border.no-pad-right a")
      self.Wait(1)
    self.Click(".cnnOvrlyBtn.cnnBtnLogIn")
    self.FillUsernameInto("#cnnOverlayEmail1l")
    self.FillPasswordInto("#cnnOverlayPwd")
    self.Click(".cnnOvrlyBtn.cnnBtnLogIn")
    self.Click(".cnnOvrlyBtn.cnnBtnLogIn")
    self.Wait(5)
# Fails due to "Too many failed logins. Please wait a minute".
# http://crbug.com/466953
class Craigslist(WebsiteTest):
  def Login(self):
    self.GoTo("https://accounts.craigslist.org/login")
    self.FillUsernameInto("#inputEmailHandle")
    self.FillPasswordInto("#inputPassword")
    self.Submit("button")
# Crashes.
class Dailymotion(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.dailymotion.com/gb")
    self.Click(".sd_header__login span")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='save']")
# http://crbug.com/368690
class Ebay(WebsiteTest):
  def Login(self):
    self.GoTo("https://signin.ebay.com/")
    self.FillUsernameInto("[name='userid']")
    self.FillPasswordInto("[name='pass']")
    self.Submit("[name='pass']")
# Iframe, password saved but not autofilled.
class Espn(WebsiteTest):
  def Login(self):
    self.GoTo("http://espn.go.com/")
    # Open the login lightbox; retry until its iframe is displayed.
    while not self.IsDisplayed("#cboxLoadedContent iframe"):
      self.Click("#signin .cbOverlay")
      self.Wait(1)
    frame = self.driver.find_element_by_css_selector("#cboxLoadedContent "
                                                     "iframe")
    self.driver.switch_to_frame(frame)
    self.FillUsernameInto("#username")
    self.FillPasswordInto("#password")
    # Keep submitting until the password field disappears.
    while self.IsDisplayed("#password"):
      self.ClickIfClickable("#submitBtn")
      self.Wait(1)
# Fails due to test framework issue.
class Flipkart(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.flipkart.com/")
    self.Wait(2)
    self.Click(".header-links .js-login")
    self.FillUsernameInto("#login_email_id")
    self.FillPasswordInto("#login_password")
    self.Submit("#login_password")
class Instagram(WebsiteTest):
  def Login(self):
    self.GoTo("https://instagram.com/accounts/login/")
    self.FillUsernameInto("#lfFieldInputUsername")
    self.FillPasswordInto("#lfFieldInputPassword")
    self.Submit(".lfSubmit")
# http://crbug.com/367768
class Live(WebsiteTest):
  def Login(self):
    self.GoTo("https://login.live.com")
    self.FillUsernameInto("[name='login']")
    self.FillPasswordInto("[name='passwd']")
    self.Submit("[name='passwd']")
# http://crbug.com/368690
class One63(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.163.com")
    self.HoverOver("#js_N_navHighlight")
    self.FillUsernameInto("#js_loginframe_username")
    self.FillPasswordInto(".ntes-loginframe-label-ipt[type='password']")
    self.Click(".ntes-loginframe-btn")
class StackExchange(WebsiteTest):
  def Login(self):
    self.GoTo("https://stackexchange.com/users/login#log-in")
    # The credential form lives inside an iframe; switch into it first.
    iframe_selector = "#affiliate-signin-iframe"
    self.WaitUntilDisplayed(iframe_selector)
    frame = self.driver.find_element_by_css_selector(iframe_selector)
    self.driver.switch_to_frame(frame)
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[value='Sign In']")
# http://crbug.com/368690
class Vube(WebsiteTest):
  def Login(self):
    self.GoTo("https://vube.com")
    self.Click("[vube-login='']")
    self.FillUsernameInto("[ng-model='login.user']")
    self.FillPasswordInto("[ng-model='login.pass']")
    # Keep clicking login until the form closes or an error prompt shows.
    while (self.IsDisplayed("[ng-model='login.pass']")
           and not self.IsDisplayed(".prompt.alert")):
      self.ClickIfClickable("[ng-click='login()']")
      self.Wait(1)
# Password not saved.
class Ziddu(WebsiteTest):
  def Login(self):
    self.GoTo("http://www.ziddu.com/login.php")
    self.FillUsernameInto("#email")
    self.FillPasswordInto("#password")
    self.Click(".login input")
# Registry mapping website-test names (accepted on the command line) to
# their WebsiteTest instances; trailing comments record known issues.
all_tests = {
    "163": One63("163"), # http://crbug.com/368690
    "adobe": Adobe("adobe"), # Password saving not offered.
    "alexa": Alexa("alexa"),
    "aliexpress": Aliexpress("aliexpress"), # Fails due to test framework issue.
    "amazon": Amazon("amazon"), # Bug not reproducible without test.
    "ask": Ask("ask"), # Password not saved.
    "baidu": Baidu("baidu"), # Password not saved.
    "buzzfeed": Buzzfeed("buzzfeed",
                         username_not_auto=True,
                         password_not_auto=True),
    "cnn": Cnn("cnn"), # http://crbug.com/368690
    "craigslist": Craigslist("craigslist"), # Too many failed logins per time.
    "dailymotion": Dailymotion("dailymotion"), # Crashes.
    "dropbox": Dropbox("dropbox"),
    "ebay": Ebay("ebay"), # http://crbug.com/368690
    "espn": Espn("espn"), # Iframe, password saved but not autofilled.
    "facebook": Facebook("facebook"),
    "flipkart": Flipkart("flipkart"), # Fails due to test framework issue.
    "github": Github("github"),
    "google": Google("google"),
    "imgur": Imgur("imgur"),
    "instagram": Instagram("instagram"), # Iframe, pw saved but not autofilled.
    "linkedin": Linkedin("linkedin"),
    "liveinternet": Liveinternet("liveinternet"),
    "live": Live("live", username_not_auto=True), # http://crbug.com/367768
    "mailru": Mailru("mailru"),
    "nytimes": Nytimes("nytimes"),
    "odnoklassniki": Odnoklassniki("odnoklassniki"),
    "pinterest": Pinterest("pinterest"),
    "reddit": Reddit("reddit", username_not_auto=True),
    "stackexchange": StackExchange("stackexchange"), # Iframe, not autofilled.
    "tumblr": Tumblr("tumblr", username_not_auto=True),
    "twitter": Twitter("twitter"),
    "vkontakte": Vkontakte("vkontakte"),
    "vube": Vube("vube"), # http://crbug.com/368690
    "wikia": Wikia("wikia"),
    "wikipedia": Wikipedia("wikipedia", username_not_auto=True),
    "wordpress": Wordpress("wordpress"),
    "yahoo": Yahoo("yahoo", username_not_auto=True),
    "yandex": Yandex("yandex"),
    "ziddu": Ziddu("ziddu"), # Password not saved.
}
def SaveResults(environment_tests_results, environment_save_path,
save_only_failures):
"""Save the test results in an xml file.
Args:
environment_tests_results: A list of the TestResults that are going to be
saved.
environment_save_path: The file where the results are going to be saved.
If it's None, the results are not going to be stored.
Raises:
Exception: An exception is raised if the file is not found.
"""
if environment_save_path:
xml = "<result>"
for (name, test_type, success, failure_log) in environment_tests_results:
if not (save_only_failures and success):
xml += (
"<test name='{0}' successful='{1}' type='{2}'>{3}</test>".format(
name, success, test_type, failure_log))
xml += "</result>"
with open(environment_save_path, "w") as save_file:
save_file.write(xml)
def RunTest(chrome_path, chromedriver_path, profile_path,
            environment_passwords_path, website_test_name,
            test_case_name):
  """Runs the test for the specified website.

  Args:
    chrome_path: The chrome binary file.
    chromedriver_path: The chromedriver binary file.
    profile_path: The chrome testing profile folder.
    environment_passwords_path: The usernames and passwords file.
    website_test_name: Name of the website to test (refer to keys in
        all_tests above).
    test_case_name: Name of the test case to run; automatic password
        saving is enabled only for "SaveAndAutofillTest".

  Returns:
    The results of the test as list of TestResults.

  Raises:
    Exception: An exception is raised if one of the tests for the website
        fails, or if the website name is not known.
  """
  enable_automatic_password_saving = (test_case_name == "SaveAndAutofillTest")
  environment = Environment(chrome_path, chromedriver_path, profile_path,
                            environment_passwords_path,
                            enable_automatic_password_saving)
  try:
    if website_test_name in all_tests:
      environment.AddWebsiteTest(all_tests[website_test_name])
    else:
      raise Exception("Test name {} is unknown.".format(website_test_name))
    environment.RunTestsOnSites(test_case_name)
    return environment.tests_results
  finally:
    # Always tear the browser environment down, even on failure.
    environment.Quit()
def main():
  """Command-line entry point: parse arguments and run the requested tests."""
  parser = argparse.ArgumentParser(
      description="Password Manager automated tests help.")
  parser.add_argument(
      "--chrome-path", action="store", dest="chrome_path",
      help="Set the chrome path (required).", required=True)
  parser.add_argument(
      "--chromedriver-path", action="store", dest="chromedriver_path",
      help="Set the chromedriver path (required).", required=True)
  parser.add_argument(
      "--profile-path", action="store", dest="profile_path",
      help="Set the profile path (required). You just need to choose a "
           "temporary empty folder. If the folder is not empty all its content "
           "is going to be removed.",
      required=True)
  parser.add_argument(
      "--passwords-path", action="store", dest="passwords_path",
      help="Set the usernames/passwords path (required).", required=True)
  parser.add_argument("--save-path", action="store", dest="save_path",
                      help="Write the results in a file.")
  parser.add_argument("--save-only-failures",
                      help="Only save logs for failing tests.",
                      dest="save_only_failures", action="store_true",
                      default=False)
  parser.add_argument("website", help="Website test name on which"
                      "tests should be run.")
  parser.add_argument("--test-cases-to-run", help="Names of test cases which"
                      "should be run. Currently supported test cases are:"
                      "PromptFailTest, PromptSuccessTest, SaveAndAutofillTest",
                      dest="test_cases_to_run", action="store", nargs="*")
  args = parser.parse_args()
  save_path = None
  if args.save_path:
    save_path = args.save_path
  test_cases_to_run = args.test_cases_to_run or TEST_CASES
  for test_case in test_cases_to_run:
    tests_results = RunTest(
        args.chrome_path, args.chromedriver_path, args.profile_path,
        args.passwords_path, args.website, test_case)
    # NOTE(review): the save file is rewritten on every iteration, so only
    # the last test case's results survive -- confirm this is intended.
    SaveResults(tests_results, save_path,
                save_only_failures=args.save_only_failures)
if __name__ == "__main__":
  main()
| bsd-3-clause |
tlecomte/friture | friture/plotting/canvasWidget.py | 2 | 5938 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
from PyQt5 import QtCore, QtGui, QtWidgets
from .grid import Grid
class CanvasWidget(QtWidgets.QWidget):
    """Central plot area: draws the attached plot items over a grid, frames
    them with a 1-pixel border, and shows a crosshair ruler with a tracker
    text while the left mouse button is pressed.

    Pixel <-> plot coordinate conversions are delegated to the vertical and
    horizontal scale transform objects received at construction time.
    """

    # emitted on resize with the new (width, height) so that the scale
    # transforms can adapt their ranges
    resized = QtCore.pyqtSignal(int, int)

    def __init__(self, parent, verticalScaleTransform, horizontalScaleTransform):
        super(CanvasWidget, self).__init__(parent)

        # set proper size policy for this widget
        self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding))

        # the widget repaints its full surface itself in paintEvent
        self.setAutoFillBackground(False)
        self.setAttribute(QtCore.Qt.WA_NoSystemBackground, True)

        self.horizontalScaleTransform = horizontalScaleTransform
        self.verticalScaleTransform = verticalScaleTransform

        self.lastPos = QtCore.QPoint()
        self.ruler = False  # True while the crosshair/tracker is displayed
        self.mousex = 0
        self.mousey = 0

        # use a cross cursor to easily select a point on the graph
        self.setCursor(QtCore.Qt.CrossCursor)

        self.attachedItems = []
        self.grid = Grid()

        # default tracker text; can be overridden with setTrackerFormatter()
        self.trackerFormatter = lambda x, y: "x=%d, y=%d" % (x, y)

        self.anyOpaqueItem = False

    def setTrackerFormatter(self, formatter):
        """Set the callable building the tracker text from plot coords (x, y)."""
        self.trackerFormatter = formatter

    def sizeHint(self):
        return QtCore.QSize(50, 50)

    def paintEvent(self, event):
        # paint back-to-front: grid background, data, ruler, border, tracker
        painter = QtGui.QPainter(self)
        self.drawBackground(painter)
        self.drawData(painter)
        self.drawRuler(painter)
        self.drawBorder(painter)
        self.drawTrackerText(painter)
        painter.end()

    def resizeEvent(self, event):
        # give the opportunity to the scales to adapt
        self.resized.emit(self.width(), self.height())

    def attach(self, item):
        """Add a plot item to be drawn on this canvas."""
        self.attachedItems.append(item)
        self.reviewOpaqueItems()

    def detach(self, item):
        """Remove a previously attached plot item."""
        self.attachedItems.remove(item)
        self.reviewOpaqueItems()

    def reviewOpaqueItems(self):
        """Recompute whether any attached item declares itself opaque.

        When an opaque item exists, drawBackground() skips drawing the grid
        since it would be covered anyway.
        """
        self.anyOpaqueItem = False
        for item in self.attachedItems:
            try:
                if item.isOpaque():
                    self.anyOpaqueItem = True
            except Exception:
                # FIX: was a bare 'except:', which also swallowed SystemExit
                # and KeyboardInterrupt. Items that do not implement
                # isOpaque() are simply treated as non-opaque.
                continue

    def drawData(self, painter):
        for item in self.attachedItems:
            item.draw(painter, self.horizontalScaleTransform, self.verticalScaleTransform, self.rect())

    def drawTrackerText(self, painter):
        """Draw the formatted plot coordinates next to the crosshair."""
        if self.ruler:
            painter.setRenderHint(QtGui.QPainter.Antialiasing)

            # convert the mouse position (pixels, y axis pointing down) to
            # plot coordinates
            x = self.horizontalScaleTransform.toPlot(self.mousex)
            y = self.verticalScaleTransform.toPlot(float(self.height() - self.mousey))
            text = self.trackerFormatter(x, y)

            # compute tracker bounding rect
            painter.setPen(QtCore.Qt.black)
            rect = painter.boundingRect(QtCore.QRect(self.mousex, self.mousey, 0, 0), QtCore.Qt.AlignLeft, text)

            # small offset so that it does not touch the rulers
            rect.translate(4, -(rect.height() + 4))

            # avoid crossing the top and right borders
            dx = - max(rect.x() + rect.width() - self.width(), 0)
            dy = - min(rect.y(), 0)
            rect.translate(dx, dy)

            # avoid crossing the left and bottom borders
            dx = - min(rect.x(), 0)
            dy = - max(rect.y() + rect.height() - self.height(), 0)
            rect.translate(dx, dy)

            # draw a white background behind the text
            painter.setPen(QtCore.Qt.NoPen)
            painter.setBrush(QtCore.Qt.white)
            painter.drawRect(rect)

            painter.setPen(QtCore.Qt.black)
            painter.drawText(rect, QtCore.Qt.AlignLeft, text)

    def drawBackground(self, painter):
        # an opaque item hides the grid entirely, so skip the work
        if self.anyOpaqueItem:
            return
        self.grid.draw(painter, self.horizontalScaleTransform, self.verticalScaleTransform, self.rect())

    def drawBorder(self, painter):
        """Draw a 1-pixel gray frame around the canvas."""
        w = self.width()
        h = self.height()
        rectPath = QtGui.QPainterPath()
        rectPath.moveTo(0, 0)
        rectPath.lineTo(0, h - 1)
        rectPath.lineTo(w - 1, h - 1)
        rectPath.lineTo(w - 1, 0)
        rectPath.closeSubpath()
        painter.setPen(QtGui.QPen(QtGui.QColor(QtCore.Qt.gray)))
        painter.drawPath(rectPath)

    def drawRuler(self, painter):
        """Draw the crosshair: one full-width and one full-height line."""
        if self.ruler:
            w = self.width()
            h = self.height()
            painter.setPen(QtGui.QPen(QtGui.QColor(QtCore.Qt.black)))
            painter.drawLine(self.mousex, 0, self.mousex, h)
            painter.drawLine(0, self.mousey, w, self.mousey)

    def mousePressEvent(self, event):
        self.lastPos = event.pos()
        self.mousex = event.x()
        self.mousey = event.y()
        self.ruler = True
        # ask for update so that the ruler is actually painted
        self.update()

    def mouseReleaseEvent(self, event):
        self.ruler = False
        # ask for update so that the ruler is actually erased
        self.update()

    def mouseMoveEvent(self, event):
        if event.buttons() & QtCore.Qt.LeftButton:
            self.mousex = event.x()
            self.mousey = event.y()
            self.update()

    def setGrid(self, xMajorTick, xMinorTick, yMajorTick, yMinorTick):
        self.grid.setGrid(xMajorTick, xMinorTick, yMajorTick, yMinorTick)
| gpl-3.0 |
florian-f/sklearn | sklearn/datasets/tests/test_mldata.py | 1 | 5233 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
# module-level temporary directory used as fetch_mldata's data_home by the
# setup/teardown fixtures below; None when no fixture is active
tmpdir = None
def setup_tmpdata():
    """Create a fresh temporary data home containing an empty 'mldata' dir."""
    global tmpdir
    base = tempfile.mkdtemp()
    os.makedirs(os.path.join(base, 'mldata'))
    tmpdir = base
def teardown_tmpdata():
    """Delete the temporary data home created by setup_tmpdata, if any."""
    if tmpdir is None:
        return
    shutil.rmtree(tmpdir)
def test_mldata_filename():
    """Dataset names must be converted to mldata.org's canonical filenames."""
    expected = {
        'datasets-UCI iris': 'datasets-uci-iris',
        'news20.binary': 'news20binary',
        'book-crossing-ratings-1.0': 'book-crossing-ratings-10',
        'Nile Water Level': 'nile-water-level',
        'MNIST (original)': 'mnist-original',
    }
    for raw_name in expected:
        assert_equal(mldata_filename(raw_name), expected[raw_name])
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """fetch_mldata should download, parse and cache a remote data set."""
    original_urlopen = datasets.mldata.urlopen
    # serve a fake 150x4 data set named 'mock' through a mocked urlopen
    fake_sets = {'mock': {'label': sp.ones((150,)), 'data': sp.ones((150, 4))}}
    datasets.mldata.urlopen = mock_mldata_urlopen(fake_sets)
    try:
        bunch = fetch_mldata('mock', data_home=tmpdir)
        for attr in ("COL_NAMES", "DESCR", "target", "data"):
            assert_in(attr, bunch)
        assert_equal(bunch.target.shape, (150,))
        assert_equal(bunch.data.shape, (150, 4))

        # unknown dataset names must surface the server's HTTP error
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = original_urlopen
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """A single-column data set is exposed as 'data' with no 'target'."""
    original_urlopen = datasets.mldata.urlopen
    try:
        name = 'onecol'
        # fake 2x3 data set served through a mocked urlopen
        matrix = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({name: {'x': matrix}})

        bunch = fetch_mldata(name, data_home=tmpdir)
        for attr in ("COL_NAMES", "DESCR", "data"):
            assert_in(attr, bunch)
        assert_not_in("target", bunch)
        assert_equal(bunch.data.shape, (2, 3))
        assert_array_equal(bunch.data, matrix)

        # transpose_data=False must keep the array in its on-disk layout
        bunch = fetch_mldata(name, transpose_data=False, data_home=tmpdir)
        assert_equal(bunch.data.shape, (3, 2))
    finally:
        datasets.mldata.urlopen = original_urlopen
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    # Check that fetch_mldata picks the 'data' and 'target' columns out of a
    # multi-column data set via each supported mechanism: conventional column
    # names, column order, numeric indices, and explicit names.
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default: columns named 'label'/'data' are mapped to
        # target/data; the second tuple element is the column ordering
        # reported by the mocked server
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })
        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)
        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        # extra columns come back transposed (same convention as 'data')
        assert_array_equal(dset.z, z.T)

        # by order: with no conventional names, the first column becomes
        # target and the second becomes data
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']),
        })
        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)
        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number: explicit numeric indices into the column ordering
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })
        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name: explicit column names select target/data directly
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
ActiveState/code | recipes/Python/576693_Ordered_Dictionary_for_Py24/recipe-576693.py | 85 | 8822 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # if __init__ is called again on an existing instance, keep the
            # sentinel and current ordering instead of resetting them
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # walk the linked list in insertion order, yielding the KEY slot
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # walk the linked list backwards via the PREV slot
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # emptying each link in place breaks the reference cycles
            # between neighbouring links
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # unlink the node just before the sentinel (most recent key)
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # unlink the node just after the sentinel (oldest key)
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        # 'self' is taken positionally out of *args so that keyword
        # arguments named 'self' or 'other' remain usable as dict keys
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        # the private sentinel distinguishes "no default" from default=None
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # guard against infinite recursion on self-referencing dicts; the
        # guard dict is keyed per (instance, thread)
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # drop the internal bookkeeping attributes; they are rebuilt on load
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        'od.__ne__(y) <==> od!=y'
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| mit |
NoBodyCam/TftpPxeBootBareMetal | nova/tests/test_nexenta.py | 9 | 11345 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for OpenStack Nova volume driver
"""
import base64
import urllib2
import nova.flags
import nova.test
from nova.volume import nexenta
from nova.volume.nexenta import jsonrpc
from nova.volume.nexenta import volume
# nova configuration flags object; the tests below read FLAGS.nexenta_host,
# FLAGS.nexenta_target_prefix, etc. after overriding them in setUp()
FLAGS = nova.flags.FLAGS
class TestNexentaDriver(nova.test.TestCase):
    """Unit tests for the Nexenta iSCSI volume driver.

    Every call to the Nexenta Management Server is intercepted: the driver's
    JSON-RPC proxy is replaced by a mox mock, so the tests only verify the
    sequence of NMS calls the driver is expected to make.
    """

    # fixture names and volume/snapshot reference dicts reused below
    TEST_VOLUME_NAME = 'volume1'
    TEST_VOLUME_NAME2 = 'volume2'
    TEST_SNAPSHOT_NAME = 'snapshot1'
    TEST_VOLUME_REF = {
        'name': TEST_VOLUME_NAME,
        'size': 1,
    }
    TEST_VOLUME_REF2 = {
        'name': TEST_VOLUME_NAME2,
        'size': 1,
    }
    TEST_SNAPSHOT_REF = {
        'name': TEST_SNAPSHOT_NAME,
        'volume_name': TEST_VOLUME_NAME,
    }

    def __init__(self, method):
        # delegates straight to the base TestCase constructor
        super(TestNexentaDriver, self).__init__(method)

    def setUp(self):
        """Configure nexenta flags and build the driver around a mocked NMS."""
        super(TestNexentaDriver, self).setUp()
        self.flags(
            nexenta_host='1.1.1.1',
            nexenta_volume='nova',
            nexenta_target_prefix='iqn:',
            nexenta_target_group_prefix='nova/',
            nexenta_blocksize='8K',
            nexenta_sparse=True,
        )
        # one mock per NMS API namespace used by the driver
        self.nms_mock = self.mox.CreateMockAnything()
        for mod in ['volume', 'zvol', 'iscsitarget',
                    'stmf', 'scsidisk', 'snapshot']:
            setattr(self.nms_mock, mod, self.mox.CreateMockAnything())
        # any NexentaJSONProxy the driver constructs resolves to the mock
        self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
                       lambda *_, **__: self.nms_mock)
        self.drv = volume.NexentaDriver()
        self.drv.do_setup({})

    def test_setup_error(self):
        # setup check passes when the configured volume exists on the NMS
        self.nms_mock.volume.object_exists('nova').AndReturn(True)
        self.mox.ReplayAll()
        self.drv.check_for_setup_error()

    def test_setup_error_fail(self):
        # a missing volume must be reported as a LookupError
        self.nms_mock.volume.object_exists('nova').AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(LookupError, self.drv.check_for_setup_error)

    def test_local_path(self):
        # local_path is intentionally unsupported for an iSCSI-only driver
        self.assertRaises(NotImplementedError, self.drv.local_path, '')

    def test_create_volume(self):
        self.nms_mock.zvol.create('nova/volume1', '1G', '8K', True)
        self.mox.ReplayAll()
        self.drv.create_volume(self.TEST_VOLUME_REF)

    def test_delete_volume(self):
        self.nms_mock.zvol.destroy('nova/volume1', '')
        self.mox.ReplayAll()
        self.drv.delete_volume(self.TEST_VOLUME_REF)

    def test_create_snapshot(self):
        self.nms_mock.zvol.create_snapshot('nova/volume1', 'snapshot1', '')
        self.mox.ReplayAll()
        self.drv.create_snapshot(self.TEST_SNAPSHOT_REF)

    def test_create_volume_from_snapshot(self):
        self.nms_mock.zvol.clone('nova/volume1@snapshot1', 'nova/volume2')
        self.mox.ReplayAll()
        self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2,
                                             self.TEST_SNAPSHOT_REF)

    def test_delete_snapshot(self):
        self.nms_mock.snapshot.destroy('nova/volume1@snapshot1', '')
        self.mox.ReplayAll()
        self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)

    # The NMS call sequence performed by create_export, in order.  Each entry
    # is (api namespace, method name, args tuple, "already exists" error text
    # used when the step is stubbed to fail).
    _CREATE_EXPORT_METHODS = [
        ('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
            u'Unable to create iscsi target\n'
            u' iSCSI target iqn.1986-03.com.sun:02:nova-volume1 already'
            u' configured\n'
            u' itadm create-target failed with error 17\n',
         ),
        ('stmf', 'create_targetgroup', ('nova/volume1',),
            u'Unable to create targetgroup: stmfadm: nova/volume1:'
            u' already exists\n',
         ),
        ('stmf', 'add_targetgroup_member', ('nova/volume1', 'iqn:volume1'),
            u'Unable to add member to targetgroup: stmfadm:'
            u' iqn.1986-03.com.sun:02:nova-volume1: already exists\n',
         ),
        ('scsidisk', 'create_lu', ('nova/volume1', {}),
            u"Unable to create lu with zvol 'nova/volume1':\n"
            u" sbdadm: filename /dev/zvol/rdsk/nova/volume1: in use\n",
         ),
        ('scsidisk', 'add_lun_mapping_entry', ('nova/volume1', {
            'target_group': 'nova/volume1', 'lun': '0'}),
            u"Unable to add view to zvol 'nova/volume1' (LUNs in use: ):\n"
            u" stmfadm: view entry exists\n",
         ),
    ]

    def _stub_export_method(self, module, method, args, error, fail=False):
        """Record one expected NMS call; optionally make it raise *error*."""
        m = getattr(self.nms_mock, module)
        m = getattr(m, method)
        mock = m(*args)
        if fail:
            mock.AndRaise(nexenta.NexentaException(error))

    def _stub_all_export_methods(self, fail=False):
        # record the full export call sequence, all succeeding or all failing
        for params in self._CREATE_EXPORT_METHODS:
            self._stub_export_method(*params, fail=fail)

    def test_create_export(self):
        self._stub_all_export_methods()
        self.mox.ReplayAll()
        retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
        # provider_location format: "<host>:<port>,1 <target_prefix><name>"
        self.assertEquals(retval,
            {'provider_location':
                '%s:%s,1 %s%s' % (FLAGS.nexenta_host,
                                  FLAGS.nexenta_iscsi_target_portal_port,
                                  FLAGS.nexenta_target_prefix,
                                  self.TEST_VOLUME_NAME)})

    def __get_test(i):
        # build a test method where export step i fails after steps 0..i-1
        # succeeded; the driver must surface the NexentaException
        def _test_create_export_fail(self):
            for params in self._CREATE_EXPORT_METHODS[:i]:
                self._stub_export_method(*params)
            self._stub_export_method(*self._CREATE_EXPORT_METHODS[i],
                                     fail=True)
            self.mox.ReplayAll()
            self.assertRaises(nexenta.NexentaException,
                              self.drv.create_export, {}, self.TEST_VOLUME_REF)
        return _test_create_export_fail

    # Generate one test_create_export_fail_<i> method per export step.
    # Mutating locals() works here because this runs inside the class body,
    # where locals() is the namespace dict used to build the class.
    for i in range(len(_CREATE_EXPORT_METHODS)):
        locals()['test_create_export_fail_%d' % i] = __get_test(i)

    def test_ensure_export(self):
        # ensure_export must tolerate every step failing with "already exists"
        self._stub_all_export_methods(fail=True)
        self.mox.ReplayAll()
        self.drv.ensure_export({}, self.TEST_VOLUME_REF)

    def test_remove_export(self):
        self.nms_mock.scsidisk.delete_lu('nova/volume1')
        self.nms_mock.stmf.destroy_targetgroup('nova/volume1')
        self.nms_mock.iscsitarget.delete_target('iqn:volume1')
        self.mox.ReplayAll()
        self.drv.remove_export({}, self.TEST_VOLUME_REF)

    def test_remove_export_fail_0(self):
        # a failing destroy_targetgroup must not abort the teardown sequence
        self.nms_mock.scsidisk.delete_lu('nova/volume1')
        self.nms_mock.stmf.destroy_targetgroup('nova/volume1').AndRaise(
            nexenta.NexentaException())
        self.nms_mock.iscsitarget.delete_target('iqn:volume1')
        self.mox.ReplayAll()
        self.drv.remove_export({}, self.TEST_VOLUME_REF)

    def test_remove_export_fail_1(self):
        # a failing delete_target must also be swallowed by remove_export
        self.nms_mock.scsidisk.delete_lu('nova/volume1')
        self.nms_mock.stmf.destroy_targetgroup('nova/volume1')
        self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise(
            nexenta.NexentaException())
        self.mox.ReplayAll()
        self.drv.remove_export({}, self.TEST_VOLUME_REF)
class TestNexentaJSONRPC(nova.test.TestCase):
    """Unit tests for the Nexenta JSON-RPC proxy.

    urllib2.Request/urlopen are mocked, so each test only checks the exact
    request body/headers sent and how the proxy interprets the response.
    """

    URL = 'http://example.com/'
    URL_S = 'https://example.com/'
    USER = 'user'
    PASSWORD = 'password'
    # HTTP basic-auth headers the proxy is expected to send
    HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode(
        ':'.join((USER, PASSWORD))),),
        'Content-Type': 'application/json'}
    REQUEST = 'the request'

    def setUp(self):
        """Build an auto-retrying proxy and record one urlopen expectation."""
        super(TestNexentaJSONRPC, self).setUp()
        self.proxy = jsonrpc.NexentaJSONProxy(
            self.URL, self.USER, self.PASSWORD, auto=True)
        self.mox.StubOutWithMock(urllib2, 'Request', True)
        self.mox.StubOutWithMock(urllib2, 'urlopen')
        self.resp_mock = self.mox.CreateMockAnything()
        self.resp_info_mock = self.mox.CreateMockAnything()
        # every test performs at least one request/response round-trip
        self.resp_mock.info().AndReturn(self.resp_info_mock)
        urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)

    def test_call(self):
        # a plain call serializes to a JSON-RPC body with null object/method
        urllib2.Request(self.URL,
            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
            self.HEADERS).AndReturn(self.REQUEST)
        self.resp_info_mock.status = ''
        self.resp_mock.read().AndReturn(
            '{"error": null, "result": "the result"}')
        self.mox.ReplayAll()
        result = self.proxy('arg1', 'arg2')
        self.assertEquals("the result", result)

    def test_call_deep(self):
        # attribute chaining maps to the "object"/"method" JSON fields
        urllib2.Request(self.URL,
            '{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
            ' "method": "meth"}',
            self.HEADERS).AndReturn(self.REQUEST)
        self.resp_info_mock.status = ''
        self.resp_mock.read().AndReturn(
            '{"error": null, "result": "the result"}')
        self.mox.ReplayAll()
        result = self.proxy.obj1.subobj.meth('arg1', 'arg2')
        self.assertEquals("the result", result)

    def test_call_auto(self):
        # with auto=True an 'EOF in headers' status triggers a retry on https
        urllib2.Request(self.URL,
            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
            self.HEADERS).AndReturn(self.REQUEST)
        urllib2.Request(self.URL_S,
            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
            self.HEADERS).AndReturn(self.REQUEST)
        self.resp_info_mock.status = 'EOF in headers'
        self.resp_mock.read().AndReturn(
            '{"error": null, "result": "the result"}')
        urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
        self.mox.ReplayAll()
        result = self.proxy('arg1', 'arg2')
        self.assertEquals("the result", result)

    def test_call_error(self):
        # a JSON-RPC error payload must surface as NexentaJSONException
        urllib2.Request(self.URL,
            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
            self.HEADERS).AndReturn(self.REQUEST)
        self.resp_info_mock.status = ''
        self.resp_mock.read().AndReturn(
            '{"error": {"message": "the error"}, "result": "the result"}')
        self.mox.ReplayAll()
        self.assertRaises(jsonrpc.NexentaJSONException,
                          self.proxy, 'arg1', 'arg2')

    def test_call_fail(self):
        # with auto disabled, 'EOF in headers' is a hard failure (no retry)
        urllib2.Request(self.URL,
            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
            self.HEADERS).AndReturn(self.REQUEST)
        self.resp_info_mock.status = 'EOF in headers'
        self.proxy.auto = False
        self.mox.ReplayAll()
        self.assertRaises(jsonrpc.NexentaJSONException,
                          self.proxy, 'arg1', 'arg2')
| apache-2.0 |
humitos/argentinaenpython.com.ar | web/plugins/nanogallery_directive/nanogallery_directive.py | 2 | 8707 | # -*- coding: utf-8 -*-
# Copyright © 2015 Manuel Kaufmann
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division, print_function, unicode_literals
import os
from collections import OrderedDict
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola.plugin_categories import LateTask, RestExtension
from nikola.utils import copy_tree
def flag_to_boolean(value):
    """Parse a docutils flag-style option: mere presence means 'true'.

    The received *value* is deliberately ignored; nanoGALLERY expects the
    literal string 'true' rather than a Python boolean.
    """
    return 'true'
class Plugin(RestExtension, LateTask):
    """Nikola plugin registering the nanogallery directive and shipping its assets."""

    name = 'nanogallery_directive'

    def set_site(self, site):
        """Hook the nanoGALLERY CSS/JS into every page and expose the site."""
        self.site = site
        site.template_hooks['extra_head'].append(
            '<link href="/assets/css/nanogallery.min.css" rel="stylesheet" type="text/css">'
        )
        site.template_hooks['body_end'].append(
            '<script type="text/javascript" src="/assets/js/jquery.nanogallery.min.js"></script>'
        )
        # the directive needs the site for templates and configuration
        NanoGallery.site = site
        return super(Plugin, self).set_site(site)

    def gen_tasks(self):
        """Yield doit tasks copying the bundled asset files into the output."""
        output_folder = self.site.config['OUTPUT_FOLDER']
        asset_folder = os.path.join(os.path.dirname(__file__), 'files')
        for task in copy_tree(asset_folder, output_folder):
            task['basename'] = str(self.name)
            yield task
class NanoGallery(Directive):
    """Restructured text directive inserting a nanoGALLERY image gallery.

    The directive body lists one image path per line; each image must have a
    pre-generated thumbnail named ``<stem>.thumbnail<ext>`` next to it.
    Options mirror the nanoGALLERY settings
    (http://nanogallery.brisbois.fr/#docGeneralSettings).  reST forces
    option names to lowercase; _sanitize_options() maps them back to the
    camelCase names the JavaScript initializer expects.
    """

    option_spec = {
        'theme': directives.unchanged,
        'colorscheme': directives.unchanged,
        'rtl': flag_to_boolean,
        'maxitemsperline': directives.nonnegative_int,
        'maxwidth': directives.nonnegative_int,
        'paginationdots': flag_to_boolean,
        'paginationmaxlinesperpage': directives.nonnegative_int,
        'paginationswipe': flag_to_boolean,
        'locationhash': flag_to_boolean,
        'itemsselectable': flag_to_boolean,
        'showcheckboxes': flag_to_boolean,
        'checkboxstyle': directives.unchanged,
        'keepselection': flag_to_boolean,
        'i18n': directives.unchanged,
        'lazybuild': directives.unchanged,
        'lazybuildtreshold': directives.nonnegative_int,
        'openonstart': directives.unchanged,
        'breakpointsizesm': directives.nonnegative_int,
        'breakpointsizeme': directives.nonnegative_int,
        'breakpointsizela': directives.nonnegative_int,
        'breakpointsizexl': directives.nonnegative_int,
        'thumbnailheight': directives.nonnegative_int,
        'thumbnailwidth': directives.nonnegative_int,
        'thumbnailalignment': directives.unchanged,
        'thumbnailgutterwidth': directives.nonnegative_int,
        'thumbnailgutterheight': directives.nonnegative_int,
        'thumbnailopenimage': flag_to_boolean,
        'thumbnaillabel': directives.unchanged,
        'thumbnailhovereffect': directives.unchanged,
        'touchanimation': flag_to_boolean,
        'touchautoopendelay': directives.nonnegative_int,
        'thumbnaildisplayinterval': directives.nonnegative_int,
        'thumbnaildisplaytransition': flag_to_boolean,
        'thumbnaillazyload': flag_to_boolean,
        'thumbnaillazyloadtreshold': directives.nonnegative_int,
        'thumbnailadjustlastrowheight': flag_to_boolean,
        'thumbnailalbumdisplayimage': flag_to_boolean,
    }
    has_content = True

    def __init__(self, *args, **kwargs):
        super(NanoGallery, self).__init__(*args, **kwargs)
        # rebuild pages using this directive whenever the site-wide
        # NANOGALLERY_OPTIONS configuration changes
        self.state.document.settings.record_dependencies.add(
            '####MAGIC####CONFIG:NANOGALLERY_OPTIONS')

    def _sanitize_options(self):
        """Merge built-in defaults, site config and per-directive options.

        Returns a dict keyed by the camelCase option names used by the
        nanoGALLERY JavaScript initializer.

        Raises:
            ValueError: if an option name is not declared in option_spec.
        """
        THUMBNAIL_SIZE = self.site.config.get('THUMBNAIL_SIZE', 128)
        defaults = {
            # 'theme': 'clean',
            # 'maxitemsperline': 4,
            # 'thumbnailgutterwidth': 10,
            # 'thumbnailgutterheight': 10,
            # 'locationhash': 'false',
            # 'colorscheme': 'lightBackground',
            # 'thumbnailheight': 'auto',
            # 'thumbnailwidth': THUMBNAIL_SIZE,
            # 'thumbnailhovereffect': 'imageScale150',
            # 'thumbnaillabel': {'display': 'false'},
        }
        user_defaults = self.site.config.get('NANOGALLERY_OPTIONS', {})
        defaults.update(user_defaults)
        defaults.update(self.options)

        # FIX: validate explicitly instead of relying on 'assert', which is
        # silently stripped when Python runs with -O and gave no useful
        # message (this resolves the former inline TODO).
        for option in defaults:
            if option not in self.option_spec:
                raise ValueError(
                    "Unsupported nanogallery option: %r" % (option,))

        # We need to convert all the lowercase options (rst makes them
        # lowercase automatically) to the correct ones supported by the
        # nanoGALLERY Javascript function
        js_options = OrderedDict([
            ('theme', 'theme'),
            ('colorscheme', 'colorScheme'),
            ('rtl', 'RTL'),
            ('maxitemsperline', 'maxItemsPerLine'),
            ('maxwidth', 'maxWidth'),
            ('paginationdots', 'paginationDots'),
            ('paginationmaxlinesperpage', 'paginationMaxLinesPerPage'),
            ('paginationswipe', 'paginationSwipe'),
            ('locationhash', 'locationHash'),
            ('itemsselectable', 'itemsSelectable'),
            ('showcheckboxes', 'showCheckboxes'),
            ('checkboxstyle', 'checkboxStyle'),
            ('keepselection', 'keepSelection'),
            ('i18n', 'i18n'),
            ('lazybuild', 'lazyBuild'),
            ('lazybuildtreshold', 'lazyBuildTreshold'),
            ('openonstart', 'openOnStart'),
            ('breakpointsizesm', 'breakpointSizeSM'),
            ('breakpointsizeme', 'breakpointSizeME'),
            ('breakpointsizela', 'breakpointSizeLA'),
            ('breakpointsizexl', 'breakpointSizeXL'),
            ('thumbnailheight', 'thumbnailHeight'),
            ('thumbnailwidth', 'thumbnailWidth'),
            ('thumbnailalignment', 'thumbnailAlignment'),
            ('thumbnailgutterwidth', 'thumbnailGutterWidth'),
            ('thumbnailgutterheight', 'thumbnailGutterHeight'),
            ('thumbnailopenimage', 'thumbnailOpenImage'),
            ('thumbnaillabel', 'thumbnailLabel'),
            ('thumbnailhovereffect', 'thumbnailHoverEffect'),
            ('touchanimation', 'touchAnimation'),
            ('touchautoopendelay', 'touchAutoOpenDelay'),
            ('thumbnaildisplayinterval', 'thumbnailDisplayInterval'),
            ('thumbnaildisplaytransition', 'thumbnailDisplayTransition'),
            ('thumbnaillazyload', 'thumbnailLazyLoad'),
            ('thumbnaillazyloadtreshold', 'thumbnailLazyLoadTreshold'),
            ('thumbnailadjustlastrowheight', 'thumbnailAdjustLastRowHeight'),
            ('thumbnailalbumdisplayimage', 'thumbnailAlbumDisplayImage')
        ])
        return dict((js_options[k], defaults[k]) for k in defaults)

    def run(self):
        """Render the gallery template and return it as a raw HTML node."""
        if len(self.content) == 0:
            # FIX: docutils requires run() to return a list of nodes; the
            # previous implicit 'return None' crashed the writer on an
            # empty directive body.
            return []

        image_list = [t for t in self.content]
        # thumbnail naming convention: '<stem>.thumbnail<ext>'
        thumbs = ['.thumbnail'.join(os.path.splitext(p)) for p in image_list]

        photo_array = []
        for img, thumb in zip(image_list, thumbs):
            photo_array.append({
                'href': img,
                'data': {
                    'ngthumb': thumb,
                },
            })

        output = self.site.template_system.render_template(
            'embedded-nanogallery.tmpl', None, {
                'nanogallery_content': photo_array,
                'options': self._sanitize_options()
            })
        return [nodes.raw('', output, format='html')]
# expose the directive to reST documents as '.. nanogallery::'
directives.register_directive('nanogallery', NanoGallery)
| gpl-2.0 |
mtp1376/youtube-dl | youtube_dl/extractor/sexykarma.py | 42 | 4400 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
parse_duration,
int_or_none,
)
class SexyKarmaIE(InfoExtractor):
    """Extractor for sexykarma.com and watchindianporn.net videos.

    Both sites share the same page markup, so a single extractor (and a
    single _VALID_URL alternation) covers them.
    """
    IE_DESC = 'Sexy Karma and Watch Indian Porn'
    _VALID_URL = r'https?://(?:www\.)?(?:sexykarma\.com|watchindianporn\.net)/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
    # NOTE: the 'thumbnail' patterns below are raw strings; the original
    # non-raw literals contained the invalid escape sequence '\.'
    # (SyntaxWarning on modern Python).  The string values are unchanged.
    _TESTS = [{
        'url': 'http://www.sexykarma.com/gonewild/video/taking-a-quick-pee-yHI70cOyIHt.html',
        'md5': 'b9798e7d1ef1765116a8f516c8091dbd',
        'info_dict': {
            'id': 'yHI70cOyIHt',
            'display_id': 'taking-a-quick-pee',
            'ext': 'mp4',
            'title': 'Taking a quick pee.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'wildginger7',
            'upload_date': '20141008',
            'duration': 22,
            'view_count': int,
            'comment_count': int,
            'categories': list,
        }
    }, {
        'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html',
        'md5': 'dd216c68d29b49b12842b9babe762a5d',
        'info_dict': {
            'id': '8Id6EZPbuHf',
            'display_id': 'pot-pixie-tribute',
            'ext': 'mp4',
            'title': 'pot_pixie tribute',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'banffite',
            'upload_date': '20141013',
            'duration': 16,
            'view_count': int,
            'comment_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }, {
        'url': 'http://www.watchindianporn.net/video/desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number-dW2mtctxJfs.html',
        'md5': '9afb80675550406ed9a63ac2819ef69d',
        'info_dict': {
            'id': 'dW2mtctxJfs',
            'display_id': 'desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number',
            'ext': 'mp4',
            'title': 'Desi dancer namrata stripping completely nude and dancing on a hot number',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Don',
            'upload_date': '20140213',
            'duration': 83,
            'view_count': int,
            'comment_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }]

    def _real_extract(self, url):
        """Extract metadata and the media URL from a watch page."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        # The media URL is embedded in an escape() call inside a script tag.
        video_url = self._html_search_regex(
            r"url: escape\('([^']+)'\)", webpage, 'url')

        title = self._html_search_regex(
            r'<h2 class="he2"><span>(.*?)</span>',
            webpage, 'title')
        thumbnail = self._html_search_regex(
            r'<span id="container"><img\s+src="([^"]+)"',
            webpage, 'thumbnail', fatal=False)

        uploader = self._html_search_regex(
            r'class="aupa">\s*(.*?)</a>',
            webpage, 'uploader')
        upload_date = unified_strdate(self._html_search_regex(
            r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))

        duration = parse_duration(self._search_regex(
            r'<td>Time:\s*</td>\s*<td align="right"><span>\s*(.+?)\s*</span>',
            webpage, 'duration', fatal=False))

        view_count = int_or_none(self._search_regex(
            r'<td>Views:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
            webpage, 'view count', fatal=False))
        comment_count = int_or_none(self._search_regex(
            r'<td>Comments:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
            webpage, 'comment count', fatal=False))

        categories = re.findall(
            r'<a href="[^"]+/search/video/desi"><span>([^<]+)</span></a>',
            webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
            # Both sites host adult content.
            'age_limit': 18,
        }
| unlicense |
timverhoeven/python | superidx.py | 1 | 6116 | #!/usr/bin/env python
from app import *
from command import *
from configitem import polleritem
from fileutils import chomp
from nextfeed import nextfeed2
from getopt import getopt
def getdummyid(filename, regex):
    """Return the first capture group of `regex` matched against the last
    line of `filename`, or None when nothing (with a group) matches."""
    lastline = chomp(tail(filename))
    match = re.search(regex, lastline)
    if match and match.lastindex > 0:
        return match.group(1)
    return None
class superidxcfg(polleritem):
    """Poller configuration for a superidx import entry.

    Extends polleritem with import-specific options, filling in a default
    for every option not supplied by the YAML config, plus the target
    database name.
    """
    def __init__(self, basepath, paths, filemask, database, **kwargs):
        polleritem.__init__(self, basepath, paths, filemask, **kwargs)
        self.__setdefault__('_checkfor', ['.ERR', '.ACK'])
        self.__setdefault__('_dummyrec', None) #Do not check for dummy records
        self.__setdefault__('_dtformat', '%Y%m%d%H%M%S')
        self.__setdefault__('_funds', []) #No valid funds, ie. nothing will process
        self.__setdefault__('_priority', 0) #Lowest priority
        self.__setdefault__('_timeout', 0) #Off, process will run until infinity and beyond...
        # Name of the database the import feeds into.
        self._database = database
class superidx(appitem):
    """Imports a single index file into a database feed.

    Workflow per file: create done/error directories, validate the file
    (stale result files, optional dummy-record id, fund code), then run the
    configured import command against the next free feed and move the file
    (plus any .ERR/.ACK result files) into done/ or error/ accordingly.
    """
    def __init__(self, key, config, **kwargs):
        # `key` is the path of the file to import; `config` a superidxcfg.
        appitem.__init__(self, key, config, **kwargs)
    def __initialise__(self):
        """Resolve and create the done/error directories next to the file."""
        filename = self._key
        config = self._config
        self._rootpath = os.path.dirname(filename)
        self._donepath = "%s/%s" % (self._rootpath, config._donepath)
        self._errorpath = "%s/%s" % (self._rootpath, config._errorpath)
        makedir(self._donepath)
        makedir(self._errorpath)
    def __validate__(self):
        """Pre-import checks; returns 0 on success or a positive error code.

        3 = stale result file(s) present, 4 = dummy record id missing,
        5 = fund code not configured for the database.  On any failure the
        input file is moved to the error directory.
        """
        filename = self._key
        config = self._config
        skip = False
        # A leftover .ERR/.ACK from a previous run would mask this run's result.
        for ext in config._checkfor:
            checkfile = replaceext(filename, ext)
            if exists(checkfile):
                self.loge("Result file '%s' already exists before process was run." % (checkfile))
                relmoveto(checkfile, relpath=config._errorpath, timestamp=True)
                skip = True
        if skip:
            relmoveto(filename, relpath=config._errorpath, timestamp=True)
            return 3
        if config._dummyrec:
            dummyid = getdummyid(filename, config._dummyrec)
            if dummyid == None:
                relmoveto(filename, relpath=config._errorpath, timestamp=True)
                self.loge(msg="Could not match dummy record id from '%s' with '%s'." % (filename, config._dummyrec))
                return 4
            else:
                self.logi("Dummy record id '%s'." % (dummyid))
                self._dummyid = dummyid
        # Fund code is encoded in the filename at positions 3-4.
        basefile = os.path.basename(filename)
        self._fund = basefile[3:5]
        if not self._fund in config._funds:
            relmoveto(filename, relpath=config._errorpath, timestamp=True)
            self.loge(msg="'%s' is not a valid fund for '%s'." % (self._fund, config._database))
            return 5
        return 0
    def process(self, tid=None):
        """Validate then import the file; returns 0 on success, <0 on failure."""
        filename = self._key
        config = self._config
        self.logi("Processing '%s' (%s)." % (filename, config._name))
        self.__initialise__()
        # Validation codes are reported to the caller as negative values.
        valrc = self.__validate__() * -1;
        if valrc != 0:
            self.loge("File '%s' import failed (%d)." % (filename, valrc))
            return valrc
        try:
            with nextfeed2(config._database) as (hipid, feed):
                # Expose run-time values to the command template substitution.
                config['_feed'] = feed
                config['_filename'] = filename
                config['_fund'] = self._fund
                self.logd("Using feed '%s:%s'." % (feed, hipid))
                exp = chomp(self.subst_item('_exp'))
                proc = command(cmd=exp, devnull=True, timeout=config._timeout)
                self.logd("Running '%s'." % (exp))
                rc, _ = proc.execute()
                if rc != 0:
                    relmoveto(filename, relpath=config._errorpath, timestamp=True)
                    # rc == -1 is the command wrapper's timeout indicator.
                    if rc != -1:
                        self.loge("File '%s' import failed (%d)." % (filename, rc))
                    else:
                        self.loge("File '%s' import timed out after '%d' second(s)." % (filename, config._timeout))
                else:
                    # Keep a timestamped copy of each result file, then
                    # archive the input file itself.
                    for ext in config._checkfor:
                        checkfile = replaceext(filename, ext)
                        if exists(checkfile):
                            relcopyto(checkfile, relpath=config._donepath, timestamp=True)
                    relmoveto(filename, relpath=config._donepath, timestamp=True)
                    self.logi("File '%s' imported successfully (%d)." % (filename, rc))
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # any error raised *inside* the with-block, not just feed-lookup
            # failures — consider narrowing the exception type.
            rc =-2
            self.loge("Error getting next feed user for database '%s'." % config._database)
            relmoveto(filename, relpath=config._errorpath, timestamp=True)
        return rc
if __name__ == '__main__':
    def usage():
        """Print command-line help and exit with status 1."""
        p = prog()
        # NOTE(review): the example paths end in ".ym;" — presumably a typo
        # for ".yml"; confirm against the real config filename.
        print '''
%s:
-------------------------------------------------------------------------------
Usage:
------
%s --config <filename> [--sections <s1, s2, ...>] [--loglevel <l>]
[--threads] [--daemon]
Example:
--------
%s -c /admin/etc/super_idx_import.ym; -s database=live -t -d
%s --config /admin/etc/super_idx_import.ym; --sections database=live
--threads --daemon
''' % (p, p, p, p)
        sys.exit(1)
    # Option defaults.
    config = None
    loglevel = INFO
    sections = []
    threads = False
    daemon = False
    try:
        opts, args = getopt(sys.argv[1:], "c:s:l:td", ("config=", "sections=", "loglevel=", "threads", "daemon"))
    except:
        usage()
    for k,v in opts:
        if k in ("-c", "--config"): config=v
        if k in ("-s", "--sections"): sections=re.split('\s*,\s*', v)
        if k in ("-l", "--loglevel"): loglevel=v
        if k in ("-t", "--threads"): threads=True
        if k in ("-d", "--daemon"): daemon=True
    # A config file is mandatory.
    if config == None:
        usage()
    # Run the poller application over the configured sections.
    with yamlapp(filename=config, cls=superidx, configcls=superidxcfg, sections=sections, loglevel=int(loglevel), \
         logcolour=True, usethreads=threads, daemon=daemon) as a:
        a.run()
| gpl-2.0 |
pretix/cleanerversion | versions_tests/tests/test_utils.py | 2 | 4313 | from unittest import skipUnless
from django import VERSION
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.db import IntegrityError
from versions_tests.models import ChainStore, Color
from versions.util.postgresql import get_uuid_like_indexes_on_table
# True when the running Django is at least 1.7; the tests below need it.
AT_LEAST_17 = VERSION[:2] >= (1, 7)
@skipUnless(AT_LEAST_17 and connection.vendor == 'postgresql', "Postgresql-specific test")
class PostgresqlVersionUniqueTests(TransactionTestCase):
    """Checks version-aware unique constraints on PostgreSQL.

    TransactionTestCase is required because IntegrityError aborts the
    current transaction.
    """
    def setUp(self):
        self.red = Color.objects.create(name='red')
        self.green = Color.objects.create(name='green')
        self.black = Color.objects.create(name='black')
        self.yellow = Color.objects.create(name='yellow')
        # - only one store with the same name and subchain_id can exist in a single city
        # - no two stores can share the same door_frame_color and door_color
        store = {
            'subchain_id': 1,
            'city': 'Santa Barbara',
            'name': 'Barbara style',
            'opening_hours': '9-9 everyday',
            'door_frame_color': self.red,
            'door_color': self.black,
        }
        self.sb1 = ChainStore.objects.create(**store)
    def test_version_unique(self):
        """Unique constraints apply only to current (non-deleted) versions."""
        # It should not be possible to create another store with the same name, city, and subchain_id
        with self.assertRaises(IntegrityError):
            sb2 = ChainStore.objects.create(
                subchain_id = self.sb1.subchain_id,
                city = self.sb1.city,
                name = self.sb1.name,
                door_frame_color = self.sb1.door_frame_color,
                door_color = self.green
            )
        # It should not be possible to create another store with the same door and door_frame color
        with self.assertRaises(IntegrityError):
            sb3 = ChainStore.objects.create(
                subchain_id = self.sb1.subchain_id,
                city = self.sb1.city,
                name = "Bearded Bob's style",
                door_frame_color = self.sb1.door_frame_color,
                door_color = self.sb1.door_color
            )
        # It should be possible to create objects as long as they follow the unique constraints, though:
        sb4 = ChainStore.objects.create(
            subchain_id = self.sb1.subchain_id,
            city = self.sb1.city,
            name = "Bearded Bob's style",
            door_frame_color = self.sb1.door_frame_color,
            door_color = self.green
        )
        sb5 = ChainStore.objects.create(
            subchain_id = sb4.subchain_id + 1,
            city = sb4.city,
            name = sb4.name,
            door_frame_color = sb4.door_frame_color,
            door_color = self.yellow
        )
        # If a version is soft-deleted, it should be possible to create a new object with the
        # value of that old version
        sb4.delete()
        sb6 = ChainStore.objects.create(
            subchain_id = sb4.subchain_id,
            city = sb4.city,
            name = sb4.name,
            door_frame_color = sb4.door_frame_color,
            door_color = sb4.door_color
        )
    def test_identity_unique(self):
        """Two *current* versions may never share the same identity."""
        c = Color.objects.create(name='sky blue')
        c.identity = self.green.identity
        # It should not be possible to have two "current" objects with the same identity:
        with self.assertRaises(IntegrityError):
            c.save()
@skipUnless(AT_LEAST_17 and connection.vendor == 'postgresql', "Postgresql-specific test")
class PostgresqlUuidLikeIndexesTest(TestCase):
    """Verifies that varchar-pattern indexes on uuid columns were removed."""
    def test_no_like_indexes_on_uuid_columns(self):
        # Django creates like indexes on char columns. In Django 1.7.x and below, there is no
        # support for native uuid columns, so CleanerVersion uses a CharField to store the
        # uuid values. For postgresql, Django creates special indexes for char fields so that
        # like searches (e.g. WHERE foo like '%bar') are fast.
        # Those indexes are not going to be used in our case, and extra indexes will slow down
        # updates and inserts. So, they should have been removed by the post_migrate handler in
        # versions_tests.apps.VersionsTestsConfig.ready.
        self.assertEqual(0, len(get_uuid_like_indexes_on_table(ChainStore)))
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/subnet_association.py | 1 | 1285 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubnetAssociation(Model):
    """Network interface and its custom security rules.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Subnet ID.
    :vartype id: str
    :param security_rules: Collection of custom security rules.
    :type security_rules:
     list[~azure.mgmt.network.v2016_09_01.models.SecurityRule]
    """

    # Serialization constraints enforced by msrest.
    _validation = {
        'id': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
    }

    def __init__(self, security_rules=None):
        super(SubnetAssociation, self).__init__()
        # 'id' is read-only (see _validation); the service populates it.
        self.id = None
        self.security_rules = security_rules
| mit |
dkluffy/dkluff-code | code/opencv/multiscale-template-matching/imutils.py | 15 | 1498 | # Import the necessary packages
import numpy as np
import cv2
def translate(image, x, y):
    """Shift `image` x pixels horizontally and y pixels vertically."""
    # 2x3 affine matrix encoding a pure translation.
    transform = np.float32([[1, 0, x], [0, 1, y]])
    height, width = image.shape[:2]
    return cv2.warpAffine(image, transform, (width, height))
def rotate(image, angle, center=None, scale=1.0):
    """Rotate `image` by `angle` degrees about `center` (defaults to the
    image centre), optionally scaling by `scale`."""
    (h, w) = image.shape[:2]
    if center is None:
        # Kept as plain division to preserve the original semantics.
        center = (w / 2, h / 2)
    rotation = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(image, rotation, (w, h))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize `image` to the given width OR height, keeping aspect ratio.

    With neither dimension supplied the image is returned unchanged; when
    both are supplied, `width` wins and `height` is ignored (matching the
    original behaviour).
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Scale so the result has exactly the requested height.
        ratio = height / float(h)
        dim = (int(w * ratio), height)
    else:
        # Scale so the result has exactly the requested width.
        ratio = width / float(w)
        dim = (width, int(h * ratio))
    return cv2.resize(image, dim, interpolation=inter)
RethinkRobotics/intera_sdk | intera_interface/src/intera_dataflow/weakrefset.py | 1 | 6431 | # Copyright (C) 2010 Michael Foord
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# https://pypi.python.org/pypi/weakrefset
from _weakref import ref
__all__ = ['WeakSet']
__version__ = '1.0.0'
class _IterationGuard(object):
    """Context manager that delays removals from a weak container.

    While any guard is active the container defers member removals, so
    iteration never sees the underlying set change size mid-loop.
    """
    # This context manager registers itself in the current iterators of the
    # weak container, such as to delay all removals until the context manager
    # exits.
    # This technique should be relatively thread-safe (since sets are).
    def __init__(self, weakcontainer):
        # Don't create cycles
        self.weakcontainer = ref(weakcontainer)
    def __enter__(self):
        w = self.weakcontainer()
        if w is not None:
            w._iterating.add(self)
        return self
    def __exit__(self, e, t, b):
        w = self.weakcontainer()
        if w is not None:
            s = w._iterating
            s.remove(self)
            # Last active iterator gone: flush the deferred removals.
            if not s:
                w._commit_removals()
class WeakSet(object):
    """Set whose members are held by weak reference.

    An element disappears automatically once no strong references to it
    remain.  The API mirrors the built-in set; removals triggered by
    garbage collection during iteration are deferred via _IterationGuard.
    """
    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            # Weakref callback: drop the dead member, deferring if the set
            # is currently being iterated.
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)
    def _commit_removals(self):
        # Flush removals deferred while iterators were active.
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())
    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                # Skip members that died since their ref was stored.
                if item is not None:
                    yield item
    def __len__(self):
        # Count only refs whose target is still alive.
        return sum(x() is not None for x in self.data)
    def __contains__(self, item):
        return ref(item) in self.data
    def __reduce__(self):
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))
    # Mutable container: not hashable.
    __hash__ = None
    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))
    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()
    def copy(self):
        return self.__class__(self)
    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        # Loop until a still-alive member is found.
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                return item
    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))
    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))
    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if isinstance(other, self.__class__):
            self.data.update(other.data)
        else:
            for element in other:
                self.add(element)
    def __ior__(self, other):
        self.update(other)
        return self
    # Helper functions for simple delegating methods.
    def _apply(self, other, method):
        if not isinstance(other, self.__class__):
            other = self.__class__(other)
        newdata = method(other.data)
        newset = self.__class__()
        newset.data = newdata
        return newset
    def difference(self, other):
        return self._apply(other, self.data.difference)
    __sub__ = difference
    def difference_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self
    def intersection(self, other):
        return self._apply(other, self.data.intersection)
    __and__ = intersection
    def intersection_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self
    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __lt__ = issubset
    def __le__(self, other):
        return self.data <= set(ref(item) for item in other)
    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __gt__ = issuperset
    def __ge__(self, other):
        return self.data >= set(ref(item) for item in other)
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)
    def symmetric_difference(self, other):
        return self._apply(other, self.data.symmetric_difference)
    __xor__ = symmetric_difference
    def symmetric_difference_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item) for item in other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item) for item in other)
        return self
    def union(self, other):
        return self._apply(other, self.data.union)
    __or__ = union
    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
| apache-2.0 |
peter-jang/ansible | test/units/template/test_templar.py | 59 | 5373 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible import constants as C
from ansible.errors import *
from ansible.plugins import filter_loader, lookup_loader, module_loader
from ansible.plugins.strategy import SharedPluginLoaderObj
from ansible.template import Templar
from units.mock.loader import DictDataLoader
class TestTemplar(unittest.TestCase):
    """Unit tests for ansible.template.Templar rendering behaviour."""
    def setUp(self):
        fake_loader = DictDataLoader({
            "/path/to/my_file.txt": "foo\n",
        })
        # NOTE(review): shared_loader is created but never used here —
        # presumably a leftover; confirm before removing.
        shared_loader = SharedPluginLoaderObj()
        # Fixture variables exercising strings, nesting, numbers, booleans,
        # dicts, malformed dicts, lists and a self-referencing template.
        variables = dict(
            foo="bar",
            bam="{{foo}}",
            num=1,
            var_true=True,
            var_false=False,
            var_dict=dict(a="b"),
            bad_dict="{a='b'",
            var_list=[1],
            recursive="{{recursive}}",
        )
        self.templar = Templar(loader=fake_loader, variables=variables)
    def tearDown(self):
        pass
    def test_templar_simple(self):
        """Basic rendering, type preservation, error cases and var updates."""
        templar = self.templar
        # test some basic templating
        self.assertEqual(templar.template("{{foo}}"), "bar")
        self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
        self.assertEqual(templar.template("foo", convert_bare=True), "bar")
        self.assertEqual(templar.template("{{bam}}"), "bar")
        self.assertEqual(templar.template("{{num}}"), 1)
        self.assertEqual(templar.template("{{var_true}}"), True)
        self.assertEqual(templar.template("{{var_false}}"), False)
        self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
        self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
        self.assertEqual(templar.template("{{var_list}}"), [1])
        self.assertEqual(templar.template(1, convert_bare=True), 1)
        # force errors
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
        self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
        self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
        # test with fail_on_undefined=False
        self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
        # test set_available_variables()
        templar.set_available_variables(variables=dict(foo="bam"))
        self.assertEqual(templar.template("{{foo}}"), "bam")
        # variables must be a dict() for set_available_variables()
        self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
    def test_templar_escape_backslashes(self):
        # Rule of thumb: If escape backslashes is True you should end up with
        # the same number of backslashes as when you started.
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
    def test_template_jinja2_extensions(self):
        """_get_extensions() reflects DEFAULT_JINJA2_EXTENSIONS; restore it after."""
        fake_loader = DictDataLoader({})
        templar = Templar(loader=fake_loader)
        old_exts = C.DEFAULT_JINJA2_EXTENSIONS
        try:
            C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
            self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
        finally:
            C.DEFAULT_JINJA2_EXTENSIONS = old_exts
| gpl-3.0 |
MicBrain/GSoC_CernVM-FS | build/externals/build_googletest/test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform detection used to gate platform-specific flag tests.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
# Path of the C++ test binary whose flag parsing is under test.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
# Private copy of the environment; mutated by SetEnvVar without touching
# this process's real environment.
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Remove the variable if present; no-op otherwise.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ (optionally with 'flag') and returns its output."""
  args = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag tracks its GTEST_* environment variable."""
  env_var = 'GTEST_' + flag.upper()
  # With the variable set, the flag must report the test value...
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  # ...and once unset, the flag must fall back to its default.
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """End-to-end checks: GTEST_* env vars drive the matching gtest flags."""
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')
    # These flags only exist on Linux builds.
    if IS_LINUX:
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
  # Delegate argument handling and test running to the gtest test harness.
  gtest_test_utils.Main()
| apache-2.0 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/command/install.py | 53 | 26777 | """distutils.command.install
Implements the Distutils 'install' command."""
from distutils import log
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install.py 62788 2008-05-06 22:41:46Z christian.heimes $"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
from site import USER_BASE
from site import USER_SITE
# Windows layout: before Python 2.2 modules were installed directly under
# the prefix; from 2.2 onwards they go into Lib/site-packages.
if sys.version < "2.2":
    WINDOWS_SCHEME = {
        'purelib': '$base',
        'platlib': '$base',
        'headers': '$base/Include/$dist_name',
        'scripts': '$base/Scripts',
        'data'   : '$base',
    }
else:
    WINDOWS_SCHEME = {
        'purelib': '$base/Lib/site-packages',
        'platlib': '$base/Lib/site-packages',
        'headers': '$base/Include/$dist_name',
        'scripts': '$base/Scripts',
        'data'   : '$base',
    }
# Per-platform installation layouts.  '$base'/'$platbase' come from the
# --prefix/--exec-prefix (or --home / user-site) options; the '_user'
# variants implement PEP 370 per-user site directories.
INSTALL_SCHEMES = {
    'unix_prefix': {
        'purelib': '$base/lib/python$py_version_short/site-packages',
        'platlib': '$platbase/lib/python$py_version_short/site-packages',
        'headers': '$base/include/python$py_version_short/$dist_name',
        'scripts': '$base/bin',
        'data'   : '$base',
    },
    'unix_home': {
        'purelib': '$base/lib/python',
        'platlib': '$base/lib/python',
        'headers': '$base/include/python/$dist_name',
        'scripts': '$base/bin',
        'data'   : '$base',
    },
    'unix_user': {
        'purelib': '$usersite',
        'platlib': '$usersite',
        'headers': '$userbase/include/python$py_version_short/$dist_name',
        'scripts': '$userbase/bin',
        'data'   : '$userbase',
    },
    'nt': WINDOWS_SCHEME,
    'nt_user': {
        'purelib': '$usersite',
        'platlib': '$usersite',
        'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
        'scripts': '$userbase/Scripts',
        'data'   : '$userbase',
    },
    'mac': {
        'purelib': '$base/Lib/site-packages',
        'platlib': '$base/Lib/site-packages',
        'headers': '$base/Include/$dist_name',
        'scripts': '$base/Scripts',
        'data'   : '$base',
    },
    'mac_user': {
        'purelib': '$usersite',
        'platlib': '$usersite',
        'headers': '$userbase/$py_version_short/include/$dist_name',
        'scripts': '$userbase/bin',
        'data'   : '$userbase',
    },
    'os2': {
        'purelib': '$base/Lib/site-packages',
        'platlib': '$base/Lib/site-packages',
        'headers': '$base/Include/$dist_name',
        'scripts': '$base/Scripts',
        'data'   : '$base',
    },
    'os2_home': {
        'purelib': '$usersite',
        'platlib': '$usersite',
        'headers': '$userbase/include/python$py_version_short/$dist_name',
        'scripts': '$userbase/bin',
        'data'   : '$userbase',
    },
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
class install (Command):

    # One-line summary shown by "setup.py --help-commands".
    description = "install everything from build directory"

    # Command-line options: (long name, short name, help text).  A trailing
    # '=' on the long name means the option takes an argument.
    user_options = [
        # Select installation scheme and set base director(y|ies)
        ('prefix=', None,
         "installation prefix"),
        ('exec-prefix=', None,
         "(Unix only) prefix for platform-specific files"),
        ('home=', None,
         "(Unix only) home directory to install under"),
        ('user', None,
         "install in user site-package '%s'" % USER_SITE),
        # Or, just set the base director(y|ies)
        ('install-base=', None,
         "base installation directory (instead of --prefix or --home)"),
        ('install-platbase=', None,
         "base installation directory for platform-specific files " +
         "(instead of --exec-prefix or --home)"),
        ('root=', None,
         "install everything relative to this alternate root directory"),
        # Or, explicitly set the installation scheme
        ('install-purelib=', None,
         "installation directory for pure Python module distributions"),
        ('install-platlib=', None,
         "installation directory for non-pure module distributions"),
        ('install-lib=', None,
         "installation directory for all module distributions " +
         "(overrides --install-purelib and --install-platlib)"),
        ('install-headers=', None,
         "installation directory for C/C++ headers"),
        ('install-scripts=', None,
         "installation directory for Python scripts"),
        ('install-data=', None,
         "installation directory for data files"),
        # Byte-compilation options -- see install_lib.py for details, as
        # these are duplicated from there (but only install_lib does
        # anything with them).
        ('compile', 'c', "compile .py to .pyc [default]"),
        ('no-compile', None, "don't compile .py files"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        # Miscellaneous control options
        ('force', 'f',
         "force installation (overwrite any existing files)"),
        ('skip-build', None,
         "skip rebuilding everything (for testing/debugging)"),
        # Where to install documentation (eventually!)
        #('doc-format=', None, "format of documentation to generate"),
        #('install-man=', None, "directory for Unix man pages"),
        #('install-html=', None, "directory for HTML documentation"),
        #('install-info=', None, "directory for GNU info files"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ]

    # Options that are flags (take no argument value).
    boolean_options = ['compile', 'force', 'skip-build', 'user']
    # --no-compile is stored as the negation of the 'compile' attribute.
    negative_opt = {'no-compile' : 'compile'}
    def initialize_options (self):
        """Set every option attribute to its pre-parse default."""
        # High-level options: these select both an installation base
        # and scheme.
        self.prefix = None
        self.exec_prefix = None
        self.home = None
        self.user = 0
        # These select only the installation base; it's up to the user to
        # specify the installation scheme (currently, that means supplying
        # the --install-{platlib,purelib,scripts,data} options).
        self.install_base = None
        self.install_platbase = None
        self.root = None
        # These options are the actual installation directories; if not
        # supplied by the user, they are filled in using the installation
        # scheme implied by prefix/exec-prefix/home and the contents of
        # that installation scheme.
        self.install_purelib = None # for pure module distributions
        self.install_platlib = None # non-pure (dists w/ extensions)
        self.install_headers = None # for C/C++ headers
        self.install_lib = None # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_userbase = USER_BASE
        self.install_usersite = USER_SITE
        # Byte-compilation settings (forwarded to install_lib).
        self.compile = None
        self.optimize = None
        # These two are for putting non-packagized distributions into their
        # own directory and creating a .pth file if it makes sense.
        # 'extra_path' comes from the setup file; 'install_path_file' can
        # be turned off if it makes no sense to install a .pth file. (But
        # better to install it uselessly than to guess wrong and not
        # install it when it's necessary and would be used!) Currently,
        # 'install_path_file' is always true unless some outsider meddles
        # with it.
        self.extra_path = None
        self.install_path_file = 1
        # 'force' forces installation, even if target files are not
        # out-of-date. 'skip_build' skips running the "build" command,
        # handy if you know it's not necessary. 'warn_dir' (which is *not*
        # a user option, it's just there so the bdist_* commands can turn
        # it off) determines whether we warn about installing to a
        # directory not in sys.path.
        self.force = 0
        self.skip_build = 0
        self.warn_dir = 1
        # These are only here as a conduit from the 'build' command to the
        # 'install_*' commands that do the real work. ('build_base' isn't
        # actually used anywhere, but it might be useful in future.) They
        # are not user options, because if the user told the install
        # command where the build directory is, that wouldn't affect the
        # build command.
        self.build_base = None
        self.build_lib = None
        # Not defined yet because we don't know anything about
        # documentation yet.
        #self.install_man = None
        #self.install_html = None
        #self.install_info = None
        self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options (self):
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
# distribution. Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError, \
("must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
raise DistutilsOptionError("can't combine user with with prefix/"
"exec_prefix/home or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = (string.split(sys.version))[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'userbase': self.install_userbase,
'usersite': self.install_usersite,
}
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print "config vars:"
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Create directories in the home dir:
if self.user:
self.create_home_path()
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers',
'userbase', 'usersite')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
# finalize_options ()
def dump_dirs (self, msg):
if DEBUG:
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if opt_name in self.negative_opt:
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = string.translate(opt_name, longopt_xlate)
val = getattr(self, opt_name)
print " %s: %s" % (opt_name, val)
    def finalize_unix (self):
        """Pick the installation base(s) and scheme on POSIX systems."""
        # Explicit install-base/platbase: the user must then supply a
        # complete scheme (lib, headers, scripts, data) themselves.
        if self.install_base is not None or self.install_platbase is not None:
            if ((self.install_lib is None and
                 self.install_purelib is None and
                 self.install_platlib is None) or
                self.install_headers is None or
                self.install_scripts is None or
                self.install_data is None):
                raise DistutilsOptionError, \
                      ("install-base or install-platbase supplied, but "
                      "installation scheme is incomplete")
            return
        if self.user:
            if self.install_userbase is None:
                raise DistutilsPlatformError(
                    "User base directory is not specified")
            self.install_base = self.install_platbase = self.install_userbase
            self.select_scheme("unix_user")
        elif self.home is not None:
            self.install_base = self.install_platbase = self.home
            self.select_scheme("unix_home")
        else:
            if self.prefix is None:
                if self.exec_prefix is not None:
                    raise DistutilsOptionError, \
                          "must not supply exec-prefix without prefix"
                # Fall back to the running interpreter's own prefixes.
                self.prefix = os.path.normpath(sys.prefix)
                self.exec_prefix = os.path.normpath(sys.exec_prefix)
            else:
                if self.exec_prefix is None:
                    self.exec_prefix = self.prefix
            self.install_base = self.prefix
            self.install_platbase = self.exec_prefix
            self.select_scheme("unix_prefix")
    # finalize_unix ()
    def finalize_other (self): # Windows and Mac OS for now
        """Pick the installation base and scheme on non-POSIX systems."""
        if self.user:
            if self.install_userbase is None:
                raise DistutilsPlatformError(
                    "User base directory is not specified")
            self.install_base = self.install_platbase = self.install_userbase
            # e.g. 'nt_user' on Windows, 'os2_user' on OS/2 (if defined).
            self.select_scheme(os.name + "_user")
        elif self.home is not None:
            self.install_base = self.install_platbase = self.home
            # --home reuses the simple Unix layout even on these platforms.
            self.select_scheme("unix_home")
        else:
            if self.prefix is None:
                self.prefix = os.path.normpath(sys.prefix)
            self.install_base = self.install_platbase = self.prefix
            try:
                self.select_scheme(os.name)
            except KeyError:
                # No scheme named after this os.name in INSTALL_SCHEMES.
                raise DistutilsPlatformError, \
                      "I don't know how to install stuff on '%s'" % os.name
    # finalize_other ()
def select_scheme (self, name):
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs (self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
    def expand_basedirs (self):
        """Expand ~ and config vars in the base directory options."""
        self._expand_attrs(['install_base',
                            'install_platbase',
                            'root'])
    def expand_dirs (self):
        """Expand ~ and config vars in the concrete install directories."""
        self._expand_attrs(['install_purelib',
                            'install_platlib',
                            'install_lib',
                            'install_headers',
                            'install_scripts',
                            'install_data',])
def convert_paths (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
    def handle_extra_path (self):
        """Resolve 'extra_path' into self.path_file and self.extra_dirs.

        'extra_path' may be a 1- or 2-element list/tuple, or a
        comma-separated string of the same; one element means the .pth
        file name and the extra directory are the same.
        """
        if self.extra_path is None:
            self.extra_path = self.distribution.extra_path
        if self.extra_path is not None:
            if type(self.extra_path) is StringType:
                self.extra_path = string.split(self.extra_path, ',')
            if len(self.extra_path) == 1:
                path_file = extra_dirs = self.extra_path[0]
            elif len(self.extra_path) == 2:
                (path_file, extra_dirs) = self.extra_path
            else:
                raise DistutilsOptionError, \
                      ("'extra_path' option must be a list, tuple, or "
                      "comma-separated string with 1 or 2 elements")
            # convert to local form in case Unix notation used (as it
            # should be in setup scripts)
            extra_dirs = convert_path(extra_dirs)
        else:
            path_file = None
            extra_dirs = ''
        # XXX should we warn if path_file and not extra_dirs? (in which
        # case the path file would be harmless but pointless)
        self.path_file = path_file
        self.extra_dirs = extra_dirs
    # handle_extra_path ()
def change_roots (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
    def create_home_path(self):
        """Create missing directories under the user's home directory."""
        if not self.user:
            return
        home = convert_path(os.path.expanduser("~"))
        # Any config-var path under $HOME (userbase/usersite) is created up
        # front so the per-user install won't fail later.
        for name, path in self.config_vars.iteritems():
            if path.startswith(home) and not os.path.isdir(path):
                self.debug_print("os.makedirs('%s', 0700)" % path)
                os.makedirs(path, 0700)
# -- Command execution methods -------------------------------------
    def run (self):
        """Run the build (unless skipped) and all install sub-commands."""
        # Obviously have to build before we can install
        if not self.skip_build:
            self.run_command('build')
            # If we built for any other platform, we can't install.
            build_plat = self.distribution.get_command_obj('build').plat_name
            # check warn_dir - it is a clue that the 'install' is happening
            # internally, and not to sys.path, so we don't check the platform
            # matches what we are running.
            if self.warn_dir and build_plat != get_platform():
                raise DistutilsPlatformError("Can't install when "
                                             "cross-compiling")
        # Run all sub-commands (at least those that need to be run)
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)
        if self.path_file:
            self.create_path_file()
        # write list of installed files, if requested.
        if self.record:
            outputs = self.get_outputs()
            if self.root: # strip any package prefix
                root_len = len(self.root)
                for counter in xrange(len(outputs)):
                    outputs[counter] = outputs[counter][root_len:]
            self.execute(write_file,
                         (self.record, outputs),
                         "writing list of installed files to '%s'" %
                         self.record)
        # Warn when modules end up somewhere the interpreter won't find them.
        sys_path = map(os.path.normpath, sys.path)
        sys_path = map(os.path.normcase, sys_path)
        install_lib = os.path.normcase(os.path.normpath(self.install_lib))
        if (self.warn_dir and
            not (self.path_file and self.install_path_file) and
            install_lib not in sys_path):
            log.debug(("modules installed to '%s', which is not in "
                       "Python's module search path (sys.path) -- "
                       "you'll have to change the search path yourself"),
                      self.install_lib)
    # run ()
def create_path_file (self):
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs (self):
# Assemble the outputs of all the sub-commands.
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs (self):
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
    # -- Predicates for sub-command list -------------------------------
    def has_lib (self):
        """Return true if the current distribution has any Python
        modules to install."""
        return (self.distribution.has_pure_modules() or
                self.distribution.has_ext_modules())
    def has_headers (self):
        """Return true if the distribution ships C/C++ header files."""
        return self.distribution.has_headers()
    def has_scripts (self):
        """Return true if the distribution ships scripts."""
        return self.distribution.has_scripts()
    def has_data (self):
        """Return true if the distribution ships data files."""
        return self.distribution.has_data_files()
    # 'sub_commands': a list of commands this command might have to run to
    # get its work done. See cmd.py for more info.
    sub_commands = [('install_lib', has_lib),
                    ('install_headers', has_headers),
                    ('install_scripts', has_scripts),
                    ('install_data', has_data),
                    ('install_egg_info', lambda self:True),
                   ]
# class install
| apache-2.0 |
mdreid/dinkylink | libs/bs4/tests/test_lxml.py | 273 | 2965 | """Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
# Detect lxml at import time; the smoke-test classes below are skipped
# entirely when it is missing.
try:
    import lxml.etree
    LXML_PRESENT = True
    LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError, e:
    LXML_PRESENT = False
    LXML_VERSION = (0,)
if LXML_PRESENT:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
    BeautifulSoup,
    BeautifulStoneSoup,
    )
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
# NOTE(review): 'skipIf' is imported again here; the duplicate is harmless
# but redundant.
from bs4.testing import (
    HTMLTreeBuilderSmokeTest,
    XMLTreeBuilderSmokeTest,
    SoupTest,
    skipIf,
    )
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""
    @property
    def default_builder(self):
        # Each soup gets a fresh lxml HTML tree builder.
        return LXMLTreeBuilder()
    def test_out_of_range_entity(self):
        # Character references outside the Unicode range should be dropped
        # rather than crashing the parser.
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.
    @skipIf(
        not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
        "Skipping doctype test for old version of lxml to avoid segfault.")
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        self.assertEqual("", doctype.strip())
    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
            self.assertEqual(u"<b/>", unicode(soup.b))
            self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
    def test_real_xhtml_document(self):
        """lxml strips the XML definition from an XHTML doc, which is fine."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
        soup = self.soup(markup)
        self.assertEqual(
            soup.encode("utf-8").replace(b"\n", b''),
            markup.replace(b'\n', b'').replace(
                b'<?xml version="1.0" encoding="utf-8"?>', b''))
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
    """See ``XMLTreeBuilderSmokeTest``."""
    @property
    def default_builder(self):
        # Each soup gets a fresh lxml XML tree builder.
        return LXMLTreeBuilderForXML()
| mit |
mjfwest/git-cc | status.py | 1 | 2270 | from common import *
from os.path import join, dirname
class Status:
    """Base class for one file-status entry of a git commit.

    Subclasses implement stage()/commit() to mirror the git change into
    the ClearCase view rooted at CC_DIR.
    """
    def __init__(self, files):
        self.setFile(files[0])
    def setFile(self, file):
        self.file = file
    def cat(self):
        # Fetch the file's blob from git and write it into the view.
        # (self.id is assumed to be set externally before cat() is
        # called -- confirm against callers in the rest of the project.)
        blob = git_exec(['cat-file', 'blob', getBlob(self.id, self.file)], decode=False)
        write(join(CC_DIR, self.file), blob)
    def stageDirs(self, t):
        # Walk up from the file, recording every ancestor directory that
        # does not yet exist in the view; check out the first one that does.
        missing = []
        parent = dirname(self.file)
        while not exists(join(CC_DIR, parent)):
            missing.append(parent)
            parent = dirname(parent)
        self.dirs = missing
        t.stageDir(parent)
    def commitDirs(self, t):
        # Create the recorded directories shallowest-first (pop() returns
        # the last-appended, i.e. highest, directory first).
        while self.dirs:
            parent = self.dirs.pop()
            if not exists(join(CC_DIR, parent)):
                cc_exec(['mkelem', '-nc', '-eltype', 'directory', parent])
                t.add(parent)
class Modify(Status):
    """An existing element whose content changed in git."""
    def stage(self, t):
        # Check out the element so ClearCase accepts the new content.
        t.stage(self.file)
    def commit(self, t):
        # Overwrite the checked-out element with the git blob.
        self.cat()
class Add(Status):
    """A file newly added in git, to become a new ClearCase element."""
    def stage(self, t):
        # Check out every missing parent directory.
        self.stageDirs(t)
    def commit(self, t):
        self.commitDirs(t)
        self.cat()
        cc_exec(['mkelem', '-nc', self.file])
        t.add(self.file)
class Delete(Status):
    """A file removed in git; remove the corresponding element."""
    def stage(self, t):
        t.stageDir(dirname(self.file))
    def commit(self, t):
        # TODO Empty dirs?!?
        cc_exec(['rm', self.file])
class Rename(Status):
    """A file renamed in git: move the ClearCase element accordingly."""
    def __init__(self, files):
        self.old = files[0]
        self.new = files[1]
        self.setFile(self.new)
    def stage(self, t):
        # Both the old location (and its directory) and any missing
        # directories of the new location must be checked out.
        t.stageDir(dirname(self.old))
        t.stage(self.old)
        self.stageDirs(t)
    def commit(self, t):
        self.commitDirs(t)
        cc_exec(['mv', '-nc', self.old, self.new])
        t.checkedout.remove(self.old)
        t.add(self.new)
        # Content may have changed as part of the rename; rewrite it.
        self.cat()
class SymLink(Status):
    """A symbolic link to (re)create in the ClearCase view.

    The link target is stored as the blob content in git.
    """
    def __init__(self, files):
        self.setFile(files[0])
        blob_id = files[1]  # renamed from 'id' (shadowed the builtin)
        self.target = git_exec(['cat-file', 'blob', getBlob(blob_id, self.file)], decode=False)
        # An existing element at the link path must be removed first.
        # (Replaces the former 4-line if/else that set the same boolean.)
        self.rmfirst = exists(join(CC_DIR, self.file))
    def stage(self, t):
        self.stageDirs(t)
    def commit(self, t):
        # NOTE(review): unlike Add/Rename, stageDirs() is never paired
        # with commitDirs() here, so staged-but-missing directories are
        # not created -- confirm whether that is intentional.
        if self.rmfirst:
            cc_exec(['rm', self.file])
        cc_exec(['ln', '-s', self.target, self.file])
| gpl-2.0 |
CXQERP/ODOOERP | addons/account/wizard/account_fiscalyear_close_state.py | 297 | 3130 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close_state(osv.osv_memory):
    """
    Wizard that closes an account fiscal year by marking it (and all of
    its periods and journal-periods) as 'done'.
    """
    _name = "account.fiscalyear.close.state"
    _description = "Fiscalyear Close state"
    _columns = {
        # Fiscal year the user wants to close.
        'fy_id': fields.many2one('account.fiscalyear', \
                                 'Fiscal Year to Close', required=True, help="Select a fiscal year to close"),
    }
    def data_save(self, cr, uid, ids, context=None):
        """
        Close the selected fiscal year(s).

        Refuses to close while any journal entry of the year is still in
        draft state; otherwise flags the journal-periods, periods and the
        fiscal year itself as 'done' directly in SQL.

        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of Account fiscalyear close state's IDs
        """
        # NOTE(review): journal_period_obj, period_obj and fiscalyear_obj
        # are fetched but never used (updates go through raw SQL below).
        journal_period_obj = self.pool.get('account.journal.period')
        period_obj = self.pool.get('account.period')
        fiscalyear_obj = self.pool.get('account.fiscalyear')
        account_move_obj = self.pool.get('account.move')
        for data in self.read(cr, uid, ids, context=context):
            fy_id = data['fy_id'][0]
            # Draft journal entries block the closing.
            account_move_ids = account_move_obj.search(cr, uid, [('period_id.fiscalyear_id', '=', fy_id), ('state', '=', "draft")], context=context)
            if account_move_ids:
                raise osv.except_osv(_('Invalid Action!'), _('In order to close a fiscalyear, you must first post related journal entries.'))
            cr.execute('UPDATE account_journal_period ' \
                        'SET state = %s ' \
                        'WHERE period_id IN (SELECT id FROM account_period \
                        WHERE fiscalyear_id = %s)',
                        ('done', fy_id))
            cr.execute('UPDATE account_period SET state = %s ' \
                    'WHERE fiscalyear_id = %s', ('done', fy_id))
            cr.execute('UPDATE account_fiscalyear ' \
                    'SET state = %s WHERE id = %s', ('done', fy_id))
        # Raw SQL bypassed the ORM, so its caches must be invalidated.
        self.invalidate_cache(cr, uid, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
citrix-openstack-build/glance | glance/tests/stubs.py | 4 | 7172 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite"""
import os
try:
import sendfile
SENDFILE_SUPPORTED = True
except ImportError:
SENDFILE_SUPPORTED = False
import routes
import webob
from glance.api.middleware import context
from glance.api.v1 import router
import glance.common.client
from glance.registry.api import v1 as rserver
from glance.tests import utils
VERBOSE = False
DEBUG = False
class FakeRegistryConnection(object):
    """httplib-style connection that routes registry requests to an
    in-process WSGI app instead of a real socket."""
    def __init__(self, registry=None):
        # Default to the real registry API module when none is injected.
        self.registry = registry or rserver
    def __call__(self, *args, **kwargs):
        # NOTE(flaper87): This method takes
        # __init__'s place in the chain.
        return self
    def connect(self):
        return True
    def close(self):
        return True
    def request(self, method, url, body=None, headers=None):
        # Build a WebOb request mirroring the would-be HTTP request.
        self.req = webob.Request.blank("/" + url.lstrip("/"))
        self.req.method = method
        if headers:
            self.req.headers = headers
        if body:
            self.req.body = body
    def getresponse(self):
        mapper = routes.Mapper()
        server = self.registry.API(mapper)
        # NOTE(markwash): we need to pass through context auth information if
        # we have it.
        if 'X-Auth-Token' in self.req.headers:
            api = utils.FakeAuthMiddleware(server)
        else:
            api = context.UnauthenticatedContextMiddleware(server)
        webob_res = self.req.get_response(api)
        return utils.FakeHTTPResponse(status=webob_res.status_int,
                                      headers=webob_res.headers,
                                      data=webob_res.body)
def stub_out_registry_and_store_server(stubs, base_dir, **kwargs):
    """
    Mocks calls to 127.0.0.1 on 9191 and 9292 for testing so
    that a real Glance server does not need to be up and
    running
    """
    class FakeSocket(object):
        # Minimal socket stand-in; only fileno() is ever consulted.
        def __init__(self, *args, **kwargs):
            pass
        def fileno(self):
            return 42
    class FakeSendFile(object):
        # Emulates sendfile(2) by copying bytes into the fake request body.
        def __init__(self, req):
            self.req = req
        def sendfile(self, o, i, offset, nbytes):
            os.lseek(i, offset, os.SEEK_SET)
            prev_len = len(self.req.body)
            self.req.body += os.read(i, nbytes)
            return len(self.req.body) - prev_len
    class FakeGlanceConnection(object):
        # httplib-like connection that feeds requests to the in-process API.
        def __init__(self, *args, **kwargs):
            self.sock = FakeSocket()
            self.stub_force_sendfile = kwargs.get('stub_force_sendfile',
                                                  SENDFILE_SUPPORTED)
        def connect(self):
            return True
        def close(self):
            return True
        def _clean_url(self, url):
            #TODO(bcwaldon): Fix the hack that strips off v1
            return url.replace('/v1', '', 1) if url.startswith('/v1') else url
        def putrequest(self, method, url):
            self.req = webob.Request.blank(self._clean_url(url))
            if self.stub_force_sendfile:
                fake_sendfile = FakeSendFile(self.req)
                stubs.Set(sendfile, 'sendfile', fake_sendfile.sendfile)
            self.req.method = method
        def putheader(self, key, value):
            self.req.headers[key] = value
        def endheaders(self):
            hl = [i.lower() for i in self.req.headers.keys()]
            assert not ('content-length' in hl and
                        'transfer-encoding' in hl), \
                'Content-Length and Transfer-Encoding are mutually exclusive'
        def send(self, data):
            # send() is called during chunked-transfer encoding, and
            # data is of the form %x\r\n%s\r\n. Strip off the %x and
            # only write the actual data in tests.
            self.req.body += data.split("\r\n")[1]
        def request(self, method, url, body=None, headers=None):
            self.req = webob.Request.blank(self._clean_url(url))
            self.req.method = method
            if headers:
                self.req.headers = headers
            if body:
                self.req.body = body
        def getresponse(self):
            mapper = routes.Mapper()
            api = context.UnauthenticatedContextMiddleware(router.API(mapper))
            res = self.req.get_response(api)
            # httplib.Response has a read() method...fake it out
            def fake_reader():
                return res.body
            setattr(res, 'read', fake_reader)
            return res
    def fake_get_connection_type(client):
        """
        Returns the proper connection type
        """
        DEFAULT_REGISTRY_PORT = 9191
        DEFAULT_API_PORT = 9292
        if (client.port == DEFAULT_API_PORT and
            client.host == '0.0.0.0'):
            return FakeGlanceConnection
        elif (client.port == DEFAULT_REGISTRY_PORT and
              client.host == '0.0.0.0'):
            rserver = kwargs.get("registry", None)
            return FakeRegistryConnection(registry=rserver)
    # NOTE(review): fake_image_iter is defined but never installed with
    # stubs.Set here -- presumably dead code; confirm before removing.
    def fake_image_iter(self):
        for i in self.source.app_iter:
            yield i
    def fake_sendable(self, body):
        # Honor an explicit per-connection override of sendfile support;
        # otherwise defer to the original _sendable implementation.
        force = getattr(self, 'stub_force_sendfile', None)
        if force is None:
            return self._stub_orig_sendable(body)
        else:
            if force:
                assert glance.common.client.SENDFILE_SUPPORTED
            return force
    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
    setattr(glance.common.client.BaseClient, '_stub_orig_sendable',
            glance.common.client.BaseClient._sendable)
    stubs.Set(glance.common.client.BaseClient, '_sendable',
              fake_sendable)
def stub_out_registry_server(stubs, **kwargs):
    """
    Mocks calls to 127.0.0.1 on 9191 for testing so
    that a real Glance Registry server does not need to be up and
    running
    """
    def fake_get_connection_type(client):
        """
        Returns the proper connection type
        """
        DEFAULT_REGISTRY_PORT = 9191
        if (client.port == DEFAULT_REGISTRY_PORT and
            client.host == '0.0.0.0'):
            rserver = kwargs.pop("registry", None)
            return FakeRegistryConnection(registry=rserver)
    # NOTE(review): fake_image_iter is defined but never installed with
    # stubs.Set -- presumably dead code; confirm before removing.
    def fake_image_iter(self):
        for i in self.response.app_iter:
            yield i
    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
| apache-2.0 |
mpalmi/clip | packages/scap-security-guide/scap-security-guide-0.1.25/shared/fixes/bash/templates/create_services_disabled.py | 4 | 1395 | #!/usr/bin/python
#
# create_services_disabled.py
# automatically generate fixes for disabled services
#
# NOTE: The file 'template_service_disabled' should be located in the same
# working directory as this script. The template contains the following tags
# that *must* be replaced successfully in order for the fixes to work.
#
# SERVICENAME - the name of the service that should be disabled
# PACKAGENAME - the name of the package that installs the service
#
import sys
import csv
def output_checkfile(serviceinfo):
    """Render the disable-service fix template for one service.

    serviceinfo is a (servicename, packagename) pair as read from the
    input CSV.  Reads ./template_service_disabled, substitutes the
    SERVICENAME and PACKAGENAME tags, and writes the result to
    ./output/service_<servicename>_disabled.sh.
    """
    # get the items out of the list
    servicename, packagename = serviceinfo
    with open("./template_service_disabled", 'r') as templatefile:
        filestring = templatefile.read()
    filestring = filestring.replace("SERVICENAME", servicename)
    # Bug fix: the PACKAGENAME tag documented in the file header was never
    # substituted; do it here (a no-op if the template lacks the tag).
    filestring = filestring.replace("PACKAGENAME", packagename)
    # 'w+' and the explicit close() inside the with-block were redundant.
    with open("./output/service_" + servicename +
              "_disabled.sh", 'w') as outputfile:
        outputfile.write(filestring)
def main():
    """Entry point: read the CSV named on the command line and emit one
    disable-service fix script per row via output_checkfile()."""
    if len(sys.argv) < 2:
        # No input file supplied; explain the expected format and fail.
        print ("Provide a CSV file containing lines of the format: " +
               "servicename,packagename")
        sys.exit(1)
    with open(sys.argv[1], 'r') as csv_file:
        # Each CSV row becomes a [servicename, packagename] list.
        for row in csv.reader(csv_file):
            output_checkfile(row)
    sys.exit(0)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| apache-2.0 |
HassanAmr/bioconda-recipes | recipes/searchgui/3.2.11/searchgui.py | 38 | 3263 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Name of the bundled jar, resolved relative to this wrapper script.
jar_file = 'SearchGUI-3.2.11.jar'
# JVM heap defaults, applied only when the caller passes no -Xm* option and
# _JAVA_OPTIONS is unset (see jvm_opts()).
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Canonicalize *path* (resolving symlinks) and return its directory part."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Locate the Java interpreter.

    Prefer $JAVA_HOME/bin/java when it exists and is executable; otherwise
    fall back to plain 'java' and rely on the caller's PATH.
    """
    java_home = getenv('JAVA_HOME')
    if not java_home:
        return 'java'
    candidate = os.path.join(java_home, os.path.join('bin', 'java'))
    return candidate if access(candidate, X_OK) else 'java'
def jvm_opts(argv):
    """Split *argv* (sys.argv[1:]) into JVM and application arguments.

    Returns a 4-tuple (mem_opts, prop_opts, pass_args, exec_dir):
      mem_opts   -- -Xm* heap options (possibly defaulted, see below)
      prop_opts  -- -D and -XX JVM options
      pass_args  -- everything else, handed through to the application
      exec_dir   -- target of --exec_dir=...; the distribution is copied
                    there on first use (SearchGUI writes next to its jar)

    If no -Xm* option was given AND _JAVA_OPTIONS is unset, the module-level
    default_jvm_mem_opts are substituted.  An empty-but-set _JAVA_OPTIONS
    deliberately suppresses the defaults, reproducing the original shell
    wrapper's null-envar check.
    """
    mem_opts, prop_opts, pass_args = [], [], []
    exec_dir = None
    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First use of this exec_dir: clone the distribution there.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Assemble the java command line and run SearchGUI, exiting with its
    return code.

    SearchGUI updates files relative to the path of the jar file.  In a
    multiuser setting the option --exec_dir="dir" selects the location of
    the distribution; jvm_opts() copies the jar, lib and resources there
    when the directory does not yet exist.
    """
    java = java_executable()
    mem_opts, prop_opts, pass_args, exec_dir = jvm_opts(sys.argv[1:])
    # Run from the exec_dir copy when one was requested, otherwise from the
    # install location of this wrapper script.
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
    # A first passthrough argument naming a class ('eu...') means "run that
    # class from the jar's classpath" instead of the jar's main class.
    jar_arg = '-cp' if pass_args != [] and pass_args[0].startswith('eu') else '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args
    sys.exit(subprocess.call(java_args))
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| mit |
deevarvar/myLab | baidu_code/bcoreapi/ticket/old_pay_back.py | 1 | 1812 | #-*- coding=utf-8 -*-
'''
@description: 旧支付渲染接口测试用例。
@author: miliang<miliang@baidu.com>
'''
import sys
import os
import time
import urllib
import urllib2
import json
import MySQLdb
from settings import SERVER,MYSQL,ODP_MOVIE_PATH
from base import Ticket_Base
from old_pay_notice import Ticket_Old_Pay_Notice
class Ticket_Old_Pay_Back(Ticket_Old_Pay_Notice):
    """Test case for the legacy pay-back rendering endpoint
    (/ticket/payback/<third_from>/<device>).  Reuses the setup flow of
    Ticket_Old_Pay_Notice and only overrides the request URL and the
    response assertions."""
    def __init__(self,third_from,third_order_id=None,cinema_index=1,num=1,seq_no=None,third_id=None,mode=0,device='pc'):
        # Explicit (old-style) base-class call forwarding every argument.
        Ticket_Old_Pay_Notice.__init__(self,third_from,third_order_id,cinema_index,num,seq_no,third_id,mode,device)
        # Endpoint under test; SERVER host/port come from settings.
        self.req_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/payback/' + third_from + '/' + device
    def doAssert(self):
        """Check that the rendered page contains all mandatory fields.

        self.page_dict is presumably populated by the inherited execute()
        flow -- confirm against Ticket_Old_Pay_Notice / Ticket_Base.
        """
        assert self.page_dict['user_mobile']
        assert self.page_dict['detail']['movie_name']
        assert self.page_dict['detail']['phone']
        assert self.page_dict['detail']['seat_info']
        assert self.page_dict['detail']['total_price']
        assert self.page_dict['detail']['status']
        assert self.page_dict['detail']['seq_no']
        # The order id echoed back must match the one this test was built with.
        assert self.page_dict['detail']['order_id'] == self.third_order_id
        assert self.page_dict['detail']['payurl']
# Command-line harness: choose the constructor arguments based on argc.
if __name__ == '__main__':
    if len(sys.argv) == 2:
        # argv: <third_from>
        case = Ticket_Old_Pay_Back(sys.argv[1])
    elif len(sys.argv) == 3:
        # argv: <third_from> <third_order_id>
        case = Ticket_Old_Pay_Back(sys.argv[1],third_order_id=sys.argv[2])
    elif len(sys.argv) == 4:
        #case = Ticket_Old_Pay_Back(sys.argv[1],seq_no=sys.argv[2],third_id=sys.argv[3],mode=1)
        # argv: <third_from> <num> <cinema_index>
        case = Ticket_Old_Pay_Back(sys.argv[1],num=int(sys.argv[2]),cinema_index=int(sys.argv[3]))
    else:
        # Default fixture: the 'maizuo' provider.
        case = Ticket_Old_Pay_Back('maizuo')
    case.execute()
| mit |
thinkopensolutions/odoo-brazil-banking | l10n_br_account_banking_payment_cnab/febraban/cnab_240/bancos/itau.py | 1 | 4289 | # coding: utf-8
# ###########################################################################
#
# Author: Luis Felipe Mileo
# Fernando Marcato Rodrigues
# Daniel Sadamo Hirayama
# Copyright 2015 KMEE - www.kmee.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..cnab_240 import Cnab240
import re
import string
class Itau240(Cnab240):
    """CNAB 240 remittance-file generator specialized for Banco Itaú.

    Overrides header/detail-segment preparation and the "nosso número"
    (our-number) check-digit rules, which differ from other banks (e.g. CEF).
    """
    def __init__(self):
        """Bind the Itaú record layouts from the cnab240 library.
        :return:
        """
        # NOTE(review): super(Cnab240, self) skips Cnab240.__init__ and
        # dispatches to Cnab240's own base class.  If the parent initializer
        # is meant to run, this should read super(Itau240, self) -- confirm
        # against cnab_240.py before changing.
        super(Cnab240, self).__init__()
        from cnab240.bancos import itau
        self.bank = itau
    def _prepare_header(self):
        """Build the file-header values, coercing Itaú-specific field types.
        :param order:
        :return: dict of header values
        """
        vals = super(Itau240, self)._prepare_header()
        # Itaú expects the agency/account check digits as integers.
        vals['cedente_dv_ag_cc'] = int(
            vals['cedente_dv_ag_cc'])
        vals['cedente_agencia_dv'] = int(
            vals['cedente_agencia_dv'])
        # The layout limits the payee name to 30 characters.
        vals['cedente_nome'] = vals['cedente_nome'][:30]
        return vals
    def _prepare_segmento(self, line):
        """Build one detail segment for a payment line.
        :param line: payment order line
        :return: dict of segment values
        """
        vals = super(Itau240, self)._prepare_segmento(line)
        ref = line.move_line_id.transaction_ref
        carteira, nosso_numero, digito = self.nosso_numero(ref)
        #======================================================================
        # agency number: 1572
        # checking account number, without the DAC (check digit): 22211
        # subcarteira number: 109 (this test produced 000, as noted above)
        # nosso numero: 00000008
        # You multiply each char of the number composed with the fields above by the sequence of multipliers - 2 1 2 1 2 1 2 positioned from right to left.
        # (agency+account+carteira+nossonumero) (15722221110900000008)
        #
        #======================================================================
        # Base string for the check digit: agency + account + carteira + ref.
        reference = str(line.order_id.mode.bank_id.bra_number) + str(
            line.order_id.mode.bank_id.acc_number) + str(self.order.mode.boleto_carteira) + str(ref)
        # Layout-mandated maximum field lengths below.
        vals['cedente_nome'] = line.order_id.company_id.legal_name[:30]
        vals['sacado_nome'] = line.partner_id.legal_name[:30]
        vals['carteira_numero'] = int(line.order_id.mode.boleto_carteira)
        vals['nosso_numero'] = int(ref)
        vals['nosso_numero_dv'] = int(self.nosso_numero_dv(reference))
        vals['sacado_cidade'] = line.partner_id.l10n_br_city_id.name[:15]
        vals['sacado_bairro'] = line.partner_id.district[:15]
        vals['sacado_endereco'] = vals['sacado_endereco'][:40]
        return vals
    # Overrides cnab_240.nosso_numero: the digit counts differ between
    # CEF and Itaú.
    def nosso_numero(self, format):
        """Split a transaction reference into (carteira, nosso_numero, digito).

        Note: callers should not rely on the digit returned here; use
        nosso_numero_dv() to compute the check digit.
        """
        digito = format[-1:]
        carteira = format[:3]
        nosso_numero = re.sub(
            '[%s]' % re.escape(string.punctuation), '', format[3:-1] or '')
        return carteira, nosso_numero, digito
    def nosso_numero_dv(self, format):
        """Modulo-10 check digit over the digits of *format*.

        Digits are multiplied alternately by 1 and 2 (1st x 1, 2nd x 2, ...),
        the digits of each product are summed, and the result is
        (10 - total % 10) % 10.
        """
        i = 1
        total = 0
        # multiply all digits by 1 and 2 consecutively starting:
        # eg: 1st x 1 + 2nd x 2 + 3rd x 1 + 4th x 2 + ........
        position = 1
        for digit in format:
            if int(position) % 2 == 0:
                result = int(digit) * 2
            else:
                result = int(digit) * 1
            # Sum the decimal digits of the product (e.g. 14 -> 1 + 4).
            total = total + sum([int(digit) for digit in str(result)])
            position += 1
        digit = total % 10
        if digit != 0:
            digit = 10 - digit
        return digit
| agpl-3.0 |
teresinahc/strans-pyra | stranspyra/api.py | 1 | 3149 | # Copyright (c) 2016 Renato Alencar <renatoalencar.73@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import requests
import time
import settings
from .exceptions import APIServerError
token = ''
def date():
    """
    Returns the current time formatted as an HTTP-date (RFC 7231).

    HTTP dates are always expressed in GMT; the previous implementation
    formatted the *local* time while labelling it GMT, which produced a
    wrong 'date' header on any non-UTC host.

    @return: `str`
    """
    return time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
def auth():
    """
    Authenticates the user using the url, application key,
    email and the password.

    On success the session token is cached in the module-level `token`,
    which get() sends on subsequent requests.

    @return: a `dict` object with the keys `token` and `minutos`,
    from json returned from the API.
    NOTE(review): if the response body is not valid JSON, the broad
    except below swallows the error and the raw `requests.Response`
    object is returned instead -- callers must cope with both shapes.
    """
    global token
    endpoint = '/signin'
    url = settings.URL
    key = settings.API_KEY
    res = requests.post(
        url + endpoint,
        headers={
            'date': date(),      # request timestamp header
            'x-api-key': key,    # application key
        },
        json={
            'email': settings.EMAIL,
            'password': settings.PASSWORD
        },
        **settings.REQUEST_OPTIONS
    )
    try:
        res = res.json()
        token = res['token']     # cache for subsequent get() calls
    except Exception:
        pass
    return res
def get(endpoint, **kwargs):
    """
    Makes a GET request to the API, sending the `token` without
    need to send it all the times.

    @param endpoint: the endpoint URL (e.g.: '/linhas').
    keyword args passed as URL params.
    `settings.REQUEST_OPTIONS` are passed as kwargs to
    `requests.get`.

    @return: a json decoded object.
    Raises `APIServerError` if it returns message with
    'api.error'.

    On 'api.error.token.expired' the call re-authenticates and retries
    itself recursively.  NOTE(review): `token` starts as '' and is never
    set to None, so the `token is not None` guard is always true; repeated
    expiry responses would recurse until something else breaks the loop --
    confirm this is the intended retry behavior.
    """
    global token
    url = settings.URL
    key = settings.API_KEY
    res = requests.get(
        url + endpoint,
        headers={
            'date': date(),
            'x-api-key': key,
            'x-auth-token': token,   # may be '' before the first auth()
        },
        params=kwargs,
        **settings.REQUEST_OPTIONS
    )
    jres = res.json()
    if isinstance(jres, dict) and \
            jres.get('message', '').startswith('api.error'):
        if jres['message'] == 'api.error.token.expired' and token is not None:
            auth()
            return get(endpoint, **kwargs)
        raise APIServerError(jres['message'])
    return jres
| mit |
oulan/oppia | core/counters.py | 30 | 3121 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for performance counters."""
__author__ = 'Sean Lip'
class PerfCounter(object):
    """Generic in-process numeric counter; not aggregated across instances."""
    # TODO(sll): Add aggregation across instances.

    def __init__(self, name, description):
        # Registering two counters under the same name is a programming error.
        if name in Registry._counters:
            raise Exception('Counter %s already exists.' % name)
        self._name = name
        self._description = description
        self._value = 0
        # Self-register so Registry.get_all_counters() can enumerate us.
        Registry._counters[self.name] = self

    @property
    def name(self):
        return self._name

    @property
    def description(self):
        return self._description

    @property
    def value(self):
        return self._value

    def inc(self, increment=1):
        """Increments the counter value by a given increment."""
        self._value += increment
class Registry(object):
    """Registry of all counters."""

    # Maps counter name -> PerfCounter instance; populated from
    # PerfCounter.__init__ at construction time.
    _counters = {}

    @classmethod
    def get_all_counters(cls):
        # Returns the registered counters (dict values view/list).
        return cls._counters.values()
# ---------------------------------------------------------------------------
# Module-level counter instances.  Constructing a PerfCounter registers it in
# Registry._counters, so importing this module defines the full set below.
# ---------------------------------------------------------------------------

# Memcache counters.
MEMCACHE_HIT = PerfCounter(
    'memcache-hit',
    'Number of times an object was found in memcache')
MEMCACHE_MISS = PerfCounter(
    'memcache-miss',
    'Number of times an object was not found in memcache')
MEMCACHE_SET_SUCCESS = PerfCounter(
    'memcache-set-success',
    'Number of times an object was successfully put in memcache')
MEMCACHE_SET_FAILURE = PerfCounter(
    'memcache-set-failure',
    'Number of times an object failed to be put in memcache')
MEMCACHE_DELETE_SUCCESS = PerfCounter(
    'memcache-delete-success',
    'Number of times an object was successfully deleted from memcache')
MEMCACHE_DELETE_MISSING = PerfCounter(
    'memcache-delete-missing',
    'Number of attempts to delete a non-existent object from memcache')
MEMCACHE_DELETE_FAILURE = PerfCounter(
    'memcache-delete-failure',
    'Number of times an object failed to be deleted from memcache')

# HTTP response counters, split by payload type.
HTML_RESPONSE_TIME_SECS = PerfCounter(
    'html-response-time-secs',
    'Total processing time for all HTML responses, in seconds')
HTML_RESPONSE_COUNT = PerfCounter(
    'html-response-count',
    'Number of times a HTML response was sent out')
JSON_RESPONSE_TIME_SECS = PerfCounter(
    'json-response-time-secs',
    'Total processing time for all JSON responses, in seconds')
JSON_RESPONSE_COUNT = PerfCounter(
    'json-response-count',
    'Number of times a JSON response was sent out')

# Outgoing email counter.
EMAILS_SENT = PerfCounter(
    'emails-sent',
    'Number of times a call to send_mail() was made')
| apache-2.0 |
DavidAndreev/indico | indico/MaKaC/webinterface/rh/fileAccess.py | 2 | 1768 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from werkzeug.exceptions import NotFound
from MaKaC.review import Abstract
from MaKaC.webinterface.rh.conferenceBase import RHFileBase
from MaKaC.webinterface.rh.base import RHDisplayBaseProtected
from MaKaC.errors import NotFoundError
from MaKaC.conference import LocalFile
from indico.web.flask.util import send_file
class RHFileAccess(RHFileBase, RHDisplayBaseProtected):
    """Request handler that serves a stored LocalFile for display.

    Access control: files owned by an Abstract use the standard display
    protection check; anything else is considered superseded by the newer
    attachments system and answers 404.
    """

    def _checkParams(self, params):
        try:
            RHFileBase._checkParams(self, params)
        # Bug fix: this was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.  Catch Exception so only real
        # errors are mapped to a not-found response.
        except Exception:
            raise NotFoundError("The file you tried to access does not exist.")

    def _checkProtection(self):
        if isinstance(self._file.getOwner(), Abstract):
            RHDisplayBaseProtected._checkProtection(self)
        else:
            # superseded by attachments
            raise NotFound

    def _process(self):
        # Only locally stored files can be streamed from disk.
        assert isinstance(self._file, LocalFile)
        return send_file(self._file.getFileName(), self._file.getFilePath(), self._file.getFileType(),
                         self._file.getCreationDate())
| gpl-3.0 |
solidfire/solidfire-cli | element/cli/commands/cmd_async.py | 2 | 10449 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2016 NetApp, Inc. All Rights Reserved.
#
# DO NOT EDIT THIS CODE BY HAND! It has been generated with jsvcgen.
#
import click
from element.cli import utils as cli_utils
from element.cli import parser
from element.cli.cli import pass_context
from element import utils
import jsonpickle
import simplejson
from solidfire.models import *
from solidfire.custom.models import *
from uuid import UUID
from element import exceptions
from solidfire import common
from element.cli.cli import SolidFireOption, SolidFireCommand
class ProtectionSchemeVisibility(data_model.DataObject):
    """Enum-like wrapper: the public visibility of the protection scheme."""

    #: Values this type recognizes.
    enum_values = ("customer", "testOnly", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class RemoteClusterSnapshotStatus(data_model.DataObject):
    """Enum-like wrapper: status of the remote snapshot on the target
    cluster as seen on the source cluster."""

    #: Values this type recognizes.
    enum_values = ("Present", "Not Present", "Syncing", "Deleted", "Unknown", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class ProtectionSchemeCategory(data_model.DataObject):
    """Enum-like wrapper: the category of the protection scheme."""

    #: Values this type recognizes.
    enum_values = ("helix", "erasureCoded", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class ProtectionScheme(data_model.DataObject):
    """Enum-like wrapper: the method of protecting data on the cluster."""

    #: Values this type recognizes.
    enum_values = ("singleHelix", "doubleHelix", "tripleHelix", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class AuthConfigType(data_model.DataObject):
    """Enum-like wrapper: which configuration data is accessed or modified
    by the element auth container."""

    #: Values this type recognizes.
    enum_values = ("mNode", "element", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class DriveEncryptionCapabilityType(data_model.DataObject):
    """Enum-like wrapper: a drive's encryption capability."""

    #: Values this type recognizes.
    enum_values = ("none", "sed", "fips", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class FipsDrivesStatusType(data_model.DataObject):
    """Enum-like wrapper: a node's FIPS 140-2 compliance status."""

    #: Values this type recognizes.
    enum_values = ("None", "Partial", "Ready", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class AuthMethod(data_model.DataObject):
    """Enum-like wrapper: qualifies a ClusterAdmin with its authentication
    method."""

    #: Values this type recognizes.
    enum_values = ("Cluster", "Ldap", "Idp", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class MaintenanceMode(data_model.DataObject):
    """Enum-like wrapper: which mode a node is in while maintenance is
    performed on it."""

    #: Values this type recognizes.
    enum_values = ("Disabled", "FailedToRecover", "Unexpected", "RecoveringFromMaintenance", "PreparingForMaintenance", "ReadyForMaintenance", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class ProposedNodeErrorCode(data_model.DataObject):
    """Enum-like wrapper: error code for a proposed node addition."""

    #: Values this type recognizes.
    enum_values = ("nodesNoCapacity", "nodesTooLarge", "nodesConnectFailed", "nodesQueryFailed", "nodesClusterMember", "nonFipsNodeCapable", "nonFipsDrivesCapable", "nodeTypeUnsupported", "nodeTypesHeterogeneous", "nodeTypeInvalid", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class VolumeAccess(data_model.DataObject):
    """Enum-like wrapper: describes host access for a volume."""

    #: Values this type recognizes.
    enum_values = ("locked", "readOnly", "readWrite", "replicationTarget", "snapMirrorTarget", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
class ProtectionDomainType(data_model.DataObject):
    """Enum-like wrapper for Protection Domain types.

    A Protection Domain is a set of one or more components whose
    simultaneous failure is protected from causing data unavailability or
    loss.  This specifies one of the types recognized by this cluster.
    """

    #: Values this type recognizes.
    enum_values = ("node", "chassis", "custom", )

    def __init__(self, value):
        # The raw value is stored as-is; no validation against enum_values.
        self._value = value

    def get_value(self):
        return self._value

    def __str__(self):
        return str(self._value)
@click.group()
@pass_context
def cli(ctx):
    """getresult listresults """
    # Click group collecting the async-result subcommands defined below;
    # the docstring above doubles as the group's help text.
@cli.command('getresult', short_help="""You can use GetAsyncResult to retrieve the result of asynchronous method calls. Some method calls require some time to run, and might not be finished when the system sends the initial response. To obtain the status or result of the method call, use GetAsyncResult to poll the asyncHandle value returned by the method. GetAsyncResult returns the overall status of the operation (in progress, completed, or error) in a standard fashion, but the actual data returned for the operation depends on the original method call and the return data is documented with each method. """, cls=SolidFireCommand)
@click.option('--asynchandle',
              type=int,
              required=True,
              prompt=True,
              help="""A value that was returned from the original asynchronous method call. """)
@click.option('--keepresult',
              type=bool,
              required=False,
              help="""If true, GetAsyncResult does not remove the asynchronous result upon returning it, enabling future queries to that asyncHandle. """)
@pass_context
def getresult(ctx,
              # Mandatory main parameter
              asynchandle,
              # Optional main parameter
              keepresult = None):
    # Only the first string below is the docstring; the rest are no-op
    # string statements kept from the generated code.
    """You can use GetAsyncResult to retrieve the result of asynchronous method calls. Some method calls require some time to run, and"""
    """might not be finished when the system sends the initial response. To obtain the status or result of the method call, use"""
    """GetAsyncResult to poll the asyncHandle value returned by the method."""
    """GetAsyncResult returns the overall status of the operation (in progress, completed, or error) in a standard fashion, but the actual"""
    """data returned for the operation depends on the original method call and the return data is documented with each method."""

    # Connect (or validate the existing connection) before issuing the call.
    cli_utils.establish_connection(ctx)

    # Log the effective parameters for traceability.
    ctx.logger.info(""": """"""asynchandle = """ + str(asynchandle)+";" + """keepresult = """+str(keepresult)+""";"""+"")
    try:
        # Issue the API call through the connected Element instance.
        _dict = ctx.element.get_async_result(async_handle=asynchandle, keep_result=keepresult)
    except common.ApiServerError as e:
        ctx.logger.error(e.message)
        exit()
    except BaseException as e:
        ctx.logger.error(e.__str__())
        exit()
    if ctx.json:
        # Raw JSON output mode.
        print(simplejson.dumps(simplejson.loads(_dict), indent=4))
        return
    else:
        # Formatted tree output (honors depth/filter/pickle options).
        cli_utils.print_result(_dict, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
@cli.command('listresults', short_help="""You can use ListAsyncResults to list the results of all currently running and completed asynchronous methods on the system. Querying asynchronous results with ListAsyncResults does not cause completed asyncHandles to expire; you can use GetAsyncResult to query any of the asyncHandles returned by ListAsyncResults. """, cls=SolidFireCommand)
@click.option('--asyncresulttypes',
              type=str,
              required=False,
              help="""An optional list of types of results. You can use this list to restrict the results to only these types of operations. Possible values are: BulkVolume: Copy operations between volumes, such as backups or restores. Clone: Volume cloning operations. DriveRemoval: Operations involving the system copying data from a drive in preparation to remove it from the cluster. RtfiPendingNode: Operations involving the system installing compatible software on a node before adding it to the cluster """)
@pass_context
def listresults(ctx,
                # Optional main parameter
                asyncresulttypes = None):
    # Only the first string below is the docstring; the rest are no-op
    # string statements kept from the generated code.
    """You can use ListAsyncResults to list the results of all currently running and completed asynchronous methods on the system."""
    """Querying asynchronous results with ListAsyncResults does not cause completed asyncHandles to expire; you can use GetAsyncResult"""
    """to query any of the asyncHandles returned by ListAsyncResults."""

    # Connect (or validate the existing connection) before issuing the call.
    cli_utils.establish_connection(ctx)

    # --asyncresulttypes arrives as a delimited string; parse it to a list.
    asyncresulttypes = parser.parse_array(asyncresulttypes)

    # Log the effective parameters for traceability.
    ctx.logger.info(""": """"""asyncresulttypes = """+str(asyncresulttypes)+""";"""+"")
    try:
        # Issue the API call through the connected Element instance.
        _ListAsyncResultsResult = ctx.element.list_async_results(async_result_types=asyncresulttypes)
    except common.ApiServerError as e:
        ctx.logger.error(e.message)
        exit()
    except BaseException as e:
        ctx.logger.error(e.__str__())
        exit()
    if ctx.json:
        # Raw JSON output mode.
        print(simplejson.dumps(simplejson.loads(_ListAsyncResultsResult), indent=4))
        return
    else:
        # Formatted tree output (honors depth/filter/pickle options).
        cli_utils.print_result(_ListAsyncResultsResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
broadinstitute/PyGithub | github/tests/CommitComment.py | 39 | 3088 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class CommitComment(Framework.TestCase):
    """Record/replay tests for a single commit comment (id 1361949)."""
    def setUp(self):
        Framework.TestCase.setUp(self)
        # Fixture: a known comment on the PyGithub repository.
        self.comment = self.g.get_user().get_repo("PyGithub").get_comment(1361949)
    def testAttributes(self):
        # Every attribute must match the recorded API response.
        self.assertEqual(self.comment.body, "Comment created by PyGithub")
        self.assertEqual(self.comment.commit_id, "6945921c529be14c3a8f566dd1e483674516d46d")
        self.assertEqual(self.comment.created_at, datetime.datetime(2012, 5, 22, 18, 40, 18))
        self.assertEqual(self.comment.html_url, "https://github.com/jacquev6/PyGithub/commit/6945921c529be14c3a8f566dd1e483674516d46d#commitcomment-1361949")
        self.assertEqual(self.comment.id, 1361949)
        # line/path/position are None because the comment targets the whole
        # commit, not a specific file line.
        self.assertEqual(self.comment.line, None)
        self.assertEqual(self.comment.path, None)
        self.assertEqual(self.comment.position, None)
        self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 22, 18, 40, 18))
        self.assertEqual(self.comment.url, "https://api.github.com/repos/jacquev6/PyGithub/comments/1361949")
        self.assertEqual(self.comment.user.login, "jacquev6")
    def testEdit(self):
        # Replayed write operations: success means no exception is raised.
        self.comment.edit("Comment edited by PyGithub")
    def testDelete(self):
        self.comment.delete()
| gpl-3.0 |
liberorbis/libernext | env/lib/python2.7/site-packages/anyjson/__init__.py | 62 | 5277 | """Wraps the best available JSON implementation available in a common
interface"""
import sys
VERSION = (0, 3, 3)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Rune Halvorsen"
__contact__ = "runefh@gmail.com"
__homepage__ = "http://bitbucket.org/runeh/anyjson/"
__docformat__ = "restructuredtext"
# -eof meta-
#: The json implementation object. This is probably not useful to you,
#: except to get the name of the implementation in use. The name is
#: available through ``implementation.name``.
implementation = None

# json.loads does not support buffer() objects,
# so we load() and StringIO instead, and it won't copy.
# (cStringIO is preferred on Python 2 for speed; plain StringIO is the
# pure-Python fallback.)
if sys.version_info[0] == 3:
    from io import StringIO
else:
    try:
        from cStringIO import StringIO  # noqa
    except ImportError:
        from StringIO import StringIO  # noqa
#: List of known json modules, and the names of their loads/dumps
#: methods, as well as the exceptions they throw. Exception can be either
#: an exception class or a string.
# Candidate backends in preference order.  Each row is
# (module, encoder attr, encode error, decoder attr, decode error,
#  file-decoder attr or None); error entries may be exception classes or
# attribute-name strings resolved at load time.
_modules = [("yajl", "dumps", TypeError, "loads", ValueError, "load"),
            ("jsonlib2", "write", "WriteError", "read", "ReadError", None),
            ("jsonlib", "write", "WriteError", "read", "ReadError", None),
            ("simplejson", "dumps", TypeError, "loads", ValueError, "load"),
            ("json", "dumps", TypeError, "loads", ValueError, "load"),
            ("django.utils.simplejson", "dumps", TypeError, "loads", ValueError, "load"),
            ("cjson", "encode", "EncodeError", "decode", "DecodeError", None)
            ]

# Field names matching the positional entries of each _modules row; zipped
# together in _JsonImplementation.__init__ to build a spec dict.
_fields = ("modname", "encoder", "encerror",
           "decoder", "decerror", "filedecoder")
class _JsonImplementation(object):
    """Incapsulates a JSON implementation"""

    def __init__(self, modspec):
        # Turn the positional row from _modules into a named spec dict.
        modinfo = dict(zip(_fields, modspec))

        if modinfo["modname"] == "cjson":
            import warnings
            warnings.warn("cjson is deprecated! See http://pypi.python.org/pypi/python-cjson/1.0.5", DeprecationWarning)

        # No try block. We want importerror to end up at caller
        module = self._attempt_load(modinfo["modname"])

        self.implementation = modinfo["modname"]
        self._encode = getattr(module, modinfo["encoder"])
        self._decode = getattr(module, modinfo["decoder"])
        fdec = modinfo["filedecoder"]
        # Some backends have no file decoder; _filedecode stays falsy then.
        self._filedecode = fdec and getattr(module, fdec)
        self._encode_error = modinfo["encerror"]
        self._decode_error = modinfo["decerror"]
        # Error entries given as strings name attributes of the backend module.
        if isinstance(modinfo["encerror"], basestring):
            self._encode_error = getattr(module, modinfo["encerror"])
        if isinstance(modinfo["decerror"], basestring):
            self._decode_error = getattr(module, modinfo["decerror"])
        self.name = modinfo["modname"]

    def __repr__(self):
        return "<_JsonImplementation instance using %s>" % self.name

    def _attempt_load(self, modname):
        """Attempt to load module name modname, returning it on success,
        throwing ImportError if module couldn't be imported"""
        __import__(modname)
        return sys.modules[modname]

    def dumps(self, data):
        """Serialize the datastructure to json. Returns a string. Raises
        TypeError if the object could not be serialized."""
        try:
            return self._encode(data)
        # Normalize the backend's error to TypeError, keeping the original
        # traceback (Python 2 three-argument raise syntax).
        except self._encode_error, exc:
            raise TypeError, TypeError(*exc.args), sys.exc_info()[2]
    serialize = dumps

    def loads(self, s):
        """deserialize the string to python data types. Raises
        ValueError if the string could not be parsed."""
        # uses StringIO to support buffer objects.
        try:
            if self._filedecode and not isinstance(s, basestring):
                return self._filedecode(StringIO(s))
            return self._decode(s)
        # Normalize the backend's error to ValueError, keeping the original
        # traceback (Python 2 three-argument raise syntax).
        except self._decode_error, exc:
            raise ValueError, ValueError(*exc.args), sys.exc_info()[2]
    deserialize = loads
def force_implementation(modname):
    """Force anyjson to use the named json module, if it is importable.

    Raises ImportError when *modname* is not a known backend or cannot be
    loaded.
    """
    global implementation
    for spec in _modules:
        if spec[0] == modname:
            implementation = _JsonImplementation(spec)
            return
    raise ImportError("No module named: %s" % modname)
if __name__ == "__main__":
    # If run as a script, we do nothing but print an error message.
    # We do NOT try to load a compatible module because that may throw an
    # exception, which renders the package uninstallable with easy_install
    # (It trys to execfile the script when installing, to make sure it works)
    print "Running anyjson as a stand alone script is not supported"
    sys.exit(1)
else:
    # On import: probe the known backends in preference order and bind the
    # first one that imports cleanly.
    for modspec in _modules:
        try:
            implementation = _JsonImplementation(modspec)
            break
        except ImportError:
            pass
    else:
        # for/else: no backend could be imported at all.
        raise ImportError("No supported JSON module found")
def loads(value):
    """Deserialize a JSON string to a Python object."""
    return implementation.loads(value)
deserialize = loads # compat
def dumps(value):
    """Serialize a Python object to a JSON string."""
    return implementation.dumps(value)
serialize = dumps
| gpl-2.0 |
evensonbryan/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/words/test/test_jabbersaslmechanisms.py | 18 | 3077 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.sasl_mechanisms}.
"""
from twisted.trial import unittest
from twisted.words.protocols.jabber import sasl_mechanisms
class PlainTest(unittest.TestCase):
    """
    Tests for L{twisted.words.protocols.jabber.sasl_mechanisms.Plain}.
    """

    def test_getInitialResponse(self):
        """
        The initial response holds authzid, authcid and password, each
        preceded by a NUL byte.
        """
        mechanism = sasl_mechanisms.Plain(None, 'test', 'secret')
        response = mechanism.getInitialResponse()
        self.assertEqual(response, '\x00test\x00secret')
class AnonymousTest(unittest.TestCase):
    """
    Tests for L{twisted.words.protocols.jabber.sasl_mechanisms.Anonymous}.
    """

    def test_getInitialResponse(self):
        """
        The ANONYMOUS mechanism sends no initial response at all.
        """
        mechanism = sasl_mechanisms.Anonymous()
        self.assertEqual(mechanism.getInitialResponse(), None)
class DigestMD5Test(unittest.TestCase):
    """
    Tests for L{twisted.words.protocols.jabber.sasl_mechanisms.DigestMD5}.
    """

    def setUp(self):
        self.mechanism = sasl_mechanisms.DigestMD5('xmpp', 'example.org', None,
                                                   'test', 'secret')

    def test_getInitialResponse(self):
        """
        DIGEST-MD5 is server-first, so the client sends no initial response.
        """
        self.assertIdentical(self.mechanism.getInitialResponse(), None)

    def test_getResponse(self):
        """
        Partially test challenge response.
        Does not actually test the response-value, yet.
        """
        challenge = 'realm="localhost",nonce="1234",qop="auth",charset=utf-8,algorithm=md5-sess'
        response = self.mechanism.getResponse(challenge)
        fields = self.mechanism._parse(response)
        self.assertEqual(fields['username'], 'test')
        self.assertEqual(fields['nonce'], '1234')
        self.assertEqual(fields['nc'], '00000001')
        self.assertEqual(fields['qop'], ['auth'])
        self.assertEqual(fields['charset'], 'utf-8')
        self.assertEqual(fields['digest-uri'], 'xmpp/example.org')
        self.assertEqual(fields['realm'], 'localhost')

    def test_getResponseNoRealm(self):
        """
        A challenge without a realm directive is accepted; the realm then
        defaults to the host part of the JID.
        """
        challenge = 'nonce="1234",qop="auth",charset=utf-8,algorithm=md5-sess'
        fields = self.mechanism._parse(self.mechanism.getResponse(challenge))
        self.assertEqual(fields['realm'], 'example.org')

    def test__parse(self):
        """
        Challenge decoding copes with multiple values for the C{qop} and
        C{cipher} directives.
        """
        challenge = 'nonce="1234",qop="auth,auth-conf",charset=utf-8,' \
                    'algorithm=md5-sess,cipher="des,3des"'
        fields = self.mechanism._parse(challenge)
        self.assertEqual('1234', fields['nonce'])
        self.assertEqual('utf-8', fields['charset'])
        self.assertIn('auth', fields['qop'])
        self.assertIn('auth-conf', fields['qop'])
        self.assertIn('des', fields['cipher'])
        self.assertIn('3des', fields['cipher'])
| gpl-2.0 |
epyatopal/geocoder-1 | geocoder/google.py | 2 | 7799 | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import ratelim
import requests
from geocoder.base import Base
class Google(Base):
    """
    Google Geocoding API
    ====================
    Geocoding is the process of converting addresses into geographic
    coordinates (like latitude 37.423021 and longitude -122.083739),
    which you can use to place markers or position the map.

    API Reference
    -------------
    https://developers.google.com/maps/documentation/geocoding

    Parameters
    ----------
    :param location: Your search location you want geocoded.
    :param method: (default=geocode) Use the following:
        > geocode
        > reverse
        > batch
        > timezone
        > elevation
    :param key: Your Google developers free key.
    :param language: 2-letter code of preferred language of returned address elements.
    :param client: Google for Work client ID. Use with client_secret. Cannot use with key parameter
    :param client_secret: Google for Work client secret. Use with client.
    """
    provider = 'google'
    method = 'geocode'

    def __init__(self, location, **kwargs):
        # Build the request; empty parameter values are stripped below, so
        # unset kwargs simply do not appear in the query string.
        self.url = 'https://maps.googleapis.com/maps/api/geocode/json'
        self.location = location
        self.params = {
            'address': location,
            'key': kwargs.get('key', ''),
            'language': kwargs.get('language', ''),
            'client': kwargs.get('client', '')
        }
        self.client_secret = kwargs.get('client_secret', '')
        # turn non-empty params into sorted list in order to maintain signature validity.
        # Requests will honor the order.
        self.params = sorted([(k.encode('utf8'), v.encode('utf8')) for (k, v) in self.params.items() if v])
        # the signature parameter needs to come in the end of the url
        if self.client_secret:
            self.params.append(self._sign_url(self.url, self.params, self.client_secret))
        self._initialize(**kwargs)

    def _sign_url(self, base_url=None, params=None, client_secret=None):
        """ Sign a request URL with a Crypto Key.
        Usage:
        from urlsigner import sign_url
        signed_url = sign_url(base_url=my_url,
                              params=url_params,
                              client_secret=CLIENT_SECRET)
        Args:
        base_url - The trunk of the URL to sign. E.g. https://maps.googleapis.com/maps/api/geocode/json
        params - List of tuples of URL parameters INCLUDING YOUR CLIENT ID ('client','gme-...')
        client_secret - Your Crypto Key from Google for Work
        Returns:
        The signature as a ("signature", value) tuple, or None when any
        required argument is missing.
        """
        # NOTE(review): Python 2 only -- "urlparse" and "urllib.urlencode"
        # moved to urllib.parse in Python 3, and hmac.new needs bytes there.
        import hashlib
        import urllib
        import hmac
        import base64
        import urlparse
        # Return if any parameters aren't given
        if not base_url or not client_secret or not dict(params)['client']:
            return None
        # assuming parameters will be submitted to Requests in identical order!
        url = urlparse.urlparse(base_url + "?" + urllib.urlencode(params))
        # We only need to sign the path+query part of the string
        url_to_sign = url.path + "?" + url.query
        # Decode the private key into its binary format
        # We need to decode the URL-encoded private key
        decoded_key = base64.urlsafe_b64decode(client_secret)
        # Create a signature using the private key and the URL-encoded
        # string using HMAC SHA1. This signature will be binary.
        signature = hmac.new(decoded_key, url_to_sign, hashlib.sha1)
        # Encode the binary signature into base64 for use within a URL
        encoded_signature = base64.urlsafe_b64encode(signature.digest())
        # Return signature as a tuple (to be appended as a param to url)
        return ("signature", encoded_signature)

    @staticmethod
    @ratelim.greedy(2500, 60 * 60 * 24)
    @ratelim.greedy(5, 1)
    # @ratelim.greedy(100000, 60 * 60 * 24) # Google for Work daily limit
    # @ratelim.greedy(10, 1) # Google for Work limit per second
    def rate_limited_get(*args, **kwargs):
        # requests.get throttled to the free-tier quotas above
        # (2500 requests per day, 5 per second).
        return requests.get(*args, **kwargs)

    def _catch_errors(self):
        # Google signals failures through the JSON "status" field rather
        # than HTTP status codes.
        status = self.parse.get('status')
        if not status == 'OK':
            self.error = status

    def _exceptions(self):
        # Build intial Tree with results
        if self.parse['results']:
            self._build_tree(self.parse.get('results')[0])
        # Build Geometry
        self._build_tree(self.parse.get('geometry'))
        # Parse address components with short & long names
        for item in self.parse['address_components']:
            for category in item['types']:
                self.parse[category]['long_name'] = self._encode(item['long_name'])
                self.parse[category]['short_name'] = self._encode(item['short_name'])

    # NOTE(review): the property accessors below index self.parse with keys
    # that may be absent -- presumably Base makes self.parse tolerant of
    # missing keys (defaultdict-like); confirm in geocoder.base.
    @property
    def lat(self):
        return self.parse['location'].get('lat')
    @property
    def lng(self):
        return self.parse['location'].get('lng')
    @property
    def quality(self):
        # first entry of the result "types" list, e.g. "street_address"
        quality = self.parse.get('types')
        if quality:
            return quality[0]
    @property
    def accuracy(self):
        return self.parse.get('location_type')
    @property
    def bbox(self):
        # viewport corners as reported by Google (southwest / northeast)
        south = self.parse['southwest'].get('lat')
        west = self.parse['southwest'].get('lng')
        north = self.parse['northeast'].get('lat')
        east = self.parse['northeast'].get('lng')
        return self._get_bbox(south, west, north, east)
    @property
    def address(self):
        return self.parse.get('formatted_address')
    @property
    def postal(self):
        return self.parse['postal_code'].get('short_name')
    @property
    def subpremise(self):
        return self.parse['subpremise'].get('short_name')
    @property
    def housenumber(self):
        return self.parse['street_number'].get('short_name')
    @property
    def street(self):
        return self.parse['route'].get('short_name')
    @property
    def street_long(self):
        return self.parse['route'].get('long_name')
    @property
    def road_long(self):
        return self.street_long
    @property
    def neighborhood(self):
        return self.parse['neighborhood'].get('short_name')
    @property
    def sublocality(self):
        return self.parse['sublocality'].get('short_name')
    @property
    def city(self):
        # fall back to postal_town (used instead of locality in the UK)
        city = self.parse['locality'].get('short_name')
        postal_town = self.postal_town
        if city:
            return city
        else:
            return postal_town
    @property
    def city_long(self):
        city_long = self.parse['locality'].get('long_name')
        postal_town_long = self.postal_town_long
        if city_long:
            return city_long
        else:
            return postal_town_long
    @property
    def postal_town(self):
        return self.parse['postal_town'].get('short_name')
    @property
    def postal_town_long(self):
        return self.parse['postal_town'].get('long_name')
    @property
    def county(self):
        return self.parse['administrative_area_level_2'].get('short_name')
    @property
    def state(self):
        return self.parse['administrative_area_level_1'].get('short_name')
    @property
    def state_long(self):
        return self.parse['administrative_area_level_1'].get('long_name')
    @property
    def province_long(self):
        return self.state_long
    @property
    def country(self):
        return self.parse['country'].get('short_name')
    @property
    def country_long(self):
        return self.parse['country'].get('long_name')
# Ad-hoc smoke test: geocode a sample address and dump the parsed result.
if __name__ == '__main__':
    g = Google('11 Wall Street, New York')
    g.debug()
| mit |
phalax4/CarnotKE | jyhton/lib-python/2.7/lib2to3/fixes/fix_filter.py | 326 | 2107 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
    """Rewrite filter() calls for Python 3 (see module docstring)."""
    BM_compatible = True
    PATTERN = """
    filter_lambda=power<
        'filter'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'filter'
        trailer< '(' arglist< none='None' ',' seq=any > ')' >
    >
    |
    power<
        'filter'
        args=trailer< '(' [any] ')' >
    >
    """

    # skip files that already do "from future_builtins import filter"
    skip_on = "future_builtins.filter"

    def transform(self, node, results):
        if self.should_skip(node):
            return
        if "filter_lambda" in results:
            # filter(lambda fp: xp, it)  ->  [fp for fp in it if xp]
            new = ListComp(results.get("fp").clone(),
                           results.get("fp").clone(),
                           results.get("it").clone(),
                           results.get("xp").clone())
        elif "none" in results:
            # filter(None, seq)  ->  [_f for _f in seq if _f]
            new = ListComp(Name(u"_f"),
                           Name(u"_f"),
                           results["seq"].clone(),
                           Name(u"_f"))
        else:
            # Generic filter(...) call: wrap in list(...), unless the value
            # is consumed by an iteration context (for, iter(), list(), ...).
            if in_special_context(node):
                return None
            new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        new.prefix = node.prefix
        return new
| apache-2.0 |
maurerpe/FreeCAD | src/Mod/Spreadsheet/App/Spreadsheet_legacy.py | 25 | 43880 | #***************************************************************************
#* *
#* Copyright (c) 2013 - Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
"""This is the deprecated spreadsheet module. It is not used anymore
in FreeCAD, but is still there for archiving purposes."""
import re, math, FreeCAD, FreeCADGui
from PySide import QtCore,QtGui
DEBUG = True # set to True to show debug messages

# Keep a reference to the builtin open() before anything shadows it; the
# __builtin__ check means this runs only when "open" is still the builtin
# (Python 2 -- the module is called "builtins" on Python 3).
if open.__module__ == '__builtin__':
    pyopen = open # because we'll redefine open below
class MathParser:
    """A small recursive-descent parser for arithmetic expressions.

    Supports +, -, *, /, parentheses, unary minus, floating point numbers
    and named variables. The constants "pi" and "e" are always defined;
    additional variables may be supplied through the *vars* mapping.
    All results are returned as floats.
    """
    # code adapted from http://www.nerdparadise.com/tech/python/parsemath/

    def __init__(self, string, vars=None):
        """string: the expression to parse; vars: optional {name: value} map.

        Raises RuntimeError if vars tries to redefine pi or e.
        """
        self.string = string
        self.index = 0
        self.vars = {
            'pi' : math.pi,
            'e' : math.e
        }
        # Bugfix: the original used a mutable default argument (vars={});
        # default to None and create a fresh dict per call instead.
        if vars is None:
            vars = {}
        for var in vars.keys():
            if var in self.vars:
                raise RuntimeError("Cannot redefine the value of " + var)
            self.vars[var] = vars[var]

    def getValue(self):
        """Parse the whole expression and return its value as a float.

        Raises SyntaxError on malformed input, ValueError for unknown
        variables and ZeroDivisionError on division by zero.
        """
        value = self.parseExpression()
        self.skipWhitespace()
        if self.hasNext():
            raise SyntaxError(
                "Unexpected character found: '" +
                self.peek() +
                "' at index " +
                str(self.index))
        return value

    def peek(self):
        """Return the current character without consuming it ('' at end)."""
        return self.string[self.index:self.index + 1]

    def hasNext(self):
        """Return True while unconsumed input remains."""
        return self.index < len(self.string)

    def skipWhitespace(self):
        """Advance the cursor past any whitespace."""
        while self.hasNext():
            if self.peek() in ' \t\n\r':
                self.index += 1
            else:
                return

    def parseExpression(self):
        """Grammar entry point: an expression is an addition chain."""
        return self.parseAddition()

    def parseAddition(self):
        """Parse a chain of additions/subtractions (lowest precedence)."""
        values = [self.parseMultiplication()]
        while True:
            self.skipWhitespace()
            char = self.peek()
            if char == '+':
                self.index += 1
                values.append(self.parseMultiplication())
            elif char == '-':
                self.index += 1
                # subtraction is treated as addition of the negated term
                values.append(-1 * self.parseMultiplication())
            else:
                break
        return sum(values)

    def parseMultiplication(self):
        """Parse a chain of multiplications/divisions."""
        values = [self.parseParenthesis()]
        while True:
            self.skipWhitespace()
            char = self.peek()
            if char == '*':
                self.index += 1
                values.append(self.parseParenthesis())
            elif char == '/':
                div_index = self.index
                self.index += 1
                denominator = self.parseParenthesis()
                if denominator == 0:
                    raise ZeroDivisionError(
                        "Division by 0 kills baby whales (occured at index " +
                        str(div_index) +
                        ")")
                # division is treated as multiplication by the inverse
                values.append(1.0 / denominator)
            else:
                break
        value = 1.0
        for factor in values:
            value *= factor
        return value

    def parseParenthesis(self):
        """Parse a parenthesized sub-expression, or fall through."""
        self.skipWhitespace()
        char = self.peek()
        if char == '(':
            self.index += 1
            value = self.parseExpression()
            self.skipWhitespace()
            if self.peek() != ')':
                raise SyntaxError(
                    "No closing parenthesis found at character "
                    + str(self.index))
            self.index += 1
            return value
        else:
            return self.parseNegative()

    def parseNegative(self):
        """Parse an optional unary minus in front of a value."""
        self.skipWhitespace()
        char = self.peek()
        if char == '-':
            self.index += 1
            return -1 * self.parseParenthesis()
        else:
            return self.parseValue()

    def parseValue(self):
        """Dispatch to a numeric literal or a variable reference."""
        self.skipWhitespace()
        char = self.peek()
        if char in '0123456789.':
            return self.parseNumber()
        else:
            return self.parseVariable()

    def parseVariable(self):
        """Parse a variable name and return its value as a float.

        Raises ValueError if the name is not defined in self.vars.
        """
        self.skipWhitespace()
        var = ''
        while self.hasNext():
            char = self.peek()
            if char.lower() in '_abcdefghijklmnopqrstuvwxyz0123456789':
                var += char
                self.index += 1
            else:
                break
        value = self.vars.get(var, None)
        # idiom fix: identity comparison with None instead of "== None"
        if value is None:
            raise ValueError(
                "Unrecognized variable: '" +
                var +
                "'")
        return float(value)

    def parseNumber(self):
        """Parse a (possibly fractional) numeric literal as a float."""
        self.skipWhitespace()
        strValue = ''
        decimal_found = False
        char = ''
        while self.hasNext():
            char = self.peek()
            if char == '.':
                if decimal_found:
                    raise SyntaxError(
                        "Found an extra period in a number at character " +
                        str(self.index) +
                        ". Are you European?")
                decimal_found = True
                strValue += '.'
            elif char in '0123456789':
                strValue += char
            else:
                break
            self.index += 1
        if len(strValue) == 0:
            if char == '':
                raise SyntaxError("Unexpected end found")
            else:
                raise SyntaxError(
                    "I was expecting to find a number at character " +
                    str(self.index) +
                    " but instead I found a '" +
                    char +
                    "'. What's up with that?")
        return float(strValue)
class Spreadsheet:
"""An object representing a spreadsheet. Can be used as a
FreeCAD object or as a standalone python object.
Cells of the spreadsheet can be got/set as arguments, as:
myspreadsheet = Spreadsheet()
myspreadsheet.a1 = 54
print(myspreadsheet.a1)
myspreadsheet.a2 = "My text"
myspreadsheet.b1 = "=a1*3"
print(myspreadsheet.b1)
The cell names are case-insensitive (a1 = A1)
"""
def __init__(self,obj=None):
if obj:
obj.Proxy = self
obj.addProperty("App::PropertyLinkList","Controllers","Base","Cell controllers of this object")
self.Object = obj.Name
self._cells = {} # this stores cell contents
self._relations = {} # this stores relations - currently not used
self.cols = [] # this stores filled columns
self.rows = [] # this stores filed rows
self.Type = "Spreadsheet"
def __repr__(self):
return "Spreadsheet object containing " + str(len(self._cells)) + " cells"
def __setattr__(self, key, value):
if self.isKey(key):
key = key.lower()
if DEBUG: print "Setting key ",key," to value ",value
if (value == "") or (value == None):
# remove cell
if key in self._cells.keys():
del self._cells[key]
else:
# add cell
self._cells[key] = value
if value:
if self.isFunction(value):
self._updateDependencies(key,value)
c,r = self.splitKey(key)
if not c in self.cols:
self.cols.append(c)
self.cols.sort()
if not r in self.rows:
self.rows.append(r)
self.rows.sort()
self._updateControllers()
else:
self.__dict__.__setitem__(key,value)
def __getattr__(self, key):
if key.lower() in self._cells:
key = key.lower()
if self.isFunction(self._cells[key]):
try:
e = self.evaluate(key)
except:
print "Spreadsheet: Error evaluating formula"
return None
else:
return e
else:
return self._cells[key]
else:
return self.__dict__.__getitem__(key)
def __setitem__(self, key, value):
__setattr__(self, key, value)
def __getitem__(self, key):
return __getattr__(self, key)
def __getstate__(self):
self._cells["Type"] = self.Type
if hasattr(self,"Object"):
self._cells["Object"] = self.Object
return self._cells
def __setstate__(self,state):
if state:
self._cells = state
# extracting Type
if "Type" in self._cells.keys():
self.Type = self._cells["Type"]
del self._cells["Type"]
if "Object" in self._cells.keys():
self.Object = self._cells["Object"]
del self._cells["Object"]
# updating relation tables
self.rows = []
self.cols = []
self._relations = {}
for key in self._cells.keys():
c,r = self.splitKey(key)
if not r in self.rows:
self.rows.append(r)
self.rows.sort()
if not c in self.cols:
self.cols.append(c)
self.cols.sort()
if self.isFunction(key):
self._updateDependencies(key)
def _updateDependencies(self,key,value=None):
"search for ancestors in the value and updates the table"
ancestors = []
if not value:
value = self._cells[key]
for v in re.findall(r"[\w']+",value):
if self.isKey(v):
ancestors.append(v)
for a in ancestors:
if a in self._relations:
if not key in self._relations[a]:
self._relations[a].append(key)
else:
self._relations[a] = [key]
def _updateControllers(self):
"triggers the property controllers"
if hasattr(self,"Object"):
obj = FreeCAD.ActiveDocument.getObject(self.Object)
if obj:
import Draft
if Draft.getType(obj) == "Spreadsheet":
if hasattr(obj,"Controllers"):
for co in obj.Controllers:
if Draft.getType(co) == "SpreadsheetPropertyController":
co.Proxy.execute(co)
def execute(self,obj):
pass
def isFunction(self,key):
"isFunction(cell): returns True if the given cell or value is a function"
if str(key).lower() in self._cells:
key = key.lower()
if str(self._cells[key])[0] == "=":
return True
elif str(key)[0] == "=":
return True
else:
return False
def isNumeric(self,key):
"isNumeric(cell): returns True if the given cell returns a number"
key = key.lower()
if self.isFunction(key):
res = self.evaluate(key)
else:
res = self._cells[key]
if isinstance(res,float) or isinstance(res,int):
return True
else:
return False
def isKey(self,value):
"isKey(val): returns True if the given value is a valid cell number"
allowMoreThanOneLetter = False
al = False
nu = False
for v in value:
if not v.isalnum():
return False
elif not al:
if v.isalpha():
al = True
else:
return False
else:
if not nu:
# forbidden to set items at row 0
if v == "0":
return False
if v.isalpha():
if not allowMoreThanOneLetter:
return False
elif nu:
return False
elif v.isdigit():
nu = True
if not nu:
return False
return True
def splitKey(self,key):
"splitKey(cell): splits a key between column and row"
c = ''
r = ''
for ch in key:
if ch.isalpha():
c += ch
else:
r += ch
return c,r
def getFunction(self,key):
"getFunction(cell): returns the function contained in the given cell, instead of the value"
key = key.lower()
if key in self._cells:
return self._cells[key]
else:
return None
def getSize(self):
"getSize(): returns a tuple with number of columns and rows of this spreadsheet"
return (len(self.columns),len(self.rows))
def getCells(self,index):
"getCells(index): returns the cells from the given column of row number"
cells = {}
for k in self._cells.keys():
c,r = self.splitKey(k)
if index in [c,r]:
cells[k] = self._cells[k]
return cells
def evaluate(self,key):
"evaluate(key): evaluates the given formula"
key = key.lower()
elts = re.split(r'(\W+)',self._cells[key][1:])
result = ""
for e in elts:
if self.isKey(e):
if self.isFunction(e):
if self.isNumeric(e):
result += str(self.evaluate(e))
else:
print "Spreadsheet: Error evaluating formula"
return
elif self.isNumeric(e):
result += str(self._cells[e.lower()])
else:
result += e
if DEBUG: print "Evaluating ",result
try:
p = MathParser(result)
result = p.getValue()
except Exception as (ex):
raise #
#msg = ex.message
#raise Exception(msg) #would discard the type
return result
def recompute(self,obj):
"Fills the controlled cells and properties"
if obj:
if hasattr(obj,"Controllers"):
import Draft
for co in obj.Controllers:
if Draft.getType(co) == "SpreadsheetController":
co.Proxy.setCells(co,obj)
elif Draft.getType(co) == "SpreadsheetPropertyController":
co.Proxy.compute(co)
def getControlledCells(self,obj):
"returns a list of cells managed by controllers"
cells = []
if hasattr(obj,"Controllers"):
import Draft
for co in obj.Controllers:
if Draft.getType(co) == "SpreadsheetController":
cells.extend(co.Proxy.getCells(co,obj))
return cells
def getControllingCells(self,obj):
"returns a list of controlling cells managed by controllers"
cells = []
if hasattr(obj,"Controllers"):
import Draft
for co in obj.Controllers:
if Draft.getType(co) == "SpreadsheetPropertyController":
if co.Cell:
cells.append(co.Cell.lower())
return cells
class ViewProviderSpreadsheet(object):
    "View provider for the legacy Spreadsheet document object"

    def __init__(self, vobj):
        vobj.Proxy = self

    def getIcon(self):
        # importing Spreadsheet_rc presumably registers the compiled Qt
        # resources holding the icon -- TODO confirm
        import Spreadsheet_rc
        return ":/icons/Spreadsheet.svg"

    def attach(self, vobj):
        self.Object = vobj.Object

    def setEdit(self, vobj, mode=0):
        # create the editor widget once and reuse it afterwards
        if not hasattr(self, "editor"):
            self.editor = SpreadsheetView(vobj.Object)
            addSpreadsheetView(self.editor)
        return True

    def unsetEdit(self, vobj, mode=0):
        return False

    def doubleClicked(self, vobj):
        self.setEdit(vobj)

    def claimChildren(self):
        if hasattr(self, "Object") and hasattr(self.Object, "Controllers"):
            return self.Object.Controllers

    def __getstate__(self):
        return None

    def __setstate__(self, state):
        return None
class SpreadsheetController:
    "A spreadsheet cell controller object"
    def __init__(self,obj):
        obj.Proxy = self
        self.Type = "SpreadsheetController"
        obj.addProperty("App::PropertyEnumeration","FilterType","Filter","The type of filter to apply to the scene objects")
        obj.addProperty("App::PropertyString","Filter","Filter","The filter to apply to the scene objects")
        obj.addProperty("App::PropertyEnumeration","DataType","Data","The type of data to extract from the objects")
        obj.addProperty("App::PropertyString","Data","Data","The data to extract from the objects")
        obj.addProperty("App::PropertyString","BaseCell","Base","The starting cell of this controller")
        obj.addProperty("App::PropertyEnumeration","Direction","Base","The cells direction of this controller")
        obj.FilterType = ["Object Type","Object Name"]
        obj.DataType = ["Get Property","Count"]
        obj.Direction = ["Horizontal","Vertical"]
    def execute(self,obj):
        pass
    def __getstate__(self):
        return self.Type
    def __setstate__(self,state):
        if state:
            self.Type = state
    def onChanged(self,obj,prop):
        # the Data property is only meaningful for "Get Property" mode;
        # make it read-only (editor mode 1) when counting
        if prop == "DataType":
            if obj.DataType == "Count":
                obj.setEditorMode('Data',1)
            else:
                obj.setEditorMode('Data',0)
    def getDataSet(self,obj):
        "returns a list of objects to be considered by this controller"
        result = []
        if hasattr(obj,"FilterType"):
            import Draft
            baseset = FreeCAD.ActiveDocument.Objects
            if obj.FilterType == "Object Type":
                for o in baseset:
                    if not ("Spreadsheet" in Draft.getType(o)):
                        t = Draft.getType(o)
                        if t == "Part":
                            # NOTE(review): "obj" is the controller itself, so
                            # this reads the controller's TypeId rather than
                            # the scene object's -- "o.TypeId" was probably
                            # intended; confirm before changing.
                            t = obj.TypeId
                        if obj.Filter:
                            if obj.Filter in t:
                                result.append(o)
                        else:
                            result.append(o)
            elif obj.FilterType == "Object Name":
                for o in baseset:
                    if not ("Spreadsheet" in Draft.getType(o)):
                        if obj.Filter:
                            if obj.Filter in o.Label:
                                result.append(o)
                        else:
                            result.append(o)
        return result
    def getCells(self,obj,spreadsheet):
        "returns a list of cells controlled by this controller"
        cells = []
        if obj.BaseCell:
            if obj.DataType == "Count":
                # NOTE(review): returns the bare cell string instead of a
                # list here; callers that extend() with the result will get
                # individual characters. Verify against getControlledCells.
                return obj.BaseCell
            for i in range(len(self.getDataSet(obj))):
                # get the correct cell key
                c,r = spreadsheet.Proxy.splitKey(obj.BaseCell)
                if obj.Direction == "Horizontal":
                    c = c.lower()
                    c = "abcdefghijklmnopqrstuvwxyz".index(c)
                    c += i
                    c = "abcdefghijklmnopqrstuvwxyz"[c]
                else:
                    r = int(r) + i
                cells.append(c+str(r))
        return cells
    def setCells(self,obj,spreadsheet):
        "Fills the controlled cells of the given spreadsheet"
        if obj.BaseCell:
            dataset = self.getDataSet(obj)
            if obj.DataType == "Count":
                if spreadsheet.Proxy.isKey(obj.BaseCell):
                    try:
                        setattr(spreadsheet.Proxy,obj.BaseCell,len(dataset))
                    except:
                        print "Spreadsheet: Error counting objects"
            elif obj.Data:
                for i in range(len(dataset)):
                    # get the correct cell key
                    c,r = spreadsheet.Proxy.splitKey(obj.BaseCell)
                    if obj.Direction == "Horizontal":
                        c = c.lower()
                        c = "abcdefghijklmnopqrstuvwxyz".index(c)
                        c += i
                        c = "abcdefghijklmnopqrstuvwxyz"[c]
                    else:
                        r = int(r) + i
                    cell = c+str(r)
                    if DEBUG: print "auto setting cell ",cell
                    if spreadsheet.Proxy.isKey(cell):
                        # get the contents
                        args = obj.Data.split(".")
                        value = dataset[i]
                        # walk dotted attribute paths, e.g. "Shape.Volume"
                        for arg in args:
                            print arg
                            if hasattr(value,arg):
                                value = getattr(value,arg)
                        try:
                            if isinstance(value,float) or isinstance(value,int):
                                pass
                            else:
                                # store a sanitized string representation
                                value = str(value)
                                value = ''.join([ c for c in value if c not in ('<','>',':')])
                            setattr(spreadsheet.Proxy,cell,value)
                            if DEBUG: print "setting cell ",cell," to value ",value
                        except:
                            print "Spreadsheet: Error retrieving property "+obj.Data+" from object "+dataset[i].Name
class ViewProviderSpreadsheetController:
    "A view provider for the spreadsheet cell controller"

    def __init__(self, vobj):
        # register this provider as the view object's proxy
        vobj.Proxy = self

    def getIcon(self):
        # importing Spreadsheet_rc presumably registers the compiled Qt
        # resources holding the icon -- TODO confirm
        import Spreadsheet_rc
        return ":/icons/SpreadsheetController.svg"
class SpreadsheetPropertyController:
    "A spreadsheet property controller object"
    def __init__(self,obj):
        obj.Proxy = self
        self.Type = "SpreadsheetPropertyController"
        obj.addProperty("App::PropertyEnumeration","TargetType","Base","The type of item to control")
        obj.addProperty("App::PropertyLink","TargetObject","Base","The object that must be controlled")
        obj.addProperty("App::PropertyString","TargetProperty","Base","The property or constraint of the target object to control")
        obj.addProperty("App::PropertyString","Cell","Base","The cell that contains the value to apply to the property")
        obj.TargetType = ["Property","Constraint"]
    def execute(self,obj):
        pass
    def compute(self,obj):
        # Push the value of the linked spreadsheet cell into the target
        # property or sketch constraint. obj.InList[0] is taken to be the
        # owning spreadsheet -- presumably guaranteed by how controllers
        # are created; verify.
        if obj.Cell and obj.TargetObject and obj.TargetProperty and obj.InList:
            sp = obj.InList[0]
            import Draft
            if Draft.getType(sp) == "Spreadsheet":
                try:
                    value = getattr(sp.Proxy,obj.Cell)
                except:
                    if DEBUG: print "No value for cell ",obj.Cell," in spreadsheet."
                    return
                if obj.TargetType == "Property":
                    # walk dotted property paths, e.g. "Placement.Base.x"
                    b = obj.TargetObject
                    props = obj.TargetProperty.split(".")
                    for p in props:
                        if hasattr(b,p):
                            if p != props[-1]:
                                b = getattr(b,p)
                        else:
                            return
                    try:
                        setattr(b,p,value)
                        FreeCAD.ActiveDocument.recompute()
                        if DEBUG: print "setting property ",obj.TargetProperty, " of object ",obj.TargetObject.Name, " to ",value
                    except:
                        if DEBUG: print "unable to set property ",obj.TargetProperty, " of object ",obj.TargetObject.Name, " to ",value
                else:
                    # "Constraint" mode: only sketches are supported
                    if Draft.getType(obj.TargetObject) == "Sketch":
                        if obj.TargetProperty.isdigit():
                            # try setting by constraint id
                            try:
                                c = int(obj.TargetProperty)
                                obj.TargetObject.setDatum(c,float(value))
                                FreeCAD.ActiveDocument.recompute()
                                if DEBUG: print "setting constraint ",obj.TargetProperty, " of object ",obj.TargetObject.Name, " to ",value
                            except:
                                if DEBUG: print "unable to set constraint ",obj.TargetProperty, " of object ",obj.TargetObject.Name, " to ",value
                        else:
                            # try setting by constraint name
                            try:
                                obj.TargetObject.setDatum(obj.TargetProperty,float(value))
                                FreeCAD.ActiveDocument.recompute()
                                if DEBUG: print "setting constraint ",obj.TargetProperty, " of object ",obj.TargetObject.Name, " to ",value
                            except:
                                if DEBUG: print "unable to set constraint ",obj.TargetProperty, " of object ",obj.TargetObject.Name, " to ",value
    def __getstate__(self):
        return self.Type
    def __setstate__(self,state):
        if state:
            self.Type = state
    def onChanged(self,obj,prop):
        pass
class ViewProviderSpreadsheetPropertyController:
    "A view provider for the spreadsheet property controller"

    def __init__(self, vobj):
        # register this provider as the view object's proxy
        vobj.Proxy = self

    def getIcon(self):
        # importing Spreadsheet_rc presumably registers the compiled Qt
        # resources holding the icon -- TODO confirm
        import Spreadsheet_rc
        return ":/icons/SpreadsheetPropertyController.svg"
class SpreadsheetView(QtGui.QWidget):
"A spreadsheet viewer for FreeCAD"
def __init__(self,spreadsheet=None):
from DraftTools import translate
QtGui.QWidget.__init__(self)
self.setWindowTitle(str(translate("Spreadsheet","Spreadsheet")))
self.setObjectName("Spreadsheet viewer")
self.verticalLayout = QtGui.QVBoxLayout(self)
self.doNotChange = False
# add editor line
self.horizontalLayout = QtGui.QHBoxLayout()
self.label = QtGui.QLabel(self)
self.label.setMinimumSize(QtCore.QSize(82, 0))
self.label.setText(translate("Spreadsheet","Cell")+" A1 :")
self.lineEdit = QtGui.QLineEdit(self)
self.applyButton = QtGui.QPushButton(self)
self.applyButton.setText(translate("Spreadsheet","Apply"))
self.applyButton.setIcon(QtGui.QIcon(":/icons/edit_OK.svg"))
self.applyButton.setToolTip(translate("Spreadsheet","Apply the changes to the current cell"))
self.wipeButton = QtGui.QPushButton(self)
self.wipeButton.setText(translate("Spreadsheet","Delete"))
self.wipeButton.setIcon(QtGui.QIcon(":/icons/process-stop.svg"))
self.wipeButton.setToolTip(translate("Spreadsheet","Deletes the contents of the current cell"))
self.computeButton = QtGui.QPushButton(self)
self.computeButton.setText(translate("Spreadsheet","Compute"))
self.computeButton.setIcon(QtGui.QIcon(":/icons/view-refresh.svg"))
self.computeButton.setToolTip(translate("Spreadsheet","Updates the values handled by controllers"))
self.horizontalLayout.addWidget(self.label)
self.horizontalLayout.addWidget(self.lineEdit)
self.horizontalLayout.addWidget(self.applyButton)
self.horizontalLayout.addWidget(self.wipeButton)
self.horizontalLayout.addWidget(self.computeButton)
self.verticalLayout.addLayout(self.horizontalLayout)
# add table
self.table = QtGui.QTableWidget(30,26,self)
for i in range(26):
ch = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[i]
self.table.setHorizontalHeaderItem(i, QtGui.QTableWidgetItem(ch))
self.verticalLayout.addWidget(self.table)
self.table.setCurrentCell(0,0)
self.spreadsheet = spreadsheet
self.update()
QtCore.QObject.connect(self.table, QtCore.SIGNAL("cellChanged(int,int)"), self.changeCell)
QtCore.QObject.connect(self.table, QtCore.SIGNAL("currentCellChanged(int,int,int,int)"), self.setEditLine)
QtCore.QObject.connect(self.lineEdit, QtCore.SIGNAL("returnPressed()"), self.getEditLine)
QtCore.QObject.connect(self.applyButton, QtCore.SIGNAL("clicked()"), self.getEditLine)
QtCore.QObject.connect(self.wipeButton, QtCore.SIGNAL("clicked()"), self.wipeCell)
QtCore.QObject.connect(self.computeButton, QtCore.SIGNAL("clicked()"), self.recompute)
def closeEvent(self, event):
#if DEBUG: print "Closing spreadsheet view"
if self.spreadsheet:
# before deleting this view, we remove the reference to it in the object
if hasattr(self.spreadsheet,"ViewObject"):
if self.spreadsheet.ViewObject:
if hasattr(self.spreadsheet.ViewObject.Proxy,"editor"):
del self.spreadsheet.ViewObject.Proxy.editor
if FreeCADGui:
if FreeCADGui.ActiveDocument:
FreeCADGui.ActiveDocument.resetEdit()
    def update(self):
        "updates the cells with the contents of the spreadsheet"
        if self.spreadsheet:
            # cells written by a controller (controlled) and cells a
            # controller reads from (controlling) get colored backgrounds
            controlled = self.spreadsheet.Proxy.getControlledCells(self.spreadsheet)
            controlling = self.spreadsheet.Proxy.getControllingCells(self.spreadsheet)
            for cell in self.spreadsheet.Proxy._cells.keys():
                # "Type" and "Object" are bookkeeping keys, not real cells
                if not cell in ["Type","Object"]:
                    # split a key like "b12" into column index and 0-based row
                    c,r = self.spreadsheet.Proxy.splitKey(cell)
                    c = "abcdefghijklmnopqrstuvwxyz".index(c)
                    r = int(str(r))-1
                    content = getattr(self.spreadsheet.Proxy,cell)
                    if self.spreadsheet.Proxy.isFunction(cell):
                        # programmatic write must not be mistaken for a user
                        # edit: changeCell() checks and clears this flag
                        self.doNotChange = True
                    if content == None:
                        content = ""
                    if DEBUG: print "Updating ",cell," to ",content
                    # reuse the existing item if the cell was filled before
                    if self.table.item(r,c):
                        self.table.item(r,c).setText(str(content))
                    else:
                        self.table.setItem(r,c,QtGui.QTableWidgetItem(str(content)))
                    # red pattern = controlled cell, blue = controlling cell,
                    # default brush = ordinary cell
                    if cell in controlled:
                        brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
                        brush.setStyle(QtCore.Qt.Dense6Pattern)
                        if self.table.item(r,c):
                            self.table.item(r,c).setBackground(brush)
                    elif cell in controlling:
                        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
                        brush.setStyle(QtCore.Qt.Dense6Pattern)
                        if self.table.item(r,c):
                            self.table.item(r,c).setBackground(brush)
                    else:
                        brush = QtGui.QBrush()
                        if self.table.item(r,c):
                            self.table.item(r,c).setBackground(brush)
def changeCell(self,r,c,value=None):
"changes the contens of a cell"
if self.doNotChange:
if DEBUG: print "DoNotChange flag is set"
self.doNotChange = False
elif self.spreadsheet:
key = "abcdefghijklmnopqrstuvwxyz"[c]+str(r+1)
if value == None:
value = self.table.item(r,c).text()
if value == "":
if DEBUG: print "Wiping "+key
if self.table.item(r,c):
self.table.item(r,c).setText("")
if key in self.spreadsheet.Proxy._cells.keys():
del self.spreadsheet.Proxy._cells[key]
else:
if DEBUG: print "Changing "+key+" to "+value
# store the entry as best as possible
try:
v = int(value)
except:
try:
v = float(value)
except:
try:
v = v = str(value)
except:
v = value
setattr(self.spreadsheet.Proxy,key,v)
self.update()
# TODO do not update the whole spreadsheet when only one cell has changed:
# use the _relations table and recursively update only cells based on this one
self.setEditLine(r,c)
def setEditLine(self,r,c,orr=None,orc=None):
"copies the contents of the active cell to the edit line"
if self.spreadsheet:
c = "abcdefghijklmnopqrstuvwxyz"[c]
r = r+1
if DEBUG: print "Active cell "+c+str(r)
from DraftTools import translate
self.label.setText(str(translate("Spreadsheet","Cell"))+" "+c.upper()+str(r)+" :")
content = self.spreadsheet.Proxy.getFunction(c+str(r))
if content == None:
content = ""
self.lineEdit.setText(str(content))
def getEditLine(self):
"called when something has been entered in the edit line"
txt = str(self.lineEdit.text())
if DEBUG: print "Text edited ",txt
r = self.table.currentRow()
c = self.table.currentColumn()
self.changeCell(r,c,txt)
    def wipeCell(self):
        # Clear the current cell by applying an empty edit line; reuses
        # getEditLine() so the wipe path in changeCell() is exercised.
        if DEBUG: print "Wiping cell"
        self.lineEdit.setText("")
        self.getEditLine()
    def recompute(self):
        # Ask the spreadsheet proxy to re-evaluate its controllers, then
        # refresh the visible table contents.
        if self.spreadsheet:
            self.spreadsheet.Proxy.recompute(self.spreadsheet)
            self.update()
class _Command_Spreadsheet_Create:
    "the Spreadsheet_Create FreeCAD command"
    def GetResources(self):
        # icon, menu entry and tooltip consumed by FreeCAD's command framework
        return {'Pixmap' : 'Spreadsheet',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Spreadsheet_Create","Spreadsheet"),
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Spreadsheet_Create","Adds a spreadsheet object to the active document")}
    def Activated(self):
        # drive the creation through doCommand so the steps are echoed to the
        # Python console and picked up by macro recording; the whole sequence
        # is wrapped in one undo transaction
        from DraftTools import translate
        FreeCAD.ActiveDocument.openTransaction(str(translate("Spreadsheet","Create Spreadsheet")))
        FreeCADGui.doCommand("import Spreadsheet")
        FreeCADGui.doCommand("s = Spreadsheet.makeSpreadsheet()")
        FreeCADGui.doCommand("FreeCAD.ActiveDocument.recompute()")
        FreeCADGui.doCommand("FreeCADGui.ActiveDocument.setEdit(s.Name,0)")
        FreeCAD.ActiveDocument.commitTransaction()
class _Command_Spreadsheet_Controller:
    "the Spreadsheet_Controller FreeCAD command"
    def GetResources(self):
        # icon, menu entry and tooltip consumed by FreeCAD's command framework
        return {'Pixmap' : 'SpreadsheetController',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Spreadsheet_Controller","Add controller"),
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Spreadsheet_Controller","Adds a cell controller to a selected spreadsheet")}
    def IsActive(self):
        # the command is available whenever something is selected
        return bool(FreeCADGui.Selection.getSelection())
    def Activated(self):
        # attach a cell controller to the selected spreadsheet; the selection
        # is fetched once and reused (it cannot change during Activated)
        import Draft
        sel = FreeCADGui.Selection.getSelection()
        if Draft.getType(sel[0]) == "Spreadsheet":
            from DraftTools import translate
            n = sel[0].Name
            FreeCAD.ActiveDocument.openTransaction(str(translate("Spreadsheet","Add controller")))
            FreeCADGui.doCommand("import Spreadsheet")
            FreeCADGui.doCommand("Spreadsheet.makeSpreadsheetController(FreeCAD.ActiveDocument."+n+")")
            FreeCAD.ActiveDocument.commitTransaction()
            FreeCAD.ActiveDocument.recompute()
class _Command_Spreadsheet_PropertyController:
    # docstring fixed: it previously claimed to be Spreadsheet_Controller
    "the Spreadsheet_PropertyController FreeCAD command"
    def GetResources(self):
        # icon, menu entry and tooltip consumed by FreeCAD's command framework
        return {'Pixmap' : 'SpreadsheetPropertyController',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Spreadsheet_PropertyController","Add property controller"),
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Spreadsheet_PropertyController","Adds a property controller to a selected spreadsheet")}
    def IsActive(self):
        # the command is available whenever something is selected
        return bool(FreeCADGui.Selection.getSelection())
    def Activated(self):
        import Draft
        from DraftTools import translate
        sel = FreeCADGui.Selection.getSelection()
        if (len(sel) == 1) and Draft.getType(sel[0]) == "Spreadsheet":
            # one spreadsheet selected: create a fresh property controller
            n = sel[0].Name
            FreeCAD.ActiveDocument.openTransaction(str(translate("Spreadsheet","Add property controller")))
            FreeCADGui.doCommand("import Spreadsheet")
            FreeCADGui.doCommand("Spreadsheet.makeSpreadsheetPropertyController(FreeCAD.ActiveDocument."+n+")")
            FreeCAD.ActiveDocument.commitTransaction()
            FreeCAD.ActiveDocument.recompute()
        elif (len(sel) == 2):
            # spreadsheet plus target object selected, in either order
            if (Draft.getType(sel[0]) == "Spreadsheet") and (Draft.getType(sel[1]) == "SpreadsheetPropertyController"):
                s = sel[0].Name
                o = sel[1].Name
            elif (Draft.getType(sel[1]) == "Spreadsheet") and (Draft.getType(sel[0]) == "SpreadsheetPropertyController"):
                s = sel[1].Name
                o = sel[0].Name
            else:
                return
            FreeCAD.ActiveDocument.openTransaction(str(translate("Spreadsheet","Add property controller")))
            FreeCADGui.doCommand("import Spreadsheet")
            FreeCADGui.doCommand("Spreadsheet.makeSpreadsheetPropertyController(FreeCAD.ActiveDocument."+s+",FreeCAD.ActiveDocument."+o+")")
            FreeCAD.ActiveDocument.commitTransaction()
            FreeCAD.ActiveDocument.recompute()
def makeSpreadsheet():
    """makeSpreadsheet(): create a Spreadsheet feature in the active
    document (with a view provider when the GUI is up) and return it."""
    sheet = FreeCAD.ActiveDocument.addObject("App::FeaturePython", "Spreadsheet")
    Spreadsheet(sheet)
    if FreeCAD.GuiUp:
        ViewProviderSpreadsheet(sheet.ViewObject)
    return sheet
def makeSpreadsheetController(spreadsheet,cell=None,direction=None):
    """makeSpreadsheetController(spreadsheet,[cell,direction]): create a
    cell controller, register it on the given spreadsheet and return it.
    cell can be a starting cell such as "A5", and direction can be
    "Horizontal" or "Vertical"."""
    ctrl = FreeCAD.ActiveDocument.addObject("App::FeaturePython", "CellController")
    SpreadsheetController(ctrl)
    if FreeCAD.GuiUp:
        ViewProviderSpreadsheetController(ctrl.ViewObject)
    # FreeCAD list properties are returned by value, so the extended list
    # must be assigned back to the property
    spreadsheet.Controllers = spreadsheet.Controllers + [ctrl]
    if cell:
        ctrl.BaseCell = cell
    if direction:
        ctrl.Direction = direction
    return ctrl
def makeSpreadsheetPropertyController(spreadsheet,object=None,prop=None,cell=None):
    """makeSpreadsheetPropertyController(spreadsheet,[object,prop,cell]):
    create a property controller, register it on the given spreadsheet and
    return it. prop is a property name such as "Length" or "Proxy.Length",
    cell a cell address such as "B6".

    NOTE(review): the *object* argument is accepted but never used in this
    function body -- confirm whether the target object is meant to be
    assigned here or is configured elsewhere.
    """
    pctrl = FreeCAD.ActiveDocument.addObject("App::FeaturePython", "PropertyController")
    SpreadsheetPropertyController(pctrl)
    if FreeCAD.GuiUp:
        ViewProviderSpreadsheetPropertyController(pctrl.ViewObject)
    # list properties are returned by value: assign the extended list back
    spreadsheet.Controllers = spreadsheet.Controllers + [pctrl]
    if cell:
        pctrl.Cell = cell
    if prop:
        pctrl.Property = prop
    return pctrl
def addSpreadsheetView(view):
    """addSpreadsheetView(view): embed the given spreadsheet view as a
    sub-window of the FreeCAD MDI area and make it the active window."""
    if not FreeCAD.GuiUp:
        return
    import Spreadsheet_rc
    mdi = FreeCADGui.getMainWindow().findChild(QtGui.QMdiArea)
    sub = mdi.addSubWindow(view)
    sub.setWindowIcon(QtGui.QIcon(":/icons/Spreadsheet.svg"))
    sub.show()
    mdi.setActiveSubWindow(sub)
def open(filename):
    """FreeCAD importer entry point: open *filename* (a CSV file) in a new
    document named after the file. Shadowing the builtin open() is the
    convention FreeCAD uses for importer modules; the builtin remains
    available here as pyopen."""
    import os
    name = os.path.splitext(os.path.basename(filename))[0]
    doc = FreeCAD.newDocument(name)
    FreeCAD.ActiveDocument = doc
    read(filename)
    doc.recompute()
    return doc
def insert(filename,docname):
    """FreeCAD importer entry point: import the CSV *filename* into the
    document named *docname*, creating that document if it does not exist."""
    try:
        target = FreeCAD.getDocument(docname)
    except NameError:
        # no such document yet -- create it
        target = FreeCAD.newDocument(docname)
    FreeCAD.ActiveDocument = target
    read(filename)
    target.recompute()
    return target
def read(filename):
"creates a spreadsheet with the contents of a csv file"
sp = makeSpreadsheet()
import csv
with pyopen(filename, 'rb') as csvfile:
csvfile = csv.reader(csvfile)
rn = 1
for row in csvfile:
cn = 0
for c in row[:26]:
cl = "abcdefghijklmnopqrstuvwxyz"[cn]
#print "setting ",cl+str(rn)," ",c
try:
c = int(c)
except ValueError:
try:
c = float(c)
except ValueError:
c = str(c)
setattr(sp.Proxy,cl+str(rn),c)
cn += 1
rn += 1
print "successfully imported ",filename
def export(exportList,filename):
"called when freecad exports a csv file"
import csv, Draft
if not exportList:
print "Spreadsheet: Nothing to export"
return
obj = exportList[0]
if Draft.getType(obj) != "Spreadsheet":
print "Spreadhseet: The selected object is not a spreadsheet"
return
if not obj.Proxy._cells:
print "Spreadsheet: The selected spreadsheet contains no cell"
return
numcols = ("abcdefghijklmnopqrstuvwxyz".index(str(obj.Proxy.cols[-1])))+1
numrows = int(obj.Proxy.rows[-1])
with pyopen(filename, 'wb') as csvfile:
csvfile = csv.writer(csvfile)
for i in range(numrows):
r = []
for j in range(numcols):
key = "abcdefghijklmnopqrstuvwxyz"[j]+str(i+1)
if key in obj.Proxy._cells.keys():
r.append(str(obj.Proxy.getFunction(key)))
else:
r.append("")
csvfile.writerow(r)
print "successfully exported ",filename
#FreeCADGui.addCommand('Spreadsheet_Create',_Command_Spreadsheet_Create())
#FreeCADGui.addCommand('Spreadsheet_Controller',_Command_Spreadsheet_Controller())
#FreeCADGui.addCommand('Spreadsheet_PropertyController',_Command_Spreadsheet_PropertyController())
| lgpl-2.1 |
LomaxRx/service-seeker | api/models.py | 1 | 6016 | from django.db import models
from datetime import datetime
class Organization(models.Model):
    """A service-providing organization; may be nested under a parent."""
    name = models.CharField('Name', max_length=255)
    description = models.TextField('Description', blank=True)
    email = models.CharField(max_length=255, blank=True)
    url = models.CharField(max_length=255, blank=True)
    # self-referential FK; null=True permits top-level organizations
    parent_organization = models.ForeignKey('self', verbose_name='Parent Organization', null=True)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class Service(models.Model):
    """A service offered by an Organization, categorized/tagged and
    available at one or more Locations."""
    organization = models.ForeignKey('Organization', verbose_name='Organization', related_name='services')
    category = models.ForeignKey('Category', verbose_name='Category', related_name='services')
    tags = models.ManyToManyField('Tag', verbose_name='Tags', related_name='services')
    locations = models.ManyToManyField('Location', verbose_name='Locations', related_name='services')
    name = models.CharField('Name', max_length=255)
    description = models.TextField('Description', blank=True)
    # NOTE(review): verbose name "Appication Process" looks like a typo for
    # "Application Process" -- fixing it would change the admin label only
    application_process = models.TextField('Appication Process', blank=True)
    required_documents = models.TextField('Required Documents', blank=True)
    url = models.CharField(max_length=255, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class Location(models.Model):
    """A physical address belonging to an Organization, with optional
    geographic coordinates and transit directions."""
    organization = models.ForeignKey('Organization', verbose_name='Organization', related_name='locations')
    name = models.CharField('Name', max_length=255, blank=True)
    address_1 = models.CharField('Address 1', max_length=255)
    address_2 = models.CharField('Address 2', max_length=255, blank=True)
    city = models.CharField('City', max_length=255)
    country = models.CharField('Country', max_length=255)
    postal_code = models.CharField('Postal Code', max_length=10)
    # two-letter code, e.g. "NY"
    state_province = models.CharField('State/Province', max_length=2)
    # coordinates are optional: both blank (forms) and null (database)
    latitude = models.DecimalField('Latitude', max_digits=18, decimal_places=6, blank=True, null=True)
    longitude = models.DecimalField('Longitude', max_digits=18, decimal_places=6, blank=True, null=True)
    # NOTE(review): verbose name "Transporation" is a typo for
    # "Transportation" -- fixing it would change the admin label only
    transportation = models.TextField('Transporation', blank=True)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class Contact(models.Model):
    """A named person at an Organization, optionally tied to a primary
    service and work location."""
    organization = models.ForeignKey('Organization', verbose_name='Organization', related_name='contacts')
    # NOTE(review): these two FKs are blank=True without null=True, so forms
    # allow them empty but the database column is NOT NULL -- confirm intent
    service = models.ForeignKey('Service', verbose_name='Primary Service', blank=True, related_name='contacts')
    location = models.ForeignKey('Location', verbose_name='Primary Work Location', blank=True, related_name='contacts')
    email = models.CharField(max_length=255, blank=True)
    name = models.CharField('Full Name', max_length=255)
    title = models.CharField('Title', max_length=255)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class Hours(models.Model):
    """Opening hours for a service at a location, optionally recurring only
    on the n-th occurrence of the weekday each month ("every")."""
    # (stored value, human label) pairs for the weekday choice field
    days = (
        ('mon', 'Monday'),
        ('tue', 'Tuesday'),
        ('wed', 'Wednesday'),
        ('thu', 'Thursday'),
        ('fri', 'Friday'),
        ('sat', 'Saturday'),
        ('sun', 'Sunday')
    )
    # which occurrence of the weekday within the month; blank = every week
    intervals = (
        ('1st', 'First'),
        ('2nd', 'Second'),
        ('3rd', 'Third'),
        ('4th', 'Fourth')
    )
    service = models.ForeignKey('Service', related_name='hours')
    location = models.ForeignKey('Location', related_name='hours')
    opens_at = models.TimeField('Opens at')
    closes_at = models.TimeField('Closes at')
    every = models.CharField('Occurs Every', max_length=255, choices=intervals, blank=True)
    weekday = models.CharField('Weekday', max_length=255, choices=days)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class Phone(models.Model):
    """A phone number attached to an organization and optionally to a
    specific contact or service."""
    # (stored value, human label) pairs for the type choice field
    types = (
        ('voice', 'Voice'),
        ('text', 'Text'),
        ('fax', 'Fax'),
        ('tty', 'TTY'),
        ('hotline', 'Hotline')
    )
    # NOTE(review): contact/service are blank=True without null=True -- the
    # form allows them empty but the DB column is NOT NULL; confirm intent
    contact = models.ForeignKey('Contact', blank=True, related_name="phones")
    organization = models.ForeignKey('Organization', related_name="phones")
    service = models.ForeignKey('Service', blank=True, related_name="phones")
    number = models.CharField(max_length=255)
    primary_phone = models.BooleanField('Organization\'s Primary Phone', default=False)
    extension = models.CharField(max_length=255, blank=True)
    # field name shadows the builtin type(); kept because the column name is
    # part of the model's public interface
    type = models.CharField(max_length=255, choices=types)
    vanity_number = models.CharField('Vanity Number', max_length=255, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class Category(models.Model):
    """A taxonomy entry for services; "level" stores its depth/tier."""
    level = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
class Eligibility(models.Model):
    """Eligibility constraints for a service (age, income, household...).
    Range fields hold "min,max" pairs as comma-separated integers."""
    service = models.ForeignKey('Service', related_name='eligibilities')
    # NOTE(review): CommaSeparatedIntegerField was deprecated in later
    # Django releases -- confirm the Django version pinned by this project
    age_range = models.CommaSeparatedIntegerField('Age Range', max_length=7, blank=True)
    disability = models.TextField('Disability', blank=True)
    gender = models.CharField('Gender', max_length=255, blank=True)
    household = models.CharField('Household', max_length=255, blank=True)
    monthly_income_range = models.CommaSeparatedIntegerField('Monthly Income Range', max_length=255, blank=True)
    percent_poverty_range = models.CommaSeparatedIntegerField('Percent Poverty Range', max_length=255, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class Tag(models.Model):
    """A free-form label applied to services; "level" mirrors Category."""
    level = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
| mit |
sajeeshcs/nested_quota_latest | nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py | 5 | 4397 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.contrib import cloudpipe_update as clup_v2
from nova.api.openstack.compute.plugins.v3 import cloudpipe as clup_v21
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
# Two canned networks shared by the db stubs below; the tests mutate them
# in place through fake_network_update().
fake_networks = [fake_network.fake_network(1),
                 fake_network.fake_network(2)]
def fake_project_get_networks(context, project_id, associate=True):
    """Stub for db.project_get_networks: always return the two fake networks."""
    return fake_networks
def fake_network_update(context, network_id, values):
    """Stub for db.network_update: apply *values* to the fake network whose
    id matches *network_id* (other networks are left untouched)."""
    for network in fake_networks:
        if network['id'] != network_id:
            continue
        for key, val in values.items():
            network[key] = val
class CloudpipeUpdateTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-cloudpipe configure-project API.

    The db layer is stubbed so vpn address/port updates land in the
    module-level fake_networks list, where they can be asserted on.
    """
    # the v2.1 API reports schema errors as ValidationError
    bad_request = exception.ValidationError

    def setUp(self):
        super(CloudpipeUpdateTestV21, self).setUp()
        self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
        self.stubs.Set(db, "network_update", fake_network_update)
        self._setup()

    def _setup(self):
        # overridden by the v2 subclass to use the legacy controller
        self.controller = clup_v21.CloudpipeController()

    def _check_status(self, expected_status, res, controller_method):
        # parameter renamed from the misspelled "controller_methord";
        # v2.1 controllers expose the expected status on the method itself
        self.assertEqual(expected_status, controller_method.wsgi_code)

    def test_cloudpipe_configure_project(self):
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-cloudpipe/configure-project')
        body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
        result = self.controller.update(req, 'configure-project',
                                        body=body)
        self._check_status(202, result, self.controller.update)
        self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
        self.assertEqual(fake_networks[0]['vpn_public_port'], 222)

    def test_cloudpipe_configure_project_bad_url(self):
        # any id other than "configure-project" must be rejected
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-cloudpipe/configure-projectx')
        body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req,
                          'configure-projectx', body=body)

    def test_cloudpipe_configure_project_bad_data(self):
        # unknown key in the body fails schema validation
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-cloudpipe/configure-project')
        body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}}
        self.assertRaises(self.bad_request,
                          self.controller.update, req,
                          'configure-project', body=body)

    def test_cloudpipe_configure_project_bad_vpn_port(self):
        # non-integer vpn_port fails schema validation
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-cloudpipe/configure-project')
        body = {"configure_project": {"vpn_ipxx": "1.2.3.4",
                                      "vpn_port": "foo"}}
        self.assertRaises(self.bad_request,
                          self.controller.update, req,
                          'configure-project', body=body)

    def test_cloudpipe_configure_project_vpn_port_with_empty_string(self):
        # empty-string vpn_port fails schema validation
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-cloudpipe/configure-project')
        body = {"configure_project": {"vpn_ipxx": "1.2.3.4",
                                      "vpn_port": ""}}
        self.assertRaises(self.bad_request,
                          self.controller.update, req,
                          'configure-project', body=body)
class CloudpipeUpdateTestV2(CloudpipeUpdateTestV21):
    """Run the same tests against the legacy v2 extension controller."""
    # the v2 API reports bad input as a plain HTTP 400
    bad_request = webob.exc.HTTPBadRequest

    def _setup(self):
        self.controller = clup_v2.CloudpipeUpdateController()

    def _check_status(self, expected_status, res, controller_method):
        # parameter renamed from the misspelled "controller_methord";
        # v2 returns a webob response, so the status lives on the response
        self.assertEqual(expected_status, res.status_int)
| apache-2.0 |
moreati/numpy | numpy/random/tests/test_regression.py | 78 | 4513 | from __future__ import division, absolute_import, print_function
import sys
from numpy.testing import (TestCase, run_module_suite, assert_,
assert_array_equal, assert_raises)
from numpy import random
from numpy.compat import long
import numpy as np
class TestRegression(TestCase):
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) # Check for 64-bit systems
for arg in args:
assert_(np.random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
def test_permutation_longs(self):
np.random.seed(1234)
a = np.random.permutation(12)
np.random.seed(1234)
b = np.random.permutation(long(12))
assert_array_equal(a, b)
def test_randint_range(self):
# Test for ticket #1690
lmax = np.iinfo('l').max
lmin = np.iinfo('l').min
try:
random.randint(lmin, lmax)
except:
raise AssertionError
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
np.random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = np.random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
np.random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
np.random.multivariate_normal([0], [[0]], size=1)
np.random.multivariate_normal([0], [[0]], size=np.int_(1))
np.random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
np.random.seed(1234567890)
x = np.random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
np.random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = np.random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, np.random.choice, a, p=probs*0.9)
if __name__ == "__main__":
    # allow running this module directly as a test script
    run_module_suite()
| bsd-3-clause |
punchagan/zulip | zerver/webhooks/ansibletower/tests.py | 4 | 6887 | from zerver.lib.test_classes import WebhookTestCase
class AnsibletowerHookTests(WebhookTestCase):
    """End-to-end tests for the AWX/Ansible Tower webhook integration.

    Each test posts the JSON fixture named in check_webhook()'s first
    argument and asserts the exact topic and Markdown message that Zulip
    should produce.  The expected strings are byte-exact fixtures: do not
    reformat them.
    """

    STREAM_NAME = "ansibletower"
    URL_TEMPLATE = "/api/v1/external/ansibletower?api_key={api_key}&stream={stream}"
    FIXTURE_DIR_NAME = "ansibletower"

    def test_ansibletower_project_update_successful_message(self) -> None:
        """
        Tests if ansibletower project update successful notification is handled correctly
        """
        expected_topic = "AWX - Project Update"
        expected_message = (
            "Project Update: [#2677 AWX - Project Update]"
            "(http://awx.example.co.uk/#/jobs/project/2677) was successful."
        )
        self.check_webhook("project_update_successful", expected_topic, expected_message)

    def test_ansibletower_project_update_failed_message(self) -> None:
        """
        Tests if ansibletower project update failed notification is handled correctly
        """
        expected_topic = "AWX - Project Update"
        expected_message = (
            "Project Update: [#2678 AWX - Project Update]"
            "(http://awx.example.co.uk/#/jobs/project/2678) failed."
        )
        self.check_webhook("project_update_failed", expected_topic, expected_message)

    def test_ansibletower_job_successful_multiple_hosts_message(self) -> None:
        """
        Tests if ansibletower job successful multiple hosts notification is handled correctly
        """
        expected_topic = "System - Deploy - Zabbix Agent"
        expected_message = """
Job: [#2674 System - Deploy - Zabbix Agent](http://awx.example.co.uk/#/jobs/playbook/2674) was successful:
* chat.example.co.uk: Success
* devops.example.co.uk: Success
* gitlab.example.co.uk: Success
* ipa.example.co.uk: Success
* mail.example.co.uk: Success
""".strip()
        self.check_webhook("job_successful_multiple_hosts", expected_topic, expected_message)

    def test_ansibletower_job_successful_message(self) -> None:
        """
        Tests if ansibletower job successful notification is handled correctly
        """
        expected_topic = "System - Deploy - Zabbix Agent"
        expected_message = """
Job: [#2674 System - Deploy - Zabbix Agent](http://awx.example.co.uk/#/jobs/playbook/2674) was successful:
* chat.example.co.uk: Success
""".strip()
        self.check_webhook("job_successful", expected_topic, expected_message)

    def test_ansibletower_nine_job_successful_message(self) -> None:
        """
        Test to see if awx/ansibletower 9.x.x job successful notifications are
        handled just as successfully as prior to 9.x.x.
        """
        # AWX 9.x changed the payload format; this fixture covers that shape
        expected_topic = "Demo Job Template"
        expected_message = """
Job: [#1 Demo Job Template](https://towerhost/#/jobs/playbook/1) was successful:
* localhost: Success
""".strip()
        self.check_webhook("job_complete_successful_awx_9.1.1", expected_topic, expected_message)

    def test_ansibletower_job_failed_message(self) -> None:
        """
        Tests if ansibletower job failed notification is handled correctly
        """
        expected_topic = "System - Updates - Ubuntu"
        expected_message = """
Job: [#2722 System - Updates - Ubuntu](http://awx.example.co.uk/#/jobs/playbook/2722) failed:
* chat.example.co.uk: Failed
""".strip()
        self.check_webhook("job_failed", expected_topic, expected_message)

    def test_ansibletower_job_failed_multiple_hosts_message(self) -> None:
        """
        Tests if ansibletower job failed notification is handled correctly
        """
        expected_topic = "System - Updates - Ubuntu"
        expected_message = """
Job: [#2722 System - Updates - Ubuntu](http://awx.example.co.uk/#/jobs/playbook/2722) failed:
* chat.example.co.uk: Failed
* devops.example.co.uk: Failed
* gitlab.example.co.uk: Failed
* ipa.example.co.uk: Failed
* mail.example.co.uk: Failed
""".strip()
        self.check_webhook("job_failed_multiple_hosts", expected_topic, expected_message)

    def test_ansibletower_inventory_update_successful_message(self) -> None:
        """
        Tests if ansibletower inventory update successful notification is handled correctly
        """
        expected_topic = "AWX - Inventory Update"
        expected_message = (
            "Inventory Update: [#2724 AWX - Inventory Update]"
            "(http://awx.example.co.uk/#/jobs/inventory/2724) was successful."
        )
        self.check_webhook("inventory_update_successful", expected_topic, expected_message)

    def test_ansibletower_inventory_update_failed_message(self) -> None:
        """
        Tests if ansibletower inventory update failed notification is handled correctly
        """
        expected_topic = "AWX - Inventory Update"
        expected_message = (
            "Inventory Update: [#2724 AWX - Inventory Update]"
            "(http://awx.example.co.uk/#/jobs/inventory/2724) failed."
        )
        self.check_webhook("inventory_update_failed", expected_topic, expected_message)

    def test_ansibletower_adhoc_command_successful_message(self) -> None:
        """
        Tests if ansibletower adhoc command successful notification is handled correctly
        """
        expected_topic = "shell: uname -r"
        expected_message = (
            "AdHoc Command: [#2726 shell: uname -r]"
            "(http://awx.example.co.uk/#/jobs/command/2726) was successful."
        )
        self.check_webhook("adhoc_command_successful", expected_topic, expected_message)

    def test_ansibletower_adhoc_command_failed_message(self) -> None:
        """
        Tests if ansibletower adhoc command failed notification is handled correctly
        """
        expected_topic = "shell: uname -r"
        expected_message = (
            "AdHoc Command: [#2726 shell: uname -r]"
            "(http://awx.example.co.uk/#/jobs/command/2726) failed."
        )
        self.check_webhook("adhoc_command_failed", expected_topic, expected_message)

    def test_ansibletower_system_job_successful_message(self) -> None:
        """
        Tests if ansibletower system job successful notification is handled correctly
        """
        expected_topic = "Cleanup Job Details"
        expected_message = (
            "System Job: [#2721 Cleanup Job Details]"
            "(http://awx.example.co.uk/#/jobs/system/2721) was successful."
        )
        self.check_webhook("system_job_successful", expected_topic, expected_message)

    def test_ansibletower_system_job_failed_message(self) -> None:
        """
        Tests if ansibletower system job failed notification is handled correctly
        """
        expected_topic = "Cleanup Job Details"
        expected_message = (
            "System Job: [#2721 Cleanup Job Details]"
            "(http://awx.example.co.uk/#/jobs/system/2721) failed."
        )
        self.check_webhook("system_job_failed", expected_topic, expected_message)
| apache-2.0 |
Iamthelaw/jedi-academy | hr_app/models/person.py | 1 | 2393 | from __future__ import unicode_literals
from django.conf import settings
from django.urls import reverse
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.db import models
from django.utils.translation import ugettext as _
from .thing import Planet
class Candidate(models.Model):
    """A person applying to the academy; becomes a padawan when accepted."""
    name = models.CharField(_('Имя'), max_length=255)
    planet = models.ForeignKey(
        Planet, verbose_name=_('Планета обитания'))
    age = models.PositiveSmallIntegerField(_('Возраст'))
    email = models.EmailField(_('Email'))
    # flipped to True by Jedi.add_padawan(); hidden from forms (editable=False)
    is_padawan = models.BooleanField(
        default=False, editable=False)

    def get_absolute_url(self):
        # canonical detail page (used by templates and the admin "view on site")
        return reverse('hr:candidate:detail', args=[self.pk])

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name', )
        verbose_name = _('Кандидат')
        verbose_name_plural = _('Кандидаты')
class Jedi(models.Model):
    """A Jedi teacher who trains candidates on a given planet."""
    name = models.CharField(_('Имя'), max_length=255)
    planet = models.ForeignKey(
        Planet, verbose_name=_('Планета'),
        help_text=_('Планета на которой джедай обучает кандидатов'))

    def add_padawan(self, candidate):
        # Idempotent enrolment: the candidate flag and acceptance e-mail are
        # only produced the first time the (jedi, candidate) pair is created.
        # NOTE(review): the 3-padawan limit lives in Padawan.clean(), which
        # does not run through get_or_create() -- confirm this bypass is
        # intentional.
        pdw, created = Padawan.objects.get_or_create(
            jedi=self, candidate=candidate)
        if created:
            candidate.is_padawan = True
            candidate.save()
            send_mail(
                _('Поздравляем!'),
                _('Вы зачислены в падаваны.'),
                settings.FROM_EMAIL,
                [candidate.email])

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name', )
        verbose_name = _('Джедай')
        verbose_name_plural = _('Джедаи')
class Padawan(models.Model):
    """Link model pairing a Jedi with an accepted Candidate."""
    jedi = models.ForeignKey(Jedi, verbose_name=_(''))
    candidate = models.ForeignKey(Candidate)

    def clean(self):
        # Model-level validation: a jedi may train at most 3 padawans.
        # NOTE(review): clean() only runs via full_clean()/ModelForm, and
        # counts already-saved padawans only -- confirm coverage is enough.
        if self.jedi.padawan_set.count() >= 3:
            raise ValidationError(
                _('У джедая не может быть более 3-х падаванов'))

    def __str__(self):
        return self.candidate.name

    class Meta:
        ordering = ('jedi', )
        verbose_name = _('Падаван')
        verbose_name_plural = _('Падаваны')
| mit |
lawrencejones/neuro | Exercise_2/Solutions/RobotConnectAvoidance.py | 4 | 2167 | """
Computational Neurodynamics
Exercise 2
(C) Murray Shanahan et al, 2015
"""
import numpy as np
import numpy.random as rn
from IzNetwork import IzNetwork
def RobotConnectAvoidance(Ns, Nm):
  """
  Construct four layers of Izhikevich neurons and connect them together, just
  as RobotConnect4L. Layers 0 and 1 comprise sensory neurons, while layers 2
  and 3 comprise motor neurons. In this case, sensory neurons excite
  ipsilateral motor neurons causing avoidance behaviour.

  Inputs:
  Ns -- Number of neurons in sensory layers
  Nm -- Number of neurons in motor layers

  Returns the connected IzNetwork instance.
  """
  F    = 50.0/np.sqrt(Ns)  # Scaling factor
  D    = 4                 # Conduction delay
  Dmax = 5                 # Maximum conduction delay
  net = IzNetwork([Ns, Ns, Nm, Nm], Dmax)
  # Layer 0 (Left sensory neurons)
  # r introduces per-neuron heterogeneity in c and d (randomised
  # excitatory parameters, as in Izhikevich's standard setup).
  r = rn.rand(Ns)
  net.layer[0].N = Ns
  net.layer[0].a = 0.02 * np.ones(Ns)
  net.layer[0].b = 0.20 * np.ones(Ns)
  net.layer[0].c = -65 + 15*(r**2)
  net.layer[0].d = 8 - 6*(r**2)
  # Layer 1 (Right sensory neurons)
  r = rn.rand(Ns)
  net.layer[1].N = Ns
  net.layer[1].a = 0.02 * np.ones(Ns)
  net.layer[1].b = 0.20 * np.ones(Ns)
  net.layer[1].c = -65 + 15*(r**2)
  net.layer[1].d = 8 - 6*(r**2)
  # Layer 2 (Left motor neurons)
  r = rn.rand(Nm)
  net.layer[2].N = Nm
  net.layer[2].a = 0.02 * np.ones(Nm)
  net.layer[2].b = 0.20 * np.ones(Nm)
  net.layer[2].c = -65 + 15*(r**2)
  net.layer[2].d = 8 - 6*(r**2)
  # Layer 3 (Right motor neurons)
  r = rn.rand(Nm)
  net.layer[3].N = Nm
  net.layer[3].a = 0.02 * np.ones(Nm)
  net.layer[3].b = 0.20 * np.ones(Nm)
  net.layer[3].c = -65 + 15*(r**2)
  net.layer[3].d = 8 - 6*(r**2)
  # Connectivity matrix (synaptic weights)
  # layer[i].S[j] is the connectivity matrix from layer j to layer i
  # s[i,j] is the strength of the connection from neuron j to neuron i
  # Connect 0 to 2 and 1 to 3 (ipsilateral: left sensory -> left motor,
  # right sensory -> right motor) for AVOIDANCE behaviour.  (The original
  # comment said "seeking" — a leftover from RobotConnect4L, where the
  # contralateral wiring produces seeking; see the docstring above.)
  net.layer[2].S[0]      = np.ones([Nm, Ns])
  net.layer[2].factor[0] = F
  net.layer[2].delay[0]  = D * np.ones([Nm, Ns], dtype=int)
  net.layer[3].S[1]      = np.ones([Nm, Ns])
  net.layer[3].factor[1] = F
  net.layer[3].delay[1]  = D * np.ones([Nm, Ns], dtype=int)
  return net
| gpl-3.0 |
havard024/prego | venv/lib/python2.7/site-packages/django/http/request.py | 98 | 18233 | from __future__ import absolute_import, unicode_literals
import copy
import os
import re
import sys
import warnings
from io import BytesIO
from pprint import pformat
try:
from urllib.parse import parse_qsl, urlencode, quote, urljoin
except ImportError:
from urllib import urlencode, quote
from urlparse import parse_qsl, urljoin
from django.conf import settings
from django.core import signing
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser
from django.utils import six
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import force_bytes, force_text, force_str, iri_to_uri
RAISE_ERROR = object()
absolute_http_url_re = re.compile(r"^https?://", re.I)
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
    # Raised from HttpRequest.body when reading the request stream fails
    # (e.g. the client disconnected mid-upload); see the `body` property.
    pass
class HttpRequest(object):
    """A basic HTTP request."""
    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    # Class-level default; instances lazily replace this from settings via
    # the upload_handlers property.
    _upload_handlers = []
    def __init__(self):
        # Subclasses (e.g. WSGIRequest) are expected to populate these.
        self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
        self.path = ''
        self.path_info = ''
        self.method = None
        # Set to True when POST parsing fails so __repr__ can say so
        # instead of showing an empty POST.
        self._post_parse_error = False
    def __repr__(self):
        return build_request_repr(self)
    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
                'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            # Only append the port when it is not the scheme's default.
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)
        # With DEBUG on any host is accepted; otherwise the host must match
        # a pattern in settings.ALLOWED_HOSTS (see validate_host below).
        allowed_hosts = ['*'] if settings.DEBUG else settings.ALLOWED_HOSTS
        if validate_host(host, allowed_hosts):
            return host
        else:
            raise SuspiciousOperation(
                "Invalid HTTP_HOST header (you may need to set ALLOWED_HOSTS): %s" % host)
    def get_full_path(self):
        """Return the path plus any query string, e.g. "/path/?x=1"."""
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (self.path, ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '')
    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempts to return a signed cookie. If the signature fails or the
        cookie has expired, raises an exception... unless you provide the
        default argument in which case that value will be returned instead.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            # Cookies are signed with key + salt, so unsign the same way.
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value
    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no location is specified, the absolute URI is built on
        ``request.get_full_path()``.
        """
        if not location:
            location = self.get_full_path()
        if not absolute_http_url_re.match(location):
            # Relative location: resolve it against scheme://host/path.
            current_uri = '%s://%s%s' % ('https' if self.is_secure() else 'http',
                                         self.get_host(), self.path)
            location = urljoin(current_uri, location)
        return iri_to_uri(location)
    def _is_secure(self):
        # Base implementation checks the CGI-style HTTPS environment
        # variable; subclasses override this with server-specific logic.
        return os.environ.get("HTTPS") == "on"
    def is_secure(self):
        """Return True if the request was made over HTTPS."""
        # First, check the SECURE_PROXY_SSL_HEADER setting.
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.')
            if self.META.get(header, None) == value:
                return True
        # Failing that, fall back to _is_secure(), which is a hook for
        # subclasses to implement.
        return self._is_secure()
    def is_ajax(self):
        # Header set by most JS libraries (e.g. jQuery) on XHR requests.
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
    @property
    def encoding(self):
        return self._encoding
    @encoding.setter
    def encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post
    def _initialize_handlers(self):
        # Instantiate the upload-handler classes named in settings.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]
    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers
    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        # _files existing means an upload was already parsed.
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers
    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        # Freeze the handler list: altering it mid-upload would corrupt state.
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()
    @property
    def body(self):
        """The raw request body as bytes; read once and cached."""
        if not hasattr(self, '_body'):
            if self._read_started:
                raise Exception("You cannot access body after reading from request's data stream")
            try:
                self._body = self.read()
            except IOError as e:
                # Re-raise as UnreadablePostError, keeping the traceback.
                six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
            # Rewind: subsequent reads are served from the cached bytes.
            self._stream = BytesIO(self._body)
        return self._body
    @property
    def raw_post_data(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn('HttpRequest.raw_post_data has been deprecated. Use HttpRequest.body instead.', DeprecationWarning)
        return self.body
    def _mark_post_parse_error(self):
        # Leave valid-but-empty POST/FILES and remember that parsing failed.
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True
    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            # Stream already consumed and body never cached: too late to parse.
            self._mark_post_parse_error()
            return
        if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except:
                # An error occured while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occured. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                # (bare except is deliberate: mark the error, then re-raise.)
                self._mark_post_parse_error()
                raise
        elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            # Unrecognized content-type: expose empty POST/FILES.
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
    ## File-like and iterator interface.
    ##
    ## Expects self._stream to be set to an appropriate source of bytes by
    ## a corresponding request subclass (e.g. WSGIRequest).
    ## Also when request data has already been read by request.POST or
    ## request.body, self._stream points to a BytesIO instance
    ## containing that data.
    def read(self, *args, **kwargs):
        self._read_started = True
        return self._stream.read(*args, **kwargs)
    def readline(self, *args, **kwargs):
        self._read_started = True
        return self._stream.readline(*args, **kwargs)
    def xreadlines(self):
        # Lazily yield lines until the stream is exhausted.
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines
    def readlines(self):
        return list(iter(self))
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict that takes a query string when initialized.
    This is immutable unless you create a copy of it.
    Values retrieved from this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string, mutable=False, encoding=None):
        super(QueryDict, self).__init__()
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        if six.PY3:
            if isinstance(query_string, bytes):
                # query_string contains URL-encoded data, a subset of ASCII.
                query_string = query_string.decode()
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True,
                                        encoding=encoding):
                self.appendlist(key, value)
        else:
            # Python 2: parse_qsl has no encoding argument, so decode the
            # raw bytes ourselves, replacing undecodable characters.
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True):
                self.appendlist(force_text(key, encoding, errors='replace'),
                                force_text(value, encoding, errors='replace'))
        # Immutability is only enforced from here on, so the parsing above
        # is free to call the mutating appendlist().
        self._mutable = mutable
    @property
    def encoding(self):
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    @encoding.setter
    def encoding(self, value):
        self._encoding = value
    def _assert_mutable(self):
        # Guard helper: every mutating method calls this first.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)
    def __copy__(self):
        # Copies are always mutable, regardless of the source's mutability.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in six.iterlists(self):
            result.setlist(key, value)
        return result
    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in six.iterlists(self):
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super(QueryDict, self).setlist(key, list_)
    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super(QueryDict, self).setlistdefault(key, default_list)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).appendlist(key, value)
    def pop(self, key, *args):
        self._assert_mutable()
        return super(QueryDict, self).pop(key, *args)
    def popitem(self):
        self._assert_mutable()
        return super(QueryDict, self).popitem()
    def clear(self):
        self._assert_mutable()
        super(QueryDict, self).clear()
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super(QueryDict, self).setdefault(key, default)
    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.
        :arg safe: Used to specify characters which do not require quoting, for
            example::
                >>> q = QueryDict('', mutable=True)
                >>> q['next'] = '/a&b/'
                >>> q.urlencode()
                'next=%2Fa%26b%2F'
                >>> q.urlencode(safe='/')
                'next=/a%26b/'
        """
        output = []
        if safe:
            # Quote keys and values directly so 'safe' chars stay unquoted.
            safe = force_bytes(safe, self.encoding)
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = force_bytes(k, self.encoding)
            output.extend([encode(k, force_bytes(v, self.encoding))
                           for v in list_])
        return '&'.join(output)
def build_request_repr(request, path_override=None, GET_override=None,
                       POST_override=None, COOKIES_override=None,
                       META_override=None):
    """
    Builds and returns the request's representation string. The request's
    attributes may be overridden by pre-processed values.
    """
    # This runs during error handling, so every attribute access and
    # pformat() call is guarded: malformed input must never make the
    # repr itself blow up.
    def describe(override, attr):
        try:
            source = override if override is not None else getattr(request, attr)
            return pformat(source)
        except Exception:
            return '<could not parse>'
    get = describe(GET_override, 'GET')
    if request._post_parse_error:
        # POST parsing already failed once; don't trigger it again.
        post = '<could not parse>'
    else:
        post = describe(POST_override, 'POST')
    cookies = describe(COOKIES_override, 'COOKIES')
    meta = describe(META_override, 'META')
    path = path_override if path_override is not None else request.path
    return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
            (request.__class__.__name__,
             path,
             six.text_type(get),
             six.text_type(post),
             six.text_type(cookies),
             six.text_type(meta)))
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Converts bytes objects to unicode, using the given encoding.  Illegally
    encoded input characters are replaced with the Unicode "unknown"
    codepoint (U+FFFD).

    Any non-bytes object is returned unchanged.
    """
    # Guard clause: only bytes need decoding.
    if not isinstance(s, bytes):
        return s
    return six.text_type(s, encoding, 'replace')
def validate_host(host, allowed_hosts):
    """
    Validate the given host header value for this site.
    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.
    Return ``True`` for a valid host, ``False`` otherwise.
    """
    # All validation is case-insensitive.
    host = host.lower()
    # Basic sanity check: hostname-ish text or a bracketed IPv6 literal,
    # optionally followed by a port.
    if not re.match(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$", host):
        return False
    # Strip the port so only the domain part is compared.
    if host[-1] == ']':
        # It's an IPv6 address without a port.
        domain = host
    else:
        domain = host.rsplit(':', 1)[0]
    for pattern in allowed_hosts:
        pattern = pattern.lower()
        if pattern == '*':
            return True
        if pattern.startswith('.'):
            # Leading dot: match the bare domain or any subdomain.
            if domain.endswith(pattern) or domain == pattern[1:]:
                return True
        elif pattern == domain:
            return True
    return False
| mit |
rghe/ansible | lib/ansible/modules/network/iosxr/iosxr_interface.py | 5 | 26061 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_interface
version_added: "2.4"
author:
- "Ganesh Nalawade (@ganeshrn)"
- "Kedar Kekan (@kedarX)"
short_description: Manage Interface on Cisco IOS XR network devices
description:
- This module provides declarative management of Interfaces
on Cisco IOS XR network devices.
extends_documentation_fragment: iosxr
notes:
- Tested against IOS XRv 6.1.2
- Preconfiguration of physical interfaces is not supported with C(netconf) transport.
options:
name:
description:
- Name of the interface to configure in C(type + path) format. e.g. C(GigabitEthernet0/0/0/0)
required: true
description:
description:
- Description of Interface being configured.
enabled:
description:
- Removes the shutdown configuration, which removes the forced administrative down on the interface,
enabling it to move to an up or down state.
type: bool
default: True
active:
description:
- Whether the interface is C(active) or C(preconfigured). Preconfiguration allows you to configure modular
services cards before they are inserted into the router. When the cards are inserted, they are instantly
configured. Active cards are the ones already inserted.
choices: ['active', 'preconfigure']
default: active
version_added: 2.5
speed:
description:
- Configure the speed for an interface. Default is auto-negotiation when not configured.
choices: ['10', '100', '1000']
mtu:
description:
- Sets the MTU value for the interface. Range is between 64 and 65535'
duplex:
description:
- Configures the interface duplex mode. Default is auto-negotiation when not configured.
choices: ['full', 'half']
tx_rate:
description:
- Transmit rate in bits per second (bps).
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
rx_rate:
description:
- Receiver rate in bits per second (bps).
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
aggregate:
description:
- List of Interface definitions. Include multiple interface configurations together,
one each on a seperate line
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state argument which are
I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate).
default: 10
state:
description:
- State of the Interface configuration, C(up) means present and
operationally up and C(down) means present and operationally C(down)
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure interface
iosxr_interface:
name: GigabitEthernet0/0/0/2
description: test-interface
speed: 100
duplex: half
mtu: 512
- name: remove interface
iosxr_interface:
name: GigabitEthernet0/0/0/2
state: absent
- name: make interface up
iosxr_interface:
name: GigabitEthernet0/0/0/2
enabled: True
- name: make interface down
iosxr_interface:
name: GigabitEthernet0/0/0/2
enabled: False
- name: Create interface using aggregate
iosxr_interface:
aggregate:
- name: GigabitEthernet0/0/0/3
- name: GigabitEthernet0/0/0/2
speed: 100
duplex: full
mtu: 512
state: present
- name: Create interface using aggregate along with additional params in aggregate
iosxr_interface:
aggregate:
- { name: GigabitEthernet0/0/0/3, description: test-interface 3 }
- { name: GigabitEthernet0/0/0/2, description: test-interface 2 }
speed: 100
duplex: full
mtu: 512
state: present
- name: Delete interface using aggregate
iosxr_interface:
aggregate:
- name: GigabitEthernet0/0/0/3
- name: GigabitEthernet0/0/0/2
state: absent
- name: Check intent arguments
iosxr_interface:
name: GigabitEthernet0/0/0/5
state: up
delay: 20
- name: Config + intent
iosxr_interface:
name: GigabitEthernet0/0/0/5
enabled: False
state: down
delay: 20
"""
RETURN = """
commands:
description: The list of configuration mode commands sent to device with transport C(cli)
returned: always (empty list when no commands to send)
type: list
sample:
- interface GigabitEthernet0/0/0/2
- description test-interface
- duplex half
- mtu 512
xml:
description: NetConf rpc xml sent to device with transport C(netconf)
returned: always (empty list when no xml rpc to send)
type: list
version_added: 2.5
sample:
- '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg">
<interface-configuration xc:operation="merge">
<active>act</active>
<interface-name>GigabitEthernet0/0/0/0</interface-name>
<description>test-interface-0</description>
<mtus><mtu>
<owner>GigabitEthernet</owner>
<mtu>512</mtu>
</mtu></mtus>
<ethernet xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-drivers-media-eth-cfg">
<speed>100</speed>
<duplex>half</duplex>
</ethernet>
</interface-configuration>
</interface-configurations></config>'
"""
import re
from time import sleep
from copy import deepcopy
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.iosxr.iosxr import get_config, load_config, build_xml
from ansible.module_utils.network.iosxr.iosxr import run_commands, iosxr_argument_spec, get_oper
from ansible.module_utils.network.iosxr.iosxr import is_netconf, is_cliconf, etree_findall, etree_find
from ansible.module_utils.network.common.utils import conditional, remove_default_spec
def validate_mtu(value):
    """Validate the ``mtu`` module argument.

    Returns an (rc, msg) tuple: (True, None) when the value is absent or a
    valid MTU, otherwise (False, <message>) for the caller to hand to
    ``fail_json()``.
    """
    if not value:
        # None/empty means "not configured" -- nothing to validate.
        return True, None
    try:
        mtu = int(value)
    except (TypeError, ValueError):
        # ROBUSTNESS FIX: a non-numeric value used to escape as an unhandled
        # ValueError traceback; report it as a validation failure instead.
        return False, 'mtu must be an integer value between 64 and 65535'
    if not 64 <= mtu <= 65535:
        return False, 'mtu must be between 64 and 65535'
    return True, None
class ConfigBase(object):
    """Shared state/parameter handling for the CLI and NETCONF back ends.

    Builds self._want (desired interface dicts from module params) and
    holds self._have (interface dicts read from the device) plus the
    Ansible result skeleton.
    """
    def __init__(self, module):
        self._module = module
        self._result = {'changed': False, 'warnings': []}
        self._want = list()
        self._have = list()
    def validate_param_values(self, param=None):
        """Run any module-level validate_<key>() function over each param."""
        for key, value in param.items():
            # validate the param value (if validator func exists)
            validator = globals().get('validate_%s' % key)
            if callable(validator):
                rc, msg = validator(value)
                if not rc:
                    self._module.fail_json(msg=msg)
    def _normalize(self, params):
        """Derive 'owner' from the interface name and map 'active' to the
        short keyword the device expects ('act'/'pre').

        Extracted helper: this logic was duplicated verbatim in both
        branches of map_params_to_obj().
        """
        match = re.match(r"(^[a-z]+)([0-9/]+$)", params['name'], re.I)
        if match:
            # e.g. 'GigabitEthernet0/0/0/2' -> owner 'GigabitEthernet'
            params['owner'] = match.groups()[0]
            params['active'] = 'pre' if params['active'] == 'preconfigure' else 'act'
        return params
    def map_params_to_obj(self):
        """Translate module params (or each aggregate entry) into self._want."""
        aggregate = self._module.params.get('aggregate')
        if aggregate:
            for item in aggregate:
                # Fill unset per-item keys from the top-level module params.
                for key in item:
                    if item.get(key) is None:
                        item[key] = self._module.params[key]
                self.validate_param_values(item)
                self._want.append(self._normalize(item.copy()))
        else:
            self.validate_param_values(self._module.params)
            params = {key: self._module.params[key] for key in (
                'name', 'description', 'speed', 'mtu', 'duplex', 'state',
                'delay', 'tx_rate', 'rx_rate', 'enabled', 'active')}
            self._want.append(self._normalize(params))
class CliConfiguration(ConfigBase):
    """CLI (network_cli transport) implementation of the interface module."""
    def __init__(self, module):
        super(CliConfiguration, self).__init__(module)
    def parse_shutdown(self, intf_config):
        # True if any line of the interface stanza contains 'shutdown'.
        for cfg in intf_config:
            match = re.search(r'%s' % 'shutdown', cfg, re.M)
            if match:
                return True
        return False
    def parse_config_argument(self, intf_config, arg):
        # Return the value following ``arg`` in the stanza, or None.
        for cfg in intf_config:
            match = re.search(r'%s (.+)$' % arg, cfg, re.M)
            if match:
                return match.group(1)
    def search_obj_in_list(self, name):
        # Find the on-device interface dict with the given name in self._have.
        for obj in self._have:
            if obj['name'] == name:
                return obj
        return None
    def map_config_to_obj(self):
        """Parse the device's interface running-config into self._have."""
        data = get_config(self._module, config_filter='interface')
        # IOS-XR separates interface stanzas with '!'.
        interfaces = data.strip().rstrip('!').split('!')
        if not interfaces:
            return list()
        for interface in interfaces:
            intf_config = interface.strip().splitlines()
            name = intf_config[0].strip().split()[1]
            active = 'act'
            # 'interface preconfigure <name>' marks a pre-configured card;
            # the real name is then the third token.
            if name == 'preconfigure':
                active = 'pre'
                name = intf_config[0].strip().split()[2]
            obj = {
                'name': name,
                'description': self.parse_config_argument(intf_config, 'description'),
                'speed': self.parse_config_argument(intf_config, 'speed'),
                'duplex': self.parse_config_argument(intf_config, 'duplex'),
                'mtu': self.parse_config_argument(intf_config, 'mtu'),
                'enabled': True if not self.parse_shutdown(intf_config) else False,
                'active': active,
                'state': 'present'
            }
            self._have.append(obj)
    def map_obj_to_commands(self):
        """Diff want vs. have, build the CLI command list and load it."""
        commands = list()
        args = ('speed', 'description', 'duplex', 'mtu')
        for want_item in self._want:
            name = want_item['name']
            disable = not want_item['enabled']
            state = want_item['state']
            obj_in_have = self.search_obj_in_list(name)
            interface = 'interface ' + name
            if state == 'absent' and obj_in_have:
                commands.append('no ' + interface)
            elif state in ('present', 'up', 'down'):
                if obj_in_have:
                    # Existing interface: emit only the attributes that differ.
                    for item in args:
                        candidate = want_item.get(item)
                        running = obj_in_have.get(item)
                        if candidate != running:
                            if candidate:
                                cmd = interface + ' ' + item + ' ' + str(candidate)
                                commands.append(cmd)
                    if disable and obj_in_have.get('enabled', False):
                        commands.append(interface + ' shutdown')
                    elif not disable and not obj_in_have.get('enabled', False):
                        commands.append('no ' + interface + ' shutdown')
                else:
                    # New interface: emit every requested attribute.
                    for item in args:
                        value = want_item.get(item)
                        if value:
                            commands.append(interface + ' ' + item + ' ' + str(value))
                    if not disable:
                        commands.append('no ' + interface + ' shutdown')
        self._result['commands'] = commands
        if commands:
            # Commit only outside check mode; the device returns the diff.
            commit = not self._module.check_mode
            diff = load_config(self._module, commands, commit=commit)
            if diff:
                self._result['diff'] = dict(prepared=diff)
            self._result['changed'] = True
    def check_declarative_intent_params(self):
        """Verify state/tx_rate/rx_rate intent against operational data."""
        failed_conditions = []
        for want_item in self._want:
            want_state = want_item.get('state')
            want_tx_rate = want_item.get('tx_rate')
            want_rx_rate = want_item.get('rx_rate')
            # Skip items with no operational-state assertions.
            if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate:
                continue
            if self._result['changed']:
                # Give the device time to converge after our config change.
                sleep(want_item['delay'])
            command = 'show interfaces {!s}'.format(want_item['name'])
            out = run_commands(self._module, command)[0]
            if want_state in ('up', 'down'):
                match = re.search(r'%s (\w+)' % 'line protocol is', out, re.M)
                have_state = None
                if match:
                    have_state = match.group(1)
                    # 'administratively down' needs a second parse to pick
                    # the word after 'administratively'.
                    if have_state.strip() == 'administratively':
                        match = re.search(r'%s (\w+)' % 'administratively', out, re.M)
                        if match:
                            have_state = match.group(1)
                if have_state is None or not conditional(want_state, have_state.strip()):
                    failed_conditions.append('state ' + 'eq({!s})'.format(want_state))
            if want_tx_rate:
                match = re.search(r'%s (\d+)' % 'output rate', out, re.M)
                have_tx_rate = None
                if match:
                    have_tx_rate = match.group(1)
                if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int):
                    failed_conditions.append('tx_rate ' + want_tx_rate)
            if want_rx_rate:
                match = re.search(r'%s (\d+)' % 'input rate', out, re.M)
                have_rx_rate = None
                if match:
                    have_rx_rate = match.group(1)
                if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int):
                    failed_conditions.append('rx_rate ' + want_rx_rate)
        if failed_conditions:
            msg = 'One or more conditional statements have not been satisfied'
            self._module.fail_json(msg=msg, failed_conditions=failed_conditions)
    def run(self):
        """Entry point: gather, diff, apply, then verify intent."""
        self.map_params_to_obj()
        self.map_config_to_obj()
        self.map_obj_to_commands()
        self.check_declarative_intent_params()
        return self._result
class NCConfiguration(ConfigBase):
def __init__(self, module):
super(NCConfiguration, self).__init__(module)
self._intf_meta = collections.OrderedDict()
self._shut_meta = collections.OrderedDict()
self._data_rate_meta = collections.OrderedDict()
self._line_state_meta = collections.OrderedDict()
def map_obj_to_xml_rpc(self):
self._intf_meta.update([
('interface-configuration', {'xpath': 'interface-configurations/interface-configuration', 'tag': True, 'attrib': 'operation'}),
('a:active', {'xpath': 'interface-configurations/interface-configuration/active', 'operation': 'edit'}),
('a:name', {'xpath': 'interface-configurations/interface-configuration/interface-name'}),
('a:description', {'xpath': 'interface-configurations/interface-configuration/description', 'operation': 'edit'}),
('mtus', {'xpath': 'interface-configurations/interface-configuration/mtus', 'tag': True, 'operation': 'edit'}),
('mtu', {'xpath': 'interface-configurations/interface-configuration/mtus/mtu', 'tag': True, 'operation': 'edit'}),
('a:owner', {'xpath': 'interface-configurations/interface-configuration/mtus/mtu/owner', 'operation': 'edit'}),
('a:mtu', {'xpath': 'interface-configurations/interface-configuration/mtus/mtu/mtu', 'operation': 'edit'}),
('CEthernet', {'xpath': 'interface-configurations/interface-configuration/ethernet', 'tag': True, 'operation': 'edit', 'ns': True}),
('a:speed', {'xpath': 'interface-configurations/interface-configuration/ethernet/speed', 'operation': 'edit'}),
('a:duplex', {'xpath': 'interface-configurations/interface-configuration/ethernet/duplex', 'operation': 'edit'}),
])
self._shut_meta.update([
('interface-configuration', {'xpath': 'interface-configurations/interface-configuration', 'tag': True}),
('a:active', {'xpath': 'interface-configurations/interface-configuration/active', 'operation': 'edit'}),
('a:name', {'xpath': 'interface-configurations/interface-configuration/interface-name'}),
('shutdown', {'xpath': 'interface-configurations/interface-configuration/shutdown', 'tag': True, 'operation': 'edit', 'attrib': 'operation'}),
])
state = self._module.params['state']
_get_filter = build_xml('interface-configurations', xmap=self._intf_meta, params=self._want, opcode="filter")
running = get_config(self._module, source='running', config_filter=_get_filter)
intfcfg_nodes = etree_findall(running, 'interface-configuration')
intf_list = set()
shut_list = set()
for item in intfcfg_nodes:
intf_name = etree_find(item, 'interface-name').text
if intf_name is not None:
intf_list.add(intf_name)
if etree_find(item, 'shutdown') is not None:
shut_list.add(intf_name)
intf_params = list()
shut_params = list()
noshut_params = list()
for index, item in enumerate(self._want):
if item['name'] in intf_list:
intf_params.append(item)
if not item['enabled']:
shut_params.append(item)
if item['name'] in shut_list and item['enabled']:
noshut_params.append(item)
opcode = None
if state == 'absent':
if intf_params:
opcode = "delete"
elif state in ('present', 'up', 'down'):
intf_params = self._want
opcode = 'merge'
self._result['xml'] = []
_edit_filter_list = list()
if opcode:
_edit_filter_list.append(build_xml('interface-configurations', xmap=self._intf_meta,
params=intf_params, opcode=opcode))
if opcode == 'merge':
if len(shut_params):
_edit_filter_list.append(build_xml('interface-configurations', xmap=self._shut_meta,
params=shut_params, opcode='merge'))
if len(noshut_params):
_edit_filter_list.append(build_xml('interface-configurations', xmap=self._shut_meta,
params=noshut_params, opcode='delete'))
diff = None
if len(_edit_filter_list):
commit = not self._module.check_mode
diff = load_config(self._module, _edit_filter_list, commit=commit, running=running,
nc_get_filter=_get_filter)
if diff:
if self._module._diff:
self._result['diff'] = dict(prepared=diff)
self._result['xml'] = _edit_filter_list
self._result['changed'] = True
def check_declarative_intent_params(self):
    """Verify declarative-intent parameters against device operational state.

    For every wanted interface, compares the requested ``state`` with the
    device's reported line-state and the requested ``tx_rate``/``rx_rate``
    with the cached data rates, and fails the module listing every
    condition that was not satisfied.
    """
    failed_conditions = []
    # XML map used to build the <infra-statistics> oper filter
    # (cached input/output data rates per interface).
    self._data_rate_meta.update([
        ('interfaces', {'xpath': 'infra-statistics/interfaces', 'tag': True}),
        ('interface', {'xpath': 'infra-statistics/interfaces/interface', 'tag': True}),
        ('a:name', {'xpath': 'infra-statistics/interfaces/interface/interface-name'}),
        ('cache', {'xpath': 'infra-statistics/interfaces/interface/cache', 'tag': True}),
        ('data-rate', {'xpath': 'infra-statistics/interfaces/interface/cache/data-rate', 'tag': True}),
        ('input-data-rate', {'xpath': 'infra-statistics/interfaces/interface/cache/data-rate/input-data-rate', 'tag': True}),
        ('output-data-rate', {'xpath': 'infra-statistics/interfaces/interface/cache/data-rate/output-data-rate', 'tag': True}),
    ])
    # XML map used to build the <interface-properties> oper filter
    # (line-state per interface).
    self._line_state_meta.update([
        ('data-nodes', {'xpath': 'interface-properties/data-nodes', 'tag': True}),
        ('data-node', {'xpath': 'interface-properties/data-nodes/data-node', 'tag': True}),
        ('system-view', {'xpath': 'interface-properties/data-nodes/data-node/system-view', 'tag': True}),
        ('interfaces', {'xpath': 'interface-properties/data-nodes/data-node/system-view/interfaces', 'tag': True}),
        ('interface', {'xpath': 'interface-properties/data-nodes/data-node/system-view/interfaces/interface', 'tag': True}),
        ('a:name', {'xpath': 'interface-properties/data-nodes/data-node/system-view/interfaces/interface/interface-name'}),
        ('line-state', {'xpath': 'interface-properties/data-nodes/data-node/system-view/interfaces/interface/line-state', 'tag': True}),
    ])

    # Fetch data rates and index them by interface name.
    _rate_filter = build_xml('infra-statistics', xmap=self._data_rate_meta, params=self._want, opcode="filter")
    out = get_oper(self._module, filter=_rate_filter)
    data_rate_list = etree_findall(out, 'interface')
    data_rate_map = dict()
    for item in data_rate_list:
        data_rate_map.update({etree_find(item, 'interface-name').text: dict()})
        data_rate_map[etree_find(item, 'interface-name').text].update({'input-data-rate': etree_find(item, 'input-data-rate').text,
                                                                       'output-data-rate': etree_find(item, 'output-data-rate').text})

    # Fetch line-states and index them by interface name.
    _line_state_filter = build_xml('interface-properties', xmap=self._line_state_meta, params=self._want, opcode="filter")
    out = get_oper(self._module, filter=_line_state_filter)
    line_state_list = etree_findall(out, 'interface')
    line_state_map = dict()
    for item in line_state_list:
        line_state_map.update({etree_find(item, 'interface-name').text: etree_find(item, 'line-state').text})

    for want_item in self._want:
        want_state = want_item.get('state')
        want_tx_rate = want_item.get('tx_rate')
        want_rx_rate = want_item.get('rx_rate')
        # Nothing declarative requested for this interface: skip it.
        if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate:
            continue

        # Give the device time to converge after a configuration change.
        if self._result['changed']:
            sleep(want_item['delay'])

        if want_state in ('up', 'down'):
            # Substring match against the reported line-state text.
            if want_state not in line_state_map[want_item['name']]:
                failed_conditions.append('state ' + 'eq({!s})'.format(want_state))

        if want_tx_rate:
            if want_tx_rate != data_rate_map[want_item['name']]['output-data-rate']:
                failed_conditions.append('tx_rate ' + want_tx_rate)

        if want_rx_rate:
            if want_rx_rate != data_rate_map[want_item['name']]['input-data-rate']:
                failed_conditions.append('rx_rate ' + want_rx_rate)

    if failed_conditions:
        msg = 'One or more conditional statements have not been satisfied'
        self._module.fail_json(msg=msg, failed_conditions=failed_conditions)
def run(self):
    """Execute the full workflow and return the module result dict.

    Builds the desired state, pushes the configuration to the device,
    then verifies any declarative-intent conditions.
    """
    for step in (self.map_params_to_obj,
                 self.map_obj_to_xml_rpc,
                 self.check_declarative_intent_params):
        step()
    return self._result
def main():
    """ main entry point for module execution
    """
    # Options accepted both per-interface and at the module top level.
    element_spec = dict(
        name=dict(type='str'),
        description=dict(type='str'),
        speed=dict(choices=['10', '100', '1000']),
        mtu=dict(),
        duplex=dict(choices=['full', 'half']),
        enabled=dict(default=True, type='bool'),
        active=dict(default='active', type='str', choices=['active', 'preconfigure']),
        tx_rate=dict(),
        rx_rate=dict(),
        delay=dict(default=10, type='int'),
        state=dict(default='present',
                   choices=['present', 'absent', 'up', 'down'])
    )

    per_item_spec = deepcopy(element_spec)
    per_item_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(per_item_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=per_item_spec),
    )
    argument_spec.update(element_spec)
    argument_spec.update(iosxr_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'aggregate']],
                           mutually_exclusive=[['name', 'aggregate']],
                           supports_check_mode=True)

    # Pick the implementation that matches the active transport.
    impl = None
    if is_cliconf(module):
        module.deprecate("cli support for 'iosxr_interface' is deprecated. Use transport netconf instead",
                         version='2.9')
        impl = CliConfiguration(module)
    elif is_netconf(module):
        if module.params['active'] == 'preconfigure':
            module.fail_json(msg="Physical interface pre-configuration is not supported with transport 'netconf'")
        impl = NCConfiguration(module)

    result = impl.run() if impl else {}
    module.exit_json(**result)
# Run only when executed directly (Ansible invokes the module this way).
if __name__ == '__main__':
    main()
| gpl-3.0 |
oracal/cineworld | test/test.py | 1 | 3229 | #!/usr/bin/env python
'''
Created on 17 Jul 2011
@author: oracal
'''
from cineworld import CW, cineworld
from mock import Mock
import unittest
try:
from urlparse import urlparse, parse_qs
except ImportError:
# For older versions of Python.
from urlparse import urlparse
from cgi import parse_qs
def set_up():
    """Patch the cineworld module so tests never touch the network."""
    fixture = {
        u'films': [
            {u'edi': 39523, u'title': u'3D - Harry Potter And The Deathly Hallows Pt 2'},
            {u'edi': 40501, u'title': u'3D - Transformers: Dark Of The Moon'},
        ],
    }
    # HTTP fetches become no-ops and JSON decoding always yields the fixture.
    cineworld.urlopen = Mock()
    cineworld.json.loads = Mock(return_value=fixture)
    cineworld.API_KEY = 'my_api_key'
class CWClassInitTest(unittest.TestCase):
    """API-key handling in the CW constructor."""

    def setUp(self):
        set_up()

    def test_uninitialized_api_key(self):
        # With no argument, the module-level key is used.
        client = CW()
        self.assertEqual(client.api_key, 'my_api_key')

    def test_initialized_api_key(self):
        # An explicitly supplied key overrides the module default.
        client = CW('called_api_key')
        self.assertEqual(client.api_key, 'called_api_key')
class GetListTest(unittest.TestCase):
    """CW.get_list must build the expected request URLs."""

    def setUp(self):
        set_up()

    def test_get_list_path(self):
        CW().get_list('films', 'some_url')
        # The URL actually requested is the first positional arg of urlopen.
        requested = urlparse(cineworld.urlopen.call_args[0][0])
        self.assertEqual(requested.path, 'some_url')

    def test_get_list_arguments(self):
        CW().get_list('films', 'some_url', argument1='argument1', argument2='argument2')
        requested = urlparse(cineworld.urlopen.call_args[0][0])
        query = parse_qs(requested.query)
        self.assertEqual(query['argument1'], ['argument1'])
        self.assertEqual(query['argument2'], ['argument2'])
class FilmListTest(unittest.TestCase):
    """CW.get_film_list behaviour against the mocked backend."""

    def setUp(self):
        set_up()

    def test_film_list_path(self):
        CW().get_film_list()
        requested = urlparse(cineworld.urlopen.call_args[0][0])
        self.assertEqual(requested.path, '/api/quickbook/films')

    def test_film_list_cache(self):
        client = CW()
        client.get_film_list()
        # The fetched list is memoised on the instance.
        assert client.film_list

    def test_film_list_return(self):
        expected = [{u'edi': 39523, u'title': u'3D - Harry Potter And The Deathly Hallows Pt 2'},
                    {u'edi': 40501, u'title': u'3D - Transformers: Dark Of The Moon'}]
        self.assertEqual(CW().get_film_list(), expected)
class FilmSearchTest(unittest.TestCase):
    """Fuzzy film-title search accuracy.

    Uses the two-film catalogue installed by set_up(); each case checks
    both the matched films and the reported match strength.

    Note: ``assertEquals`` (a deprecated alias, removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        set_up()

    def test_search_accuracy_1(self):
        # Near-complete title: strong match.
        self.assertEqual(CW().film_search('harry potter and the deathly hallows'), [{u'edi': 39523, u'title': u'3D - Harry Potter And The Deathly Hallows Pt 2', u'strength': 95}])

    def test_search_accuracy_2(self):
        # Partial title still matches, with lower strength.
        self.assertEqual(CW().film_search('harry potter'), [{u'edi': 39523, u'title': u'3D - Harry Potter And The Deathly Hallows Pt 2', u'strength': 90}])

    def test_search_accuracy_3(self):
        # Misspelled query must not match anything.
        self.assertEqual(CW().film_search('horry putter'), [])

    def test_search_accuracy_4(self):
        # Out-of-order keywords still match.
        self.assertEqual(CW().film_search('dark moon'), [{u'edi': 40501, u'title': u'3D - Transformers: Dark Of The Moon', u'strength': 85}])

    def test_search_accuracy_5(self):
        # Unrelated title must not match.
        self.assertEqual(CW().film_search('train spotting'), [])
if __name__ == "__main__":
unittest.main() | mit |
ScreamingUdder/mantid | scripts/test/SANS/algorithm_detail/crop_helper_test.py | 3 | 1754 | from __future__ import (absolute_import, division, print_function)
import unittest
import mantid
from sans.algorithm_detail.crop_helper import get_component_name
from sans.common.enums import DetectorType
from sans.common.constants import EMPTY_NAME
from sans.common.general_functions import create_unmanaged_algorithm
from mantid.api import FileFinder
class CropHelperTest(unittest.TestCase):
    """Tests for crop_helper.get_component_name across SANS instruments.

    ``self.assertTrue(a == b)`` has been replaced with
    ``self.assertEqual(a, b)`` so that failures report both values
    instead of a bare 'False is not true'.
    """

    def _get_workspace(self, file_name):
        # Resolve the run number to a file and load it into a workspace.
        full_file_name = FileFinder.findRuns(file_name)[0]
        load_name = "Load"
        load_options = {"Filename": full_file_name,
                        "OutputWorkspace": EMPTY_NAME}
        load_alg = create_unmanaged_algorithm(load_name, **load_options)
        load_alg.execute()
        return load_alg.getProperty("OutputWorkspace").value

    def test_that_can_get_component_name_for_sans2d(self):
        workspace = self._get_workspace("SANS2D00022024")
        self.assertEqual("front-detector", get_component_name(workspace, DetectorType.HAB))
        self.assertEqual("rear-detector", get_component_name(workspace, DetectorType.LAB))

    def test_that_can_get_component_name_for_loq(self):
        workspace = self._get_workspace("LOQ48127")
        self.assertEqual("HAB", get_component_name(workspace, DetectorType.HAB))
        self.assertEqual("main-detector-bank", get_component_name(workspace, DetectorType.LAB))

    def test_that_can_get_component_name_for_larmor(self):
        workspace = self._get_workspace("LARMOR00002260")
        self.assertEqual("DetectorBench", get_component_name(workspace, DetectorType.HAB))

    def test_that_can_get_component_name_for_zoom(self):
        # TODO when test data is available
        pass
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
ZacariasBendeck/youtube-dl | youtube_dl/extractor/discovery.py | 104 | 2354 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
)
from ..compat import compat_str
class DiscoveryIE(InfoExtractor):
    """Extractor for video pages on www.discovery.com."""

    _VALID_URL = r'http://www\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9_\-]*)(?:\.htm)?'
    _TESTS = [{
        'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
        'info_dict': {
            'id': '20769',
            'ext': 'mp4',
            'title': 'Mission Impossible Outtakes',
            'description': ('Watch Jamie Hyneman and Adam Savage practice being'
                            ' each other -- to the point of confusing Jamie\'s dog -- and '
                            'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
                            ' back.'),
            'duration': 156,
            'timestamp': 1303099200,
            'upload_date': '20110418',
        },
        'params': {
            'skip_download': True,  # requires ffmpeg
        }
    }, {
        'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mythbusters-the-simpsons',
        'info_dict': {
            'id': 'mythbusters-the-simpsons',
            'title': 'MythBusters: The Simpsons',
        },
        'playlist_count': 9,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Appending '?flat=1' makes the page return its playlist as JSON.
        info = self._download_json(url + '?flat=1', video_id)

        video_title = info.get('playlist_title') or info.get('video_title')

        # One playlist entry per clip on the page.
        entries = []
        for idx, video_info in enumerate(info['playlist']):
            entries.append({
                'id': compat_str(video_info['id']),
                'formats': self._extract_m3u8_formats(
                    video_info['src'], video_id, ext='mp4',
                    note='Download m3u8 information for video %d' % (idx + 1)),
                'title': video_info['title'],
                'description': video_info.get('description'),
                'duration': parse_duration(video_info.get('video_length')),
                'webpage_url': video_info.get('href'),
                'thumbnail': video_info.get('thumbnailURL'),
                'alt_title': video_info.get('secondary_title'),
                'timestamp': parse_iso8601(video_info.get('publishedDate')),
            })

        return self.playlist_result(entries, video_id, video_title)
| unlicense |
sfrenza/test-for-bot | venv/Lib/site-packages/nltk/stem/snowball.py | 2 | 145970 | # -*- coding: utf-8 -*-
#
# Natural Language Toolkit: Snowball Stemmer
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Peter Michael Stahl <pemistahl@gmail.com>
# Peter Ljunglof <peter.ljunglof@heatherleaf.se> (revisions)
# Algorithms: Dr Martin Porter <martin@tartarus.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Snowball stemmers
This module provides a port of the Snowball stemmers
developed by Martin Porter.
There is also a demo function: `snowball.demo()`.
"""
from __future__ import unicode_literals, print_function
from six.moves import input
from nltk import compat
from nltk.corpus import stopwords
from nltk.stem import porter
from nltk.stem.util import suffix_replace
from nltk.stem.api import StemmerI
class SnowballStemmer(StemmerI):

    """
    Snowball Stemmer

    The following languages are supported:
    Danish, Dutch, English, Finnish, French, German,
    Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
    Spanish and Swedish.

    The algorithm for English is documented here:

        Porter, M. \"An algorithm for suffix stripping.\"
        Program 14.3 (1980): 130-137.

    The algorithms have been developed by Martin Porter.
    These stemmers are called Snowball, because Porter created
    a programming language with this name for creating
    new stemming algorithms. There is more information available
    at http://snowball.tartarus.org/

    The stemmer is invoked as shown below:

    >>> from nltk.stem import SnowballStemmer
    >>> print(" ".join(SnowballStemmer.languages)) # See which languages are supported
    danish dutch english finnish french german hungarian
    italian norwegian porter portuguese romanian russian
    spanish swedish
    >>> stemmer = SnowballStemmer("german") # Choose a language
    >>> stemmer.stem("Autobahnen") # Stem a word
    'autobahn'

    Invoking the stemmers that way is useful if you do not know the
    language to be stemmed at runtime. Alternatively, if you already know
    the language, then you can invoke the language specific stemmer directly:

    >>> from nltk.stem.snowball import GermanStemmer
    >>> stemmer = GermanStemmer()
    >>> stemmer.stem("Autobahnen")
    'autobahn'

    :param language: The language whose subclass is instantiated.
    :type language: str or unicode
    :param ignore_stopwords: If set to True, stopwords are
                             not stemmed and returned unchanged.
                             Set to False by default.
    :type ignore_stopwords: bool
    :raise ValueError: If there is no stemmer for the specified
                           language, a ValueError is raised.
    """

    languages = ("danish", "dutch", "english", "finnish", "french", "german",
                 "hungarian", "italian", "norwegian", "porter", "portuguese",
                 "romanian", "russian", "spanish", "swedish")

    def __init__(self, language, ignore_stopwords=False):
        if language not in self.languages:
            raise ValueError("The language '{0}' is not supported.".format(language))
        # Look up the language-specific stemmer class defined in this module,
        # e.g. "german" -> GermanStemmer.
        stemmerclass = globals()[language.capitalize() + "Stemmer"]
        self.stemmer = stemmerclass(ignore_stopwords)
        # Expose the delegate's bound method directly; note this instance
        # attribute shadows the class-level ``stem`` below for instances
        # constructed normally.
        self.stem = self.stemmer.stem
        self.stopwords = self.stemmer.stopwords

    def stem(self, token):
        """Strip affixes from *token* using the selected language stemmer.

        :param token: The word to be stemmed.
        :type token: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        # Bug fix: this previously called ``self.stemmer.stem(self, token)``,
        # passing the wrapper instance through as the word, which raised a
        # TypeError whenever the class-level method was actually invoked
        # (e.g. ``SnowballStemmer.stem(instance, token)``).
        return self.stemmer.stem(token)
@compat.python_2_unicode_compatible
class _LanguageSpecificStemmer(StemmerI):

    """
    Base class that lets a concrete language stemmer be invoked directly,
    which is useful when the language is already known at runtime.

    Create an instance of the Snowball stemmer.

    :param ignore_stopwords: If set to True, stopwords are
                             not stemmed and returned unchanged.
                             Set to False by default.
    :type ignore_stopwords: bool
    """

    def __init__(self, ignore_stopwords=False):
        # Derive the language from the subclass name,
        # e.g. "GermanStemmer" -> "german".
        language = type(self).__name__.lower()
        if language.endswith("stemmer"):
            language = language[:-7]

        self.stopwords = set()
        if ignore_stopwords:
            try:
                self.stopwords.update(stopwords.words(language))
            except IOError:
                raise ValueError("{!r} has no list of stopwords. Please set"
                                 " 'ignore_stopwords' to 'False'.".format(self))

    def __repr__(self):
        """Return '<ClassName>' for debugging output."""
        return "<{0}>".format(type(self).__name__)
class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer):

    """
    A word stemmer based on the original Porter stemming algorithm.

        Porter, M. \"An algorithm for suffix stripping.\"
        Program 14.3 (1980): 130-137.

    A few minor modifications have been made to Porter's basic
    algorithm.  See the source code of the module
    nltk.stem.porter for more information.
    """
    def __init__(self, ignore_stopwords=False):
        # Both base classes are initialised explicitly (not via super())
        # so that each receives exactly the arguments it expects.
        _LanguageSpecificStemmer.__init__(self, ignore_stopwords)
        porter.PorterStemmer.__init__(self)
class _ScandinavianStemmer(_LanguageSpecificStemmer):

    """
    Shared helper for the Danish, Norwegian, and Swedish stemmers that
    computes the string region R1.
    """

    def _r1_scandinavian(self, word, vowels):
        """
        Return the region R1 as the Scandinavian stemmers define it.

        R1 is the region after the first non-vowel that follows a vowel,
        or the null region at the end of the word if there is no such
        non-vowel; it is then adjusted so that at least three letters
        precede it.

        :param word: The word whose region R1 is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the region R1.
        :type vowels: unicode
        :return: the region R1 for the respective word.
        :rtype: unicode
        :note: Invoked by the stem methods of DanishStemmer,
               NorwegianStemmer, and SwedishStemmer; it is not meant
               to be called directly.
        """
        for pos in range(1, len(word)):
            # Find the first non-vowel immediately preceded by a vowel.
            if word[pos] in vowels or word[pos - 1] not in vowels:
                continue
            prefix_len = pos + 1
            if 0 < prefix_len < 3:
                # Too short a prefix: force the region to start after
                # the third letter.
                return word[3:]
            if prefix_len >= 3:
                return word[pos + 1:]
            return word
        # No qualifying letter: R1 is the null region.
        return ""
class _StandardStemmer(_LanguageSpecificStemmer):

    """
    Shared helper providing the standard definitions of the string
    regions R1, R2, and RV.
    """

    def _r1r2_standard(self, word, vowels):
        """
        Return the standard regions R1 and R2 of *word* as a tuple.

        R1 is the region after the first non-vowel following a vowel
        (or the null region at the end of the word if there is none);
        R2 is the same region computed within R1.

        :param word: The word whose regions R1 and R2 are determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the regions R1 and R2.
        :type vowels: unicode
        :return: (r1, r2), the regions R1 and R2 for the respective word.
        :rtype: tuple
        :note: Invoked by the stem methods of the Dutch, Finnish,
               French, German, Italian, Portuguese, Romanian, and
               Spanish stemmers; it is not meant to be called directly.
        :note: A detailed description of how to define R1 and R2
               can be found at http://snowball.tartarus.org/texts/r1r2.html
        """
        def region_after_first_nonvowel(text):
            # The region starts right after the first non-vowel that
            # directly follows a vowel.
            for pos in range(1, len(text)):
                if text[pos] not in vowels and text[pos - 1] in vowels:
                    return text[pos + 1:]
            return ""

        r1 = region_after_first_nonvowel(word)
        r2 = region_after_first_nonvowel(r1)
        return (r1, r2)

    def _rv_standard(self, word, vowels):
        """
        Return the standard region RV of *word*.

        If the second letter is a consonant, RV is the region after the
        next following vowel.  If the first two letters are vowels, RV
        is the region after the next following consonant.  Otherwise,
        RV is the region after the third letter.

        :param word: The word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the region RV.
        :type vowels: unicode
        :return: the region RV for the respective word.
        :rtype: unicode
        :note: Invoked by the stem methods of the Italian, Portuguese,
               Romanian, and Spanish stemmers; it is not meant to be
               called directly.
        """
        if len(word) < 2:
            return ""

        if word[1] not in vowels:
            # Second letter is a consonant: RV follows the next vowel.
            for pos in range(2, len(word)):
                if word[pos] in vowels:
                    return word[pos + 1:]
            return ""

        if word[0] in vowels:
            # First two letters are vowels: RV follows the next consonant.
            for pos in range(2, len(word)):
                if word[pos] not in vowels:
                    return word[pos + 1:]
            return ""

        # Consonant followed by a vowel: RV is everything after letter three.
        return word[3:]
class DanishStemmer(_ScandinavianStemmer):

    """
    The Danish Snowball stemmer.

    :cvar __vowels: The Danish vowels.
    :type __vowels: unicode
    :cvar __consonants: The Danish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Danish double consonants.
    :type __double_consonants: tuple
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple

    :note: A detailed description of the Danish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/danish/stemmer.html
    """

    # The language's vowels and other important characters are defined.
    # (\xE6 = ae, \xE5 = a-ring, \xF8 = o-slash)
    __vowels = "aeiouy\xE6\xE5\xF8"
    __consonants = "bcdfghjklmnpqrstvwxz"
    __double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
                           "kk", "ll", "mm", "nn", "pp", "qq", "rr",
                           "ss", "tt", "vv", "ww", "xx", "zz")
    __s_ending = "abcdfghjklmnoprtvyz\xE5"

    # The different suffixes, divided into the algorithm's steps
    # and organized by length, are listed in tuples.
    __step1_suffixes = ("erendes", "erende", "hedens", "ethed",
                        "erede", "heden", "heder", "endes",
                        "ernes", "erens", "erets", "ered",
                        "ende", "erne", "eren", "erer", "heds",
                        "enes", "eres", "eret", "hed", "ene", "ere",
                        "ens", "ers", "ets", "en", "er", "es", "et",
                        "e", "s")
    __step2_suffixes = ("gd", "dt", "gt", "kt")
    __step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig")

    def stem(self, word):
        """
        Stem a Danish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        # Every word is put into lower case for normalization.
        word = word.lower()

        # Stopwords are returned unchanged (set only when the stemmer was
        # constructed with ignore_stopwords=True).
        if word in self.stopwords:
            return word

        # After this, the required regions are generated
        # by the respective helper method.
        r1 = self._r1_scandinavian(word, self.__vowels)

        # Then the actual stemming process starts.
        # Every new step is explicitly indicated
        # according to the descriptions on the Snowball website.

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "s":
                    # A final 's' is only removed when preceded by a
                    # valid s-ending letter.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                # Only the last letter of the consonant pair is removed.
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3
        # 'igst' is first reduced to 'ig' before the suffix check below.
        if r1.endswith("igst"):
            word = word[:-2]
            r1 = r1[:-2]

        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == "l\xF8st":
                    # 'loest' -> 'loes': only the final 't' is dropped.
                    word = word[:-1]
                    r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]

                    # Re-apply the step-2 rule after the removal.
                    if r1.endswith(self.__step2_suffixes):
                        word = word[:-1]
                        r1 = r1[:-1]
                break

        # STEP 4: Undouble
        # A word-final double consonant is reduced to a single one.
        for double_cons in self.__double_consonants:
            if word.endswith(double_cons) and len(word) > 3:
                word = word[:-1]
                break

        return word
class DutchStemmer(_StandardStemmer):

    """
    The Dutch Snowball stemmer.

    :cvar __vowels: The Dutch vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
    :type __step3b_suffixes: tuple

    :note: A detailed description of the Dutch
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/dutch/stemmer.html
    """

    # \xE8 = e-grave
    __vowels = "aeiouy\xE8"
    __step1_suffixes = ("heden", "ene", "en", "se", "s")
    __step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig")

    def stem(self, word):
        """
        Stem a Dutch word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords are returned unchanged (set only when the stemmer was
        # constructed with ignore_stopwords=True).
        if word in self.stopwords:
            return word

        # Remembered so that step 3b can apply the 'bar' rule only after
        # an e-suffix was removed in step 2.
        step2_success = False

        # Vowel accents are removed.
        word = (word.replace("\xE4", "a").replace("\xE1", "a")
                    .replace("\xEB", "e").replace("\xE9", "e")
                    .replace("\xED", "i").replace("\xEF", "i")
                    .replace("\xF6", "o").replace("\xF3", "o")
                    .replace("\xFC", "u").replace("\xFA", "u"))

        # An initial 'y', a 'y' after a vowel,
        # and an 'i' between self.__vowels is put into upper case.
        # As from now these are treated as consonants.
        if word.startswith("y"):
            word = "".join(("Y", word[1:]))

        for i in range(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == "y":
                word = "".join((word[:i], "Y", word[i+1:]))

        for i in range(1, len(word)-1):
            if (word[i-1] in self.__vowels and word[i] == "i" and
               word[i+1] in self.__vowels):
                word = "".join((word[:i], "I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "heden":
                    # 'heden' is replaced (not just removed) by 'heid'.
                    word = suffix_replace(word, suffix, "heid")
                    r1 = suffix_replace(r1, suffix, "heid")
                    if r2.endswith("heden"):
                        r2 = suffix_replace(r2, suffix, "heid")

                elif (suffix in ("ene", "en") and
                      not word.endswith("heden") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-3:-len(suffix)] != "gem"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    # Undouble a resulting final 'kk', 'dd', or 'tt'.
                    if word.endswith(("kk", "dd", "tt")):
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]

                elif (suffix in ("se", "s") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-1] != "j"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 2
        # Delete a suffix 'e' in R1 when preceded by a consonant.
        if r1.endswith("e") and word[-2] not in self.__vowels:
            step2_success = True
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]

            if word.endswith(("kk", "dd", "tt")):
                word = word[:-1]
                r1 = r1[:-1]
                r2 = r2[:-1]

        # STEP 3a
        # Delete 'heid' in R2 unless preceded by 'c'; then treat a
        # following 'en' as in step 1.
        if r2.endswith("heid") and word[-5] != "c":
            word = word[:-4]
            r1 = r1[:-4]
            r2 = r2[:-4]

            if (r1.endswith("en") and word[-3] not in self.__vowels and
                word[-5:-2] != "gem"):
                word = word[:-2]
                r1 = r1[:-2]
                r2 = r2[:-2]

                if word.endswith(("kk", "dd", "tt")):
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]

        # STEP 3b: Derivational suffixes
        for suffix in self.__step3b_suffixes:
            if r2.endswith(suffix):
                if suffix in ("end", "ing"):
                    word = word[:-3]
                    r2 = r2[:-3]

                    if r2.endswith("ig") and word[-3] != "e":
                        word = word[:-2]
                    else:
                        if word.endswith(("kk", "dd", "tt")):
                            word = word[:-1]

                elif suffix == "ig" and word[-3] != "e":
                    word = word[:-2]

                elif suffix == "lijk":
                    word = word[:-4]
                    r1 = r1[:-4]

                    # Re-apply the step-2 rule after removing 'lijk'.
                    if r1.endswith("e") and word[-2] not in self.__vowels:
                        word = word[:-1]

                        if word.endswith(("kk", "dd", "tt")):
                            word = word[:-1]

                elif suffix == "baar":
                    word = word[:-4]

                elif suffix == "bar" and step2_success:
                    word = word[:-3]
                break

        # STEP 4: Undouble vowel
        # e.g. a final 'CVVC' with a double vowel loses one vowel.
        if len(word) >= 4:
            if word[-1] not in self.__vowels and word[-1] != "I":
                if word[-3:-1] in ("aa", "ee", "oo", "uu"):
                    if word[-4] not in self.__vowels:
                        word = "".join((word[:-3], word[-3], word[-1]))

        # All occurrences of 'I' and 'Y' are put back into lower case.
        word = word.replace("I", "i").replace("Y", "y")

        return word
class EnglishStemmer(_StandardStemmer):
"""
The English Snowball stemmer.
:cvar __vowels: The English vowels.
:type __vowels: unicode
:cvar __double_consonants: The English double consonants.
:type __double_consonants: tuple
:cvar __li_ending: Letters that may directly appear before a word final 'li'.
:type __li_ending: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm.
:type __step1a_suffixes: tuple
:cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm.
:type __step1b_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
:type __step5_suffixes: tuple
:cvar __special_words: A dictionary containing words
which have to be stemmed specially.
:type __special_words: dict
:note: A detailed description of the English
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/english/stemmer.html
"""
__vowels = "aeiouy"
__double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn",
"pp", "rr", "tt")
__li_ending = "cdeghkmnrt"
__step0_suffixes = ("'s'", "'s", "'")
__step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s")
__step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed")
__step2_suffixes = ('ization', 'ational', 'fulness', 'ousness',
'iveness', 'tional', 'biliti', 'lessli',
'entli', 'ation', 'alism', 'aliti', 'ousli',
'iviti', 'fulli', 'enci', 'anci', 'abli',
'izer', 'ator', 'alli', 'bli', 'ogi', 'li')
__step3_suffixes = ('ational', 'tional', 'alize', 'icate', 'iciti',
'ative', 'ical', 'ness', 'ful')
__step4_suffixes = ('ement', 'ance', 'ence', 'able', 'ible', 'ment',
'ant', 'ent', 'ism', 'ate', 'iti', 'ous',
'ive', 'ize', 'ion', 'al', 'er', 'ic')
__step5_suffixes = ("e", "l")
__special_words = {"skis" : "ski",
"skies" : "sky",
"dying" : "die",
"lying" : "lie",
"tying" : "tie",
"idly" : "idl",
"gently" : "gentl",
"ugly" : "ugli",
"early" : "earli",
"only" : "onli",
"singly" : "singl",
"sky" : "sky",
"news" : "news",
"howe" : "howe",
"atlas" : "atlas",
"cosmos" : "cosmos",
"bias" : "bias",
"andes" : "andes",
"inning" : "inning",
"innings" : "inning",
"outing" : "outing",
"outings" : "outing",
"canning" : "canning",
"cannings" : "canning",
"herring" : "herring",
"herrings" : "herring",
"earring" : "earring",
"earrings" : "earring",
"proceed" : "proceed",
"proceeds" : "proceed",
"proceeded" : "proceed",
"proceeding" : "proceed",
"exceed" : "exceed",
"exceeds" : "exceed",
"exceeded" : "exceed",
"exceeding" : "exceed",
"succeed" : "succeed",
"succeeds" : "succeed",
"succeeded" : "succeed",
"succeeding" : "succeed"}
    def stem(self, word):
        """
        Stem an English word and return the stemmed form.

        Implements the Porter2 (Snowball English) algorithm: steps 0
        through 5 are applied in order, each stripping or rewriting one
        class of suffixes.  The regions r1 and r2 are maintained in
        lockstep with ``word`` so that region-conditioned rules can test
        them directly.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        # Stopwords and very short words are returned unchanged;
        # irregular forms use the precomputed lookup table.
        if word in self.stopwords or len(word) <= 2:
            return word
        elif word in self.__special_words:
            return self.__special_words[word]
        # Map the different apostrophe characters to a single consistent one
        word = (word.replace("\u2019", "\x27")
                    .replace("\u2018", "\x27")
                    .replace("\u201B", "\x27"))
        if word.startswith("\x27"):
            word = word[1:]
        # Mark consonantal "y" as "Y" (word-initial, or after a vowel) so
        # the vowel tests below do not treat it as a vowel; it is mapped
        # back to lower case at the very end.
        if word.startswith("y"):
            word = "".join(("Y", word[1:]))
        for i in range(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == "y":
                word = "".join((word[:i], "Y", word[i+1:]))
        step1a_vowel_found = False
        step1b_vowel_found = False
        r1 = ""
        r2 = ""
        # Exceptional R1 positions for words beginning "gener", "commun"
        # or "arsen" (per the Snowball specification); otherwise the
        # standard R1/R2 computation is used.
        if word.startswith(("gener", "commun", "arsen")):
            if word.startswith(("gener", "arsen")):
                r1 = word[5:]
            else:
                r1 = word[6:]
            for i in range(1, len(r1)):
                if r1[i] not in self.__vowels and r1[i-1] in self.__vowels:
                    r2 = r1[i+1:]
                    break
        else:
            r1, r2 = self._r1r2_standard(word, self.__vowels)
        # STEP 0
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                break
        # STEP 1a
        for suffix in self.__step1a_suffixes:
            if word.endswith(suffix):
                if suffix == "sses":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                elif suffix in ("ied", "ies"):
                    # Replace by "i" when preceded by more than one
                    # letter, otherwise by "ie" ("ties" -> "tie").
                    if len(word[:-len(suffix)]) > 1:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    else:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                elif suffix == "s":
                    # Delete a final "s" only if a vowel appears before
                    # the last two letters ("gas" stays, "gaps" -> "gap").
                    for letter in word[:-2]:
                        if letter in self.__vowels:
                            step1a_vowel_found = True
                            break
                    if step1a_vowel_found:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                break
        # STEP 1b
        for suffix in self.__step1b_suffixes:
            if word.endswith(suffix):
                if suffix in ("eed", "eedly"):
                    if r1.endswith(suffix):
                        word = suffix_replace(word, suffix, "ee")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ee")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ee")
                        else:
                            r2 = ""
                else:
                    # "ed"/"edly"/"ing"/"ingly": delete only if the
                    # preceding part of the word contains a vowel.
                    for letter in word[:-len(suffix)]:
                        if letter in self.__vowels:
                            step1b_vowel_found = True
                            break
                    if step1b_vowel_found:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        if word.endswith(("at", "bl", "iz")):
                            word = "".join((word, "e"))
                            r1 = "".join((r1, "e"))
                            if len(word) > 5 or len(r1) >=3:
                                r2 = "".join((r2, "e"))
                        elif word.endswith(self.__double_consonants):
                            word = word[:-1]
                            r1 = r1[:-1]
                            r2 = r2[:-1]
                        # Short-word test: add an "e" when the word ends
                        # in a short syllable and R1 is empty
                        # ("hop" -> "hope").
                        elif ((r1 == "" and len(word) >= 3 and
                               word[-1] not in self.__vowels and
                               word[-1] not in "wxY" and
                               word[-2] in self.__vowels and
                               word[-3] not in self.__vowels)
                              or
                              (r1 == "" and len(word) == 2 and
                               word[0] in self.__vowels and
                               word[1] not in self.__vowels)):
                            word = "".join((word, "e"))
                            if len(r1) > 0:
                                r1 = "".join((r1, "e"))
                            if len(r2) > 0:
                                r2 = "".join((r2, "e"))
                break
        # STEP 1c
        # Replace a final "y"/"Y" by "i" when preceded by a non-vowel
        # that is not the first letter of the word.
        if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels:
            word = "".join((word[:-1], "i"))
            if len(r1) >= 1:
                r1 = "".join((r1[:-1], "i"))
            else:
                r1 = ""
            if len(r2) >= 1:
                r2 = "".join((r2[:-1], "i"))
            else:
                r2 = ""
        # STEP 2
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix in ("enci", "anci", "abli"):
                        word = "".join((word[:-1], "e"))
                        if len(r1) >= 1:
                            r1 = "".join((r1[:-1], "e"))
                        else:
                            r1 = ""
                        if len(r2) >= 1:
                            r2 = "".join((r2[:-1], "e"))
                        else:
                            r2 = ""
                    elif suffix == "entli":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix in ("izer", "ization"):
                        word = suffix_replace(word, suffix, "ize")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ize")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ize")
                        else:
                            r2 = ""
                    elif suffix in ("ational", "ation", "ator"):
                        word = suffix_replace(word, suffix, "ate")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ate")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ate")
                        else:
                            r2 = "e"
                    elif suffix in ("alism", "aliti", "alli"):
                        word = suffix_replace(word, suffix, "al")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "al")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "al")
                        else:
                            r2 = ""
                    elif suffix == "fulness":
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                    elif suffix in ("ousli", "ousness"):
                        word = suffix_replace(word, suffix, "ous")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ous")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ous")
                        else:
                            r2 = ""
                    elif suffix in ("iveness", "iviti"):
                        word = suffix_replace(word, suffix, "ive")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ive")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ive")
                        else:
                            r2 = "e"
                    elif suffix in ("biliti", "bli"):
                        word = suffix_replace(word, suffix, "ble")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ble")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ble")
                        else:
                            r2 = ""
                    elif suffix == "ogi" and word[-4] == "l":
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                    elif suffix in ("fulli", "lessli"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix == "li" and word[-3] in self.__li_ending:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                break
        # STEP 3
        for suffix in self.__step3_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix == "ational":
                        word = suffix_replace(word, suffix, "ate")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ate")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ate")
                        else:
                            r2 = ""
                    elif suffix == "alize":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                    elif suffix in ("icate", "iciti", "ical"):
                        word = suffix_replace(word, suffix, "ic")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ic")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ic")
                        else:
                            r2 = ""
                    elif suffix in ("ful", "ness"):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                    # "ative" is only deleted when it also lies in R2.
                    elif suffix == "ative" and r2.endswith(suffix):
                        word = word[:-5]
                        r1 = r1[:-5]
                        r2 = r2[:-5]
                break
        # STEP 4
        for suffix in self.__step4_suffixes:
            if word.endswith(suffix):
                if r2.endswith(suffix):
                    if suffix == "ion":
                        # "ion" is deleted only after "s" or "t".
                        if word[-4] in "st":
                            word = word[:-3]
                            r1 = r1[:-3]
                            r2 = r2[:-3]
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break
        # STEP 5
        if r2.endswith("l") and word[-2] == "l":
            word = word[:-1]
        elif r2.endswith("e"):
            word = word[:-1]
        elif r1.endswith("e"):
            # Delete a final "e" in R1 only when it is not preceded by a
            # short syllable.
            if len(word) >= 4 and (word[-2] in self.__vowels or
                                   word[-2] in "wxY" or
                                   word[-3] not in self.__vowels or
                                   word[-4] in self.__vowels):
                word = word[:-1]
        # Restore the consonantal "Y" markers to lower case.
        word = word.replace("Y", "y")
        return word
class FinnishStemmer(_StandardStemmer):
    """
    The Finnish Snowball stemmer.

    :cvar __vowels: The Finnish vowels.
    :type __vowels: unicode
    :cvar __restricted_vowels: A subset of the Finnish vowels.
    :type __restricted_vowels: unicode
    :cvar __long_vowels: The Finnish vowels in their long forms.
    :type __long_vowels: tuple
    :cvar __consonants: The Finnish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Finnish double consonants.
    :type __double_consonants: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Finnish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/finnish/stemmer.html
    """
    # "\xE4" and "\xF6" are a-umlaut and o-umlaut.
    __vowels = "aeiouy\xE4\xF6"
    __restricted_vowels = "aeiou\xE4\xF6"
    __long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4",
                     "\xF6\xF6")
    __consonants = "bcdfghjklmnpqrstvwxz"
    __double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
                           "kk", "ll", "mm", "nn", "pp", "qq", "rr",
                           "ss", "tt", "vv", "ww", "xx", "zz")
    __step1_suffixes = ('kaan', 'k\xE4\xE4n', 'sti', 'kin', 'han',
                        'h\xE4n', 'ko', 'k\xF6', 'pa', 'p\xE4')
    __step2_suffixes = ('nsa', 'ns\xE4', 'mme', 'nne', 'si', 'ni',
                        'an', '\xE4n', 'en')
    __step3_suffixes = ('siin', 'tten', 'seen', 'han', 'hen', 'hin',
                        'hon', 'h\xE4n', 'h\xF6n', 'den', 'tta',
                        'tt\xE4', 'ssa', 'ss\xE4', 'sta',
                        'st\xE4', 'lla', 'll\xE4', 'lta',
                        'lt\xE4', 'lle', 'ksi', 'ine', 'ta',
                        't\xE4', 'na', 'n\xE4', 'a', '\xE4',
                        'n')
    __step4_suffixes = ('impi', 'impa', 'imp\xE4', 'immi', 'imma',
                        'imm\xE4', 'mpi', 'mpa', 'mp\xE4', 'mmi',
                        'mma', 'mm\xE4', 'eja', 'ej\xE4')
    def stem(self, word):
        """
        Stem a Finnish word and return the stemmed form.

        The six steps (particles, possessives, cases, other endings,
        plurals, tidying) are applied in order; ``r1`` and ``r2`` are
        kept in sync with ``word`` throughout so that region-conditioned
        rules can test them directly.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # Step 5 behaves differently depending on whether step 3
        # removed anything.
        step3_success = False
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        # STEP 1: Particles etc.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "sti":
                    if suffix in r2:
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    # Other particles are removed only when preceded
                    # by "n", "t" or a vowel.
                    if word[-len(suffix)-1] in "ntaeiouy\xE4\xF6":
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break
        # STEP 2: Possessives
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == "si":
                    if word[-3] != "k":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                elif suffix == "ni":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                    if word.endswith("kse"):
                        word = suffix_replace(word, "kse", "ksi")
                    if r1.endswith("kse"):
                        r1 = suffix_replace(r1, "kse", "ksi")
                    if r2.endswith("kse"):
                        r2 = suffix_replace(r2, "kse", "ksi")
                elif suffix == "an":
                    if (word[-4:-2] in ("ta", "na") or
                        word[-5:-2] in ("ssa", "sta", "lla", "lta")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                elif suffix == "\xE4n":
                    if (word[-4:-2] in ("t\xE4", "n\xE4") or
                        word[-5:-2] in ("ss\xE4", "st\xE4",
                                        "ll\xE4", "lt\xE4")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                elif suffix == "en":
                    if word[-5:-2] in ("lle", "ine"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    # "nsa"/"ns\xE4"/"mme"/"nne": remove the full
                    # three-letter suffix.
                    word = word[:-3]
                    r1 = r1[:-3]
                    r2 = r2[:-3]
                break
        # STEP 3: Cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in ("han", "hen", "hin", "hon", "h\xE4n",
                              "h\xF6n"):
                    # Illative "hVn" endings only match when the vowel
                    # before the suffix agrees with the suffix vowel.
                    if ((suffix == "han" and word[-4] == "a") or
                        (suffix == "hen" and word[-4] == "e") or
                        (suffix == "hin" and word[-4] == "i") or
                        (suffix == "hon" and word[-4] == "o") or
                        (suffix == "h\xE4n" and word[-4] == "\xE4") or
                        (suffix == "h\xF6n" and word[-4] == "\xF6")):
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True
                elif suffix in ("siin", "den", "tten"):
                    if (word[-len(suffix)-1] == "i" and
                        word[-len(suffix)-2] in self.__restricted_vowels):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        step3_success = True
                    else:
                        # Condition failed: try the next (shorter) suffix.
                        continue
                elif suffix == "seen":
                    if word[-6:-4] in self.__long_vowels:
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                        step3_success = True
                    else:
                        continue
                elif suffix in ("a", "\xE4"):
                    if word[-2] in self.__vowels and word[-3] in self.__consonants:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                        step3_success = True
                elif suffix in ("tta", "tt\xE4"):
                    if word[-4] == "e":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True
                elif suffix == "n":
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
                    step3_success = True
                    # After removing "n", also drop one letter of a
                    # remaining "ie" or long-vowel ending.
                    if word[-2:] == "ie" or word[-2:] in self.__long_vowels:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    step3_success = True
                break
        # STEP 4: Other endings
        for suffix in self.__step4_suffixes:
            if r2.endswith(suffix):
                if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma",
                              "mm\xE4"):
                    # Comparative endings are kept after "po".
                    if word[-5:-3] != "po":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 5: Plurals
        if step3_success and len(r1) >= 1 and r1[-1] in "ij":
            word = word[:-1]
            r1 = r1[:-1]
        elif (not step3_success and len(r1) >= 2 and
              r1[-1] == "t" and r1[-2] in self.__vowels):
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]
            if r2.endswith("imma"):
                word = word[:-4]
                r1 = r1[:-4]
            elif r2.endswith("mma") and r2[-5:-3] != "po":
                word = word[:-3]
                r1 = r1[:-3]
        # STEP 6: Tidying up
        if r1[-2:] in self.__long_vowels:
            word = word[:-1]
            r1 = r1[:-1]
        if (len(r1) >= 2 and r1[-2] in self.__consonants and
            r1[-1] in "a\xE4ei"):
            word = word[:-1]
            r1 = r1[:-1]
        if r1.endswith(("oj", "uj")):
            word = word[:-1]
            r1 = r1[:-1]
        if r1.endswith("jo"):
            word = word[:-1]
            r1 = r1[:-1]
        # If the word ends with a double consonant
        # followed by zero or more vowels, the last consonant is removed.
        for i in range(1, len(word)):
            if word[-i] in self.__vowels:
                continue
            else:
                if i == 1:
                    if word[-i-1:] in self.__double_consonants:
                        word = word[:-1]
                else:
                    if word[-i-1:-i+1] in self.__double_consonants:
                        word = "".join((word[:-i], word[-i+1:]))
                break
        return word
class FrenchStemmer(_StandardStemmer):
    """
    The French Snowball stemmer.

    :cvar __vowels: The French vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the French
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/french/stemmer.html
    """
    __vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9"
    # NOTE: capital "U"/"I" inside suffixes match the markers produced
    # by the pre-processing in stem(), not literal upper-case input.
    __step1_suffixes = ('issements', 'issement', 'atrices', 'atrice',
                        'ateurs', 'ations', 'logies', 'usions',
                        'utions', 'ements', 'amment', 'emment',
                        'ances', 'iqUes', 'ismes', 'ables', 'istes',
                        'ateur', 'ation', 'logie', 'usion', 'ution',
                        'ences', 'ement', 'euses', 'ments', 'ance',
                        'iqUe', 'isme', 'able', 'iste', 'ence',
                        'it\xE9s', 'ives', 'eaux', 'euse', 'ment',
                        'eux', 'it\xE9', 'ive', 'ifs', 'aux', 'if')
    __step2a_suffixes = ('issaIent', 'issantes', 'iraIent', 'issante',
                         'issants', 'issions', 'irions', 'issais',
                         'issait', 'issant', 'issent', 'issiez', 'issons',
                         'irais', 'irait', 'irent', 'iriez', 'irons',
                         'iront', 'isses', 'issez', '\xEEmes',
                         '\xEEtes', 'irai', 'iras', 'irez', 'isse',
                         'ies', 'ira', '\xEEt', 'ie', 'ir', 'is',
                         'it', 'i')
    __step2b_suffixes = ('eraIent', 'assions', 'erions', 'assent',
                         'assiez', '\xE8rent', 'erais', 'erait',
                         'eriez', 'erons', 'eront', 'aIent', 'antes',
                         'asses', 'ions', 'erai', 'eras', 'erez',
                         '\xE2mes', '\xE2tes', 'ante', 'ants',
                         'asse', '\xE9es', 'era', 'iez', 'ais',
                         'ait', 'ant', '\xE9e', '\xE9s', 'er',
                         'ez', '\xE2t', 'ai', 'as', '\xE9', 'a')
    __step4_suffixes = ('i\xE8re', 'I\xE8re', 'ion', 'ier', 'Ier',
                        'e', '\xEB')
    def stem(self, word):
        """
        Stem a French word and return the stemmed form.

        Pre-processing marks certain "u"/"i"/"y" occurrences with upper
        case so they are treated as consonants; those markers are mapped
        back to lower case just before returning.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # Flags recording which steps fired; they gate steps 2a/2b/3/4.
        step1_success = False
        rv_ending_found = False
        step2a_success = False
        step2b_success = False
        # Every occurrence of 'u' after 'q' is put into upper case.
        for i in range(1, len(word)):
            if word[i-1] == "q" and word[i] == "u":
                word = "".join((word[:i], "U", word[i+1:]))
        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        # Every occurrence of 'y' preceded or
        # followed by a vowel is also put into upper case.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))
            if word[i-1] in self.__vowels or word[i+1] in self.__vowels:
                if word[i] == "y":
                    word = "".join((word[:i], "Y", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self.__rv_french(word, self.__vowels)
        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "eaux":
                    word = word[:-1]
                    step1_success = True
                elif suffix in ("euse", "euses"):
                    if suffix in r2:
                        word = word[:-len(suffix)]
                        step1_success = True
                    elif suffix in r1:
                        word = suffix_replace(word, suffix, "eux")
                        step1_success = True
                elif suffix in ("ement", "ements") and suffix in rv:
                    word = word[:-len(suffix)]
                    step1_success = True
                    # Post-deletion trimming of residual stems
                    # ("iv", "eus", "abl", "iqU", "i\xE8r").
                    if word[-2:] == "iv" and "iv" in r2:
                        word = word[:-2]
                        if word[-2:] == "at" and "at" in r2:
                            word = word[:-2]
                    elif word[-3:] == "eus":
                        if "eus" in r2:
                            word = word[:-3]
                        elif "eus" in r1:
                            word = "".join((word[:-1], "x"))
                    elif word[-3:] in ("abl", "iqU"):
                        if "abl" in r2 or "iqU" in r2:
                            word = word[:-3]
                    elif word[-3:] in ("i\xE8r", "I\xE8r"):
                        if "i\xE8r" in rv or "I\xE8r" in rv:
                            word = "".join((word[:-3], "i"))
                elif suffix == "amment" and suffix in rv:
                    word = suffix_replace(word, "amment", "ant")
                    rv = suffix_replace(rv, "amment", "ant")
                    # Treated as an rv-ending so step 2a still runs.
                    rv_ending_found = True
                elif suffix == "emment" and suffix in rv:
                    word = suffix_replace(word, "emment", "ent")
                    rv_ending_found = True
                elif (suffix in ("ment", "ments") and suffix in rv and
                      not rv.startswith(suffix) and
                      rv[rv.rindex(suffix)-1] in self.__vowels):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    rv_ending_found = True
                elif suffix == "aux" and suffix in r1:
                    word = "".join((word[:-2], "l"))
                    step1_success = True
                elif (suffix in ("issement", "issements") and suffix in r1
                      and word[-len(suffix)-1] not in self.__vowels):
                    word = word[:-len(suffix)]
                    step1_success = True
                elif suffix in ("ance", "iqUe", "isme", "able", "iste",
                              "eux", "ances", "iqUes", "ismes",
                              "ables", "istes") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True
                elif suffix in ("atrice", "ateur", "ation", "atrices",
                                "ateurs", "ations") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True
                    if word[-2:] == "ic":
                        if "ic" in r2:
                            word = word[:-2]
                        else:
                            word = "".join((word[:-2], "iqU"))
                elif suffix in ("logie", "logies") and suffix in r2:
                    word = suffix_replace(word, suffix, "log")
                    step1_success = True
                elif (suffix in ("usion", "ution", "usions", "utions") and
                      suffix in r2):
                    word = suffix_replace(word, suffix, "u")
                    step1_success = True
                elif suffix in ("ence", "ences") and suffix in r2:
                    word = suffix_replace(word, suffix, "ent")
                    step1_success = True
                elif suffix in ("it\xE9", "it\xE9s") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True
                    if word[-4:] == "abil":
                        if "abil" in r2:
                            word = word[:-4]
                        else:
                            word = "".join((word[:-2], "l"))
                    elif word[-2:] == "ic":
                        if "ic" in r2:
                            word = word[:-2]
                        else:
                            word = "".join((word[:-2], "iqU"))
                    elif word[-2:] == "iv":
                        if "iv" in r2:
                            word = word[:-2]
                elif (suffix in ("if", "ive", "ifs", "ives") and
                      suffix in r2):
                    word = word[:-len(suffix)]
                    step1_success = True
                    if word[-2:] == "at" and "at" in r2:
                        word = word[:-2]
                        if word[-2:] == "ic":
                            if "ic" in r2:
                                word = word[:-2]
                            else:
                                word = "".join((word[:-2], "iqU"))
                break
        # STEP 2a: Verb suffixes beginning 'i'
        if not step1_success or rv_ending_found:
            for suffix in self.__step2a_suffixes:
                if word.endswith(suffix):
                    # The suffix must lie in RV and be preceded (within
                    # RV) by a non-vowel.
                    if (suffix in rv and len(rv) > len(suffix) and
                        rv[rv.rindex(suffix)-1] not in self.__vowels):
                        word = word[:-len(suffix)]
                        step2a_success = True
                    break
            # STEP 2b: Other verb suffixes
            if not step2a_success:
                for suffix in self.__step2b_suffixes:
                    if rv.endswith(suffix):
                        if suffix == "ions" and "ions" in r2:
                            word = word[:-4]
                            step2b_success = True
                        elif suffix in ('eraIent', 'erions', '\xE8rent',
                                        'erais', 'erait', 'eriez',
                                        'erons', 'eront', 'erai', 'eras',
                                        'erez', '\xE9es', 'era', 'iez',
                                        '\xE9e', '\xE9s', 'er', 'ez',
                                        '\xE9'):
                            word = word[:-len(suffix)]
                            step2b_success = True
                        elif suffix in ('assions', 'assent', 'assiez',
                                        'aIent', 'antes', 'asses',
                                        '\xE2mes', '\xE2tes', 'ante',
                                        'ants', 'asse', 'ais', 'ait',
                                        'ant', '\xE2t', 'ai', 'as',
                                        'a'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            step2b_success = True
                            # A preceding "e" in RV is deleted too.
                            if rv.endswith("e"):
                                word = word[:-1]
                        break
        # STEP 3
        if step1_success or step2a_success or step2b_success:
            if word[-1] == "Y":
                word = "".join((word[:-1], "i"))
            elif word[-1] == "\xE7":
                word = "".join((word[:-1], "c"))
        # STEP 4: Residual suffixes
        else:
            if (len(word) >= 2 and word[-1] == "s" and
                word[-2] not in "aiou\xE8s"):
                word = word[:-1]
            for suffix in self.__step4_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if (suffix == "ion" and suffix in r2 and
                            rv[-4] in "st"):
                            word = word[:-3]
                        elif suffix in ("ier", "i\xE8re", "Ier",
                                        "I\xE8re"):
                            word = suffix_replace(word, suffix, "i")
                        elif suffix == "e":
                            word = word[:-1]
                        elif suffix == "\xEB" and word[-3:-1] == "gu":
                            word = word[:-1]
                        break
        # STEP 5: Undouble
        if word.endswith(("enn", "onn", "ett", "ell", "eill")):
            word = word[:-1]
        # STEP 6: Un-accent
        for i in range(1, len(word)):
            if word[-i] not in self.__vowels:
                i += 1
            else:
                if i != 1 and word[-i] in ("\xE9", "\xE8"):
                    word = "".join((word[:-i], "e", word[-i+1:]))
                break
        # Restore the consonant markers introduced by pre-processing.
        word = (word.replace("I", "i")
                    .replace("U", "u")
                    .replace("Y", "y"))
        return word
    def __rv_french(self, word, vowels):
        """
        Return the region RV that is used by the French stemmer.

        If the word begins with two vowels, RV is the region after
        the third letter. Otherwise, it is the region after the first
        vowel not at the beginning of the word, or the end of the word
        if these positions cannot be found. (Exceptionally, u'par',
        u'col' or u'tap' at the beginning of a word is also taken to
        define RV as the region to their right.)

        :param word: The French word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The French vowels that are used to determine
                       the region RV.
        :type vowels: unicode
        :return: the region RV for the respective French word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of
               the subclass FrenchStemmer. It is not to be invoked directly!
        """
        rv = ""
        if len(word) >= 2:
            if (word.startswith(("par", "col", "tap")) or
                (word[0] in vowels and word[1] in vowels)):
                rv = word[3:]
            else:
                for i in range(1, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break
        return rv
class GermanStemmer(_StandardStemmer):
    """
    The German Snowball stemmer.

    :cvar __vowels: The German vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __st_ending: Letter that may directly appear before a word final 'st'.
    :type __st_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the German
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/german/stemmer.html
    """
    # "\xE4\xF6\xFC" are a-, o- and u-umlaut.
    __vowels = "aeiouy\xE4\xF6\xFC"
    __s_ending = "bdfghklmnrt"
    __st_ending = "bdfghklmnt"
    __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s")
    __step2_suffixes = ("est", "en", "er", "st")
    __step3_suffixes = ("isch", "lich", "heit", "keit",
                        "end", "ung", "ig", "ik")
    def stem(self, word):
        """
        Stem a German word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # "\xDF" is eszett; it is normalized to "ss" before stemming.
        word = word.replace("\xDF", "ss")
        # Every occurrence of 'u' and 'y'
        # between vowels is put into upper case.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "y":
                    word = "".join((word[:i], "Y", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break
        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if (suffix in ("en", "es", "e") and
                    word[-len(suffix)-4:-len(suffix)] == "niss"):
                    # "niss" before the suffix: also strip the final "s"
                    # of "niss" ("...nisse" -> "...nis").
                    word = word[:-len(suffix)-1]
                    r1 = r1[:-len(suffix)-1]
                    r2 = r2[:-len(suffix)-1]
                elif suffix == "s":
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == "st":
                    # "st" is only removed after a valid letter and
                    # when the remaining stem keeps at least 3 letters.
                    if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 3: Derivational suffixes
        for suffix in self.__step3_suffixes:
            if r2.endswith(suffix):
                if suffix in ("end", "ung"):
                    if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
                        "e" not in r2[-len(suffix)-3:-len(suffix)-2]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                elif (suffix in ("ig", "ik", "isch") and
                      "e" not in r2[-len(suffix)-1:-len(suffix)]):
                    word = word[:-len(suffix)]
                elif suffix in ("lich", "heit"):
                    if ("er" in r1[-len(suffix)-2:-len(suffix)] or
                        "en" in r1[-len(suffix)-2:-len(suffix)]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                elif suffix == "keit":
                    if "lich" in r2[-len(suffix)-4:-len(suffix)]:
                        word = word[:-len(suffix)-4]
                    elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                break
        # Umlaut accents are removed and
        # 'u' and 'y' are put back into lower case.
        word = (word.replace("\xE4", "a").replace("\xF6", "o")
                    .replace("\xFC", "u").replace("U", "u")
                    .replace("Y", "y"))
        return word
class HungarianStemmer(_LanguageSpecificStemmer):
    """
    The Hungarian Snowball stemmer.

    :cvar __vowels: The Hungarian vowels.
    :type __vowels: unicode
    :cvar __digraphs: The Hungarian digraphs.
    :type __digraphs: tuple
    :cvar __double_consonants: The Hungarian double consonants.
    :type __double_consonants: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
    :type __step5_suffixes: tuple
    :cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm.
    :type __step6_suffixes: tuple
    :cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm.
    :type __step7_suffixes: tuple
    :cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm.
    :type __step8_suffixes: tuple
    :cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm.
    :type __step9_suffixes: tuple
    :note: A detailed description of the Hungarian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/hungarian/stemmer.html
    """
    __vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB"
    __digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs")
    __double_consonants = ("bb", "cc", "ccs", "dd", "ff", "gg",
                           "ggy", "jj", "kk", "ll", "lly", "mm",
                           "nn", "nny", "pp", "rr", "ss", "ssz",
                           "tt", "tty", "vv", "zz", "zzs")
    __step1_suffixes = ("al", "el")
    __step2_suffixes = ('k\xE9ppen', 'onk\xE9nt', 'enk\xE9nt',
                        'ank\xE9nt', 'k\xE9pp', 'k\xE9nt', 'ban',
                        'ben', 'nak', 'nek', 'val', 'vel', 't\xF3l',
                        't\xF5l', 'r\xF3l', 'r\xF5l', 'b\xF3l',
                        'b\xF5l', 'hoz', 'hez', 'h\xF6z',
                        'n\xE1l', 'n\xE9l', '\xE9rt', 'kor',
                        'ba', 'be', 'ra', 're', 'ig', 'at', 'et',
                        'ot', '\xF6t', 'ul', '\xFCl', 'v\xE1',
                        'v\xE9', 'en', 'on', 'an', '\xF6n',
                        'n', 't')
    __step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n")
    __step4_suffixes = ('astul', 'est\xFCl', '\xE1stul',
                        '\xE9st\xFCl', 'stul', 'st\xFCl')
    __step5_suffixes = ("\xE1", "\xE9")
    __step6_suffixes = ('ok\xE9', '\xF6k\xE9', 'ak\xE9',
                        'ek\xE9', '\xE1k\xE9', '\xE1\xE9i',
                        '\xE9k\xE9', '\xE9\xE9i', 'k\xE9',
                        '\xE9i', '\xE9\xE9', '\xE9')
    __step7_suffixes = ('\xE1juk', '\xE9j\xFCk', '\xFCnk',
                        'unk', 'juk', 'j\xFCk', '\xE1nk',
                        '\xE9nk', 'nk', 'uk', '\xFCk', 'em',
                        'om', 'am', 'od', 'ed', 'ad', '\xF6d',
                        'ja', 'je', '\xE1m', '\xE1d', '\xE9m',
                        '\xE9d', 'm', 'd', 'a', 'e', 'o',
                        '\xE1', '\xE9')
    __step8_suffixes = ('jaitok', 'jeitek', 'jaink', 'jeink', 'aitok',
                        'eitek', '\xE1itok', '\xE9itek', 'jaim',
                        'jeim', 'jaid', 'jeid', 'eink', 'aink',
                        'itek', 'jeik', 'jaik', '\xE1ink',
                        '\xE9ink', 'aim', 'eim', 'aid', 'eid',
                        'jai', 'jei', 'ink', 'aik', 'eik',
                        '\xE1im', '\xE1id', '\xE1ik', '\xE9im',
                        '\xE9id', '\xE9ik', 'im', 'id', 'ai',
                        'ei', 'ik', '\xE1i', '\xE9i', 'i')
    __step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok",
                        "ek", "ak", "k")
    def stem(self, word):
        """
        Stem an Hungarian word and return the stemmed form.

        The nine steps remove case endings, possessives and plural
        markers in order; "\xE1"/"\xE9" are rewritten to "a"/"e"
        whenever their removal exposes them as suffix remnants.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)
        # STEP 1: Remove instrumental case
        if r1.endswith(self.__step1_suffixes):
            for double_cons in self.__double_consonants:
                if word[-2-len(double_cons):-2] == double_cons:
                    # Undouble the consonant and drop the suffix.
                    word = "".join((word[:-4], word[-3]))
                    if r1[-2-len(double_cons):-2] == double_cons:
                        r1 = "".join((r1[:-4], r1[-3]))
                    break
        # STEP 2: Remove frequent cases
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    if r1.endswith("\xE1"):
                        word = "".join((word[:-1], "a"))
                        r1 = suffix_replace(r1, "\xE1", "a")
                    elif r1.endswith("\xE9"):
                        word = "".join((word[:-1], "e"))
                        r1 = suffix_replace(r1, "\xE9", "e")
                break
        # STEP 3: Remove special cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == "\xE9n":
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")
                break
        # STEP 4: Remove other cases
        for suffix in self.__step4_suffixes:
            if r1.endswith(suffix):
                if suffix == "\xE1stul":
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")
                elif suffix == "\xE9st\xFCl":
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 5: Remove factive case
        for suffix in self.__step5_suffixes:
            if r1.endswith(suffix):
                for double_cons in self.__double_consonants:
                    if word[-1-len(double_cons):-1] == double_cons:
                        word = "".join((word[:-3], word[-2]))
                        if r1[-1-len(double_cons):-1] == double_cons:
                            r1 = "".join((r1[:-3], r1[-2]))
                        break
        # STEP 6: Remove owned
        for suffix in self.__step6_suffixes:
            if r1.endswith(suffix):
                if suffix in ("\xE1k\xE9", "\xE1\xE9i"):
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")
                elif suffix in ("\xE9k\xE9", "\xE9\xE9i",
                                "\xE9\xE9"):
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 7: Remove singular owner suffixes
        for suffix in self.__step7_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in ("\xE1nk", "\xE1juk", "\xE1m",
                                  "\xE1d", "\xE1"):
                        word = suffix_replace(word, suffix, "a")
                        r1 = suffix_replace(r1, suffix, "a")
                    elif suffix in ("\xE9nk", "\xE9j\xFCk",
                                    "\xE9m", "\xE9d", "\xE9"):
                        word = suffix_replace(word, suffix, "e")
                        r1 = suffix_replace(r1, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break
        # STEP 8: Remove plural owner suffixes
        for suffix in self.__step8_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in ("\xE1im", "\xE1id", "\xE1i",
                                  "\xE1ink", "\xE1itok", "\xE1ik"):
                        word = suffix_replace(word, suffix, "a")
                        r1 = suffix_replace(r1, suffix, "a")
                    elif suffix in ("\xE9im", "\xE9id", "\xE9i",
                                    "\xE9ink", "\xE9itek", "\xE9ik"):
                        word = suffix_replace(word, suffix, "e")
                        r1 = suffix_replace(r1, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break
        # STEP 9: Remove plural suffixes
        for suffix in self.__step9_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "\xE1k":
                        word = suffix_replace(word, suffix, "a")
                    elif suffix == "\xE9k":
                        word = suffix_replace(word, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                break
        return word
    def __r1_hungarian(self, word, vowels, digraphs):
        """
        Return the region R1 that is used by the Hungarian stemmer.

        If the word begins with a vowel, R1 is defined as the region
        after the first consonant or digraph (= two letters stand for
        one phoneme) in the word. If the word begins with a consonant,
        it is defined as the region after the first vowel in the word.
        If the word does not contain both a vowel and consonant, R1
        is the null region at the end of the word.

        :param word: The Hungarian word whose region R1 is determined.
        :type word: str or unicode
        :param vowels: The Hungarian vowels that are used to determine
                       the region R1.
        :type vowels: unicode
        :param digraphs: The digraphs that are used to determine the
                         region R1.
        :type digraphs: tuple
        :return: the region R1 for the respective word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of the subclass
               HungarianStemmer. It is not to be invoked directly!
        """
        r1 = ""
        if word[0] in vowels:
            for digraph in digraphs:
                if digraph in word[1:]:
                    # NOTE(review): this indexes the first occurrence of
                    # the digraph's LAST letter anywhere in the word, not
                    # the position of the matched digraph itself — looks
                    # like it may misplace R1 when that letter also
                    # occurs earlier; confirm against the Snowball spec.
                    r1 = word[word.index(digraph[-1])+1:]
                    return r1
            for i in range(1, len(word)):
                if word[i] not in vowels:
                    r1 = word[i+1:]
                    break
        else:
            for i in range(1, len(word)):
                if word[i] in vowels:
                    r1 = word[i+1:]
                    break
        return r1
class ItalianStemmer(_StandardStemmer):

    """
    The Italian Snowball stemmer.

    :cvar __vowels: The Italian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :note: A detailed description of the Italian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/italian/stemmer.html

    """

    __vowels = "aeiou\xE0\xE8\xEC\xF2\xF9"
    # Attached pronouns (enclitics), longest first so that the first
    # endswith() match is the longest applicable suffix.
    __step0_suffixes = ('gliela', 'gliele', 'glieli', 'glielo',
                        'gliene', 'sene', 'mela', 'mele', 'meli',
                        'melo', 'mene', 'tela', 'tele', 'teli',
                        'telo', 'tene', 'cela', 'cele', 'celi',
                        'celo', 'cene', 'vela', 'vele', 'veli',
                        'velo', 'vene', 'gli', 'ci', 'la', 'le',
                        'li', 'lo', 'mi', 'ne', 'si', 'ti', 'vi')
    __step1_suffixes = ('atrice', 'atrici', 'azione', 'azioni',
                        'uzione', 'uzioni', 'usione', 'usioni',
                        'amento', 'amenti', 'imento', 'imenti',
                        'amente', 'abile', 'abili', 'ibile', 'ibili',
                        'mente', 'atore', 'atori', 'logia', 'logie',
                        'anza', 'anze', 'iche', 'ichi', 'ismo',
                        'ismi', 'ista', 'iste', 'isti', 'ist\xE0',
                        'ist\xE8', 'ist\xEC', 'ante', 'anti',
                        'enza', 'enze', 'ico', 'ici', 'ica', 'ice',
                        'oso', 'osi', 'osa', 'ose', 'it\xE0',
                        'ivo', 'ivi', 'iva', 'ive')
    __step2_suffixes = ('erebbero', 'irebbero', 'assero', 'assimo',
                        'eranno', 'erebbe', 'eremmo', 'ereste',
                        'eresti', 'essero', 'iranno', 'irebbe',
                        'iremmo', 'ireste', 'iresti', 'iscano',
                        'iscono', 'issero', 'arono', 'avamo', 'avano',
                        'avate', 'eremo', 'erete', 'erono', 'evamo',
                        'evano', 'evate', 'iremo', 'irete', 'irono',
                        'ivamo', 'ivano', 'ivate', 'ammo', 'ando',
                        'asse', 'assi', 'emmo', 'enda', 'ende',
                        'endi', 'endo', 'erai', 'erei', 'Yamo',
                        'iamo', 'immo', 'irai', 'irei', 'isca',
                        'isce', 'isci', 'isco', 'ano', 'are', 'ata',
                        'ate', 'ati', 'ato', 'ava', 'avi', 'avo',
                        'er\xE0', 'ere', 'er\xF2', 'ete', 'eva',
                        'evi', 'evo', 'ir\xE0', 'ire', 'ir\xF2',
                        'ita', 'ite', 'iti', 'ito', 'iva', 'ivi',
                        'ivo', 'ono', 'uta', 'ute', 'uti', 'uto',
                        'ar', 'ir')

    def stem(self, word):
        """
        Stem an Italian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False

        # All acute accents are replaced by grave accents.
        word = (word.replace("\xE1", "\xE0")
                    .replace("\xE9", "\xE8")
                    .replace("\xED", "\xEC")
                    .replace("\xF3", "\xF2")
                    .replace("\xFA", "\xF9"))

        # Every occurrence of 'u' after 'q'
        # is put into upper case.
        for i in range(1, len(word)):
            if word[i-1] == "q" and word[i] == "u":
                word = "".join((word[:i], "U", word[i+1:]))

        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case
        # (marks them as non-vowels for region computation).
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if rv.endswith(suffix):
                if rv[-len(suffix)-4:-len(suffix)] in ("ando", "endo"):
                    # Gerund before the pronoun: just delete the pronoun.
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                elif (rv[-len(suffix)-2:-len(suffix)] in
                      ("ar", "er", "ir")):
                    # Infinitive stem before the pronoun: replace with 'e'.
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                    r2 = suffix_replace(r2, suffix, "e")
                    rv = suffix_replace(rv, suffix, "e")
                break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]
                    if r2.endswith("iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]
                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith(("os", "ic")):
                        word = word[:-2]
                        rv = rv[:-2]
                    elif r2.endswith("abil"):
                        word = word[:-4]
                        rv = rv[:-4]
                elif (suffix in ("amento", "amenti",
                                 "imento", "imenti") and
                      rv.endswith(suffix)):
                    step1_success = True
                    word = word[:-6]
                    rv = rv[:-6]
                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in ("azione", "azioni", "atore", "atori"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith("ic"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif suffix in ("logia", "logie"):
                        word = word[:-2]
                        # FIX: slice rv from rv, not from word.  The old
                        # code did ``rv = word[:-2]``, which made rv span
                        # nearly the whole word instead of the RV region
                        # and corrupted the RV checks of later steps.
                        rv = rv[:-2]
                    elif suffix in ("uzione", "uzioni",
                                    "usione", "usioni"):
                        word = word[:-5]
                        rv = rv[:-5]
                    elif suffix in ("enza", "enze"):
                        word = suffix_replace(word, suffix, "te")
                        rv = suffix_replace(rv, suffix, "te")
                    elif suffix == "it\xE0":
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]
                        if r2.endswith(("ic", "iv")):
                            word = word[:-2]
                            rv = rv[:-2]
                        elif r2.endswith("abil"):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in ("ivo", "ivi", "iva", "ive"):
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]
                        if r2.endswith("at"):
                            word = word[:-2]
                            r2 = r2[:-2]
                            rv = rv[:-2]
                            if r2.endswith("ic"):
                                word = word[:-2]
                                rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2: Verb suffixes (only if step 1 removed nothing)
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3a: Delete a final vowel in RV, and a preceding 'i' in RV.
        if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8",
                        "\xEC", "\xF2")):
            word = word[:-1]
            rv = rv[:-1]

            if rv.endswith("i"):
                word = word[:-1]
                rv = rv[:-1]

        # STEP 3b: 'ch'/'gh' in RV lose their final 'h'.
        if rv.endswith(("ch", "gh")):
            word = word[:-1]

        # Undo the upper-case vowel marking from pre-processing.
        word = word.replace("I", "i").replace("U", "u")

        return word
class NorwegianStemmer(_ScandinavianStemmer):

    """
    The Norwegian Snowball stemmer.

    :cvar __vowels: The Norwegian vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Norwegian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/norwegian/stemmer.html

    """

    __vowels = "aeiouy\xE6\xE5\xF8"
    __s_ending = "bcdfghjlmnoprtvyz"
    # Ordered longest-first: the first endswith() hit is the longest match.
    __step1_suffixes = ("hetenes", "hetene", "hetens", "heter",
                        "heten", "endes", "ande", "ende", "edes",
                        "enes", "erte", "ede", "ane", "ene", "ens",
                        "ers", "ets", "het", "ast", "ert", "en",
                        "ar", "er", "as", "es", "et", "a", "e", "s")
    __step2_suffixes = ("dt", "vt")
    __step3_suffixes = ("hetslov", "eleg", "elig", "elov", "slov",
                        "leg", "eig", "lig", "els", "lov", "ig")

    def stem(self, word):
        """
        Stem a Norwegian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        """
        word = word.lower()

        if word in self.stopwords:
            return word

        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: only the longest suffix found in R1 is treated.
        for ending in self.__step1_suffixes:
            if not r1.endswith(ending):
                continue
            if ending in ("erte", "ert"):
                word = suffix_replace(word, ending, "er")
                r1 = suffix_replace(r1, ending, "er")
            elif ending == "s":
                # A final 's' is removed only after a valid s-ending,
                # or after 'k' preceded by a non-vowel.
                preceding = word[-2]
                removable = (preceding in self.__s_ending or
                             (preceding == "k" and
                              word[-3] not in self.__vowels))
                if removable:
                    word = word[:-1]
                    r1 = r1[:-1]
            else:
                word = word[:-len(ending)]
                r1 = r1[:-len(ending)]
            break

        # STEP 2: words ending in 'dt'/'vt' lose only the final letter.
        for ending in self.__step2_suffixes:
            if r1.endswith(ending):
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3: derivational endings are deleted entirely.
        for ending in self.__step3_suffixes:
            if r1.endswith(ending):
                word = word[:-len(ending)]
                break

        return word
class PortugueseStemmer(_StandardStemmer):

    """
    The Portuguese Snowball stemmer.

    :cvar __vowels: The Portuguese vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Portuguese
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/portuguese/stemmer.html

    """

    __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4"
    # Suffix tables are ordered longest-first so that the first
    # endswith() match is the longest applicable suffix.  Entries with
    # "a~"/"o~" match the two-character nasal-vowel markers introduced
    # in stem() below.
    __step1_suffixes = ('amentos', 'imentos', 'uço~es', 'amento',
                        'imento', 'adoras', 'adores', 'a\xE7o~es',
                        'logias', '\xEAncias', 'amente',
                        'idades', 'an\xE7as', 'ismos', 'istas', 'adora',
                        'a\xE7a~o', 'antes', '\xE2ncia',
                        'logia', 'uça~o', '\xEAncia',
                        'mente', 'idade', 'an\xE7a', 'ezas', 'icos', 'icas',
                        'ismo', '\xE1vel', '\xEDvel', 'ista',
                        'osos', 'osas', 'ador', 'ante', 'ivas',
                        'ivos', 'iras', 'eza', 'ico', 'ica',
                        'oso', 'osa', 'iva', 'ivo', 'ira')
    __step2_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
                        '\xE1ssemos', '\xEAssemos', '\xEDssemos',
                        'ar\xEDeis', 'er\xEDeis', 'ir\xEDeis',
                        '\xE1sseis', '\xE9sseis', '\xEDsseis',
                        '\xE1ramos', '\xE9ramos', '\xEDramos',
                        '\xE1vamos', 'aremos', 'eremos', 'iremos',
                        'ariam', 'eriam', 'iriam', 'assem', 'essem',
                        'issem', 'ara~o', 'era~o', 'ira~o', 'arias',
                        'erias', 'irias', 'ardes', 'erdes', 'irdes',
                        'asses', 'esses', 'isses', 'astes', 'estes',
                        'istes', '\xE1reis', 'areis', '\xE9reis',
                        'ereis', '\xEDreis', 'ireis', '\xE1veis',
                        '\xEDamos', 'armos', 'ermos', 'irmos',
                        'aria', 'eria', 'iria', 'asse', 'esse',
                        'isse', 'aste', 'este', 'iste', 'arei',
                        'erei', 'irei', 'aram', 'eram', 'iram',
                        'avam', 'arem', 'erem', 'irem',
                        'ando', 'endo', 'indo', 'adas', 'idas',
                        'ar\xE1s', 'aras', 'er\xE1s', 'eras',
                        'ir\xE1s', 'avas', 'ares', 'eres', 'ires',
                        '\xEDeis', 'ados', 'idos', '\xE1mos',
                        'amos', 'emos', 'imos', 'iras', 'ada', 'ida',
                        'ar\xE1', 'ara', 'er\xE1', 'era',
                        'ir\xE1', 'ava', 'iam', 'ado', 'ido',
                        'ias', 'ais', 'eis', 'ira', 'ia', 'ei', 'am',
                        'em', 'ar', 'er', 'ir', 'as',
                        'es', 'is', 'eu', 'iu', 'ou')
    __step4_suffixes = ("os", "a", "i", "o", "\xE1",
                        "\xED", "\xF3")

    def stem(self, word):
        """
        Stem a Portuguese word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # Nasalized vowels are rewritten as two-character markers
        # ("a~", "o~") so the ASCII-oriented suffix tables can match
        # them; the markers are converted back at the end of stem().
        word = (word.replace("\xE3", "a~")
                    .replace("\xF5", "o~")
                    .replace("q\xFC", "qu")
                    .replace("g\xFC", "gu"))

        # r1/r2/rv track the standard Snowball regions; each deletion
        # below must shorten them in lockstep with `word`.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "amente" and r1.endswith(suffix):
                    step1_success = True

                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]

                    # Strip a secondary derivational suffix left behind.
                    if r2.endswith("iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]

                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith(("os", "ic", "ad")):
                        word = word[:-2]
                        rv = rv[:-2]
                elif (suffix in ("ira", "iras") and rv.endswith(suffix) and
                      word[-len(suffix)-1:-len(suffix)] == "e"):
                    # 'eira(s)' -> 'eir': only when preceded by 'e'.
                    step1_success = True

                    word = suffix_replace(word, suffix, "ir")
                    rv = suffix_replace(rv, suffix, "ir")
                elif r2.endswith(suffix):
                    step1_success = True

                    if suffix in ("logia", "logias"):
                        word = suffix_replace(word, suffix, "log")
                        rv = suffix_replace(rv, suffix, "log")
                    elif suffix in ("uça~o", "uço~es"):
                        word = suffix_replace(word, suffix, "u")
                        rv = suffix_replace(rv, suffix, "u")
                    elif suffix in ("\xEAncia", "\xEAncias"):
                        word = suffix_replace(word, suffix, "ente")
                        rv = suffix_replace(rv, suffix, "ente")
                    elif suffix == "mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]

                        if r2.endswith(("ante", "avel", "ivel")):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in ("idade", "idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith(("ic", "iv")):
                            word = word[:-2]
                            rv = rv[:-2]
                        elif r2.endswith("abil"):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in ("iva", "ivo", "ivas", "ivos"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2: Verb suffixes (only if step 1 removed nothing)
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    step2_success = True

                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3: a residual 'i' after 'c' is dropped.
        if step1_success or step2_success:
            if rv.endswith("i") and word[-2] == "c":
                word = word[:-1]
                rv = rv[:-1]

        # STEP 4: Residual suffix (only if steps 1 and 2 did nothing)
        if not step1_success and not step2_success:
            for suffix in self.__step4_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 5: final 'e'/'é'/'ê' and cedilla clean-up.
        if rv.endswith(("e", "\xE9", "\xEA")):
            word = word[:-1]
            rv = rv[:-1]

            if ((word.endswith("gu") and rv.endswith("u")) or
                (word.endswith("ci") and rv.endswith("i"))):
                word = word[:-1]
        elif word.endswith("\xE7"):
            word = suffix_replace(word, "\xE7", "c")

        # Restore the nasal vowels hidden at the start of the method.
        word = word.replace("a~", "\xE3").replace("o~", "\xF5")

        return word
class RomanianStemmer(_StandardStemmer):

    """
    The Romanian Snowball stemmer.

    :cvar __vowels: The Romanian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Romanian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/romanian/stemmer.html

    """

    __vowels = "aeiou\u0103\xE2\xEE"
    # All tables are ordered longest-first so the first endswith() match
    # is the longest applicable suffix.
    __step0_suffixes = ('iilor', 'ului', 'elor', 'iile', 'ilor',
                        'atei', 'a\u0163ie', 'a\u0163ia', 'aua',
                        'ele', 'iua', 'iei', 'ile', 'ul', 'ea',
                        'ii')
    __step1_suffixes = ('abilitate', 'abilitati', 'abilit\u0103\u0163i',
                        'ibilitate', 'abilit\u0103i', 'ivitate',
                        'ivitati', 'ivit\u0103\u0163i', 'icitate',
                        'icitati', 'icit\u0103\u0163i', 'icatori',
                        'ivit\u0103i', 'icit\u0103i', 'icator',
                        'a\u0163iune', 'atoare', '\u0103toare',
                        'i\u0163iune', 'itoare', 'iciva', 'icive',
                        'icivi', 'iciv\u0103', 'icala', 'icale',
                        'icali', 'ical\u0103', 'ativa', 'ative',
                        'ativi', 'ativ\u0103', 'atori', '\u0103tori',
                        'itiva', 'itive', 'itivi', 'itiv\u0103',
                        'itori', 'iciv', 'ical', 'ativ', 'ator',
                        '\u0103tor', 'itiv', 'itor')
    __step2_suffixes = ('abila', 'abile', 'abili', 'abil\u0103',
                        'ibila', 'ibile', 'ibili', 'ibil\u0103',
                        'atori', 'itate', 'itati', 'it\u0103\u0163i',
                        'abil', 'ibil', 'oasa', 'oas\u0103', 'oase',
                        'anta', 'ante', 'anti', 'ant\u0103', 'ator',
                        'it\u0103i', 'iune', 'iuni', 'isme', 'ista',
                        'iste', 'isti', 'ist\u0103', 'i\u015Fti',
                        'ata', 'at\u0103', 'ati', 'ate', 'uta',
                        'ut\u0103', 'uti', 'ute', 'ita', 'it\u0103',
                        'iti', 'ite', 'ica', 'ice', 'ici', 'ic\u0103',
                        'osi', 'o\u015Fi', 'ant', 'iva', 'ive', 'ivi',
                        'iv\u0103', 'ism', 'ist', 'at', 'ut', 'it',
                        'ic', 'os', 'iv')
    __step3_suffixes = ('seser\u0103\u0163i', 'aser\u0103\u0163i',
                        'iser\u0103\u0163i', '\xE2ser\u0103\u0163i',
                        'user\u0103\u0163i', 'seser\u0103m',
                        'aser\u0103m', 'iser\u0103m', '\xE2ser\u0103m',
                        'user\u0103m', 'ser\u0103\u0163i', 'sese\u015Fi',
                        'seser\u0103', 'easc\u0103', 'ar\u0103\u0163i',
                        'ur\u0103\u0163i', 'ir\u0103\u0163i',
                        '\xE2r\u0103\u0163i', 'ase\u015Fi',
                        'aser\u0103', 'ise\u015Fi', 'iser\u0103',
                        '\xe2se\u015Fi', '\xE2ser\u0103',
                        'use\u015Fi', 'user\u0103', 'ser\u0103m',
                        'sesem', 'indu', '\xE2ndu', 'eaz\u0103',
                        'e\u015Fti', 'e\u015Fte', '\u0103\u015Fti',
                        '\u0103\u015Fte', 'ea\u0163i', 'ia\u0163i',
                        'ar\u0103m', 'ur\u0103m', 'ir\u0103m',
                        '\xE2r\u0103m', 'asem', 'isem',
                        '\xE2sem', 'usem', 'se\u015Fi', 'ser\u0103',
                        'sese', 'are', 'ere', 'ire', '\xE2re',
                        'ind', '\xE2nd', 'eze', 'ezi', 'esc',
                        '\u0103sc', 'eam', 'eai', 'eau', 'iam',
                        'iai', 'iau', 'a\u015Fi', 'ar\u0103',
                        'u\u015Fi', 'ur\u0103', 'i\u015Fi', 'ir\u0103',
                        '\xE2\u015Fi', '\xe2r\u0103', 'ase',
                        'ise', '\xE2se', 'use', 'a\u0163i',
                        'e\u0163i', 'i\u0163i', '\xe2\u0163i', 'sei',
                        'ez', 'am', 'ai', 'au', 'ea', 'ia', 'ui',
                        '\xE2i', '\u0103m', 'em', 'im', '\xE2m',
                        'se')

    def stem(self, word):
        """
        Stem a Romanian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # 'u' and 'i' between vowels are marked upper case so that the
        # region computation treats them as non-vowels; the marking is
        # undone at the end of the method.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Removal of plurals and other simplifications
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if suffix in r1:
                    if suffix in ("ul", "ului"):
                        word = word[:-len(suffix)]

                        # rv is emptied when the suffix reached into it.
                        if suffix in rv:
                            rv = rv[:-len(suffix)]
                        else:
                            rv = ""
                    elif (suffix == "aua" or suffix == "atei" or
                          (suffix == "ile" and word[-5:-3] != "ab")):
                        word = word[:-2]
                    elif suffix in ("ea", "ele", "elor"):
                        word = suffix_replace(word, suffix, "e")

                        if suffix in rv:
                            rv = suffix_replace(rv, suffix, "e")
                        else:
                            rv = ""
                    elif suffix in ("ii", "iua", "iei",
                                    "iile", "iilor", "ilor"):
                        word = suffix_replace(word, suffix, "i")

                        if suffix in rv:
                            rv = suffix_replace(rv, suffix, "i")
                        else:
                            rv = ""
                    elif suffix in ("a\u0163ie", "a\u0163ia"):
                        word = word[:-1]
                break

        # STEP 1: Reduction of combining suffixes.
        # Repeats until no more replacements can be made, because one
        # replacement may expose another combinable suffix.
        while True:

            replacement_done = False

            for suffix in self.__step1_suffixes:
                if word.endswith(suffix):
                    if suffix in r1:
                        step1_success = True
                        replacement_done = True

                        if suffix in ("abilitate", "abilitati",
                                      "abilit\u0103i",
                                      "abilit\u0103\u0163i"):
                            word = suffix_replace(word, suffix, "abil")
                        elif suffix == "ibilitate":
                            word = word[:-5]
                        elif suffix in ("ivitate", "ivitati",
                                        "ivit\u0103i",
                                        "ivit\u0103\u0163i"):
                            word = suffix_replace(word, suffix, "iv")
                        elif suffix in ("icitate", "icitati", "icit\u0103i",
                                        "icit\u0103\u0163i", "icator",
                                        "icatori", "iciv", "iciva",
                                        "icive", "icivi", "iciv\u0103",
                                        "ical", "icala", "icale", "icali",
                                        "ical\u0103"):
                            word = suffix_replace(word, suffix, "ic")
                        elif suffix in ("ativ", "ativa", "ative", "ativi",
                                        "ativ\u0103", "a\u0163iune",
                                        "atoare", "ator", "atori",
                                        "\u0103toare",
                                        "\u0103tor", "\u0103tori"):
                            word = suffix_replace(word, suffix, "at")

                            if suffix in r2:
                                r2 = suffix_replace(r2, suffix, "at")
                        elif suffix in ("itiv", "itiva", "itive", "itivi",
                                        "itiv\u0103", "i\u0163iune",
                                        "itoare", "itor", "itori"):
                            word = suffix_replace(word, suffix, "it")

                            if suffix in r2:
                                r2 = suffix_replace(r2, suffix, "it")
                    else:
                        # Suffix matched but lies outside R1: record the
                        # failure and stop scanning this round.
                        step1_success = False
                    break

            if not replacement_done:
                break

        # STEP 2: Removal of standard suffixes
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if suffix in r2:
                    step2_success = True

                    if suffix in ("iune", "iuni"):
                        if word[-5] == "\u0163":
                            word = "".join((word[:-5], "t"))
                    elif suffix in ("ism", "isme", "ist", "ista", "iste",
                                    "isti", "ist\u0103", "i\u015Fti"):
                        word = suffix_replace(word, suffix, "ist")
                    else:
                        word = word[:-len(suffix)]
                break

        # STEP 3: Removal of verb suffixes
        # (only when steps 1 and 2 achieved nothing)
        if not step1_success and not step2_success:
            for suffix in self.__step3_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if suffix in ('seser\u0103\u0163i', 'seser\u0103m',
                                      'ser\u0103\u0163i', 'sese\u015Fi',
                                      'seser\u0103', 'ser\u0103m', 'sesem',
                                      'se\u015Fi', 'ser\u0103', 'sese',
                                      'a\u0163i', 'e\u0163i', 'i\u0163i',
                                      '\xE2\u0163i', 'sei', '\u0103m',
                                      'em', 'im', '\xE2m', 'se'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                        else:
                            # Remaining verb suffixes require a consonant
                            # (or 'u') immediately before them in RV.
                            if (not rv.startswith(suffix) and
                                rv[rv.index(suffix)-1] not in
                                "aeio\u0103\xE2\xEE"):
                                word = word[:-len(suffix)]
                    break

        # STEP 4: Removal of final vowel
        for suffix in ("ie", "a", "e", "i", "\u0103"):
            if word.endswith(suffix):
                if suffix in rv:
                    word = word[:-len(suffix)]
                break

        # Undo the upper-case marking from pre-processing.
        word = word.replace("I", "i").replace("U", "u")

        return word
class RussianStemmer(_LanguageSpecificStemmer):
"""
The Russian Snowball stemmer.
:cvar __perfective_gerund_suffixes: Suffixes to be deleted.
:type __perfective_gerund_suffixes: tuple
:cvar __adjectival_suffixes: Suffixes to be deleted.
:type __adjectival_suffixes: tuple
:cvar __reflexive_suffixes: Suffixes to be deleted.
:type __reflexive_suffixes: tuple
:cvar __verb_suffixes: Suffixes to be deleted.
:type __verb_suffixes: tuple
:cvar __noun_suffixes: Suffixes to be deleted.
:type __noun_suffixes: tuple
:cvar __superlative_suffixes: Suffixes to be deleted.
:type __superlative_suffixes: tuple
:cvar __derivational_suffixes: Suffixes to be deleted.
:type __derivational_suffixes: tuple
:note: A detailed description of the Russian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/russian/stemmer.html
"""
__perfective_gerund_suffixes = ("ivshis'", "yvshis'", "vshis'",
"ivshi", "yvshi", "vshi", "iv",
"yv", "v")
__adjectival_suffixes = ('ui^ushchi^ui^u', 'ui^ushchi^ai^a',
'ui^ushchimi', 'ui^ushchymi', 'ui^ushchego',
'ui^ushchogo', 'ui^ushchemu', 'ui^ushchomu',
'ui^ushchikh', 'ui^ushchykh',
'ui^ushchui^u', 'ui^ushchaia',
'ui^ushchoi^u', 'ui^ushchei^u',
'i^ushchi^ui^u', 'i^ushchi^ai^a',
'ui^ushchee', 'ui^ushchie',
'ui^ushchye', 'ui^ushchoe', 'ui^ushchei`',
'ui^ushchii`', 'ui^ushchyi`',
'ui^ushchoi`', 'ui^ushchem', 'ui^ushchim',
'ui^ushchym', 'ui^ushchom', 'i^ushchimi',
'i^ushchymi', 'i^ushchego', 'i^ushchogo',
'i^ushchemu', 'i^ushchomu', 'i^ushchikh',
'i^ushchykh', 'i^ushchui^u', 'i^ushchai^a',
'i^ushchoi^u', 'i^ushchei^u', 'i^ushchee',
'i^ushchie', 'i^ushchye', 'i^ushchoe',
'i^ushchei`', 'i^ushchii`',
'i^ushchyi`', 'i^ushchoi`', 'i^ushchem',
'i^ushchim', 'i^ushchym', 'i^ushchom',
'shchi^ui^u', 'shchi^ai^a', 'ivshi^ui^u',
'ivshi^ai^a', 'yvshi^ui^u', 'yvshi^ai^a',
'shchimi', 'shchymi', 'shchego', 'shchogo',
'shchemu', 'shchomu', 'shchikh', 'shchykh',
'shchui^u', 'shchai^a', 'shchoi^u',
'shchei^u', 'ivshimi', 'ivshymi',
'ivshego', 'ivshogo', 'ivshemu', 'ivshomu',
'ivshikh', 'ivshykh', 'ivshui^u',
'ivshai^a', 'ivshoi^u', 'ivshei^u',
'yvshimi', 'yvshymi', 'yvshego', 'yvshogo',
'yvshemu', 'yvshomu', 'yvshikh', 'yvshykh',
'yvshui^u', 'yvshai^a', 'yvshoi^u',
'yvshei^u', 'vshi^ui^u', 'vshi^ai^a',
'shchee', 'shchie', 'shchye', 'shchoe',
'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
'shchem', 'shchim', 'shchym', 'shchom',
'ivshee', 'ivshie', 'ivshye', 'ivshoe',
'ivshei`', 'ivshii`', 'ivshyi`',
'ivshoi`', 'ivshem', 'ivshim', 'ivshym',
'ivshom', 'yvshee', 'yvshie', 'yvshye',
'yvshoe', 'yvshei`', 'yvshii`',
'yvshyi`', 'yvshoi`', 'yvshem',
'yvshim', 'yvshym', 'yvshom', 'vshimi',
'vshymi', 'vshego', 'vshogo', 'vshemu',
'vshomu', 'vshikh', 'vshykh', 'vshui^u',
'vshai^a', 'vshoi^u', 'vshei^u',
'emi^ui^u', 'emi^ai^a', 'nni^ui^u',
'nni^ai^a', 'vshee',
'vshie', 'vshye', 'vshoe', 'vshei`',
'vshii`', 'vshyi`', 'vshoi`',
'vshem', 'vshim', 'vshym', 'vshom',
'emimi', 'emymi', 'emego', 'emogo',
'ememu', 'emomu', 'emikh', 'emykh',
'emui^u', 'emai^a', 'emoi^u', 'emei^u',
'nnimi', 'nnymi', 'nnego', 'nnogo',
'nnemu', 'nnomu', 'nnikh', 'nnykh',
'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
'emee', 'emie', 'emye', 'emoe',
'emei`', 'emii`', 'emyi`',
'emoi`', 'emem', 'emim', 'emym',
'emom', 'nnee', 'nnie', 'nnye', 'nnoe',
'nnei`', 'nnii`', 'nnyi`',
'nnoi`', 'nnem', 'nnim', 'nnym',
'nnom', 'i^ui^u', 'i^ai^a', 'imi', 'ymi',
'ego', 'ogo', 'emu', 'omu', 'ikh',
'ykh', 'ui^u', 'ai^a', 'oi^u', 'ei^u',
'ee', 'ie', 'ye', 'oe', 'ei`',
'ii`', 'yi`', 'oi`', 'em',
'im', 'ym', 'om')
__reflexive_suffixes = ("si^a", "s'")
__verb_suffixes = ("esh'", 'ei`te', 'ui`te', 'ui^ut',
"ish'", 'ete', 'i`te', 'i^ut', 'nno',
'ila', 'yla', 'ena', 'ite', 'ili', 'yli',
'ilo', 'ylo', 'eno', 'i^at', 'uet', 'eny',
"it'", "yt'", 'ui^u', 'la', 'na', 'li',
'em', 'lo', 'no', 'et', 'ny', "t'",
'ei`', 'ui`', 'il', 'yl', 'im',
'ym', 'en', 'it', 'yt', 'i^u', 'i`',
'l', 'n')
__noun_suffixes = ('ii^ami', 'ii^akh', 'i^ami', 'ii^am', 'i^akh',
'ami', 'iei`', 'i^am', 'iem', 'akh',
'ii^u', "'i^u", 'ii^a', "'i^a", 'ev', 'ov',
'ie', "'e", 'ei', 'ii', 'ei`',
'oi`', 'ii`', 'em', 'am', 'om',
'i^u', 'i^a', 'a', 'e', 'i', 'i`',
'o', 'u', 'y', "'")
__superlative_suffixes = ("ei`she", "ei`sh")
__derivational_suffixes = ("ost'", "ost")
def stem(self, word):
"""
Stem a Russian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
if word in self.stopwords:
return word
chr_exceeded = False
for i in range(len(word)):
if ord(word[i]) > 255:
chr_exceeded = True
break
if chr_exceeded:
word = self.__cyrillic_to_roman(word)
step1_success = False
adjectival_removed = False
verb_removed = False
undouble_success = False
superlative_removed = False
rv, r2 = self.__regions_russian(word)
# Step 1
for suffix in self.__perfective_gerund_suffixes:
if rv.endswith(suffix):
if suffix in ("v", "vshi", "vshis'"):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
step1_success = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
step1_success = True
break
if not step1_success:
for suffix in self.__reflexive_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
break
for suffix in self.__adjectival_suffixes:
if rv.endswith(suffix):
if suffix in ('i^ushchi^ui^u', 'i^ushchi^ai^a',
'i^ushchui^u', 'i^ushchai^a', 'i^ushchoi^u',
'i^ushchei^u', 'i^ushchimi', 'i^ushchymi',
'i^ushchego', 'i^ushchogo', 'i^ushchemu',
'i^ushchomu', 'i^ushchikh', 'i^ushchykh',
'shchi^ui^u', 'shchi^ai^a', 'i^ushchee',
'i^ushchie', 'i^ushchye', 'i^ushchoe',
'i^ushchei`', 'i^ushchii`', 'i^ushchyi`',
'i^ushchoi`', 'i^ushchem', 'i^ushchim',
'i^ushchym', 'i^ushchom', 'vshi^ui^u',
'vshi^ai^a', 'shchui^u', 'shchai^a',
'shchoi^u', 'shchei^u', 'emi^ui^u',
'emi^ai^a', 'nni^ui^u', 'nni^ai^a',
'shchimi', 'shchymi', 'shchego', 'shchogo',
'shchemu', 'shchomu', 'shchikh', 'shchykh',
'vshui^u', 'vshai^a', 'vshoi^u', 'vshei^u',
'shchee', 'shchie', 'shchye', 'shchoe',
'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
'shchem', 'shchim', 'shchym', 'shchom',
'vshimi', 'vshymi', 'vshego', 'vshogo',
'vshemu', 'vshomu', 'vshikh', 'vshykh',
'emui^u', 'emai^a', 'emoi^u', 'emei^u',
'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
'vshee', 'vshie', 'vshye', 'vshoe',
'vshei`', 'vshii`', 'vshyi`', 'vshoi`',
'vshem', 'vshim', 'vshym', 'vshom',
'emimi', 'emymi', 'emego', 'emogo',
'ememu', 'emomu', 'emikh', 'emykh',
'nnimi', 'nnymi', 'nnego', 'nnogo',
'nnemu', 'nnomu', 'nnikh', 'nnykh',
'emee', 'emie', 'emye', 'emoe', 'emei`',
'emii`', 'emyi`', 'emoi`', 'emem', 'emim',
'emym', 'emom', 'nnee', 'nnie', 'nnye',
'nnoe', 'nnei`', 'nnii`', 'nnyi`', 'nnoi`',
'nnem', 'nnim', 'nnym', 'nnom'):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
adjectival_removed = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
adjectival_removed = True
break
if not adjectival_removed:
for suffix in self.__verb_suffixes:
if rv.endswith(suffix):
if suffix in ("la", "na", "ete", "i`te", "li",
"i`", "l", "em", "n", "lo", "no",
"et", "i^ut", "ny", "t'", "esh'",
"nno"):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
verb_removed = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
verb_removed = True
break
if not adjectival_removed and not verb_removed:
for suffix in self.__noun_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# Step 2
if rv.endswith("i"):
word = word[:-1]
r2 = r2[:-1]
# Step 3
for suffix in self.__derivational_suffixes:
if r2.endswith(suffix):
word = word[:-len(suffix)]
break
# Step 4
if word.endswith("nn"):
word = word[:-1]
undouble_success = True
if not undouble_success:
for suffix in self.__superlative_suffixes:
if word.endswith(suffix):
word = word[:-len(suffix)]
superlative_removed = True
break
if word.endswith("nn"):
word = word[:-1]
if not undouble_success and not superlative_removed:
if word.endswith("'"):
word = word[:-1]
if chr_exceeded:
word = self.__roman_to_cyrillic(word)
return word
def __regions_russian(self, word):
"""
Return the regions RV and R2 which are used by the Russian stemmer.
In any word, RV is the region after the first vowel,
or the end of the word if it contains no vowel.
R2 is the region after the first non-vowel following
a vowel in R1, or the end of the word if there is no such non-vowel.
R1 is the region after the first non-vowel following a vowel,
or the end of the word if there is no such non-vowel.
:param word: The Russian word whose regions RV and R2 are determined.
:type word: str or unicode
:return: the regions RV and R2 for the respective Russian word.
:rtype: tuple
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
r1 = ""
r2 = ""
rv = ""
vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
word = (word.replace("i^a", "A")
.replace("i^u", "U")
.replace("e`", "E"))
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
r1 = word[i+1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i-1] in vowels:
r2 = r1[i+1:]
break
for i in range(len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
r2 = (r2.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
rv = (rv.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
return (rv, r2)
def __cyrillic_to_roman(self, word):
"""
Transliterate a Russian word into the Roman alphabet.
A Russian word whose letters consist of the Cyrillic
alphabet are transliterated into the Roman alphabet
in order to ease the forthcoming stemming process.
:param word: The word that is transliterated.
:type word: unicode
:return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("\u0410", "a").replace("\u0430", "a")
.replace("\u0411", "b").replace("\u0431", "b")
.replace("\u0412", "v").replace("\u0432", "v")
.replace("\u0413", "g").replace("\u0433", "g")
.replace("\u0414", "d").replace("\u0434", "d")
.replace("\u0415", "e").replace("\u0435", "e")
.replace("\u0401", "e").replace("\u0451", "e")
.replace("\u0416", "zh").replace("\u0436", "zh")
.replace("\u0417", "z").replace("\u0437", "z")
.replace("\u0418", "i").replace("\u0438", "i")
.replace("\u0419", "i`").replace("\u0439", "i`")
.replace("\u041A", "k").replace("\u043A", "k")
.replace("\u041B", "l").replace("\u043B", "l")
.replace("\u041C", "m").replace("\u043C", "m")
.replace("\u041D", "n").replace("\u043D", "n")
.replace("\u041E", "o").replace("\u043E", "o")
.replace("\u041F", "p").replace("\u043F", "p")
.replace("\u0420", "r").replace("\u0440", "r")
.replace("\u0421", "s").replace("\u0441", "s")
.replace("\u0422", "t").replace("\u0442", "t")
.replace("\u0423", "u").replace("\u0443", "u")
.replace("\u0424", "f").replace("\u0444", "f")
.replace("\u0425", "kh").replace("\u0445", "kh")
.replace("\u0426", "t^s").replace("\u0446", "t^s")
.replace("\u0427", "ch").replace("\u0447", "ch")
.replace("\u0428", "sh").replace("\u0448", "sh")
.replace("\u0429", "shch").replace("\u0449", "shch")
.replace("\u042A", "''").replace("\u044A", "''")
.replace("\u042B", "y").replace("\u044B", "y")
.replace("\u042C", "'").replace("\u044C", "'")
.replace("\u042D", "e`").replace("\u044D", "e`")
.replace("\u042E", "i^u").replace("\u044E", "i^u")
.replace("\u042F", "i^a").replace("\u044F", "i^a"))
return word
def __roman_to_cyrillic(self, word):
    """
    Transliterate a Russian word back into the Cyrillic alphabet.

    A Russian word formerly transliterated into the Roman alphabet
    in order to ease the stemming process, is transliterated back
    into the Cyrillic alphabet, its original form.

    :param word: The word that is transliterated.
    :type word: str or unicode
    :return: word, the transliterated word.
    :rtype: unicode
    :note: This helper method is invoked by the stem method of the subclass
           RussianStemmer. It is not to be invoked directly!
    """
    # The substitution order matters: multi-letter transliterations
    # ("shch", "kh", "t^s", ...) must be restored before the single
    # letters they contain.  The duplicate ("e", ...) entry mirrors the
    # original implementation and is harmless.
    substitutions = (
        ("i^u", "\u044E"), ("i^a", "\u044F"), ("shch", "\u0449"),
        ("kh", "\u0445"), ("t^s", "\u0446"), ("ch", "\u0447"),
        ("e`", "\u044D"), ("i`", "\u0439"), ("sh", "\u0448"),
        ("k", "\u043A"), ("e", "\u0435"), ("zh", "\u0436"),
        ("a", "\u0430"), ("b", "\u0431"), ("v", "\u0432"),
        ("g", "\u0433"), ("d", "\u0434"), ("e", "\u0435"),
        ("z", "\u0437"), ("i", "\u0438"), ("l", "\u043B"),
        ("m", "\u043C"), ("n", "\u043D"), ("o", "\u043E"),
        ("p", "\u043F"), ("r", "\u0440"), ("s", "\u0441"),
        ("t", "\u0442"), ("u", "\u0443"), ("f", "\u0444"),
        ("''", "\u044A"), ("y", "\u044B"), ("'", "\u044C"),
    )
    for roman, cyrillic in substitutions:
        word = word.replace(roman, cyrillic)
    return word
class SpanishStemmer(_StandardStemmer):

    """
    The Spanish Snowball stemmer.

    :cvar __vowels: The Spanish vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Spanish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/spanish/stemmer.html
    """

    # Suffix tuples are ordered longest-first so the first match found in
    # the loops of stem() is the longest matching suffix.
    __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC"
    __step0_suffixes = ("selas", "selos", "sela", "selo", "las",
                        "les", "los", "nos", "me", "se", "la", "le",
                        "lo")
    __step1_suffixes = ('amientos', 'imientos', 'amiento', 'imiento',
                        'aciones', 'uciones', 'adoras', 'adores',
                        'ancias', 'log\xEDas', 'encias', 'amente',
                        'idades', 'anzas', 'ismos', 'ables', 'ibles',
                        'istas', 'adora', 'aci\xF3n', 'antes',
                        'ancia', 'log\xEDa', 'uci\xf3n', 'encia',
                        'mente', 'anza', 'icos', 'icas', 'ismo',
                        'able', 'ible', 'ista', 'osos', 'osas',
                        'ador', 'ante', 'idad', 'ivas', 'ivos',
                        'ico',
                        'ica', 'oso', 'osa', 'iva', 'ivo')
    __step2a_suffixes = ('yeron', 'yendo', 'yamos', 'yais', 'yan',
                         'yen', 'yas', 'yes', 'ya', 'ye', 'yo',
                         'y\xF3')
    __step2b_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
                         'i\xE9ramos', 'i\xE9semos', 'ar\xEDais',
                         'aremos', 'er\xEDais', 'eremos',
                         'ir\xEDais', 'iremos', 'ierais', 'ieseis',
                         'asteis', 'isteis', '\xE1bamos',
                         '\xE1ramos', '\xE1semos', 'ar\xEDan',
                         'ar\xEDas', 'ar\xE9is', 'er\xEDan',
                         'er\xEDas', 'er\xE9is', 'ir\xEDan',
                         'ir\xEDas', 'ir\xE9is',
                         'ieran', 'iesen', 'ieron', 'iendo', 'ieras',
                         'ieses', 'abais', 'arais', 'aseis',
                         '\xE9amos', 'ar\xE1n', 'ar\xE1s',
                         'ar\xEDa', 'er\xE1n', 'er\xE1s',
                         'er\xEDa', 'ir\xE1n', 'ir\xE1s',
                         'ir\xEDa', 'iera', 'iese', 'aste', 'iste',
                         'aban', 'aran', 'asen', 'aron', 'ando',
                         'abas', 'adas', 'idas', 'aras', 'ases',
                         '\xEDais', 'ados', 'idos', 'amos', 'imos',
                         'emos', 'ar\xE1', 'ar\xE9', 'er\xE1',
                         'er\xE9', 'ir\xE1', 'ir\xE9', 'aba',
                         'ada', 'ida', 'ara', 'ase', '\xEDan',
                         'ado', 'ido', '\xEDas', '\xE1is',
                         '\xE9is', '\xEDa', 'ad', 'ed', 'id',
                         'an', 'i\xF3', 'ar', 'er', 'ir', 'as',
                         '\xEDs', 'en', 'es')
    __step3_suffixes = ("os", "a", "e", "o", "\xE1",
                        "\xE9", "\xED", "\xF3")

    def stem(self, word):
        """
        Stem a Spanish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False

        # R1/R2/RV are the standard Snowball word regions; the helpers
        # come from the _StandardStemmer base class (defined elsewhere
        # in this file).
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if not (word.endswith(suffix) and rv.endswith(suffix)):
                continue

            # The pronoun is removed only when preceded (inside RV) by
            # one of these verb endings; accents are dropped afterwards.
            if ((rv[:-len(suffix)].endswith(("ando", "\xE1ndo",
                                             "ar", "\xE1r",
                                             "er", "\xE9r",
                                             "iendo", "i\xE9ndo",
                                             "ir", "\xEDr"))) or
                (rv[:-len(suffix)].endswith("yendo") and
                 word[:-len(suffix)].endswith("uyendo"))):

                word = self.__replace_accented(word[:-len(suffix)])
                r1 = self.__replace_accented(r1[:-len(suffix)])
                r2 = self.__replace_accented(r2[:-len(suffix)])
                rv = self.__replace_accented(rv[:-len(suffix)])
            # Only the first (longest) matching suffix is considered.
            break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if not word.endswith(suffix):
                continue

            if suffix == "amente" and r1.endswith(suffix):
                step1_success = True
                word = word[:-6]
                r2 = r2[:-6]
                rv = rv[:-6]

                if r2.endswith("iv"):
                    word = word[:-2]
                    r2 = r2[:-2]
                    rv = rv[:-2]

                    if r2.endswith("at"):
                        word = word[:-2]
                        rv = rv[:-2]

                elif r2.endswith(("os", "ic", "ad")):
                    word = word[:-2]
                    rv = rv[:-2]

            elif r2.endswith(suffix):
                step1_success = True
                if suffix in ("adora", "ador", "aci\xF3n", "adoras",
                              "adores", "aciones", "ante", "antes",
                              "ancia", "ancias"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                    if r2.endswith("ic"):
                        word = word[:-2]
                        rv = rv[:-2]

                # suffix_replace is a module-level helper imported at the
                # top of this file (not visible in this chunk).
                elif suffix in ("log\xEDa", "log\xEDas"):
                    word = suffix_replace(word, suffix, "log")
                    rv = suffix_replace(rv, suffix, "log")

                elif suffix in ("uci\xF3n", "uciones"):
                    word = suffix_replace(word, suffix, "u")
                    rv = suffix_replace(rv, suffix, "u")

                elif suffix in ("encia", "encias"):
                    word = suffix_replace(word, suffix, "ente")
                    rv = suffix_replace(rv, suffix, "ente")

                elif suffix == "mente":
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                    if r2.endswith(("ante", "able", "ible")):
                        word = word[:-4]
                        rv = rv[:-4]

                elif suffix in ("idad", "idades"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                    for pre_suff in ("abil", "ic", "iv"):
                        if r2.endswith(pre_suff):
                            word = word[:-len(pre_suff)]
                            rv = rv[:-len(pre_suff)]

                elif suffix in ("ivo", "iva", "ivos", "ivas"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if r2.endswith("at"):
                        word = word[:-2]
                        rv = rv[:-2]
                else:
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
            break

        # STEP 2a: Verb suffixes beginning 'y'
        # Steps 2a/2b only run when step 1 removed nothing.
        if not step1_success:
            for suffix in self.__step2a_suffixes:
                if (rv.endswith(suffix) and
                    word[-len(suffix)-1:-len(suffix)] == "u"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

            # STEP 2b: Other verb suffixes
            for suffix in self.__step2b_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if suffix in ("en", "es", "\xE9is", "emos"):
                        # Undo a now-superfluous 'u' of a 'gu' group.
                        if word.endswith("gu"):
                            word = word[:-1]

                        if rv.endswith("gu"):
                            rv = rv[:-1]
                    break

        # STEP 3: Residual suffix
        for suffix in self.__step3_suffixes:
            if rv.endswith(suffix):
                word = word[:-len(suffix)]
                if suffix in ("e", "\xE9"):
                    rv = rv[:-len(suffix)]

                    if word[-2:] == "gu" and rv.endswith("u"):
                        word = word[:-1]
                break

        word = self.__replace_accented(word)

        return word

    def __replace_accented(self, word):
        """
        Replaces all accented letters on a word with their non-accented
        counterparts.

        :param word: A spanish word, with or without accents
        :type word: str or unicode
        :return: a word with the accented letters (á, é, í, ó, ú) replaced with
                 their non-accented counterparts (a, e, i, o, u)
        :rtype: str or unicode
        """
        return (word.replace("\xE1", "a")
                .replace("\xE9", "e")
                .replace("\xED", "i")
                .replace("\xF3", "o")
                .replace("\xFA", "u"))
class SwedishStemmer(_ScandinavianStemmer):

    """
    The Swedish Snowball stemmer.

    :cvar __vowels: The Swedish vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Swedish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/swedish/stemmer.html
    """

    __vowels = "aeiouy\xE4\xE5\xF6"
    __s_ending = "bcdfghjklmnoprtvy"
    __step1_suffixes = ("heterna", "hetens", "heter", "heten",
                        "anden", "arnas", "ernas", "ornas", "andes",
                        "andet", "arens", "arna", "erna", "orna",
                        "ande", "arne", "aste", "aren", "ades",
                        "erns", "ade", "are", "ern", "ens", "het",
                        "ast", "ad", "en", "ar", "er", "or", "as",
                        "es", "at", "a", "e", "s")
    __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt")
    __step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig")

    def stem(self, word):
        """
        Stem a Swedish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        # R1 is the standard Scandinavian word region computed by the
        # base class helper.
        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: remove the longest matching suffix found in R1.
        for suffix in self.__step1_suffixes:
            if not r1.endswith(suffix):
                continue
            if suffix == "s":
                # A final 's' is dropped only after a valid s-ending.
                if word[-2] in self.__s_ending:
                    word, r1 = word[:-1], r1[:-1]
            else:
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
            break

        # STEP 2: shorten a double-consonant ending by one letter.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word, r1 = word[:-1], r1[:-1]
                break

        # STEP 3: delete 'els'/'lig'/'ig' entirely; trim 'fullt'/'löst'
        # by a single letter.
        for suffix in self.__step3_suffixes:
            if not r1.endswith(suffix):
                continue
            if suffix in ("els", "lig", "ig"):
                word = word[:-len(suffix)]
            elif suffix in ("fullt", "l\xF6st"):
                word = word[:-1]
            break

        return word
def demo():
    """
    Demonstrate the Snowball stemmers interactively.

    After invoking this function and specifying a language, it stems an
    excerpt of the Universal Declaration of Human Rights (which is a
    part of the NLTK corpus collection) and then prints out the
    original and the stemmed text.
    """
    import re
    from nltk.corpus import udhr

    # Stemmer language -> UDHR corpus file holding the sample text.
    udhr_corpus = {"danish": "Danish_Dansk-Latin1",
                   "dutch": "Dutch_Nederlands-Latin1",
                   "english": "English-Latin1",
                   "finnish": "Finnish_Suomi-Latin1",
                   "french": "French_Francais-Latin1",
                   "german": "German_Deutsch-Latin1",
                   "hungarian": "Hungarian_Magyar-UTF8",
                   "italian": "Italian_Italiano-Latin1",
                   "norwegian": "Norwegian-Latin1",
                   "porter": "English-Latin1",
                   "portuguese": "Portuguese_Portugues-Latin1",
                   "romanian": "Romanian_Romana-Latin2",
                   "russian": "Russian-UTF8",
                   "spanish": "Spanish-Latin1",
                   "swedish": "Swedish_Svenska-Latin1",
                   }

    banner = "******************************"
    print("\n")
    print(banner)
    print("Demo for the Snowball stemmers")
    print(banner)

    prompt = ("Please enter the name of the language "
              + "to be demonstrated\n"
              + "/".join(SnowballStemmer.languages)
              + "\n"
              + "(enter 'exit' in order to leave): ")

    while True:
        language = input(prompt)

        if language == "exit":
            break

        if language not in SnowballStemmer.languages:
            print(("\nOops, there is no stemmer for this language. "
                   + "Please try again.\n"))
            continue

        stemmer = SnowballStemmer(language)
        excerpt = udhr.words(udhr_corpus[language])[:300]

        stemmed = " ".join(stemmer.stem(w) for w in excerpt)
        # Re-wrap the text to at most ~70 characters per line.
        stemmed = re.sub(r"(.{,70})\s", r'\1\n', stemmed + ' ').rstrip()
        excerpt = " ".join(excerpt)
        excerpt = re.sub(r"(.{,70})\s", r'\1\n', excerpt + ' ').rstrip()

        print("\n")
        print('-' * 70)
        print('ORIGINAL'.center(70))
        print(excerpt)
        print("\n\n")
        print('STEMMED RESULTS'.center(70))
        print(stemmed)
        print('-' * 70)
        print("\n")
| mit |
burke-software/BSC-website | wiki/plugins/attachments/settings.py | 4 | 3436 | from __future__ import absolute_import
from __future__ import unicode_literals
from django import VERSION
from django.conf import settings as django_settings
from wiki.conf import settings as wiki_settings
from django.core.exceptions import ImproperlyConfigured
# This is not used in django 1.7+
APP_LABEL = 'attachments' if VERSION < (1, 7) else None

SLUG = "attachments"

# Please see this note about support for UTF-8 files on django/apache:
# https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/modwsgi/#if-you-get-a-unicodeencodeerror

# Allow anonymous users upload access (not nice on an open network)
# WIKI_ATTACHMENTS_ANONYMOUS can override this, otherwise the default
# in wiki.conf.settings is used.
ANONYMOUS = getattr(django_settings, 'WIKI_ATTACHMENTS_ANONYMOUS',
                    wiki_settings.ANONYMOUS_UPLOAD)

# Maximum file sizes: Please use something like LimitRequestBody on
# your web server.
# http://httpd.apache.org/docs/2.2/mod/core.html#LimitRequestBody

# Where to store article attachments, relative to MEDIA_ROOT
# You should NEVER enable directory indexing in MEDIA_ROOT/UPLOAD_PATH !
# Actually, you can completely disable serving it, if you want. Files are
# sent to the user through a Django view that reads and streams a file.
UPLOAD_PATH = getattr(django_settings, 'WIKI_ATTACHMENTS_PATH',
                      'wiki/attachments/%aid/')

# Should the upload path be obscurified? If so, a random hash will be added
# to the path such that someone can not guess the location of files (if you
# have restricted permissions and the files are still located within the web
# server's document root).
UPLOAD_PATH_OBSCURIFY = getattr(django_settings,
                                'WIKI_ATTACHMENTS_PATH_OBSCURIFY', True)

# Allowed extensions. Empty to disallow uploads completely.
# No files are saved without appending ".upload" to the file to ensure that
# your web server never actually executes some script.
# Case insensitive.
# You are asked to explicitly enter all file extensions that you want
# to allow. For your own safety.
FILE_EXTENSIONS = getattr(django_settings, 'WIKI_ATTACHMENTS_EXTENSIONS',
                          ['pdf', 'doc', 'odt', 'docx', 'txt'])

# Storage backend to use, default is to use the same as the rest of the
# wiki, which is set in WIKI_STORAGE_BACKEND, but you can override it
# with WIKI_ATTACHMENTS_STORAGE_BACKEND
STORAGE_BACKEND = getattr(django_settings,
                          'WIKI_ATTACHMENTS_STORAGE_BACKEND',
                          wiki_settings.STORAGE_BACKEND)

# SAFETY FIRST! Only store files with an appended .upload extension to be
# sure that something nasty does not get executed on the server.
APPEND_EXTENSION = getattr(django_settings,
                           'WIKI_ATTACHMENTS_APPEND_EXTENSION', True)

# Important for S3 backends etc.: If your storage backend does not have a
# .path attribute for the file, but only a .url attribute, you should use
# False.
# This will reveal the direct download URL so it does not work perfectly for
# files you wish to be kept private.
USE_LOCAL_PATH = getattr(django_settings, 'WIKI_ATTACHMENTS_LOCAL_PATH', True)

# Appending .upload is only safe when files are served through the local
# filesystem view, so refuse this inconsistent combination up front.
if (not USE_LOCAL_PATH) and APPEND_EXTENSION:
    raise ImproperlyConfigured(
        "django-wiki (attachment plugin) not USE_LOCAL_PATH and APPEND_EXTENSION: "
        "You have configured to append .upload and not use local paths. That won't "
        "work as all your attachments will be stored and sent with a .upload "
        # NOTE: a space was missing here, producing "storingthe" in the
        # rendered error message.
        "extension. You have to trust your storage backend to be safe for storing "
        "the extensions you have allowed."
    )
| gpl-3.0 |
aewhatley/scikit-learn | benchmarks/bench_multilabel_metrics.py | 86 | 7286 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred).  partial() pre-binds the
# ``average`` mode so every entry shares the same two-argument signature.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}

# Format name -> converter from a dense label-indicator matrix to the
# multilabel representation being benchmarked.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.

    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.

    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.

    classes : array-like of ints (1d or 0d)
        The number of classes in the input.

    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.

    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)

    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)

    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Fixed seeds make the generated truth/prediction pair
        # deterministic across runs; the seeds differ from each other so
        # y_pred is not identical to y_true.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   return_indicator=True,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   return_indicator=True,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)

                # out[k, j] has shape (samples, classes, density); flat
                # index i is valid because enumerate(product(...)) walks
                # those three parameters in the same row-major order.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot benchmark timings by metric and format against the varying
    parameter named by x_label.
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    n_colors = len(metric_colors)
    for i, metric in enumerate(metrics):
        # Colors cycle per metric; markers distinguish formats.
        color = metric_colors[i % n_colors]
        for j, fmt in enumerate(formats):
            ax.plot(x_ticks, results[i, j].flat,
                    label='{}, {}'.format(metric, fmt),
                    marker=format_markers[j],
                    color=color)
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
# Command-line entry point: parse the options, run the benchmark, print
# a table, and optionally plot timings against one varying parameter.
if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()

    if args.plot is not None:
        # Replace the scalar CLI value of the plotted parameter with an
        # array of steps so benchmark() times each step separately.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            # Integer-valued parameters: round and deduplicate the steps.
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)

    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)

    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)

    _tabulate(results, args.metrics, args.formats)

    if args.plot is not None:
        # `steps` is guaranteed to be bound here because it was assigned
        # under the same `args.plot is not None` condition above.
        print('Displaying plot', file=sys.stderr)
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
mwrightevent38/MissionPlanner | Lib/site-packages/numpy/f2py/f2py2e.py | 51 | 21653 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
__version__ = "$Revision: 1.90 $"[10:-1]
import __version__
f2py_version = __version__.version
import sys
import os
import pprint
import types
import re
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
import crackfortran
import rules
import cb_rules
import auxfuncs
import cfuncs
import f90mod_rules
outmess = auxfuncs.outmess
try:
from numpy import __version__ as numpy_version
except ImportError:
numpy_version = 'N/A'
__usage__ = """\
Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
--2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
--2d-numeric Use f2py2e tool with Numeric support.
--2d-numarray Use f2py2e tool with Numarray support.
--g3-numpy Use 3rd generation f2py from the separate f2py package.
[NOT AVAILABLE YET]
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mktemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{document},
\\end{document}).
--[no-]rest-doc Create (or not) <modulename>module.rst.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include_paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
interface is printed out at exit (platforms: Linux).
When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
sent to stderr whenever F2PY interface makes a copy of an
array. Integer <int> sets the threshold for array sizes when
a message should be shown.
Version: %s
numpy Version: %s
Requires: Python 2.3 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2005 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version)
def scaninputline(inputline):
    """Parse an f2py command line (Python 2 code).

    :param inputline: list of command-line tokens (``sys.argv[1:]`` style).
    :return: ``(files, options)`` where ``files`` lists the readable input
             files and ``options`` is the dict consumed by
             ``callcrackfortran`` and the rule modules.
    """
    files,funcs,skipfuncs,onlyfuncs,debug=[],[],[],[],[]
    # f is the token-collection mode: 1 = collect file names,
    # 0 = collect 'only:' function names, -1 = collect 'skip:' names.
    # f2..f9 are one-shot flags meaning "the next token is the value of
    # -h / -m / --build-dir / --include_paths / --coutput /
    # --f2py-wrapper-output"; f5 records --show-compilers.
    f,f2,f3,f4,f5,f6,f7,f8,f9=1,0,0,0,0,0,0,0,0
    verbose = 1
    # dolc: lowercase mode; -1 means "decide later from -h presence".
    dolc=-1
    dolatexdoc = 0
    dorestdoc = 0
    wrapfuncs = 1
    buildpath = '.'
    include_paths = []
    signsfile,modulename=None,None
    options = {'buildpath':buildpath,
               'coutput': None,
               'f2py_wrapper_output': None}
    for l in inputline:
        if l=='': pass
        elif l=='only:': f=0
        elif l=='skip:': f=-1
        elif l==':': f=1;f4=0
        elif l[:8]=='--debug-': debug.append(l[8:])
        elif l=='--lower': dolc=1
        elif l=='--build-dir': f6=1
        elif l=='--no-lower': dolc=0
        elif l=='--quiet': verbose = 0
        elif l=='--verbose': verbose += 1
        elif l=='--latex-doc': dolatexdoc=1
        elif l=='--no-latex-doc': dolatexdoc=0
        elif l=='--rest-doc': dorestdoc=1
        elif l=='--no-rest-doc': dorestdoc=0
        elif l=='--wrap-functions': wrapfuncs=1
        elif l=='--no-wrap-functions': wrapfuncs=0
        elif l=='--short-latex': options['shortlatex']=1
        elif l=='--coutput': f8=1
        elif l=='--f2py-wrapper-output': f9=1
        elif l=='--overwrite-signature': options['h-overwrite']=1
        elif l=='-h': f2=1
        elif l=='-m': f3=1
        elif l[:2]=='-v':
            print f2py_version
            sys.exit()
        elif l=='--show-compilers':
            f5=1
        elif l[:8]=='-include':
            # -include'<header>' registers a user C include line.
            cfuncs.outneeds['userincludes'].append(l[9:-1])
            cfuncs.userincludes[l[9:-1]]='#include '+l[8:]
        elif l[:15]=='--include_paths':
            f7=1
        elif l[0]=='-':
            errmess('Unknown option %s\n'%`l`)
            sys.exit()
        # The branches below consume the value token announced by a
        # one-shot flag set in a previous iteration.
        elif f2: f2=0;signsfile=l
        elif f3: f3=0;modulename=l
        elif f6: f6=0;buildpath=l
        elif f7: f7=0;include_paths.extend(l.split(os.pathsep))
        elif f8: f8=0;options["coutput"]=l
        elif f9: f9=0;options["f2py_wrapper_output"]=l
        elif f==1:
            # Plain token in file mode: keep it only if it is readable.
            try:
                open(l).close()
                files.append(l)
            except IOError,detail:
                errmess('IOError: %s. Skipping file "%s".\n'%(str(detail),l))
        elif f==-1: skipfuncs.append(l)
        elif f==0: onlyfuncs.append(l)
    if not f5 and not files and not modulename:
        print __usage__
        sys.exit()
    if not os.path.isdir(buildpath):
        if not verbose:
            outmess('Creating build directory %s'%(buildpath))
        os.mkdir(buildpath)
    if signsfile:
        signsfile = os.path.join(buildpath,signsfile)
    if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
        errmess('Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n'%(signsfile))
        sys.exit()
    options['debug']=debug
    options['verbose']=verbose
    # Lowercasing defaults to on when generating a signature file (-h),
    # off otherwise, unless --lower/--no-lower was given explicitly.
    if dolc==-1 and not signsfile: options['do-lower']=0
    else: options['do-lower']=dolc
    if modulename: options['module']=modulename
    if signsfile: options['signsfile']=signsfile
    if onlyfuncs: options['onlyfuncs']=onlyfuncs
    if skipfuncs: options['skipfuncs']=skipfuncs
    options['dolatexdoc'] = dolatexdoc
    options['dorestdoc'] = dorestdoc
    options['wrapfuncs'] = wrapfuncs
    options['buildpath']=buildpath
    options['include_paths']=include_paths
    return files,options
def callcrackfortran(files,options):
    """Run crackfortran over *files*, configured from *options*.

    Side effects: publishes *options* to the ``rules`` module and copies
    individual settings onto the ``crackfortran`` module before parsing.
    When 'signsfile' is present, also writes the generated .pyf
    signatures to that file (or to stdout).

    :return: postlist, the list of parsed module blocks, each augmented
             with its 'coutput' and 'f2py_wrapper_output' target names.
    """
    rules.options=options
    funcs=[]
    # crackfortran is configured via module-level attributes.
    crackfortran.debug=options['debug']
    crackfortran.verbose=options['verbose']
    if 'module' in options:
        crackfortran.f77modulename=options['module']
    if 'skipfuncs' in options:
        crackfortran.skipfuncs=options['skipfuncs']
    if 'onlyfuncs' in options:
        crackfortran.onlyfuncs=options['onlyfuncs']
    # Slice assignment mutates the existing list object in place.
    crackfortran.include_paths[:]=options['include_paths']
    crackfortran.dolowercase=options['do-lower']
    postlist=crackfortran.crackfortran(files)
    if 'signsfile' in options:
        outmess('Saving signatures to file "%s"\n'%(options['signsfile']))
        pyf=crackfortran.crack2fortran(postlist)
        # A signature path ending in 'stdout' means print to stdout.
        if options['signsfile'][-6:]=='stdout':
            sys.stdout.write(pyf)
        else:
            f=open(options['signsfile'],'w')
            f.write(pyf)
            f.close()
    # Default C output name is <module>module.c unless --coutput was given.
    if options["coutput"] is None:
        for mod in postlist:
            mod["coutput"] = "%smodule.c" % mod["name"]
    else:
        for mod in postlist:
            mod["coutput"] = options["coutput"]
    if options["f2py_wrapper_output"] is None:
        for mod in postlist:
            mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
    else:
        for mod in postlist:
            mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
    return postlist
def buildmodules(lst):
    """Generate C/API sources for each parsed module block in *lst*.

    '__user__' blocks describe callback signatures and are handed to
    cb_rules; every other block becomes an extension module, unless it
    is itself used by another module (then it is skipped).

    :return: dict mapping module name -> build-information dict as
             produced by rules.buildmodule (accumulated via dict_append).
    """
    cfuncs.buildcfuncs()
    outmess('Building modules...\n')
    modules,mnames,isusedby=[],[],{}
    for i in range(len(lst)):
        if '__user__' in lst[i]['name']:
            # Callback-signature module: build callbacks only.
            cb_rules.buildcallbacks(lst[i])
        else:
            # Record reverse 'use' dependencies: isusedby[u] lists the
            # names of modules that use module u.
            if 'use' in lst[i]:
                for u in lst[i]['use'].keys():
                    if u not in isusedby:
                        isusedby[u]=[]
                    isusedby[u].append(lst[i]['name'])
            modules.append(lst[i])
            mnames.append(lst[i]['name'])
    ret = {}
    for i in range(len(mnames)):
        if mnames[i] in isusedby:
            # A module used by another one is built as part of its user.
            outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(map(lambda s:'"%s"'%s,isusedby[mnames[i]]))))
        else:
            # um collects the module blocks this module uses, so their
            # wrappers can be generated together.
            um=[]
            if 'use' in modules[i]:
                for u in modules[i]['use'].keys():
                    if u in isusedby and u in mnames:
                        um.append(modules[mnames.index(u)])
                    else:
                        outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i],u))
            ret[mnames[i]] = {}
            dict_append(ret[mnames[i]],rules.buildmodule(modules[i],um))
    return ret
def dict_append(d_out, d_in):
    """Merge *d_in* into *d_out*, accumulating every value in a list.

    Each key of *d_in* is created in *d_out* (as an empty list) if it is
    missing.  List values are concatenated onto the existing list;
    scalar values are appended.  *d_out* is modified in place; nothing
    is returned.
    """
    for k, v in d_in.items():
        if k not in d_out:
            d_out[k] = []
        # isinstance replaces the Py2-only `type(v) is types.ListType`
        # check and also accepts list subclasses.
        if isinstance(v, list):
            # Concatenate (rebinding, not extending) so lists stay flat.
            d_out[k] = d_out[k] + v
        else:
            d_out[k].append(v)
def run_main(comline_list):
    """Run f2py as if string.join(comline_list,' ') is used as a command line.

    In case of using -h flag, return None.  Otherwise return a dict
    mapping module names to the build information from buildmodules(),
    each extended with the fortranobject.[ch] source paths.
    """
    # Re-import crackfortran so repeated calls start from clean
    # module-level state (crackfortran is configured via globals).
    if sys.version_info[0] >= 3:
        import imp
        imp.reload(crackfortran)
    else:
        reload(crackfortran)
    # fortranobject.[ch] ship with f2py and are compiled into every
    # generated extension module.
    f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__))
    fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h')
    fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c')
    files,options=scaninputline(comline_list)
    auxfuncs.options=options
    postlist=callcrackfortran(files,options)
    # Map each used module name to the names of the modules that use it.
    isusedby={}
    for i in range(len(postlist)):
        if 'use' in postlist[i]:
            for u in postlist[i]['use'].keys():
                if u not in isusedby:
                    isusedby[u]=[]
                isusedby[u].append(postlist[i]['name'])
    for i in range(len(postlist)):
        if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']:
            if postlist[i]['name'] in isusedby:
                #if not quiet:
                outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(map(lambda s:'"%s"'%s,isusedby[postlist[i]['name']]))))
    if 'signsfile' in options:
        # -h mode: only the signature file was requested; stop here.
        if options['verbose']>1:
            outmess('Stopping. Edit the signature file and then run f2py on the signature file: ')
            outmess('%s %s\n'%(os.path.basename(sys.argv[0]),options['signsfile']))
        return
    for i in range(len(postlist)):
        if postlist[i]['block']!='python module':
            if 'python module' not in options:
                errmess('Tip: If your original code is Fortran source then you must use -m option.\n')
            raise TypeError,'All blocks must be python module blocks but got %s'%(`postlist[i]['block']`)
    auxfuncs.debugoptions=options['debug']
    f90mod_rules.options=options
    auxfuncs.wrapfuncs=options['wrapfuncs']
    ret=buildmodules(postlist)
    for mn in ret.keys():
        dict_append(ret[mn],{'csrc':fobjcsrc,'h':fobjhsrc})
    return ret
def filter_files(prefix,suffix,files,remove_prefix=None):
    """
    Filter files by prefix and suffix.

    Returns ``(filtered, rest)``: entries (stripped of surrounding
    whitespace) matching ``prefix.*suffix`` go to *filtered* — with the
    prefix chopped off when *remove_prefix* is true — everything else
    to *rest*.
    """
    pattern = re.compile(prefix + r'.*' + suffix + r'\Z')
    offset = len(prefix) if remove_prefix else 0
    matched, unmatched = [], []
    for name in (entry.strip() for entry in files):
        if pattern.match(name):
            matched.append(name[offset:])
        else:
            unmatched.append(name)
    return matched, unmatched
def get_prefix(module):
    """Return the grandparent directory of *module*'s file (its install prefix)."""
    return os.path.dirname(os.path.dirname(module.__file__))
def run_compile():
    """
    Do it all in one call!
    """
    # NOTE: Python-2-era code (print statements, backtick repr, and
    # list-returning filter()); kept byte-identical, only comments added.
    # Parses the f2py command line by destructively editing sys.argv, then
    # drives numpy.distutils setup() to build the extension in one go.
    import tempfile
    i = sys.argv.index('-c')
    del sys.argv[i]
    remove_build_dir = 0
    try: i = sys.argv.index('--build-dir')
    except ValueError: i=None
    if i is not None:
        build_dir = sys.argv[i+1]
        del sys.argv[i+1]
        del sys.argv[i]
    else:
        # No --build-dir: build in a temporary directory, removed after setup().
        remove_build_dir = 1
        build_dir = os.path.join(tempfile.mktemp())
    # Claim each flag group from sys.argv in turn, removing what is claimed.
    sysinfo_flags = filter(re.compile(r'[-][-]link[-]').match,sys.argv[1:])
    sys.argv = filter(lambda a,flags=sysinfo_flags:a not in flags,sys.argv)
    if sysinfo_flags:
        # Strip the '--link-' prefix, keeping only the resource name.
        sysinfo_flags = [f[7:] for f in sysinfo_flags]
    f2py_flags = filter(re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include').match,sys.argv[1:])
    sys.argv = filter(lambda a,flags=f2py_flags:a not in flags,sys.argv)
    f2py_flags2 = []
    fl = 0
    # Collect 'only:'/'skip:' ... ':' sections verbatim for f2py.
    for a in sys.argv[1:]:
        if a in ['only:','skip:']:
            fl = 1
        elif a==':':
            fl = 0
        if fl or a==':':
            f2py_flags2.append(a)
    if f2py_flags2 and f2py_flags2[-1]!=':':
        f2py_flags2.append(':')
    f2py_flags.extend(f2py_flags2)
    sys.argv = filter(lambda a,flags=f2py_flags2:a not in flags,sys.argv)
    flib_flags = filter(re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)').match,sys.argv[1:])
    sys.argv = filter(lambda a,flags=flib_flags:a not in flags,sys.argv)
    fc_flags = filter(re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))').match,sys.argv[1:])
    sys.argv = filter(lambda a,flags=fc_flags:a not in flags,sys.argv)
    if 1:
        # Normalize --fcompiler= values against the vendors known to
        # numpy.distutils; unknown vendors are reported and passed through.
        del_list = []
        for s in flib_flags:
            v = '--fcompiler='
            if s[:len(v)]==v:
                from numpy.distutils import fcompiler
                fcompiler.load_all_fcompiler_classes()
                allowed_keys = fcompiler.fcompiler_class.keys()
                nv = ov = s[len(v):].lower()
                if ov not in allowed_keys:
                    vmap = {} # XXX
                    try:
                        nv = vmap[ov]
                    except KeyError:
                        if ov not in vmap.values():
                            print 'Unknown vendor: "%s"' % (s[len(v):])
                    nv = ov
                i = flib_flags.index(s)
                flib_flags[i] = '--fcompiler=' + nv
                continue
        for s in del_list:
            i = flib_flags.index(s)
            del flib_flags[i]
        assert len(flib_flags)<=2,`flib_flags`
    setup_flags = filter(re.compile(r'[-][-](verbose)').match,sys.argv[1:])
    sys.argv = filter(lambda a,flags=setup_flags:a not in flags,sys.argv)
    if '--quiet' in f2py_flags:
        setup_flags.append('--quiet')
    modulename = 'untitled'
    sources = sys.argv[1:]
    if '-m' in sys.argv:
        i = sys.argv.index('-m')
        modulename = sys.argv[i+1]
        del sys.argv[i+1],sys.argv[i]
        sources = sys.argv[1:]
    else:
        # No -m given: derive the module name from the first .pyf file.
        from numpy.distutils.command.build_src import get_f2py_modulename
        pyf_files,sources = filter_files('','[.]pyf([.]src|)',sources)
        sources = pyf_files + sources
        for f in pyf_files:
            modulename = get_f2py_modulename(f)
            if modulename:
                break
    # Partition the remaining arguments by prefix/suffix conventions.
    extra_objects, sources = filter_files('','[.](o|a|so)',sources)
    include_dirs, sources = filter_files('-I','',sources,remove_prefix=1)
    library_dirs, sources = filter_files('-L','',sources,remove_prefix=1)
    libraries, sources = filter_files('-l','',sources,remove_prefix=1)
    undef_macros, sources = filter_files('-U','',sources,remove_prefix=1)
    define_macros, sources = filter_files('-D','',sources,remove_prefix=1)
    # Leftovers from the old Numeric/numarray support; currently unused.
    using_numarray = 0
    using_numeric = 0
    # Turn 'NAME=VALUE' / 'NAME' strings into (name, value) tuples.
    for i in range(len(define_macros)):
        name_value = define_macros[i].split('=',1)
        if len(name_value)==1:
            name_value.append(None)
        if len(name_value)==2:
            define_macros[i] = tuple(name_value)
        else:
            print 'Invalid use of -D:',name_value
    from numpy.distutils.system_info import get_info
    num_include_dir = None
    num_info = {}
    #import numpy
    #n = 'numpy'
    #p = get_prefix(numpy)
    #from numpy.distutils.misc_util import get_numpy_include_dirs
    #num_info = {'include_dirs': get_numpy_include_dirs()}
    if num_info:
        include_dirs.extend(num_info.get('include_dirs',[]))
    from numpy.distutils.core import setup,Extension
    ext_args = {'name':modulename,'sources':sources,
                'include_dirs': include_dirs,
                'library_dirs': library_dirs,
                'libraries': libraries,
                'define_macros': define_macros,
                'undef_macros': undef_macros,
                'extra_objects': extra_objects,
                'f2py_options': f2py_flags,
                }
    if sysinfo_flags:
        # Fold in resources requested via --link-<name>.
        from numpy.distutils.misc_util import dict_append
        for n in sysinfo_flags:
            i = get_info(n)
            if not i:
                outmess('No %s resources found in system'\
                        ' (try `f2py --help-link`)\n' % (`n`))
            dict_append(ext_args,**i)
    ext = Extension(**ext_args)
    # Rebuild sys.argv as a distutils 'build' command line.
    sys.argv = [sys.argv[0]] + setup_flags
    sys.argv.extend(['build',
                     '--build-temp',build_dir,
                     '--build-base',build_dir,
                     '--build-platlib','.'])
    if fc_flags:
        sys.argv.extend(['config_fc']+fc_flags)
    if flib_flags:
        sys.argv.extend(['build_ext']+flib_flags)
    setup(ext_modules = [ext])
    if remove_build_dir and os.path.exists(build_dir):
        # Clean up the temporary build directory created above.
        import shutil
        outmess('Removing build directory %s\n'%(build_dir))
        shutil.rmtree(build_dir)
def main():
    """f2py entry point: dispatch to link-info display, compile mode, or a plain run."""
    args = sys.argv[1:]
    if '--help-link' in args:
        sys.argv.remove('--help-link')
        from numpy.distutils.system_info import show_all
        show_all()
        return
    if '-c' in args:
        run_compile()
    else:
        run_main(args)
#if __name__ == "__main__":
# main()
# EOF
| gpl-3.0 |
saurabh6790/trufil_lib | core/page/messages/messages.py | 16 | 3451 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from core.doctype.notification_count.notification_count import delete_notification_count_for
@webnotes.whitelist()
def get_list(arg=None):
    """get list of messages

    Marks the current user's messages as read, then returns one page of
    Comment rows (comment_doctype 'Message').  Pagination parameters come
    from the request (webnotes.form_dict).
    """
    webnotes.form_dict['limit_start'] = int(webnotes.form_dict['limit_start'])
    webnotes.form_dict['limit_page_length'] = int(webnotes.form_dict['limit_page_length'])
    webnotes.form_dict['user'] = webnotes.session['user']
    # set all messages as read
    webnotes.conn.begin()
    webnotes.conn.sql("""UPDATE `tabComment`
    set docstatus = 1 where comment_doctype in ('My Company', 'Message')
    and comment_docname = %s
    """, webnotes.user.name)
    delete_notification_count_for("Messages")
    webnotes.conn.commit()
    if webnotes.form_dict['contact'] == webnotes.session['user']:
        # return messages
        # Own "contact" selected: show everything sent to or by the user.
        return webnotes.conn.sql("""select * from `tabComment`
        where (owner=%(contact)s
        or comment_docname=%(user)s
        or (owner=comment_docname and ifnull(parenttype, "")!="Assignment"))
        and comment_doctype ='Message'
        order by creation desc
        limit %(limit_start)s, %(limit_page_length)s""", webnotes.local.form_dict, as_dict=1)
    else:
        # Conversation with another contact (both directions).
        # NOTE(review): SQL AND binds tighter than OR, so the
        # comment_doctype='Message' filter applies only to the last OR arm
        # here — presumably the ORs were meant to be parenthesized; confirm.
        return webnotes.conn.sql("""select * from `tabComment`
        where (owner=%(contact)s and comment_docname=%(user)s)
        or (owner=%(user)s and comment_docname=%(contact)s)
        or (owner=%(contact)s and comment_docname=%(contact)s)
        and comment_doctype ='Message'
        order by creation desc
        limit %(limit_start)s, %(limit_page_length)s""", webnotes.local.form_dict, as_dict=1)
@webnotes.whitelist()
def get_active_users(arg=None):
    # Enabled, non-website, non-system profiles; has_session counts sessions
    # touched within the last hour (i.e. "currently online").
    return webnotes.conn.sql("""select name,
        (select count(*) from tabSessions where user=tabProfile.name
        and timediff(now(), lastupdate) < time("01:00:00")) as has_session
     	from tabProfile
        where ifnull(enabled,0)=1 and
        docstatus < 2 and
        ifnull(user_type, '')!='Website User' and
        name not in ('Administrator', 'Guest')
        order by first_name""", as_dict=1)
@webnotes.whitelist()
def post(arg=None):
    """post message

    Creates a Comment of type 'Message' addressed to arg['contact'] with
    text arg['txt']; optionally emails the recipient when arg['notify'].
    """
    import webnotes
    if not arg:
        arg = {}
    arg.update(webnotes.local.form_dict)
    # arg may arrive as a JSON string (Python 2 only: basestring).
    if isinstance(arg, basestring):
        import json
        arg = json.loads(arg)
    from webnotes.model.doc import Document
    d = Document('Comment')
    d.parenttype = arg.get("parenttype")
    d.comment = arg['txt']
    d.comment_docname = arg['contact']
    d.comment_doctype = 'Message'
    d.save()
    # Recipient's unread counter must be recomputed.
    delete_notification_count_for("Messages")
    import webnotes.utils
    if webnotes.utils.cint(arg.get('notify')):
        notify(arg)
@webnotes.whitelist()
def delete(arg=None):
    # Permanently removes one message Comment; the row name comes straight
    # from the request (only guarded by the whitelist decorator).
    webnotes.conn.sql("""delete from `tabComment` where name=%s""",
        webnotes.form_dict['name']);
def notify(arg=None):
    """Email arg['contact'] that they have a new message from the current user."""
    from webnotes.utils import cstr, get_fullname, get_url
    fn = get_fullname(webnotes.user.name) or webnotes.user.name
    url = get_url()
    message = '''You have a message from <b>%s</b>:
	%s
	To answer, please login to your erpnext account at \
	<a href=\"%s\" target='_blank'>%s</a>
	''' % (fn, arg['txt'], url, url)
    # Fall back to the raw user/contact id when no email is on the Profile.
    sender = webnotes.conn.get_value("Profile", webnotes.user.name, "email") \
        or webnotes.user.name
    recipient = [webnotes.conn.get_value("Profile", arg["contact"], "email") \
        or arg["contact"]]
    from webnotes.utils.email_lib import sendmail
    sendmail(recipient, sender, message, arg.get("subject") or "You have a message from %s" % (fn,))
| mit |
Tatwi/legend-of-hondo | MMOCoreORB/utils/gmock-1.6.0/gtest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'  # fixed: '=' made this an assignment chain that clobbered os.name and set IS_WINDOWS to the truthy string 'nt' everywhere
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop() with a default is a no-op when the variable is absent.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  # None means "flag not given"; otherwise pass --gtest_color=<value>.
  args = [] if color_flag is None else ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  # Each case drives gtest_color_test_ with a (TERM, GTEST_COLOR env var,
  # --gtest_color flag) triple and checks whether colored output is chosen.
  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))
  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))
  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
    self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))
  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag always wins over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))
  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| agpl-3.0 |
kingvuplus/boom2 | lib/python/Components/Converter/Poll.py | 2 | 1160 | # Embedded file name: /usr/lib/enigma2/python/Components/Converter/Poll.py
from enigma import eTimer
class Poll(object):
    """Mixin for Converter/Element classes that need periodic refresh.

    Owns an eTimer that repeatedly fires poll(), which signals
    CHANGED_POLL through the subclass-provided changed() callback.
    Fixed: the original source's last line had dataset residue
    ("| gpl-2.0 |") fused onto it, making the module unparsable.
    """
    def __init__(self):
        self.__poll_timer = eTimer()
        self.__poll_timer.callback.append(self.poll)
        self.__interval = 1000
        self.__enabled = False
    def __setInterval(self, interval):
        # (Re)start the timer with the new interval while enabled.
        self.__interval = interval
        if self.__enabled:
            self.__poll_timer.start(self.__interval)
        else:
            self.__poll_timer.stop()
    def __setEnable(self, enabled):
        # Re-assigning poll_interval routes through __setInterval, which
        # starts or stops the timer according to the new enabled state.
        self.__enabled = enabled
        self.poll_interval = self.__interval
    poll_interval = property(lambda self: self.__interval, __setInterval)
    poll_enabled = property(lambda self: self.__enabled, __setEnable)
    def poll(self):
        self.changed((self.CHANGED_POLL,))
    def doSuspend(self, suspended):
        # Suspend stops the timer; resume polls once immediately and
        # restarts the timer via the poll_enabled property.
        if self.__enabled:
            if suspended:
                self.__poll_timer.stop()
            else:
                self.poll()
                self.poll_enabled = True
    def destroy(self):
        # Detach the timer callback so the object can be garbage collected.
        self.__poll_timer.callback.remove(self.poll)
domob1812/huntercore | test/functional/feature_includeconf.py | 1 | 3470 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
    # Exercises -includeconf by editing node0's config files between restarts
    # and checking which uacomment values end up in the node's subversion.
    def set_test_params(self):
        self.setup_clean_chain = False
        self.num_nodes = 1
    def setup_chain(self):
        super().setup_chain()
        # Create additional config files
        # - tmpdir/node0/relative.conf
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative\n")
        # - tmpdir/node0/relative2.conf
        with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative2\n")
        # Base config pulls in relative.conf via includeconf.
        with open(os.path.join(self.options.tmpdir, "node0", "namecoin.conf"), "a", encoding='utf8') as f:
            f.write("uacomment=main\nincludeconf=relative.conf\n")
    def run_test(self):
        self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.log.info("-includeconf cannot be used as command-line arg. subversion should still end with 'main; relative)/'")
        self.stop_node(0)
        self.start_node(0, extra_args=["-includeconf=relative2.conf"])
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from commandline; ignoring -includeconf=relative2.conf")
        self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
        # Recursion attempt: relative.conf itself tries to include another file.
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
        self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
        # Reset relative.conf, then include both files from the base config.
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative\n")
        with open(os.path.join(self.options.tmpdir, "node0", "namecoin.conf"), "a", encoding='utf8') as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| mit |
newerthcom/savagerebirth | libs/python-2.72/Lib/encodings/mac_croatian.py | 593 | 13889 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Generated by gencodec.py: stateless charmap codec backed by the
    # encoding_table/decoding_table defined at the bottom of this module.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap encoding is stateless, so `final` needs no special handling.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap decoding is stateless, so `final` needs no special handling.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream machinery comes from codecs.StreamWriter; Codec supplies encode().
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream machinery comes from codecs.StreamReader; Codec supplies decode().
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register the 'mac-croatian' encoding."""
    return codecs.CodecInfo(
        name='mac-croatian',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
Antiun/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/LoginTest.py | 384 | 1320 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
# Fixed: the '<>' inequality operator is Python-2-only (removed in Python 3);
# '!=' is equivalent and valid in both.  Guard keeps the star-imports from
# running when this module is loaded as an OpenOffice package component.
if __name__ != "package":
    from ServerParameter import *
    from lib.gui import *
class LoginTest:
    """Opens the server-connection dialog when no login session exists yet."""
    def __init__(self):
        # `loginstatus` and `Change` are provided by the star-imports above
        # (ServerParameter / lib.gui); prompt for credentials if not logged in.
        if not loginstatus:
            Change(None)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ghtmtt/QGIS | tests/src/python/test_qgspoint.py | 29 | 3554 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPoint.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
from qgis.core import QgsPointXY, QgsPoint, QgsWkbTypes
from qgis.PyQt.QtCore import QPointF
from qgis.testing import start_app, unittest
start_app()
class TestQgsPointXY(unittest.TestCase):
    def __init__(self, methodName):
        """Construct the test case instance for a single test method."""
        unittest.TestCase.__init__(self, methodName)
def setUp(self):
self.mPoint = QgsPointXY(10.0, 10.0)
def test_Point(self):
myExpectedValue = 10.0
myActualValue = self.mPoint.x()
myMessage = 'Expected: %s Got: %s' % (myExpectedValue, myActualValue)
assert myExpectedValue == myActualValue, myMessage
def test_pointToString(self):
myExpectedValue = '10, 10'
myActualValue = self.mPoint.toString()
myMessage = 'Expected: %s Got: %s' % (myExpectedValue, myActualValue)
assert myExpectedValue == myActualValue, myMessage
def test_hash(self):
a = QgsPointXY(2.0, 1.0)
b = QgsPointXY(2.0, 2.0)
c = QgsPointXY(1.0, 2.0)
d = QgsPointXY(1.0, 1.0)
e = QgsPointXY(2.0, 1.0)
assert a.__hash__() != b.__hash__()
assert e.__hash__() == a.__hash__()
mySet = set([a, b, c, d, e])
assert len(mySet) == 4
def test_issue_32443(self):
p = QgsPoint()
assert p.wkbType() == QgsWkbTypes.Point and p.x() != p.x() and p.y() != p.y()
# ctor from QgsPointXY should be available
p = QgsPoint(QgsPointXY(1, 2))
assert p.wkbType() == QgsWkbTypes.Point and p.x() == 1 and p.y() == 2
# ctor from QPointF should be available
p = QgsPoint(QPointF(1, 2))
assert p.wkbType() == QgsWkbTypes.Point and p.x() == 1 and p.y() == 2
p = QgsPoint(1, 2)
assert p.wkbType() == QgsWkbTypes.Point and p.x() == 1 and p.y() == 2
p = QgsPoint(1, 2, 3)
assert p.wkbType() == QgsWkbTypes.PointZ and p.x() == 1 and p.y() == 2 and p.z() == 3
p = QgsPoint(1, 2, z=3)
assert p.wkbType() == QgsWkbTypes.PointZ and p.x() == 1 and p.y() == 2 and p.z() == 3
p = QgsPoint(1, 2, m=3)
assert p.wkbType() == QgsWkbTypes.PointM and p.x() == 1 and p.y() == 2 and p.m() == 3
p = QgsPoint(1, 2, wkbType=QgsWkbTypes.PointM)
assert p.wkbType() == QgsWkbTypes.PointM and p.x() == 1 and p.y() == 2 and p.m() != p.m()
p = QgsPoint(1, 2, 3, 4)
assert p.wkbType() == QgsWkbTypes.PointZM and p.x() == 1 and p.y() == 2 and p.z() == 3 and p.m() == 4
p = QgsPoint(1, 2, m=4, z=3)
assert p.wkbType() == QgsWkbTypes.PointZM and p.x() == 1 and p.y() == 2 and p.z() == 3 and p.m() == 4
def test_empty_QgsPointXY(self):
p = QgsPoint(QgsPointXY())
assert p.isEmpty()
class TestQgsPoint(unittest.TestCase):
    """Tests for QgsPoint constructor argument validation."""

    def testInvalidConstructorArguments(self):
        """Test GH #34557"""
        # Non-numeric input must be rejected.
        with self.assertRaises(TypeError):
            point_0 = QgsPoint('a string')
        # Per GH #34557, constructing a QgsPoint from another QgsPoint
        # is expected to raise TypeError.
        with self.assertRaises(TypeError):
            point_a = QgsPoint(10, 20)
            point_b = QgsPoint(point_a)


if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
liukeke-start/HelloWorld | node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape the characters that are special in a ninja path.

    '$ ' must be handled first so that the space-escaping pass does not
    double-process an already-escaped dollar-space pair.
    """
    for plain, escaped in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(plain, escaped)
    return word
class Writer(object):
    """Emits well-formed, line-wrapped .ninja syntax to *output*.

    *output* is any object with a ``write()`` method; *width* is the
    column at which long lines are wrapped with ninja's ``$`` line
    continuation.
    """

    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        """Emit a blank line."""
        self.output.write('\n')

    def comment(self, text):
        """Emit *text* as '# ' comment lines, wrapped at self.width."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit ``key = value``.  None is skipped; a list is space-joined."""
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def pool(self, name, depth):
        """Emit a pool declaration with the given depth."""
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        """Emit a rule declaration; each optional keyword argument that is
        set becomes an indented rule variable."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a build statement.

        implicit deps are separated from explicit inputs with '|',
        order-only deps with '||'.  *variables* may be a dict or a
        sequence of (key, value) pairs.  Returns the output list.
        """
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))

        if implicit:
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))

        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)

            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        """Emit an include statement for *path*."""
        self._line('include %s' % path)

    def subninja(self, path):
        """Emit a subninja statement for *path*."""
        self._line('subninja %s' % path)

    def default(self, paths):
        """Emit a default-targets statement."""
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                # An even number of preceding '$' means the space itself
                # is unescaped and therefore a legal break point.
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break

            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)

        self.output.write(leading_space + text + '\n')

    def _as_list(self, input):
        """Normalize None/scalar/list to a list."""
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file
    without further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is Ninja's only metacharacter; doubling it escapes it.
    escaped = string.replace('$', '$$')
    return escaped
| apache-2.0 |
kiwitcms/Kiwi | tcms/core/tests/test_views.py | 1 | 4622 | # -*- coding: utf-8 -*-
# pylint: disable=too-many-ancestors
import os
import unittest
from http import HTTPStatus
from django import test
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.urls import include, path, reverse
from django.utils.translation import gettext_lazy as _
from tcms import urls
from tcms.tests import LoggedInTestCase
from tcms.tests.factories import (
TestExecutionFactory,
TestPlanFactory,
TestRunFactory,
UserFactory,
)
class TestDashboard(LoggedInTestCase):
    """Tests for the logged-in dashboard page ("core-views-index")."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        # used to reproduce Sentry #KIWI-TCMS-38 where rendering fails
        # with that particular value
        cls.chinese_tp = TestPlanFactory(name="缺货反馈测试需求", author=cls.tester)

        doc_url = "https://kiwitcms.readthedocs.io/en/latest/admin.html#configure-kiwi-s-base-url"
        # Rendered warning shown when the Sites framework still has the
        # default/unconfigured domain.
        cls.base_url_error_message = _(
            "Base URL is not configured! "
            'See <a href="%(doc_url)s">documentation</a> and '
            '<a href="%(admin_url)s">change it</a>'
        ) % {
            "doc_url": doc_url,
            "admin_url": reverse("admin:sites_site_change", args=[settings.SITE_ID]),
        }

    def test_when_not_logged_in_redirects_to_login(self):
        self.client.logout()

        response = self.client.get(reverse("core-views-index"))
        self.assertRedirects(
            response,
            reverse("tcms-login") + "?next=/",
            target_status_code=HTTPStatus.OK,
        )

    def test_when_logged_in_renders_dashboard(self):
        response = self.client.get(reverse("core-views-index"))

        self.assertContains(response, _("Test executions"))
        self.assertContains(response, _("Dashboard"))
        self.assertContains(response, _("Your Test plans"))

    def test_dashboard_shows_testruns_for_manager(self):
        test_run = TestRunFactory(manager=self.tester)

        response = self.client.get(reverse("core-views-index"))
        self.assertContains(response, test_run.summary)

    def test_dashboard_shows_testruns_for_default_tester(self):
        test_run = TestRunFactory(default_tester=self.tester)

        response = self.client.get(reverse("core-views-index"))
        self.assertContains(response, test_run.summary)

    def test_dashboard_shows_testruns_for_execution_assignee(self):
        execution = TestExecutionFactory(assignee=self.tester)

        response = self.client.get(reverse("core-views-index"))
        self.assertContains(response, execution.run.summary)

    def test_check_base_url_not_configured(self):
        response = self.client.get("/", follow=True)
        self.assertContains(response, self.base_url_error_message)

    def test_check_base_url_configured(self):
        # With a real Site configured the warning must disappear.
        site = Site.objects.create(domain="example.com", name="example")
        with test.override_settings(SITE_ID=site.pk):
            response = self.client.get("/", follow=True)

        self.assertNotContains(response, self.base_url_error_message)
@unittest.skipUnless(
    os.getenv("TEST_DASHBOARD_CHECK_UNAPPLIED_MIGRATIONS"),
    "Check for missing migrations testing is not enabled",
)
class TestDashboardCheckMigrations(test.TransactionTestCase):
    """The dashboard should warn when DB migrations have not been applied."""

    unapplied_migration_message = _(
        "unapplied migration(s). See "
        '<a href="https://kiwitcms.readthedocs.io/en/latest/'
        "installing_docker.html#initial-configuration-of-running-"
        'container">documentation</a>'
    )

    def test_check_unapplied_migrations(self):
        # Roll the "bugs" app back to zero so at least one migration
        # is pending when the dashboard renders.
        call_command("migrate", "bugs", "zero", verbosity=2, interactive=False)

        tester = UserFactory()
        tester.set_password("password")
        tester.save()

        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=tester.username,
            password="password",
        )

        response = self.client.get("/", follow=True)
        self.assertContains(response, self.unapplied_migration_message)
def exception_view(request):
    """A view that always raises, used to exercise the custom 500 handler."""
    raise Exception
# This module doubles as a ROOT_URLCONF (via override_settings on the
# server-error test class) exposing a view that always raises.
urlpatterns = [
    path("will-trigger-500/", exception_view),
    path("", include(urls)),
]

# Route 500 errors through the project's custom handler.
handler500 = "tcms.core.views.server_error"
@test.override_settings(ROOT_URLCONF=__name__)
class TestServerError(test.TestCase):
    """The custom 500 handler should render the 500.html template."""

    def test_custom_server_error_view(self):
        # raise_request_exception=False makes the test client return the
        # 500 response instead of re-raising the view's exception.
        client = test.Client(raise_request_exception=False)
        response = client.get("/will-trigger-500/")
        self.assertEqual(response.status_code, 500)
        self.assertTemplateUsed(response, "500.html")
| gpl-2.0 |
carlvlewis/bokeh | sphinx/source/docs/user_guide/source_examples/interaction_callbacks_for_selections.py | 17 | 1057 | from random import random
from bokeh.models import Callback, ColumnDataSource
from bokeh.plotting import hplot, figure, output_file, show
output_file("callback.html")
x = [random() for x in range(500)]
y = [random() for y in range(500)]
s1 = ColumnDataSource(data=dict(x=x, y=y))
p1 = figure(plot_width=400, plot_height=400, tools="lasso_select", title="Select Here")
p1.circle('x', 'y', source=s1, alpha=0.6)
s2 = ColumnDataSource(data=dict(x=[], y=[]))
p2 = figure(plot_width=400, plot_height=400, x_range=(0, 1), y_range=(0, 1),
tools="", title="Watch Here")
p2.circle('x', 'y', source=s2, alpha=0.6)
s1.callback = Callback(args=dict(s2=s2), code="""
var inds = cb_obj.get('selected')['1d'].indices;
var d1 = cb_obj.get('data');
var d2 = s2.get('data');
d2['x'] = []
d2['y'] = []
for (i = 0; i < inds.length; i++) {
d2['x'].push(d1['x'][inds[i]])
d2['y'].push(d1['y'][inds[i]])
}
s2.trigger('change');
""")
layout = hplot(p1, p2)
show(layout)
| bsd-3-clause |
Johnzero/erp | openerp/addons/claim_from_delivery/__openerp__.py | 9 | 1595 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Claim on Deliveries",
"version" : "1.0",
"author" : "OpenERP SA",
"category" : "Warehouse Management",
"depends" : ["base", "crm_claim", "stock"],
"init_xml" : [],
"demo_xml" : [],
"description": '''
Create a claim from a delivery order.
=====================================
Adds a Claim link to the delivery order.
''',
"update_xml" : ["claim_delivery_view.xml"],
"auto_install": False,
"installable": True,
"certificate" : "001101649349223746957",
'images': ['images/1_claim_link_delivery_order.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ajaygarg84/sugar | src/jarabe/model/buddy.py | 4 | 6712 | # Copyright (C) 2006-2007 Red Hat, Inc.
# Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from gi.repository import GObject
from gi.repository import GConf
import dbus
from telepathy.client import Connection
from telepathy.interfaces import CONNECTION
from sugar3.graphics.xocolor import XoColor
from sugar3.profile import get_profile
from jarabe.util.telepathy import connection_watcher
CONNECTION_INTERFACE_BUDDY_INFO = 'org.laptop.Telepathy.BuddyInfo'
_owner_instance = None
class BaseBuddyModel(GObject.GObject):
    """Common buddy state (key, nick, color, tags, current activity)
    shared by the owner buddy and remote buddies.  Each attribute is
    exposed as a GObject property so views can bind to change
    notifications."""
    __gtype_name__ = 'SugarBaseBuddyModel'

    def __init__(self, **kwargs):
        self._key = None
        self._nick = None
        self._color = None
        self._tags = None
        self._current_activity = None

        GObject.GObject.__init__(self, **kwargs)

    def get_nick(self):
        return self._nick

    def set_nick(self, nick):
        self._nick = nick

    nick = GObject.property(type=object, getter=get_nick, setter=set_nick)

    def get_key(self):
        return self._key

    def set_key(self, key):
        self._key = key

    key = GObject.property(type=object, getter=get_key, setter=set_key)

    def get_color(self):
        return self._color

    def set_color(self, color):
        self._color = color

    color = GObject.property(type=object, getter=get_color, setter=set_color)

    def get_tags(self):
        # Read-only property: no setter is defined for tags.
        return self._tags

    tags = GObject.property(type=object, getter=get_tags)

    def get_current_activity(self):
        return self._current_activity

    def set_current_activity(self, current_activity):
        # Only notify listeners when the activity actually changes.
        if self._current_activity != current_activity:
            self._current_activity = current_activity
            self.notify('current-activity')

    current_activity = GObject.property(type=object,
                                        getter=get_current_activity,
                                        setter=set_current_activity)

    def is_owner(self):
        # Subclasses must say whether they represent the local user.
        raise NotImplementedError
class OwnerBuddyModel(BaseBuddyModel):
    """The buddy representing this machine's user.

    Loads nick/color from GConf and the key from the profile, then
    pushes those properties to every Telepathy connection so other
    participants can see them.
    """
    __gtype_name__ = 'SugarOwnerBuddyModel'

    def __init__(self):
        BaseBuddyModel.__init__(self)
        client = GConf.Client.get_default()
        self.props.nick = client.get_string('/desktop/sugar/user/nick')
        color = client.get_string('/desktop/sugar/user/color')
        self.props.color = XoColor(color)

        self.props.key = get_profile().pubkey

        # Re-sync to all connections whenever nick or color changes.
        self.connect('notify::nick', self.__property_changed_cb)
        self.connect('notify::color', self.__property_changed_cb)

        bus = dbus.SessionBus()
        # Watch for Telepathy connections appearing on the session bus.
        bus.add_signal_receiver(
            self.__name_owner_changed_cb,
            signal_name='NameOwnerChanged',
            dbus_interface='org.freedesktop.DBus')

        # Pick up connections that already existed before we started.
        bus_object = bus.get_object(dbus.BUS_DAEMON_NAME, dbus.BUS_DAEMON_PATH)
        for service in bus_object.ListNames(
                dbus_interface=dbus.BUS_DAEMON_IFACE):
            if service.startswith(CONNECTION + '.'):
                path = '/%s' % service.replace('.', '/')
                Connection(service, path, bus,
                           ready_handler=self.__connection_ready_cb)

    def __connection_ready_cb(self, connection):
        self._sync_properties_on_connection(connection)

    def __name_owner_changed_cb(self, name, old, new):
        # old empty + new set means the name just appeared on the bus.
        if name.startswith(CONNECTION + '.') and not old and new:
            path = '/' + name.replace('.', '/')
            Connection(name, path, ready_handler=self.__connection_ready_cb)

    def __property_changed_cb(self, buddy, pspec):
        self._sync_properties()

    def _sync_properties(self):
        conn_watcher = connection_watcher.get_instance()
        for connection in conn_watcher.get_connections():
            self._sync_properties_on_connection(connection)

    def _sync_properties_on_connection(self, connection):
        """Publish key/color via the BuddyInfo interface, if supported."""
        if CONNECTION_INTERFACE_BUDDY_INFO in connection:
            properties = {}
            if self.props.key is not None:
                properties['key'] = dbus.ByteArray(self.props.key)
            if self.props.color is not None:
                properties['color'] = self.props.color.to_string()

            logging.debug('calling SetProperties with %r', properties)
            connection[CONNECTION_INTERFACE_BUDDY_INFO].SetProperties(
                properties,
                reply_handler=self.__set_properties_cb,
                error_handler=self.__error_handler_cb)

    def __set_properties_cb(self):
        logging.debug('__set_properties_cb')

    def __error_handler_cb(self, error):
        raise RuntimeError(error)

    def __connection_added_cb(self, conn_watcher, connection):
        self._sync_properties_on_connection(connection)

    def is_owner(self):
        return True
def get_owner_instance():
    """Return the process-wide OwnerBuddyModel singleton, creating it
    lazily on first use."""
    global _owner_instance
    if _owner_instance is None:
        _owner_instance = OwnerBuddyModel()
    return _owner_instance
class BuddyModel(BaseBuddyModel):
    """A remote buddy, identified by its Telepathy account, contact id
    and handle."""
    __gtype_name__ = 'SugarBuddyModel'

    def __init__(self, **kwargs):
        self._account = None
        self._contact_id = None
        self._handle = None

        BaseBuddyModel.__init__(self, **kwargs)

    def is_owner(self):
        return False

    def get_account(self):
        return self._account

    def set_account(self, account):
        self._account = account

    account = GObject.property(type=object, getter=get_account,
                               setter=set_account)

    def get_contact_id(self):
        return self._contact_id

    def set_contact_id(self, contact_id):
        self._contact_id = contact_id

    contact_id = GObject.property(type=object, getter=get_contact_id,
                                  setter=set_contact_id)

    def get_handle(self):
        return self._handle

    def set_handle(self, handle):
        self._handle = handle

    handle = GObject.property(type=object, getter=get_handle,
                              setter=set_handle)
| gpl-2.0 |
shizhai/wprobe | build_dir/host/scons-2.1.0/engine/SCons/Scanner/RC.py | 21 | 2075 | """SCons.Scanner.RC
This module implements the dependency scanner for RC (Windows
resource script) files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/RC.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Node.FS
import SCons.Scanner
import re
def RCScan():
    """Return a prototype Scanner instance for scanning RC source files.

    The scanner matches both ``#include`` preprocessor lines and resource
    statements (ICON, BITMAP, ...) that reference external files.
    """
    # All pattern fragments are raw strings: the original mixed raw and
    # non-raw literals, so escapes like \s in the non-raw parts were
    # invalid string escape sequences (a DeprecationWarning on modern
    # Python).  The resulting pattern text is unchanged.
    res_re = r'^(?:\s*#\s*(?:include)|' \
             r'.*?\s+(?:ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)' \
             r'\s*.*?)' \
             r'\s*(<|"| )([^>"\s]+)(?:[>"\s])*$'
    resScanner = SCons.Scanner.ClassicCPP("ResourceScanner",
                                          "$RCSUFFIXES",
                                          "CPPPATH",
                                          res_re)

    return resScanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
theuni/bitcoin | qa/rpc-tests/test_framework/util.py | 77 | 13035 | # Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """Regtest P2P listen port for node *n*; the PID-based offset avoids
    collisions between concurrently running test suites."""
    pid_offset = os.getpid() % 999
    return 11000 + n + pid_offset
def rpc_port(n):
    """Regtest RPC listen port for node *n*; the PID-based offset avoids
    collisions between concurrently running test suites."""
    pid_offset = os.getpid() % 999
    return 12000 + n + pid_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = float(Decimal("20000000.00000003"))
    round_tripped = json.loads(json.dumps(amount))
    # 20000000.00000003 BTC expressed in satoshis must survive exactly.
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count
    """
    # Busy-polls every `wait` seconds; there is no timeout, so a stuck
    # node makes this loop forever.
    while True:
        counts = [ x.getblockcount() for x in rpc_connections ]
        if counts == [ counts[0] ]*len(counts):
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    # Compares every node's mempool (as a set of txids) against node 0's.
    while True:
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match+1
        if num_match == len(rpc_connections):
            break
        time.sleep(wait)
# Map of node index -> subprocess.Popen for every bitcoind we launched.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname* (if needed), write its
    bitcoin.conf with regtest credentials and ports, and return the path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    settings = [
        "regtest=1\n",
        "rpcuser=rt\n",
        "rpcpassword=rt\n",
        "port=%d\n" % p2p_port(n),
        "rpcport=%d\n" % rpc_port(n),
    ]
    with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
        f.writelines(settings)
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.
    """
    # NOTE: this is Python 2 code (print statements below).
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Nodes 1-3 connect to node 0, forming a star topology.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
            # -rpcwait blocks until the daemon's RPC interface is up.
            subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)

        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))

    # Copy the cached chain into this test's directory.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    # The return value was previously bound to an unused local; the call
    # is made purely for its side effect of writing each node's datadir.
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it
    """
    # NOTE: this is Python 2 code (print statements below).
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BITCOIND", "bitcoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
    # Block until the daemon's RPC interface is ready.
    subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling bitcoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """Start multiple bitcoinds, return RPC connections to them"""
    # Default per-node argument/binary lists to all-None.
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node *n_node*'s regtest data directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node *i* via RPC and wait for its bitcoind process to exit."""
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Stop every node in *nodes* and clear the caller's list in place."""
    for conn in nodes:
        conn.stop()
    # Emptying array closes connections as a side effect
    del nodes[:]
def set_node_times(nodes, t):
    """Set mocktime *t* (epoch seconds) on every node."""
    for node in nodes:
        node.setmocktime(t)
def wait_bitcoinds():
    """Block until every launched bitcoind process has exited."""
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """One-way connect: ask *from_connection* to add node *node_num* as a peer."""
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    # enumerate() instead of range(len(...)): same order, clearer intent.
    for i, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return i
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    # Shuffle so repeated calls spend different coins.
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    # Returns (total value gathered as Decimal, list of input dicts).
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        # Remainder after the rounded half goes to the second change output.
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """

    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Two fees: one for the self-send, one for the final transaction.
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    # Random sender/receiver (may be the same node) and a randomized fee.
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    # Random sender/receiver (may be the same node) and a randomized fee.
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)

    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """
    Assert that calling fun(*args, **kwds) raises an exception of type exc.
    Raises AssertionError if no exception, or a different one, is raised.
    """
    try:
        fun(*args, **kwds)
    except exc:
        # Expected exception type seen: success.
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    raise AssertionError("No exception raised")
| mit |
rjschwei/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/resources/models/sku.py | 5 | 1492 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Sku(Model):
    """SKU for the resource.

    :param name: The SKU name.
    :type name: str
    :param tier: The SKU tier.
    :type tier: str
    :param size: The SKU size.
    :type size: str
    :param family: The SKU family.
    :type family: str
    :param model: The SKU model.
    :type model: str
    :param capacity: The SKU capacity.
    :type capacity: int
    """

    # msrest serialization map: attribute name -> wire key and wire type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'size': {'key': 'size', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
        'model': {'key': 'model', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(self, name=None, tier=None, size=None, family=None, model=None, capacity=None):
        # Store every SKU field directly on the instance; equivalent to the
        # generated one-assignment-per-attribute form.
        for attr_name, attr_value in (('name', name),
                                      ('tier', tier),
                                      ('size', size),
                                      ('family', family),
                                      ('model', model),
                                      ('capacity', capacity)):
            setattr(self, attr_name, attr_value)
| mit |
sahildua2305/eden | modules/tests/suite.py | 1 | 11030 | # -*- coding: utf-8 -*-
# This script is designed to be run as a Web2Py application:
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py
# or
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py -A testscript
import argparse
import unittest
from gluon import current
from gluon.storage import Storage
# Module-wide Storage object so tests can share data via current.data.
current.data = Storage()
# @ToDo: Load these only when running Selenium tests
# (shouldn't be required for Smoke tests)
# (means removing the *)
from selenium import webdriver
from tests.asset import *
from tests.inv import *
from tests.member import *
from tests.org import *
from tests.project import *
from tests.staff import *
from tests.volunteer import *
from tests.helpers import *
from tests.event import *
def loadAllTests():
    """
    Build a unittest suite containing every Selenium test class listed by
    the current template's tests.py (falling back to the default template).
    """
    # Run the file private/templates/<current_template>/tests.py to get tests list.
    path = os.path.join(request.folder,
                        "private", "templates",
                        settings.get_template(),
                        "tests.py")
    if os.path.exists(path):
        settings.exec_template(path)
    else:
        # Fallback to the default template tests.
        path = os.path.join(request.folder,
                            "private", "templates",
                            "default",
                            "tests.py")
        settings.exec_template(path)
    # exec_template populates current.selenium_tests with test class names
    # (as strings) -- presumably names made available here by the star
    # imports above; TODO confirm tests.py only lists such names.
    tests_list = current.selenium_tests
    loadTests = unittest.TestLoader().loadTestsFromTestCase
    # Initialise the suite with the first test.
    exec("suite = loadTests(%s)" % tests_list[0])
    # Shortcut
    addTests = suite.addTests
    # Add all tests to the suite.
    for i in range(1, len(tests_list)):
        exec("addTests(loadTests(%s))" % tests_list[i])
    return suite
# Set up the command line arguments
desc = "Script to run the Sahana Eden test suite."
parser = argparse.ArgumentParser(description = desc)

# -- Test selection arguments --------------------------------------------
parser.add_argument("-C", "--class",
                    help = "Name of class to run")
method_desc = """Name of method to run, this is used in conjunction with the
class argument or with the name of the class followed by the name of the method
separated with a period, class.method.
"""
parser.add_argument("-M",
                    "--method",
                    "--test",
                    help = method_desc)
parser.add_argument("-A",
                    "--auth",
                    help = "web2py default argument feed")

# -- Reporting arguments -------------------------------------------------
parser.add_argument("-V", "--verbose",
                    type = int,
                    default = 2,
                    help = "The level of verbose reporting")
parser.add_argument("--nohtml",
                    action='store_const',
                    const=True,
                    help = "Disable HTML reporting.")
parser.add_argument("--html-path",
                    help = "Path where the HTML report will be saved.",
                    default = "")
parser.add_argument("--html-name-date",
                    action='store_const',
                    const=True,
                    help = "Include just the date in the name of the HTML report.")
# Fixed typos in the original help text ("the the suite", missing "roles"
# even though it is an accepted choice below).
suite_desc = """This will execute a standard testing schedule. The valid values
are, smoke, roles, quick, complete and full. If a method or class option is
selected then the suite will be ignored.
The suite options can be described as follows:
smoke: This will run the broken link test
roles: This will run the roles tests
quick: This will run all the tests marked as essential
complete: This will run all tests except those marked as long
full: This will run all tests
"""
parser.add_argument("--suite",
                    help = suite_desc,
                    choices = ["smoke", "roles", "quick", "complete", "full"],
                    default = "quick")
parser.add_argument("--link-depth",
                    type = int,
                    default = 16,
                    help = "The recursive depth when looking for links")
# Fixed "The file will automatically rotated."
desc = """This will record the timings in a spreadsheet file. The data
will be accumulated over time holding a maximum of 100 results, The file will
automatically be rotated. This will hold details for another program to analyse.
The file will be written to the same location as the HTML report.
"""
parser.add_argument("-r",
                    "--record-timings",
                    action='store_const',
                    const=True,
                    help = desc)
up_desc = """The user name and password, separated by a /. Multiple user name
and passwords can be added by separating them with a comma. If multiple user
name and passwords are provided then the same test will be run sequentially
using the given user in each case.
"""
parser.add_argument("--user-password",
                    default = "admin@example.com/testing",
                    help = up_desc)
parser.add_argument("--keep-browser-open",
                    help = "Keep the browser open once the tests have finished running",
                    action='store_const',
                    const = True)
parser.add_argument("--browser",
                    help = "Set the browser to use (Firefox/Chrome)",
                    action = "store",
                    default = "Firefox")
desc = """Run the smoke tests even if debug is set to true.
With debug on it can add up to a second per link and given that a full run
of the smoke tests will include thousands of links the difference of having
this setting on can be measured in hours.
"""
parser.add_argument("--force-debug",
                    action='store_const',
                    const=True,
                    help = desc)
desc = """Set a threshold in seconds.
If in the smoke tests it takes longer than this to get the link then it will be reported.
"""
parser.add_argument("--threshold",
                    type = int,
                    default = 10,
                    help = desc)
desc = """Smoke test report only.
Don't actually run the smoke tests but rebuild the smoke test report.
"""
parser.add_argument("--smoke-report",
                    action='store_const',
                    const=True,
                    help = desc)
argsObj = parser.parse_args()
args = argsObj.__dict__

# Map the --browser choice (case-insensitive) onto the matching Selenium
# WebDriver class.
active_driver = {'firefox': webdriver.Firefox,
                 'chrome': webdriver.Chrome}[args['browser'].lower()]
# Read Settings
settings = current.deployment_settings
public_url = settings.get_base_public_url()
base_url = "%s/%s" % (public_url, current.request.application)
system_name = settings.get_system_name()
# Store these to be available to modules
config = current.test_config = Storage()
config.system_name = system_name
config.timeout = 5 # seconds
config.url = base_url
base_dir = os.path.join(os.getcwd(), "applications", current.request.application)
test_dir = os.path.join(base_dir, "modules", "tests")
config.base_dir = base_dir
# Selenium tests cannot interact with the navigate-away confirmation dialog,
# so refuse to run anything except smoke tests while it is enabled.
if not args["suite"] == "smoke" and settings.get_ui_navigate_away_confirm():
    print "The tests will fail unless you have settings.ui.navigate_away_confirm = False in models/000_config.py"
    exit()
# Smoke tests are impractically slow in debug mode unless explicitly forced.
if args["suite"] == "smoke" or args["suite"] == "complete":
    if settings.get_base_debug() and not args["force_debug"]:
        print "settings.base.debug is set to True in 000_config.py, either set it to False or use the --force-debug switch"
        exit()
config.record_timings = args["record_timings"]
if config.record_timings:
    # Timing spreadsheets are written alongside the HTML report.
    path = args["html_path"]
    config.record_timings_filename = os.path.join(path, "Sahana-Eden-record-timings.xls")
    config.record_summary_filename = os.path.join(path, "Sahana-Eden-record-summary.xls")
config.verbose = args["verbose"]
browser_open = False
# @todo test with invalid class and methods passed as CLA
if args["method"]:
    # Run a single named test method.
    browser = config.browser = active_driver()
    browser.implicitly_wait(config.timeout)
    browser_open = True
    if args["class"]:
        name = "%s.%s" % (args["class"], args["method"])
    else:
        name = args["method"]
    # NOTE(review): "name" built above is never used, and
    # globals()[args["class"]] raises KeyError when --class is omitted --
    # this looks like it was meant to load "name"; confirm before changing.
    suite = unittest.TestLoader().loadTestsFromName(args["method"],
                                                    globals()[args["class"]]
                                                    )
elif args["class"]:
    # Run a single Selenium test
    browser = config.browser = active_driver()
    browser.implicitly_wait(config.timeout)
    browser_open = True
    suite = unittest.TestLoader().loadTestsFromTestCase(globals()[args["class"]])
elif args["suite"] == "smoke":
    # Run Smoke tests
    try:
        from tests.smoke import *
        broken_links = BrokenLinkTest()
        broken_links.setReportOnly( args["smoke_report"])
        broken_links.setDepth(args["link_depth"])
        broken_links.setThreshold(args["threshold"])
        broken_links.setUser(args["user_password"])
        suite = unittest.TestSuite()
        suite.addTest(broken_links)
    except NameError as msg:
        # BrokenLinkTest is undefined if tests.smoke could not provide it.
        from s3 import s3_debug
        s3_debug("%s, unable to run the smoke tests." % msg)
        pass
elif args["suite"] == "roles":
    # Run Roles tests
    from tests.roles.test_roles import *
    suite = test_roles()
elif args["suite"] == "complete":
    # Run all Selenium Tests & Smoke Tests
    browser = config.browser = active_driver()
    browser.implicitly_wait(config.timeout)
    browser_open = True
    suite = loadAllTests()
    try:
        from tests.smoke import *
        broken_links = BrokenLinkTest()
        broken_links.setReportOnly( args["smoke_report"])
        broken_links.setDepth(args["link_depth"])
        broken_links.setThreshold(args["threshold"])
        broken_links.setUser(args["user_password"])
        suite.addTest(broken_links)
    except NameError as msg:
        # Same fallback as the smoke-only branch above.
        from s3 import s3_debug
        s3_debug("%s, unable to run the smoke tests." % msg)
        pass
else:
    # Run all Selenium Tests
    browser = config.browser = active_driver()
    browser.implicitly_wait(config.timeout)
    browser_open = True
    suite = loadAllTests()
# Run the suite: plain text output with --nohtml, otherwise an HTML report
# (falling back to text output if the HTML runner is unavailable).
config.html = False
if args["nohtml"]:
    unittest.TextTestRunner(verbosity=config.verbose).run(suite)
else:
    try:
        path = args["html_path"]
        if args["html_name_date"]:
            filename = "Sahana-Eden-%s.html" % current.request.now.date()
        else:
            filename = "Sahana-Eden-%s.html" % current.request.now
        # Windows compatibility
        filename = filename.replace(":", "-")
        fullname = os.path.join(path, filename)
        # Import before creating the report file so a missing HTML runner
        # does not leave an empty file behind.
        from tests.runner import EdenHTMLTestRunner
        # Use a context manager so the report file is always closed
        # (the original leaked the file handle).
        with open(fullname, "wb") as fp:
            config.html = True
            runner = EdenHTMLTestRunner(stream = fp,
                                        title = "Sahana Eden",
                                        verbosity = config.verbose,
                                        )
            runner.run(suite)
    except ImportError:
        config.html = False
        unittest.TextTestRunner(verbosity=config.verbose).run(suite)

# Cleanup
if browser_open and not args["keep_browser_open"]:
    browser.close()

# END =========================================================================
| mit |
neilLasrado/frappe | frappe/core/doctype/activity_log/feed.py | 5 | 2912 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: See license.txt
from __future__ import unicode_literals
import frappe
import frappe.permissions
from frappe.utils import get_fullname
from frappe import _
from frappe.core.doctype.activity_log.activity_log import add_authentication_log
from six import string_types
def update_feed(doc, method=None):
	"""Refresh the Activity Log ("feed") entry for *doc* on save.

	Skips patch/install/import runs, non-save actions, documents that opt
	out via flags.ignore_feed, Activity Log itself and single doctypes.
	Only documents implementing get_feed() produce an entry; any earlier
	entry for the same reference is deleted first.
	``method`` is accepted but unused (hook-style signature).
	"""
	if frappe.flags.in_patch or frappe.flags.in_install or frappe.flags.in_import:
		return
	if doc._action!="save" or doc.flags.ignore_feed:
		return
	if doc.doctype == "Activity Log" or doc.meta.issingle:
		return
	if hasattr(doc, "get_feed"):
		feed = doc.get_feed()
		if feed:
			# get_feed() may return either a plain subject string or a dict.
			if isinstance(feed, string_types):
				feed = {"subject": feed}
			feed = frappe._dict(feed)
			doctype = feed.doctype or doc.doctype
			name = feed.name or doc.name
			# delete earlier feed
			frappe.db.sql("""delete from `tabActivity Log`
				where
					reference_doctype=%s and reference_name=%s
					and link_doctype=%s""", (doctype, name,feed.link_doctype))
			frappe.get_doc({
				"doctype": "Activity Log",
				"reference_doctype": doctype,
				"reference_name": name,
				"subject": feed.subject,
				"full_name": get_fullname(doc.owner),
				"reference_owner": frappe.db.get_value(doctype, name, "owner"),
				"link_doctype": feed.link_doctype,
				"link_name": feed.link_name
			}).insert(ignore_permissions=True)
def login_feed(login_manager):
	"""Record an authentication log entry for a non-guest login."""
	user = login_manager.user
	if user == "Guest":
		return
	add_authentication_log(_("{0} logged in").format(get_fullname(user)), user)
def logout_feed(user, reason):
	"""Record an authentication log entry for a non-guest logout."""
	if not user or user == "Guest":
		return
	subject = _("{0} logged out: {1}").format(get_fullname(user), frappe.bold(reason))
	add_authentication_log(subject, user, operation="Logout")
def get_feed_match_conditions(user=None, force=True):
	"""Build an SQL condition string restricting `tabCommunication` rows to
	those *user* may see: rows they own or are the reference owner of, rows
	whose reference_doctype they can read, and (under user permissions)
	specifically permitted documents.

	``force`` is accepted but unused. Defaults to the session user.

	NOTE(review): conditions are assembled by string interpolation; values
	pass through frappe.db.escape, but parameterized queries would be
	safer -- confirm this matches the project's query conventions.
	"""
	if not user: user = frappe.session.user
	conditions = ['`tabCommunication`.owner="{user}" or `tabCommunication`.reference_owner="{user}"'.format(user=frappe.db.escape(user))]
	user_permissions = frappe.permissions.get_user_permissions(user)
	can_read = frappe.get_user().get_can_read()
	# Doctypes readable without any user-permission restriction.
	can_read_doctypes = ['"{}"'.format(doctype) for doctype in
		list(set(can_read) - set(list(user_permissions)))]
	if can_read_doctypes:
		conditions += ["""(`tabCommunication`.reference_doctype is null
			or `tabCommunication`.reference_doctype = ''
			or `tabCommunication`.reference_doctype in ({}))""".format(", ".join(can_read_doctypes))]
	if user_permissions:
		can_read_docs = []
		for doctype, obj in user_permissions.items():
			for n in obj.get("docs", []):
				# Match on "doctype|name" pairs via concat_ws below.
				can_read_docs.append('"{}|{}"'.format(doctype, frappe.db.escape(n)))
		if can_read_docs:
			conditions.append("concat_ws('|', `tabCommunication`.reference_doctype, `tabCommunication`.reference_name) in ({})".format(
				", ".join(can_read_docs)))
	return "(" + " or ".join(conditions) + ")"
| mit |
DomenicPuzio/incubator-metron | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/0.3.0/package/scripts/enrichment_commands.py | 1 | 7328 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, File
from resource_management.libraries.resources.hdfs_resource import HdfsResource
import metron_service
# Wrap major operations and functionality in this class
class EnrichmentCommands:
    """Major operations for the Metron enrichment service: yum repo setup,
    Kafka topic / HDFS directory / HBase table initialisation, and
    starting, stopping and restarting the enrichment Storm topology.
    """
    __params = None                # Ambari service params object (set in __init__)
    __enrichment_topology = None   # Storm topology name
    __enrichment_topic = None      # Kafka input topic name
    __configured = False           # True once the flag file exists

    def __init__(self, params):
        """Cache the params object and derive topology/topic names from it."""
        if params is None:
            raise ValueError("params argument is required for initialization")
        self.__params = params
        self.__enrichment_topology = params.metron_enrichment_topology
        self.__enrichment_topic = params.metron_enrichment_topic
        # Configured state is persisted as a flag file on disk.
        self.__configured = os.path.isfile(self.__params.enrichment_configured_flag_file)

    def is_configured(self):
        """Return True if the configured flag file existed at init time."""
        return self.__configured

    def set_configured(self):
        """Create the empty flag file that marks enrichment as configured."""
        File(self.__params.enrichment_configured_flag_file,
             content="",
             owner=self.__params.metron_user,
             mode=0775)

    def setup_repo(self):
        """Write /etc/yum.repos.d/metron.repo; for the 'local' repo type,
        build a yum repository under /localrepo first.
        Raises ValueError for an unsupported yum_repo_type.
        """
        def local_repo():
            Logger.info("Setting up local repo")
            Execute("yum -y install createrepo")
            Execute("createrepo /localrepo")
            Execute("chmod -R o-w+r /localrepo")

        def remote_repo():
            # Nothing to prepare; the baseurl below points at the remote repo.
            Logger.info('Using remote repo')

        yum_repo_types = {
            'local': local_repo,
            'remote': remote_repo
        }
        repo_type = self.__params.yum_repo_type
        if repo_type in yum_repo_types:
            yum_repo_types[repo_type]()
            Logger.info("Writing out repo file")
            repo_template = ("echo \"[METRON-0.3.0]\n"
                             "name=Metron 0.3.0 packages\n"
                             "baseurl={0}\n"
                             "gpgcheck=0\n"
                             "enabled=1\n\""
                             " > /etc/yum.repos.d/metron.repo")
            Execute(repo_template.format(self.__params.repo_url))
        else:
            raise ValueError("Unsupported repo type '{0}'".format(repo_type))

    def init_kafka_topics(self):
        """Create the enrichment Kafka topic (1 partition, replication 1)
        with a retention.bytes limit derived from metron_topic_retention (GB).
        """
        Logger.info('Creating Kafka topics')
        command_template = """{0}/kafka-topics.sh \
                                --zookeeper {1} \
                                --create \
                                --topic {2} \
                                --partitions {3} \
                                --replication-factor {4} \
                                --config retention.bytes={5}"""
        num_partitions = 1
        replication_factor = 1
        retention_gigabytes = int(self.__params.metron_topic_retention)
        retention_bytes = retention_gigabytes * 1024 * 1024 * 1024
        Logger.info("Creating topics for enrichment")
        Logger.info("Creating topic'{0}'".format(self.__enrichment_topic))
        Execute(command_template.format(self.__params.kafka_bin_dir,
                                        self.__params.zookeeper_quorum,
                                        self.__enrichment_topic,
                                        num_partitions,
                                        replication_factor,
                                        retention_bytes))
        Logger.info("Done creating Kafka topics")

    def init_hdfs_dir(self):
        """Ensure the enrichment apps directory exists in HDFS (mode 0775)."""
        self.__params.HdfsResource(self.__params.metron_apps_enrichment_dir,
                                   type="directory",
                                   action="create_on_execute",
                                   owner=self.__params.metron_user,
                                   group=self.__params.user_group,
                                   mode=0775,
                                   )

    def start_enrichment_topology(self):
        """Submit the enrichment topology to Storm via the start script."""
        Logger.info("Starting Metron enrichment topology: {0}".format(self.__enrichment_topology))
        start_cmd_template = """{0}/bin/start_enrichment_topology.sh \
                                    -s {1} \
                                    -z {2}"""
        Logger.info('Starting ' + self.__enrichment_topology)
        Execute(start_cmd_template.format(self.__params.metron_home, self.__enrichment_topology, self.__params.zookeeper_quorum))
        Logger.info('Finished starting enrichment topology')

    def stop_enrichment_topology(self):
        """Kill the enrichment topology via 'storm kill'."""
        Logger.info('Stopping ' + self.__enrichment_topology)
        stop_cmd = 'storm kill ' + self.__enrichment_topology
        Execute(stop_cmd)
        Logger.info('Done stopping enrichment topologies')

    def restart_enrichment_topology(self, env):
        """Stop the topology, wait (up to 3 x 40s) for Storm to clean it up,
        then start it again; aborts the start if it never deactivates.
        """
        Logger.info('Restarting the enrichment topologies')
        self.stop_enrichment_topology()
        # Wait for old topology to be cleaned up by Storm, before starting again.
        retries = 0
        topology_active = self.is_topology_active(env)
        while topology_active and retries < 3:
            Logger.info('Existing topology still active. Will wait and retry')
            time.sleep(40)
            topology_active = self.is_topology_active(env)
            retries += 1
        if not topology_active:
            self.start_enrichment_topology()
            Logger.info('Done restarting the enrichment topology')
        else:
            Logger.warning('Retries exhausted. Existing topology not cleaned up. Aborting topology start.')

    def is_topology_active(self, env):
        """Return True if the enrichment topology is currently listed by
        Storm in state ACTIVE or REBALANCING.
        """
        env.set_params(self.__params)
        active = True
        topologies = metron_service.get_running_topologies()
        is_running = False
        if self.__enrichment_topology in topologies:
            is_running = topologies[self.__enrichment_topology] in ['ACTIVE', 'REBALANCING']
        active &= is_running
        return active

    def create_hbase_tables(self):
        """Create the enrichment and threatintel HBase tables (each with its
        configured column family) via the hbase shell, retrying up to 3 times.
        """
        add_enrichment_cmd = "echo \"create '{0}','{1}'\" | hbase shell -n".format(self.__params.enrichment_table, self.__params.enrichment_cf)
        Execute(add_enrichment_cmd,
                tries=3,
                try_sleep=5,
                logoutput=False,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
                )

        add_threatintel_cmd = "echo \"create '{0}','{1}'\" | hbase shell -n".format(self.__params.threatintel_table, self.__params.threatintel_cf)
        Execute(add_threatintel_cmd,
                tries=3,
                try_sleep=5,
                logoutput=False,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
                )
| apache-2.0 |
astrophysicist87/iEBE-Plumberg | generateJobs_guillimin.py | 2 | 5935 | #! /usr/bin/env python
"""
This script duplicates the EBE-Node folder and generate a collection of pbs
files to be batch-submitted. For efficiency all codes inside EBE-Node should
be compiled.
"""
from sys import argv, exit
from os import makedirs, path, unlink
from shutil import copytree, copy, rmtree
from subprocess import call
from check_prerequisites import checkEnvironment, checkExecutables, greetings
# check argv
try:
    # set required parameters
    numberOfJobs = int(argv[1])
    numberOfEventsPerJob = int(argv[2])
    # set optional parameters
    argId = 2
    argId += 1
    if len(argv)>=argId+1: # set working folder
        workingFolder = path.abspath(argv[argId])
    else:
        workingFolder = path.abspath("./PlayGround")
    argId += 1
    if len(argv)>=argId+1: # folder to store results
        resultsFolder = path.abspath(argv[argId])
    else:
        resultsFolder = path.abspath("./RESULTS")
    argId += 1
    if len(argv)>=argId+1: # set wall time
        walltime = argv[argId]
    else:
        walltime = "%d:00:00" % (1.5*numberOfEventsPerJob) # 1.5 hours per event
    argId += 1
    if len(argv)>=argId+1: # whether to compress final results folder
        compressResultsFolderAnswer = argv[argId]
    else:
        compressResultsFolderAnswer = "yes"
except Exception:
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are not
    # swallowed; any parsing error (missing/invalid argument) prints usage.
    print('Usage: generateJobs.py number_of_jobs number_of_events_per_job [working_folder="./PlayGround"] [results_folder="./RESULTS"] [walltime="03:00:00" (per event)] [compress_results_folder="yes"]')
    exit()
# save config files
# Persist this run's settings so other scripts can import them later.
open("saved_configs.py", "w").writelines("""
iEbeConfigs = {
    "number_of_jobs" : %d,
    "number_of_events_per_job" : %d,
    "working_folder" : "%s",
    "results_folder" : "%s",
    "walltime" : "%s",
    "compress_results_folder" : "%s",
}
""" % (numberOfJobs, numberOfEventsPerJob, workingFolder, resultsFolder, walltime, compressResultsFolderAnswer)
)

# define colors (ANSI escape sequences for terminal output)
purple = "\033[95m"
green = "\033[92m"
blue = "\033[94m"
yellow = "\033[93m"
red = "\033[91m"
normal = "\033[0m"

# print welcome message
print(yellow)
greetings(3)
print(purple + "\n" + "-"*80 + "\n>>>>> Welcome to the event generator! <<<<<\n" + "-"*80 + normal)

# check prerequisites
print(green + "\n>>>>> Checking for required libraries <<<<<\n" + normal)
if not checkEnvironment():
    print("Prerequisites not met. Install the required library first please. Aborting.")
    exit()

# check existence of executables
print(green + "\n>>>>> Checking for existence of executables <<<<<\n" + normal)
if not checkExecutables():
    print("Not all executables can be generated. Aborting.")
    exit()

# clean up check_prerequisites.pyc
if path.exists("check_prerequisites.pyc"): unlink("check_prerequisites.pyc")

# generate events
print(green + "\n>>>>> Generating events <<<<<\n" + normal)

# prepare directories (results kept, working folder recreated from scratch)
if not path.exists(resultsFolder): makedirs(resultsFolder)
if path.exists(workingFolder): rmtree(workingFolder)
makedirs(workingFolder)
ebeNodeFolder = "EBE-Node"
crankFolderName = "crank"
crankFolder = path.join(ebeNodeFolder, crankFolderName)

# copy parameter file into the crank folder
copy("ParameterDict.py", crankFolder)

# backup parameter files to the result folder
copy(path.join(crankFolder, "SequentialEventDriver.py"), resultsFolder)
copy(path.join(crankFolder, "ParameterDict.py"), resultsFolder)
# duplicate EBE-Node folder to working directory, write .pbs file
# One job folder + one PBS submission script per job.
for i in range(1, numberOfJobs+1):
    targetWorkingFolder = path.join(workingFolder, "job-%d" % i)
    # copy folder
    copytree(ebeNodeFolder, targetWorkingFolder)
    open(path.join(targetWorkingFolder, "job-%d.pbs" % i), "w").write(
"""
#!/usr/bin/env bash
#PBS -N iEBE-%d
#PBS -l walltime=%s
#PBS -l nodes=1:ppn=1
#PBS -A cqn-654-ad
#PBS -q sw
#PBS -S /bin/bash
#PBS -d %s
(cd %s
    ulimit -n 1000
    python ./SequentialEventDriver_shell.py %d 1> RunRecord.txt 2> ErrorRecord.txt
    cp RunRecord.txt ErrorRecord.txt ../finalResults/
)
mv ./finalResults %s/job-%d
""" % (i, walltime, targetWorkingFolder, crankFolderName, numberOfEventsPerJob, resultsFolder, i)
    )
    # Optionally append a step that zips the job's results folder in place.
    if compressResultsFolderAnswer == "yes":
        open(path.join(targetWorkingFolder, "job-%d.pbs" % i), "a").write(
"""
(cd %s
    zip -r -m -q job-%d.zip job-%d
)
""" % (resultsFolder, i, i)
        )
# add a data collector watcher
# The watcher job combines the zipped job results as they appear.
if compressResultsFolderAnswer == "yes":
    EbeCollectorFolder = "EbeCollector"
    utilitiesFolder = "utilities"
    watcherDirectory = path.join(workingFolder, "watcher")
    makedirs(path.join(watcherDirectory, ebeNodeFolder))
    copytree(path.join(ebeNodeFolder, EbeCollectorFolder), path.join(watcherDirectory, ebeNodeFolder, EbeCollectorFolder))
    copytree(utilitiesFolder, path.join(watcherDirectory, utilitiesFolder))
    open(path.join(watcherDirectory, "watcher.pbs"), "w").write(
"""
#!/usr/bin/env bash
#PBS -N watcher
#PBS -l walltime=%s
#PBS -l nodes=1:ppn=1
#PBS -A cqn-654-ad
#PBS -q sw
#PBS -S /bin/bash
#PBS -d %s
(cd %s
    python autoZippedResultsCombiner.py %s %d "job-(\d*).zip" 60 1> WatcherReport.txt
    mv WatcherReport.txt %s
)
""" % (walltime, watcherDirectory, utilitiesFolder, resultsFolder, numberOfJobs, resultsFolder)
    )

# Distribute pre-generated initial condition files when configured to do so.
import ParameterDict
initial_condition_type = (
    ParameterDict.initial_condition_control['initial_condition_type'])
if initial_condition_type == 'pre-generated':
    initial_file_path = (ParameterDict.initial_condition_control[
        'pre-generated_initial_file_path'])
    call("./copy_pre_generated_initial_conditions.sh %d %d %s %s"
         % (numberOfJobs, numberOfEventsPerJob, initial_file_path,
            workingFolder), shell=True)

print("Jobs generated. Submit them using submitJobs scripts.")

###########################################################################
# 05-23-2013:
#   Bugfix: "cd %s" added to the pbs files.
| gpl-3.0 |
rhndg/openedx | common/test/acceptance/tests/lms/test_lms_dashboard_search.py | 49 | 7031 | """
Test dashboard search
"""
import os
import json
from bok_choy.web_app_test import WebAppTest
from ..helpers import generate_course_key
from ...pages.common.logout import LogoutPage
from ...pages.studio.utils import add_html_component, click_css, type_in_codemirror
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.container import ContainerPage
from ...pages.lms.dashboard_search import DashboardSearchPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class DashboardSearchTest(WebAppTest):
    """
    Test dashboard search (bok_choy acceptance test: creates three courses,
    enrolls a student in two of them, and checks that search results only
    cover published content from enrolled courses).
    """
    USERNAME = 'STUDENT_TESTER'
    EMAIL = 'student101@example.com'
    STAFF_USERNAME = "STAFF_TESTER"
    STAFF_EMAIL = "staff101@example.com"
    # Search index file; created in setUp and removed in tearDown.
    TEST_INDEX_FILENAME = "test_root/index_file.dat"

    def setUp(self):
        """
        Create the search page and courses to search.
        """
        # create test file in which index for this test will live
        with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
            json.dump({}, index_file)

        super(DashboardSearchTest, self).setUp()
        self.dashboard = DashboardSearchPage(self.browser)

        # Three courses sharing org/number; the student is enrolled in A & B
        # only (see test_search).
        self.courses = {
            'A': {
                'org': 'test_org',
                'number': self.unique_id,
                'run': 'test_run_A',
                'display_name': 'Test Course A '
            },
            'B': {
                'org': 'test_org',
                'number': self.unique_id,
                'run': 'test_run_B',
                'display_name': 'Test Course B '
            },
            'C': {
                'org': 'test_org',
                'number': self.unique_id,
                'run': 'test_run_C',
                'display_name': 'Test Course C '
            }
        }

        # generate course fixtures and outline pages
        self.course_outlines = {}
        self.course_fixtures = {}
        for key, course_info in self.courses.iteritems():
            course_outline = CourseOutlinePage(
                self.browser,
                course_info['org'],
                course_info['number'],
                course_info['run']
            )

            course_fix = CourseFixture(
                course_info['org'],
                course_info['number'],
                course_info['run'],
                course_info['display_name']
            )

            # Section 1 holds the searchable problem; Section 2 is empty.
            course_fix.add_children(
                XBlockFixtureDesc('chapter', 'Section 1').add_children(
                    XBlockFixtureDesc('sequential', 'Subsection 1').add_children(
                        XBlockFixtureDesc('problem', 'dashboard search')
                    )
                )
            ).add_children(
                XBlockFixtureDesc('chapter', 'Section 2').add_children(
                    XBlockFixtureDesc('sequential', 'Subsection 2')
                )
            ).install()

            self.course_outlines[key] = course_outline
            self.course_fixtures[key] = course_fix

    def tearDown(self):
        """
        Remove index file
        """
        super(DashboardSearchTest, self).tearDown()
        os.remove(self.TEST_INDEX_FILENAME)

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, username=username, email=email, staff=staff).visit()

    def _studio_add_content(self, course_outline, html_content):
        """
        Add content to first section on studio course page.
        """
        # create a unit in course outline
        course_outline.visit()
        subsection = course_outline.section_at(0).subsection_at(0)
        subsection.expand_subsection()
        subsection.add_unit()

        # go to unit and create an HTML component and save (not publish)
        unit_page = ContainerPage(self.browser, None)
        unit_page.wait_for_page()
        add_html_component(unit_page, 0)
        unit_page.wait_for_element_presence('.edit-button', 'Edit button is visible')
        click_css(unit_page, '.edit-button', 0, require_notification=False)
        unit_page.wait_for_element_visibility('.modal-editor', 'Modal editor is visible')
        type_in_codemirror(unit_page, 0, html_content)
        click_css(unit_page, '.action-save', 0)

    def _studio_publish_content(self, course_outline):
        """
        Publish content in first section on studio course page.
        """
        course_outline.visit()
        subsection = course_outline.section_at(0).subsection_at(0)
        subsection.expand_subsection()
        unit = subsection.unit_at(0)
        unit.publish()

    def test_page_existence(self):
        """
        Make sure that the page exists.
        """
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self.dashboard.visit()

    def test_search(self):
        """
        Make sure that you can search courses.
        """
        search_string = "dashboard"
        html_content = "dashboard search"

        # Enroll student in courses A & B, but not C
        for course_info in [self.courses['A'], self.courses['B']]:
            course_key = generate_course_key(
                course_info['org'],
                course_info['number'],
                course_info['run']
            )
            AutoAuthPage(
                self.browser,
                username=self.USERNAME,
                email=self.EMAIL,
                course_id=course_key
            ).visit()

        # Create content in studio without publishing.
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        self._studio_add_content(self.course_outlines['A'], html_content)
        self._studio_add_content(self.course_outlines['B'], html_content)
        self._studio_add_content(self.course_outlines['C'], html_content)

        # Do a search, there should be no results shown.
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self.dashboard.visit()
        self.dashboard.search_for_term(search_string)
        assert search_string not in self.dashboard.search_results.html[0]

        # Publish in studio to trigger indexing.
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        self._studio_publish_content(self.course_outlines['A'])
        self._studio_publish_content(self.course_outlines['B'])
        self._studio_publish_content(self.course_outlines['C'])

        # Do the search again, this time we expect results from courses A & B, but not C
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self.dashboard.visit()
        self.dashboard.search_for_term(search_string)
        assert self.dashboard.search_results.html[0].count(search_string) == 2
        assert self.dashboard.search_results.html[0].count(self.courses['A']['display_name']) == 1
        assert self.dashboard.search_results.html[0].count(self.courses['B']['display_name']) == 1
| agpl-3.0 |
TheTypoMaster/chromium-crosswalk | tools/telemetry/telemetry/internal/browser/user_agent.py | 36 | 1195 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
UA_TYPE_MAPPING = {
'desktop':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/40.0.2194.2 Safari/537.36',
'mobile':
'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) '
'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/40.0.2194.2 Mobile '
'Safari/535.36',
'tablet':
'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus 7 Build/IMM76B) '
'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/40.0.2194.2 '
'Safari/535.36',
'tablet_10_inch':
'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus 10 Build/IMM76B) '
'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/40.0.2194.2 '
'Safari/535.36',
}
def GetChromeUserAgentArgumentFromType(user_agent_type):
  """Returns the --user-agent command-line argument for a user agent type.

  Returns an empty list when no type is given, so the result can always be
  appended to a browser argument list.

  This is derived from:
  https://developers.google.com/chrome/mobile/docs/user-agent
  """
  if not user_agent_type:
    return []
  return ['--user-agent=%s' % UA_TYPE_MAPPING[user_agent_type]]
| bsd-3-clause |
SaschaMester/delicium | third_party/mojo/src/mojo/public/third_party/jinja2/constants.py | 1169 | 1626 | # -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
| bsd-3-clause |
MrCubanfrog/NorDB | nordb/database/sql2response.py | 1 | 9724 | """
This module contains all information for getting the response information out
of the database.
Functions and Classes
---------------------
"""
import datetime
import time
from nordb.core import usernameUtilities
from nordb.nordic.response import FapResponse, PazResponse
# SQL templates used by the functions below. Single-id queries use a
# positional %s placeholder; the bulk variants take a named
# %(response_ids)s parameter that must be a non-empty tuple of ids.

# One response row by primary key.
SELECT_RESPONSE_ID = (
    "SELECT "
    " file_name, source, stage, description, "
    " format, author, id "
    "FROM "
    " response "
    "WHERE "
    " id = %s"
)

# fap rows for one response, ordered by frequency.
SELECT_FAP = (
    "SELECT "
    " frequency, amplitude, phase, amplitude_error, phase_error "
    "FROM "
    " fap, fap_response "
    "WHERE "
    " response_id = %s AND "
    " fap.fap_id = fap_response.id "
    "ORDER BY "
    " frequency "
)

# Scale factor of the paz response attached to one response.
SELECT_PAZ = (
    "SELECT "
    " scale_factor "
    "FROM "
    " paz_response, response "
    "WHERE "
    " paz_response.response_id = response.id "
    "AND "
    " response.id = %s"
)

# Poles of one response's paz response.
SELECT_POLES = (
    "SELECT "
    " real, imag, real_error, imag_error "
    "FROM "
    " pole, paz_response, response "
    "WHERE "
    " response.id = %s AND "
    " pole.paz_id = paz_response.id AND "
    " paz_response.response_id = response.id "
    "ORDER BY "
    " real "
)

# Zeros of one response's paz response.
SELECT_ZEROS = (
    "SELECT "
    " real, imag, real_error, imag_error "
    "FROM "
    " zero, paz_response, response "
    "WHERE "
    " response.id = %s AND "
    " zero.paz_id = paz_response.id AND "
    " paz_response.response_id = response.id "
    "ORDER BY "
    " ABS(real) DESC"
)

# Resolve a response id from station code + channel code + a timestamp
# that must fall inside the sensor's [time, endtime] window (open-ended
# when endtime is NULL).
SELECT_RESPONSE = (
    "SELECT "
    " response.id "
    "FROM "
    " response, instrument, sitechan, station, sensor "
    "WHERE "
    " response.id = instrument.response_id AND "
    " instrument.id = sensor.instrument_id AND "
    " sensor.sitechan_id = sitechan.id AND "
    " sitechan.station_id = station.id AND "
    " station_code = %s AND "
    " sitechan.channel_code = %s AND "
    " ("
    " (sensor.time <= %s AND sensor.endtime >= %s) "
    " OR "
    " (sensor.time <= %s AND sensor.endtime IS NULL) "
    " )"
)

# Bulk: response rows for a tuple of ids.
SELECT_RESPONSES = (
    "SELECT "
    " response.file_name, response.source, "
    " response.stage, response.description, "
    " response.format, response.author, response.id "
    "FROM "
    " response "
    "WHERE "
    " response.id IN %(response_ids)s"
)

# Bulk: fap rows for many responses; response_id is the last column so
# callers can match rows back to their response.
SELECT_FAPS = (
    "SELECT "
    " frequency, amplitude, phase, amplitude_error, phase_error, "
    " response_id "
    "FROM "
    " fap, fap_response "
    "WHERE "
    " response_id IN %(response_ids)s AND "
    " fap.fap_id = fap_response.id "
    "ORDER BY "
    " frequency "
)

# Bulk: scale factors for many responses.
SELECT_PAZS = (
    "SELECT "
    " scale_factor, response_id "
    "FROM "
    " paz_response, response "
    "WHERE "
    " paz_response.response_id = response.id "
    "AND "
    " response.id IN %(response_ids)s"
)

# Bulk: poles for many responses.
SELECT_ALL_POLES = (
    "SELECT "
    " real, imag, real_error, imag_error, response_id "
    "FROM "
    " pole, paz_response, response "
    "WHERE "
    " response.id IN %(response_ids)s AND "
    " pole.paz_id = paz_response.id AND "
    " paz_response.response_id = response.id "
    "ORDER BY "
    " real "
)

# Bulk: zeros for many responses.
SELECT_ALL_ZEROS = (
    "SELECT "
    " real, imag, real_error, imag_error, response_id "
    "FROM "
    " zero, paz_response, response "
    "WHERE "
    " response.id IN %(response_ids)s AND "
    " zero.paz_id = paz_response.id AND "
    " paz_response.response_id = response.id "
    "ORDER BY "
    " real"
)
def responses2instruments(instruments, db_conn = None):
    """
    Function for attaching responses to instrument information.

    Bulk-fetches all referenced response rows plus their fap/paz detail
    rows, then matches them back to each instrument by response id and
    sets ``instrument.response``.

    :param list instruments: List of instruments to which the responses
        will be attached to
    :param db_conn: optional already-open database connection. When None,
        a new connection is opened here and closed before returning.
    """
    if db_conn is None:
        conn = usernameUtilities.log2nordb()
    else:
        conn = db_conn
    response_ids = []
    for instrument in instruments:
        response_ids.append(instrument.response_id)
    response_ids = tuple(response_ids)
    if len(response_ids) == 0:
        # Nothing to attach; close the connection only if we opened it.
        if db_conn is None:
            conn.close()
        return
    cur = conn.cursor()
    cur.execute(SELECT_RESPONSES, {'response_ids':response_ids})
    ans = cur.fetchall()
    responses = []  # NOTE(review): unused accumulator, kept as-is
    # Narrow the id tuple to the responses that actually exist.
    response_ids = []
    for a in ans:
        response_ids.append(a[-1])
    response_ids = tuple(response_ids)
    # Bulk-fetch detail rows; each row carries response_id as its last
    # column, which is what the matching below keys on.
    cur.execute(SELECT_FAPS, {'response_ids':response_ids})
    fap_resp = cur.fetchall()
    cur.execute(SELECT_PAZS, {'response_ids':response_ids})
    paz_resp = cur.fetchall()
    cur.execute(SELECT_ALL_POLES, {'response_ids':response_ids})
    poles_resp = cur.fetchall()
    cur.execute(SELECT_ALL_ZEROS, {'response_ids':response_ids})
    zeros_resp = cur.fetchall()
    for resp in ans:
        for instrument in instruments:
            if instrument.response_id == resp[-1]:
                # resp[4] is the response 'format' column: 'fap' or 'paz'.
                if resp[4] == 'fap':
                    faps = []
                    for f in fap_resp:
                        if f[-1] == resp[-1]:
                            # Strip the trailing response_id match column.
                            faps.append(f[:-1])
                    instrument.response = FapResponse(resp, faps)
                elif resp[4] == 'paz':
                    poles = []
                    zeros = []
                    for pole in poles_resp:
                        if pole[-1] == resp[-1]:
                            poles.append(pole[:-1])
                    for zero in zeros_resp:
                        if zero[-1] == resp[-1]:
                            zeros.append(zero[:-1])
                    for paz in paz_resp:
                        if paz[-1] == resp[-1]:
                            instrument.response = PazResponse(resp,
                                                              paz[0],
                                                              poles,
                                                              zeros)
                            break
    if db_conn is None:
        conn.close()
def getResponseFromDB(response_id, db_conn = None):
    """
    Function for reading a response from database by id.

    :param int response_id: id of the Response wanted
    :param db_conn: optional already-open database connection. When None,
        a new connection is opened here and closed before returning.
    :returns: :class:`PazResponse` or :class:`FapResponse` object, or
        None when no response with ``response_id`` exists
    """
    if db_conn is None:
        conn = usernameUtilities.log2nordb()
    else:
        conn = db_conn
    # try/finally guarantees a connection opened by this function is
    # closed on every exit path. Previously the early `return None`
    # below leaked the connection.
    try:
        cur = conn.cursor()
        response = None
        cur.execute(SELECT_RESPONSE_ID, (response_id, ))
        response_data = cur.fetchone()
        if response_data is None:
            return None
        if response_data[FapResponse.RESPONSE_FORMAT] == 'fap':
            cur.execute(SELECT_FAP, (response_id,))
            fap = cur.fetchall()
            response = FapResponse(response_data, fap)
        elif response_data[FapResponse.RESPONSE_FORMAT] == 'paz':
            cur.execute(SELECT_PAZ, (response_id,))
            scale_factor = cur.fetchone()[0]
            cur.execute(SELECT_POLES, (response_id,))
            poles = cur.fetchall()
            cur.execute(SELECT_ZEROS, (response_id,))
            zeros = cur.fetchall()
            response = PazResponse(response_data, scale_factor, poles, zeros)
        return response
    finally:
        # Close the connection only if this function opened it.
        if db_conn is None:
            conn.close()
def getResponse(station, channel, date=None, db_conn=None):
    """
    Function for getting response information from the database.

    :param string station: Station code of the station
    :param string channel: Channel code of the channel
    :param datetime date: date for which you want the response; defaults
        to the current time. (The previous default
        ``datetime.datetime.now()`` was evaluated once at import time, so
        long-running processes silently queried with a stale timestamp.)
    :param db_conn: optional already-open database connection. When None,
        a new connection is opened here and closed before returning.
    :returns: Response object, or None when no matching sensor epoch exists
    """
    if date is None:
        date = datetime.datetime.now()
    if db_conn is None:
        conn = usernameUtilities.log2nordb()
    else:
        conn = db_conn
    # try/finally guarantees a connection opened by this function is
    # closed on every exit path (the early `return None` used to leak it).
    try:
        cur = conn.cursor()
        timestamp = time.mktime(date.timetuple())
        cur.execute(SELECT_RESPONSE, (station, channel, timestamp, timestamp,
                                      timestamp))
        resp_id = cur.fetchone()
        if resp_id is None:
            return None
        return getResponseFromDB(resp_id[0], conn)
    finally:
        if db_conn is None:
            conn.close()
| mit |
Kazade/NeHe-Website | google_appengine/lib/django-1.2/docs/_ext/djangodocs.py | 39 | 9475 | """
Sphinx plugins for Django documentation.
"""
import os
import re
from docutils import nodes, transforms
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
json = None
from sphinx import addnodes, roles
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
    """Register Django's custom roles, directives, transform and builder.

    Called by Sphinx when this extension is loaded.
    """
    # Simple cross-reference types: each registers a directive/role pair
    # plus an index entry template.
    app.add_crossref_type(
        directivename = "setting",
        rolename = "setting",
        indextemplate = "pair: %s; setting",
    )
    app.add_crossref_type(
        directivename = "templatetag",
        rolename = "ttag",
        indextemplate = "pair: %s; template tag"
    )
    app.add_crossref_type(
        directivename = "templatefilter",
        rolename = "tfilter",
        indextemplate = "pair: %s; template filter"
    )
    app.add_crossref_type(
        directivename = "fieldlookup",
        rolename = "lookup",
        indextemplate = "pair: %s; field lookup type",
    )
    # django-admin entries need custom signature parsing (see the
    # parse_django_admin* helpers below).
    app.add_description_unit(
        directivename = "django-admin",
        rolename = "djadmin",
        indextemplate = "pair: %s; django-admin command",
        parse_node = parse_django_admin_node,
    )
    app.add_description_unit(
        directivename = "django-admin-option",
        rolename = "djadminopt",
        indextemplate = "pair: %s; django-admin command-line option",
        parse_node = parse_django_adminopt_node,
    )
    # Version number that VersionDirective renders as "Development version".
    app.add_config_value('django_next_version', '0.0', True)
    app.add_directive('versionadded', VersionDirective)
    app.add_directive('versionchanged', VersionDirective)
    app.add_transform(SuppressBlockquotes)
    app.add_builder(DjangoStandaloneHTMLBuilder)
class VersionDirective(Directive):
    """Implements the ``versionadded``/``versionchanged`` directives.

    First argument: the version. Optional second argument: an inline
    description. When the version matches the ``django_next_version``
    config value, the annotation is labelled "Development version"
    instead of linking to release notes.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        env = self.state.document.settings.env
        arg0 = self.arguments[0]
        is_nextversion = env.config.django_next_version == arg0
        ret = []
        node = addnodes.versionmodified()
        ret.append(node)
        if not is_nextversion:
            if len(self.arguments) == 1:
                # No description given: link to the release notes page.
                linktext = 'Please, see the release notes </releases/%s>' % (arg0)
                try:
                    xrefs = roles.XRefRole()('doc', linktext, linktext, self.lineno, self.state) # Sphinx >= 1.0
                except AttributeError:
                    xrefs = roles.xfileref_role('doc', linktext, linktext, self.lineno, self.state) # Sphinx < 1.0
                node.extend(xrefs[0])
            node['version'] = arg0
        else:
            node['version'] = "Development version"
        node['type'] = self.name
        if len(self.arguments) == 2:
            # Parse the inline description that follows the version.
            inodes, messages = self.state.inline_text(self.arguments[1], self.lineno+1)
            node.extend(inodes)
            if self.content:
                self.state.nested_parse(self.content, self.content_offset, node)
            ret = ret + messages
        env.note_versionchange(node['type'], node['version'], node, self.lineno)
        return ret
class SuppressBlockquotes(transforms.Transform):
    """
    Remove the default blockquotes that encase indented list, tables, etc.
    """
    default_priority = 300

    suppress_blockquote_child_nodes = (
        nodes.bullet_list,
        nodes.enumerated_list,
        nodes.definition_list,
        nodes.literal_block,
        nodes.doctest_block,
        nodes.line_block,
        nodes.table
    )

    def apply(self):
        # Unwrap any blockquote whose sole child is one of the node types
        # above, replacing the wrapper with the child itself.
        for quote in self.document.traverse(nodes.block_quote):
            children = quote.children
            if len(children) != 1:
                continue
            only_child = children[0]
            if isinstance(only_child, self.suppress_blockquote_child_nodes):
                quote.replace_self(only_child)
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
    """
    Django-specific reST to HTML tweaks.
    """

    # Don't use border=1, which docutils does by default.
    def visit_table(self, node):
        self.body.append(self.starttag(node, 'table', CLASS='docutils'))

    # <big>? Really?
    def visit_desc_parameterlist(self, node):
        self.body.append('(')
        self.first_param = 1

    def depart_desc_parameterlist(self, node):
        self.body.append(')')

    #
    # Don't apply smartypants to literal blocks
    #
    def visit_literal_block(self, node):
        # no_smarty is a counter, so nested visits balance out.
        self.no_smarty += 1
        SmartyPantsHTMLTranslator.visit_literal_block(self, node)

    def depart_literal_block(self, node):
        SmartyPantsHTMLTranslator.depart_literal_block(self, node)
        self.no_smarty -= 1

    #
    # Turn the "new in version" stuff (versionadded/versionchanged) into a
    # better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious that I'd like.
    #
    # FIXME: these messages are all hardcoded in English. We need to change
    # that to accomodate other language docs, but I can't work out how to make
    # that work.
    #
    version_text = {
        'deprecated': 'Deprecated in Django %s',
        'versionchanged': 'Changed in Django %s',
        'versionadded': 'New in Django %s',
    }

    def visit_versionmodified(self, node):
        self.body.append(
            self.starttag(node, 'div', CLASS=node['type'])
        )
        # ":" when the node has descriptive children, "." when it stands alone.
        title = "%s%s" % (
            self.version_text[node['type']] % node['version'],
            len(node) and ":" or "."
        )
        self.body.append('<span class="title">%s</span> ' % title)

    def depart_versionmodified(self, node):
        self.body.append("</div>\n")

    # Give each section a unique ID -- nice for custom CSS hooks
    def visit_section(self, node):
        # Prefix ids with "s-" just for the duration of the super call,
        # then restore the originals so other consumers are unaffected.
        old_ids = node.get('ids', [])
        node['ids'] = ['s-' + i for i in old_ids]
        node['ids'].extend(old_ids)
        SmartyPantsHTMLTranslator.visit_section(self, node)
        node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
    """Render a django-admin command signature and remember its name.

    The command name (first word of the signature) is stashed on the build
    environment so related option directives can refer to it.
    """
    env._django_curr_admin_command = sig.split(' ', 1)[0]
    title = "django-admin.py %s" % sig
    signode += addnodes.desc_name(title, title)
    return sig
def parse_django_adminopt_node(env, sig, signode):
    """A copy of sphinx.directives.CmdoptionDesc.parse_signature().

    Renders each option (name + args) into the signature node and returns
    the first option name. Raises ValueError when nothing matched.
    """
    try:
        from sphinx.domains.std import option_desc_re  # Sphinx >= 1.0
    except ImportError:
        from sphinx.directives.desc import option_desc_re  # Sphinx < 1.0
    count = 0
    firstname = ''
    # Try the '--'-prefixed option pattern first; fall back to the bare-word
    # pattern only if it matched nothing. The two loops used to be duplicated
    # verbatim; they now share one body.
    for pattern in (option_desc_re, simple_option_desc_re):
        for m in pattern.finditer(sig):
            optname, args = m.groups()
            if count:
                signode += addnodes.desc_addname(', ', ', ')
            signode += addnodes.desc_name(optname, optname)
            signode += addnodes.desc_addname(args, args)
            if not count:
                firstname = optname
            count += 1
        if count:
            break
    if not firstname:
        raise ValueError
    return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
    """
    Subclass to add some extra things we need.
    """
    name = 'djangohtml'

    def finish(self):
        """Write templatebuiltins.js listing built-in template tags/filters."""
        super(DjangoStandaloneHTMLBuilder, self).finish()
        if json is None:
            self.warn("cannot create templatebuiltins.js due to missing simplejson dependency")
            return
        self.info(bold("writing templatebuiltins.js..."))
        try:
            # Sphinx < 1.0: cross-references live in env.reftargets,
            # keyed (type, name) -> (docname, anchor).
            xrefs = self.env.reftargets.items()
            templatebuiltins = dict([('ttags', [n for ((t, n), (l, a)) in xrefs
                                                if t == 'ttag' and
                                                l == 'ref/templates/builtins']),
                                     ('tfilters', [n for ((t, n), (l, a)) in xrefs
                                                   if t == 'tfilter' and
                                                   l == 'ref/templates/builtins'])])
        except AttributeError:
            # Sphinx >= 1.0: same data lives in the "std" domain.
            xrefs = self.env.domaindata["std"]["objects"]
            templatebuiltins = dict([('ttags', [n for ((t, n), (l, a)) in xrefs.items()
                                                if t == 'templatetag' and
                                                l == 'ref/templates/builtins']),
                                     ('tfilters', [n for ((t, n), (l, a)) in xrefs.items()
                                                   if t == 'templatefilter' and
                                                   # Bug fix: this condition previously read
                                                   # t == 'ref/templates/builtins', which is
                                                   # always false (t is 'templatefilter'),
                                                   # so 'tfilters' was always empty here.
                                                   l == 'ref/templates/builtins'])])
        outfilename = os.path.join(self.outdir, "templatebuiltins.js")
        f = open(outfilename, 'wb')
        f.write('var django_template_builtins = ')
        json.dump(templatebuiltins, f)
        f.write(';\n')
        f.close()
| bsd-3-clause |
safwanrahman/readthedocs.org | readthedocs/payments/forms.py | 2 | 6647 | """Payment forms."""
from __future__ import absolute_import
from builtins import str
from builtins import object
import logging
from stripe.resource import Customer, Charge
from stripe.error import InvalidRequestError
from django import forms
from django.utils.translation import ugettext_lazy as _
from .utils import stripe
log = logging.getLogger(__name__)
class StripeResourceMixin(object):
    """Stripe actions for resources, available as a Form mixin class."""

    def ensure_stripe_resource(self, resource, attrs):
        """Retrieve and update the resource named by ``attrs['id']``,
        creating it instead when there is no id or Stripe doesn't know it.
        """
        try:
            instance = resource.retrieve(attrs['id'])
        except (KeyError, InvalidRequestError):
            # Missing or unknown id: strip it (if present) and create anew.
            attrs.pop('id', None)
            return resource.create(**attrs)
        for key, val in list(attrs.items()):
            setattr(instance, key, val)
        instance.save()
        return instance

    def get_customer_kwargs(self):
        """Return kwargs for the customer resource; subclasses must override."""
        raise NotImplementedError

    def get_customer(self):
        """Return the ensured (created or updated) Stripe customer."""
        return self.ensure_stripe_resource(
            resource=Customer, attrs=self.get_customer_kwargs())

    def get_subscription_kwargs(self):
        """Return kwargs for the subscription resource; subclasses must override."""
        raise NotImplementedError

    def get_subscription(self):
        """Return the ensured subscription of the ensured customer."""
        customer = self.get_customer()
        return self.ensure_stripe_resource(
            resource=customer.subscriptions,
            attrs=self.get_subscription_kwargs())

    def get_charge_kwargs(self):
        """Return kwargs for the charge resource; subclasses must override."""
        raise NotImplementedError

    def get_charge(self):
        """Return the ensured (created or updated) Stripe charge."""
        return self.ensure_stripe_resource(
            resource=Charge, attrs=self.get_charge_kwargs())
class StripeModelForm(forms.ModelForm):
    """
    Payment form base for Stripe interaction.

    Use this as a base class for payment forms. It includes the necessary fields
    for card input and manipulates the Knockout field data bindings correctly.

    :cvar stripe_token: Stripe token passed from Stripe.js
    :cvar cc_number: Credit card number field, used only by Stripe.js
    :cvar cc_expiry: Credit card expiry field, used only by Stripe.js
    :cvar cc_cvv: Credit card security code field, used only by Stripe.js
    """

    # Stripe token input from Stripe.js
    stripe_token = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={
            'data-bind': 'valueInit: stripe_token',
        })
    )

    # Fields used for fetching token with javascript, listed as form fields so
    # that data can survive validation errors. The raw card data itself is
    # never submitted to our backend -- only to Stripe.js.
    cc_number = forms.CharField(
        label=_('Card number'),
        widget=forms.TextInput(attrs={
            'data-bind': ('valueInit: cc_number, '
                          'textInput: cc_number, '
                          '''css: {'field-error': error_cc_number() != null}''')
        }),
        max_length=25,
        required=False)
    cc_expiry = forms.CharField(
        label=_('Card expiration'),
        widget=forms.TextInput(attrs={
            'data-bind': ('valueInit: cc_expiry, '
                          'textInput: cc_expiry, '
                          '''css: {'field-error': error_cc_expiry() != null}''')
        }),
        max_length=10,
        required=False)
    cc_cvv = forms.CharField(
        label=_('Card CVV'),
        widget=forms.TextInput(attrs={
            'data-bind': ('valueInit: cc_cvv, '
                          'textInput: cc_cvv, '
                          '''css: {'field-error': error_cc_cvv() != null}'''),
            'autocomplete': 'off',
        }),
        max_length=8,
        required=False)

    def __init__(self, *args, **kwargs):
        # Optional pre-existing Stripe customer, popped before the Django
        # form sees the kwargs.
        self.customer = kwargs.pop('customer', None)
        super(StripeModelForm, self).__init__(*args, **kwargs)

    def validate_stripe(self):
        """
        Run validation against Stripe.

        This is what will create several objects using the Stripe API. We need
        to actually create the objects, as that is what will provide us with
        validation errors to throw back at the form.

        Form fields can be accessed here via ``self.cleaned_data`` as this
        method is triggered from the :py:meth:`clean` method. Cleaned form data
        should already exist on the form at this point.
        """
        raise NotImplementedError

    def clean_stripe_token(self):
        # Normalize an empty token string to None.
        data = self.cleaned_data['stripe_token']
        if not data:
            data = None
        return data

    def clean(self):
        """
        Clean form to add Stripe objects via API during validation phase.

        This will handle ensuring a customer and subscription exist and will
        raise any issues as validation errors. This is required because part of
        Stripe's validation happens on the API call to establish a subscription.
        """
        cleaned_data = super(StripeModelForm, self).clean()
        # Form isn't valid, no need to try to associate a card now
        if not self.is_valid():
            self.clear_card_data()
            return
        try:
            self.validate_stripe()
        except stripe.error.CardError as e:
            self.clear_card_data()
            # Map Stripe's error parameter names onto our form field names
            # so the error appears next to the offending input.
            field_lookup = {
                'cvc': 'cc_cvv',
                'number': 'cc_number',
                'expiry': 'cc_expiry',
                'exp_month': 'cc_expiry',
                'exp_year': 'cc_expiry',
            }
            error_field = field_lookup.get(e.param, None)
            self.add_error(
                error_field,
                forms.ValidationError(str(e)),
            )
        except stripe.error.StripeError as e:
            log.exception('There was a problem communicating with Stripe')
            raise forms.ValidationError(
                _('There was a problem communicating with Stripe'))
        return cleaned_data

    def clear_card_data(self):
        """
        Clear card data on validation errors.

        This requires the form was created by passing in a mutable QueryDict
        instance, see :py:class:`readthedocs.payments.mixin.StripeMixin`
        """
        try:
            self.data['stripe_token'] = None
        except AttributeError:
            raise AttributeError('Form was passed immutable QueryDict POST data')

    def fields_with_cc_group(self):
        # Yield non-card fields as-is, then one synthetic group object
        # bundling the three credit-card fields (for template layout).
        group = {
            'is_cc_group': True,
            'fields': []
        }
        for field in self:
            if field.name in ['cc_number', 'cc_expiry', 'cc_cvv']:
                group['fields'].append(field)
            else:
                yield field
        yield group
| mit |
VinceZK/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py | 122 | 3414 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.reftests import extract_reference_link
class ExtractLinkMatchTest(unittest.TestCase):
def test_getExtractMatch(self):
html_1 = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="red-box-notref.xht" />
<link rel="mismatch" href="red-box-notref.xht" />
<meta name="flags" content="TOKENS" />
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
matches, mismatches = extract_reference_link.get_reference_link(html_1)
self.assertItemsEqual(matches,
["green-box-ref.xht", "blue-box-ref.xht"])
self.assertItemsEqual(mismatches,
["red-box-notref.xht", "red-box-notref.xht"])
html_2 = ""
empty_tuple_1 = extract_reference_link.get_reference_link(html_2)
self.assertEqual(empty_tuple_1, ([], []))
# Link does not have a "ref" attribute.
html_3 = """<link href="RELEVANT_SPEC_SECTION"/>"""
empty_tuple_2 = extract_reference_link.get_reference_link(html_3)
self.assertEqual(empty_tuple_2, ([], []))
# Link does not have a "href" attribute.
html_4 = """<link rel="match"/>"""
empty_tuple_3 = extract_reference_link.get_reference_link(html_4)
self.assertEqual(empty_tuple_3, ([], []))
# Link does not have a "/" at the end.
html_5 = """<link rel="help" href="RELEVANT_SPEC_SECTION">"""
empty_tuple_4 = extract_reference_link.get_reference_link(html_5)
self.assertEqual(empty_tuple_4, ([], []))
| bsd-3-clause |
freenas/samba | python/samba/netcmd/main.py | 8 | 2369 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""The main samba-tool command implementation."""
from samba import getopt as options
from samba.netcmd import SuperCommand
class cache_loader(dict):
    """
    We only load subcommand tools if they are actually used.
    This significantly reduces the amount of time spent starting up
    samba-tool.

    Entries registered as None are imported and instantiated lazily on
    first lookup; any other stored value is returned unchanged.
    """

    def __getitem__(self, attr):
        item = dict.__getitem__(self, attr)
        if item is not None:
            return item
        # The 'time' subcommand lives in the 'nettime' module to avoid
        # clashing with the stdlib.
        package = 'nettime' if attr == 'time' else attr
        module = __import__('samba.netcmd.%s' % package,
                            fromlist=['cmd_%s' % attr])
        self[attr] = getattr(module, 'cmd_%s' % attr)()
        return dict.__getitem__(self, attr)

    def iteritems(self):
        for key in self:
            yield (key, self[key])

    def items(self):
        return list(self.iteritems())
class cmd_sambatool(SuperCommand):
    """Main samba administration tool."""

    takes_optiongroups = {
        "versionopts": options.VersionOptions,
    }

    # Every subcommand is registered with a None placeholder; cache_loader
    # imports and instantiates the implementing cmd_* class on first access.
    subcommands = cache_loader()

    subcommands["dbcheck"] = None
    subcommands["delegation"] = None
    subcommands["dns"] = None
    subcommands["domain"] = None
    subcommands["drs"] = None
    subcommands["dsacl"] = None
    subcommands["fsmo"] = None
    subcommands["gpo"] = None
    subcommands["group"] = None
    subcommands["ldapcmp"] = None
    subcommands["ntacl"] = None
    subcommands["rodc"] = None
    subcommands["sites"] = None
    subcommands["spn"] = None
    subcommands["testparm"] = None
    subcommands["time"] = None
    subcommands["user"] = None
    subcommands["processes"] = None
| gpl-3.0 |
admcrae/tensorflow | tensorflow/contrib/keras/python/keras/losses.py | 31 | 3469 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in Keras loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
def mean_squared_error(y_true, y_pred):
  """Mean of squared differences, averaged over the last axis."""
  return K.mean(K.square(y_pred - y_true), axis=-1)


def mean_absolute_error(y_true, y_pred):
  """Mean of absolute differences, averaged over the last axis."""
  return K.mean(K.abs(y_pred - y_true), axis=-1)


def mean_absolute_percentage_error(y_true, y_pred):
  """Absolute error as a percentage of the true values.

  `y_true` is clipped away from zero to avoid division by zero.
  """
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)


def mean_squared_logarithmic_error(y_true, y_pred):
  """Mean squared difference of log(1 + x); inputs clipped to be positive."""
  first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(K.square(first_log - second_log), axis=-1)


def squared_hinge(y_true, y_pred):
  """Squared hinge loss (typically used with +/-1 labels)."""
  return K.mean(K.square(K.maximum(1. - y_true * y_pred, 0.)), axis=-1)


def hinge(y_true, y_pred):
  """Hinge loss (typically used with +/-1 labels)."""
  return K.mean(K.maximum(1. - y_true * y_pred, 0.), axis=-1)


def categorical_crossentropy(y_true, y_pred):
  """Categorical crossentropy.

  NOTE: the backend function takes (output, target) in that order, hence
  y_pred before y_true here.
  """
  return K.categorical_crossentropy(y_pred, y_true)


def sparse_categorical_crossentropy(y_true, y_pred):
  """Categorical crossentropy with integer (non-one-hot) targets."""
  return K.sparse_categorical_crossentropy(y_pred, y_true)


def binary_crossentropy(y_true, y_pred):
  """Elementwise binary crossentropy, averaged over the last axis."""
  return K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)


def kullback_leibler_divergence(y_true, y_pred):
  """KL divergence D(y_true || y_pred); both inputs clipped to (eps, 1)."""
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return K.sum(y_true * K.log(y_true / y_pred), axis=-1)


def poisson(y_true, y_pred):
  """Poisson loss: mean(y_pred - y_true * log(y_pred + eps))."""
  return K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)


def cosine_proximity(y_true, y_pred):
  """Negative mean cosine similarity between L2-normalized inputs."""
  y_true = K.l2_normalize(y_true, axis=-1)
  y_pred = K.l2_normalize(y_pred, axis=-1)
  return -K.mean(y_true * y_pred, axis=-1)
# Aliases. Both the abbreviation and its upper-case form point at the same
# function, so string identifiers like 'mse' and 'MSE' resolve identically
# through deserialize()/get() below.
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
cosine = cosine_proximity
def serialize(loss):
  """Return the serializable identifier (function name) of a loss."""
  return loss.__name__


def deserialize(name, custom_objects=None):
  """Look up a loss function by name among this module's globals.

  Arguments:
      name: string identifier of the loss (aliases included).
      custom_objects: optional dict of user-defined callables searched first.
  """
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')
def get(identifier):
  """Resolve `identifier` into a loss function.

  Accepts None (returned unchanged), a string name (deserialized against
  this module's losses and aliases), or a callable (returned as-is).
  """
  if identifier is None:
    return None
  if isinstance(identifier, six.string_types):
    return deserialize(str(identifier))
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret '
                   'loss function identifier:', identifier)
| apache-2.0 |
tortxof/OpenBazaar | rudp/packet.py | 13 | 3395 | from pyee import EventEmitter
import json
import logging
class Packet(object):
    """A single RUDP packet: a sequence number, a payload, and the four
    control flags (ack/syn/fin/rst) packed into one byte on the wire.

    Can be built either from explicit fields or, with ``packet_buffer=True``,
    by parsing a JSON buffer produced by :meth:`to_buffer`.
    """

    def __init__(self, sequence_number, payload=None, synchronize=None,
                 reset=None, packet_buffer=False):
        self.log = logging.getLogger(
            '%s' % self.__class__.__name__
        )
        self.event_emitter = EventEmitter()
        self.segment = sequence_number
        self.offset = 0
        bools = 0
        self._transmission_count = 0
        if packet_buffer:
            # sequence_number is actually a JSON buffer here.
            try:
                data = json.loads(sequence_number)
                bools = data.get('bools')
                self._sequence_number = data.get('seq_num')
            except ValueError as exc:
                # NOTE(review): on invalid JSON, `data` stays the raw string
                # so the .get() calls below raise AttributeError; preserved
                # as-is to avoid changing error behaviour.
                data = sequence_number
                self.log.error(exc)
            self._payload = data.get('payload')
            self._size = data.get('size')
            # Unpack the flag byte (may be 0 if 'bools' was absent).
            self._acknowledgement = (bools & 0x80)
            self._synchronize = (bools & 0x40)
            self._finish = (bools & 0x20)
            self._reset = (bools & 0x10)
        else:
            self._acknowledgement = False
            self._synchronize = bool(synchronize)
            self._finish = False
            self._reset = bool(reset)
            self._sequence_number = sequence_number
            self._payload = payload
        # (A duplicate re-initialisation of self.log that used to sit here
        # was removed -- the logger is already set at the top of __init__.)

    @staticmethod
    def create_acknowledgement_packet(sequence_number, guid, pubkey):
        """Build a pure-ACK packet carrying the sender's guid and pubkey."""
        ack_data = json.dumps({
            'type': 'ack',
            'senderGUID': guid,
            'pubkey': pubkey
        })
        packet = Packet(sequence_number, ack_data, False)
        packet._acknowledgement = True
        return packet

    @staticmethod
    def create_finish_packet():
        """Build a FIN packet signalling the end of a transmission."""
        packet = Packet(0, '', False, False)
        packet._finish = True
        return packet

    def __eq__(self, other):
        # Bug fix: these comparisons previously used `is` (object identity),
        # which only worked by accident for interned small ints and string
        # constants; two packets with equal field values now compare equal
        # regardless of where the values came from (e.g. parsed JSON).
        return (
            self._acknowledgement == other._acknowledgement and
            self._synchronize == other._synchronize and
            self._finish == other._finish and
            self._reset == other._reset and
            self._sequence_number == other._sequence_number and
            self._payload == other._payload
        )

    def __gt__(self, other):
        return self._sequence_number > other._sequence_number

    def __lt__(self, other):
        return self._sequence_number < other._sequence_number

    def __ge__(self, other):
        return self._sequence_number >= other._sequence_number

    def __le__(self, other):
        return self._sequence_number <= other._sequence_number

    def get_sequence_number(self):
        """Return this packet's sequence number."""
        return self._sequence_number

    def to_buffer(self, guid, pubkey, hostname, port, nick='Default', nat_type=None):
        """Serialize the packet (plus sender metadata) to a JSON buffer.

        The four control flags are packed into a single 'bools' byte that
        __init__ unpacks on the receiving side.
        """
        bools = 0 + (
            (self._acknowledgement and 0x80) |
            (self._synchronize and 0x40) |
            (self._finish and 0x20) |
            (self._reset and 0x10)
        )
        packet_buffer = {
            'bools': bools,
            'seq_num': self._sequence_number,
            'guid': guid,
            'hostname': hostname,
            'port': port,
            'nat_type': nat_type,
            'pubkey': pubkey,
            'size': len(self._payload),
            'payload': self._payload.encode('utf-8'),
            'nick': nick
        }
        return json.dumps(packet_buffer)
| mit |
robhudson/django | tests/model_regress/models.py | 281 | 2293 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Integer/label pairs used as the `choices` for Article.status below.
CHOICES = (
    (1, 'first'),
    (2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
    # Regression-test model: exercises field defaults, nullable choice
    # fields, blank CharFields and a non-ASCII Meta.verbose_name.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()
    status = models.IntegerField(blank=True, null=True, choices=CHOICES)
    misc_data = models.CharField(max_length=100, blank=True)
    article_text = models.TextField()

    class Meta:
        ordering = ('pub_date', 'headline')
        # A utf-8 verbose name (Ångström's Articles) to test they are valid.
        verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"

    def __str__(self):
        return self.headline
class Movie(models.Model):
    # Test models with non-default primary keys / AutoFields #5218
    # Explicit AutoField primary key instead of the implicit `id` column.
    movie_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
class Party(models.Model):
    # Minimal model with a nullable DateField.
    when = models.DateField(null=True)
class Event(models.Model):
    # Minimal model with a (non-nullable) DateTimeField.
    when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
    # Manually-assigned positive-integer primary key (no AutoField).
    id = models.PositiveIntegerField(primary_key=True)
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Worker(models.Model):
    # ForeignKey with CASCADE delete to Department (which has a manual PK).
    department = models.ForeignKey(Department, models.CASCADE)
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
    name = models.CharField(max_length=7)

    def __str__(self):
        # Intentionally broken (invalid start byte in byte string).
        # Do NOT "fix" this: decoding the \xff byte raises at call time,
        # which is exactly the condition the regression tests rely on.
        return b'Name\xff: %s'.decode() % self.name
class NonAutoPK(models.Model):
    # CharField primary key (no AutoField at all).
    name = models.CharField(max_length=10, primary_key=True)
# Chained foreign keys with to_field produce incorrect query #18432
class Model1(models.Model):
    # Unique non-PK column used as the `to_field` target of Model2's FK.
    pkey = models.IntegerField(unique=True, db_index=True)
class Model2(models.Model):
    # FK to Model1 via its non-PK unique `pkey` column (to_field).
    model1 = models.ForeignKey(Model1, models.CASCADE, unique=True, to_field='pkey')
class Model3(models.Model):
    # FK to Model2 via its `model1` column, chaining the to_field lookups.
    model2 = models.ForeignKey(Model2, models.CASCADE, unique=True, to_field='model1')
| bsd-3-clause |
ycl2045/nova-master | nova/tests/virt/xenapi/image/test_glance.py | 11 | 7570 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from nova import context
from nova import exception
from nova.tests.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import vm_utils
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
    """Tests for the XenAPI Glance store's download/upload plugin calls.

    All XenAPI interaction is stubbed; each test sets mox expectations on
    `session.call_plugin_serialized` and verifies the 'glance' plugin is
    invoked with the exact parameters the store should build.
    """

    def setUp(self):
        # Build a GlanceStore against a fully stubbed XenAPI session so no
        # real hypervisor or Glance endpoint is contacted.
        super(TestGlanceStore, self).setUp()
        self.store = glance.GlanceStore()
        self.flags(glance_host='1.1.1.1',
                   glance_port=123,
                   glance_api_insecure=False)
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.context = context.RequestContext(
            'user', 'project', auth_token='foobar')
        fake.reset()
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.stubs.Set(
            vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
        # Minimal instance dict carrying only the fields the store reads.
        self.instance = {'uuid': 'blah',
                         'system_metadata': [],
                         'auto_disk_config': True,
                         'os_type': 'default',
                         'xenapi_use_agent': 'true'}

    def _get_params(self):
        # Keyword arguments common to both download_vhd and upload_vhd
        # plugin calls, derived from the flags/context set in setUp().
        return {'image_id': 'fake_image_uuid',
                'glance_host': '1.1.1.1',
                'glance_port': 123,
                'glance_use_ssl': False,
                'sr_path': '/fake/sr/path',
                'extra_headers': {'X-Service-Catalog': '[]',
                                  'X-Auth-Token': 'foobar',
                                  'X-Roles': '',
                                  'X-Tenant-Id': 'project',
                                  'X-User-Id': 'user',
                                  'X-Identity-Status': 'Confirmed'}}

    def _get_download_params(self):
        # download_vhd additionally receives the VM uuid stack.
        params = self._get_params()
        params['uuid_stack'] = ['uuid1']
        return params

    def test_download_image(self):
        params = self._get_download_params()
        self.stubs.Set(vm_utils, '_make_uuid_stack',
                       lambda *a, **kw: ['uuid1'])
        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'download_vhd', **params)
        self.mox.ReplayAll()
        # Return value is not inspected; the mox expectation above plus
        # VerifyAll() are the real assertions here.
        vdis = self.store.download_image(
            self.context, self.session, self.instance, 'fake_image_uuid')
        self.mox.VerifyAll()

    def _get_upload_params(self, auto_disk_config=True,
                           expected_os_type='default'):
        # upload_vhd receives the VDI uuids plus the image properties.
        params = self._get_params()
        params['vdi_uuids'] = ['fake_vdi_uuid']
        params['properties'] = {'auto_disk_config': auto_disk_config,
                                'os_type': expected_os_type}
        return params

    def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
        # Shared driver: expect exactly one upload_vhd plugin call with the
        # given auto_disk_config / os_type in the image properties.
        params = self._get_upload_params(auto_disk_config, expected_os_type)
        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
        self.mox.ReplayAll()
        self.store.upload_image(self.context, self.session, self.instance,
                                ['fake_vdi_uuid'], 'fake_image_uuid')
        self.mox.VerifyAll()

    def test_upload_image(self):
        self._test_upload_image(True)

    def test_upload_image_None_os_type(self):
        # A None os_type is expected to be uploaded as 'linux'.
        self.instance['os_type'] = None
        self._test_upload_image(True, 'linux')

    def test_upload_image_no_os_type(self):
        # A missing os_type key is likewise expected to become 'linux'.
        del self.instance['os_type']
        self._test_upload_image(True, 'linux')

    def test_upload_image_auto_config_disk_disabled(self):
        # image_auto_disk_config == "Disabled" in system metadata is
        # expected to yield the "disabled" auto_disk_config property.
        sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
        self.instance["system_metadata"] = sys_meta
        self._test_upload_image("disabled")

    def test_upload_image_raises_exception(self):
        # A non-XenAPI error from the plugin propagates unchanged.
        params = self._get_upload_params()
        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(RuntimeError)
        self.mox.ReplayAll()
        self.assertRaises(RuntimeError, self.store.upload_image,
                          self.context, self.session, self.instance,
                          ['fake_vdi_uuid'], 'fake_image_uuid')
        self.mox.VerifyAll()

    def test_upload_image_retries_then_raises_exception(self):
        # A "RetryableError" failure is retried (with backoff sleeps) until
        # glance_num_retries is exhausted, then surfaces as
        # CouldNotUploadImage: three calls total for num_retries=2.
        self.flags(glance_num_retries=2)
        params = self._get_upload_params()
        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.mox.StubOutWithMock(time, 'sleep')
        error_details = ["", "", "RetryableError", ""]
        error = self.session.XenAPI.Failure(details=error_details)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        time.sleep(0.5)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        time.sleep(1)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        self.mox.ReplayAll()
        self.assertRaises(exception.CouldNotUploadImage,
                          self.store.upload_image,
                          self.context, self.session, self.instance,
                          ['fake_vdi_uuid'], 'fake_image_uuid')
        self.mox.VerifyAll()

    def test_upload_image_retries_on_signal_exception(self):
        # Failures whose details mention task signals are also retried;
        # here the third attempt succeeds, so no exception escapes.
        self.flags(glance_num_retries=2)
        params = self._get_upload_params()
        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.mox.StubOutWithMock(time, 'sleep')
        error_details = ["", "task signaled", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        time.sleep(0.5)
        # Note(johngarbutt) XenServer 6.1 and later has this error
        error_details = ["", "signal: SIGTERM", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        time.sleep(1)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params)
        self.mox.ReplayAll()
        self.store.upload_image(self.context, self.session, self.instance,
                                ['fake_vdi_uuid'], 'fake_image_uuid')
        self.mox.VerifyAll()
| apache-2.0 |
pcm17/tensorflow | tensorflow/python/ops/array_ops.py | 9 | 78695 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for manipulating tensors.
See the @{$python/array_ops} guide.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@bitcast
@@saturate_cast
@@broadcast_dynamic_shape
@@broadcast_static_shape
@@shape
@@shape_n
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
@@meshgrid
@@slice
@@strided_slice
@@split
@@tile
@@pad
@@concat
@@stack
@@parallel_stack
@@unstack
@@reverse_sequence
@@reverse
@@reverse_v2
@@transpose
@@extract_image_patches
@@space_to_batch_nd
@@space_to_batch
@@required_space_to_batch_paddings
@@batch_to_space_nd
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@unique_with_counts
@@scatter_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
@@sequence_mask
@@dequantize
@@quantize_v2
@@quantized_concat
@@setdiff1d
@@fake_quant_with_min_max_args
@@fake_quant_with_min_max_args_gradient
@@fake_quant_with_min_max_vars
@@fake_quant_with_min_max_vars_gradient
@@fake_quant_with_min_max_vars_per_channel
@@fake_quant_with_min_max_vars_per_channel_gradient
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import six
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None  # alias: `t[tf.newaxis]` inserts a size-1 dim, as np.newaxis

# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
# pylint: disable=redefined-builtin,protected-access
def expand_dims(input, axis=None, name=None, dim=None):
  """Inserts a dimension of size 1 at index `axis` of `input`'s shape.

  The index is zero-based; a negative `axis` counts backward from the
  end of the shape.  A common use is adding a batch dimension: an image
  of shape `[height, width, channels]` becomes a batch of one image of
  shape `[1, height, width, channels]` via `expand_dims(image, 0)`.

  Other examples:

  ```python
  # 't' is a tensor of shape [2]
  shape(expand_dims(t, 0)) ==> [1, 2]
  shape(expand_dims(t, 1)) ==> [2, 1]
  shape(expand_dims(t, -1)) ==> [2, 1]

  # 't2' is a tensor of shape [2, 3, 5]
  shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
  ```

  This operation requires that `-1-input.dims() <= dim <= input.dims()`,
  and is the inverse of `squeeze()`, which removes size-1 dimensions.

  Args:
    input: A `Tensor`.
    axis: 0-D (scalar). Specifies the dimension index at which to
      expand the shape of `input`.
    name: The name of the output `Tensor`.
    dim: 0-D (scalar). Deprecated alias for `axis`.

  Returns:
    A `Tensor` with the same data as `input`, but with one additional
    dimension of size 1 in its shape.

  Raises:
    ValueError: if both `dim` and `axis` are specified.
  """
  # TODO(aselle): Remove argument dim
  # `dim` is the deprecated spelling of `axis`; at most one may be given.
  if dim is not None and axis is not None:
    raise ValueError("can't specify both 'dim' and 'axis'")
  if dim is not None:
    axis = dim
  return gen_array_ops._expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecated(
    "2016-11-30",
    "This op will be removed after the deprecation date. "
    "Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
  # Deprecated alias for the generated list_diff op; use tf.setdiff1d().
  return gen_array_ops._list_diff(x, y, out_idx, name)

# Concatenate the generated op's docstring with whatever docstring the
# @deprecated decorator left on the wrapper.
listdiff.__doc__ = gen_array_ops._list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable,protected-access
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
  # Thin public wrapper over the generated list_diff op; `index_dtype`
  # is forwarded as the op's output-index dtype argument.
  return gen_array_ops._list_diff(x, y, index_dtype, name)

# Reuse the generated op's docstring for the public wrapper.
setdiff1d.__doc__ = gen_array_ops._list_diff.__doc__
# pylint: enable=protected-access
def broadcast_dynamic_shape(shape_x, shape_y):
  # pylint: disable=protected-access
  """Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.

  Args:
    shape_x: A rank 1 integer `Tensor`, representing the shape of x.
    shape_y: A rank 1 integer `Tensor`, representing the shape of y.

  Returns:
    A rank 1 integer `Tensor` representing the broadcasted shape.
  """
  return gen_array_ops._broadcast_args(shape_x, shape_y)
# pylint: enable=protected-access
def broadcast_static_shape(shape_x, shape_y):
  """Computes the statically-known broadcast of two `TensorShape`s.

  Args:
    shape_x: A `TensorShape`
    shape_y: A `TensorShape`

  Returns:
    A `TensorShape` holding the shape both inputs broadcast to.

  Raises:
    ValueError: If the two shapes can not be broadcasted.
  """
  broadcasted = common_shapes.broadcast_shape(shape_x, shape_y)
  return broadcasted
def shape(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns a 1-D integer tensor holding the shape of `input`.

  For example:

  ```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  shape(t) ==> [2, 2, 3]
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  # Delegate to the internal helper with constant folding enabled.
  return shape_internal(input, name=name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Implementation of `shape`, with constant folding optionally disabled.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Shape", [input]) as name:
    sparse_classes = (sparse_tensor.SparseTensor,
                      sparse_tensor.SparseTensorValue)
    if isinstance(input, sparse_classes):
      # Sparse tensors carry their shape explicitly; just cast it.
      return gen_math_ops.cast(input.dense_shape, out_type)
    input_tensor = ops.convert_to_tensor(input)
    static_shape = input_tensor.get_shape()
    if optimize and static_shape.is_fully_defined():
      # Fold the fully-known static shape into a constant op.
      return constant(static_shape.as_list(), out_type, name=name)
    return gen_array_ops.shape(input, name=name, out_type=out_type)
def size(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the number of elements in `input` as a scalar tensor.

  For example:

  ```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
  size(t) ==> 12
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`. Defaults to tf.int32.
  """
  # Delegate to the internal helper with constant folding enabled.
  return size_internal(input, name=name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin,protected-access
  """Implementation of `size`, with constant folding optionally disabled.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the size as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Size", [input]) as name:
    sparse_classes = (sparse_tensor.SparseTensor,
                      sparse_tensor.SparseTensorValue)
    if isinstance(input, sparse_classes):
      # Size of a sparse tensor is the product of its dense-shape entries.
      return gen_math_ops._prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    input_tensor = ops.convert_to_tensor(input)
    static_shape = input_tensor.get_shape()
    if optimize and static_shape.is_fully_defined():
      # Fold the statically-known element count into a constant op.
      return constant(static_shape.num_elements(), out_type, name=name)
    return gen_array_ops.size(input, name=name, out_type=out_type)
def rank(input, name=None):
  # pylint: disable=redefined-builtin
  """Returns the rank (number of dimensions) of `input` as a 0-D `int32`.

  For example:

  ```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  # shape of tensor 't' is [2, 2, 3]
  rank(t) ==> 3
  ```

  **Note**: The rank of a tensor is not the same as the rank of a matrix.
  It is the number of indices required to uniquely select each element of
  the tensor, and is also known as "order", "degree", or "ndims."

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.

  @compatibility(numpy)
  Equivalent to np.ndim
  @end_compatibility
  """
  # Delegate to the internal helper with constant folding enabled.
  return rank_internal(input, name=name, optimize=True)
def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Implementation of `rank`, with constant folding optionally disabled.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    sparse_classes = (sparse_tensor.SparseTensor,
                      sparse_tensor.SparseTensorValue)
    if isinstance(input, sparse_classes):
      # Rank of a sparse tensor is the length of its dense-shape vector.
      return gen_array_ops.size(input.dense_shape, name=name)
    input_tensor = ops.convert_to_tensor(input)
    static_shape = input_tensor.get_shape()
    if optimize and static_shape.ndims is not None:
      # Fold the statically-known rank into a constant op.
      return constant(static_shape.ndims, dtypes.int32, name=name)
    return gen_array_ops.rank(input, name=name)
def _SliceHelper(tensor, slice_spec, var=None):
  """Overload for Tensor.__getitem__.

  This operation extracts the specified region from the tensor.
  The notation is similar to NumPy with the restriction that
  currently only basic indexing is supported. That means that
  using a tensor as input is not currently allowed.

  Some useful examples:

  ```python
  # strip leading and trailing 2 elements
  foo = tf.constant([1,2,3,4,5,6])
  print(foo[2:-2].eval()) # => [3,4]

  # skip every row and reverse every column
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]

  # Insert another dimension
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[tf.newaxis, :, :].eval()) # => [[[3,2,1], [9,8,7]]]
  print(foo[:, tf.newaxis, :].eval()) # => [[[3,2,1]], [[9,8,7]]]
  print(foo[:, :, tf.newaxis].eval()) # => [[[3],[2],[1]], [[9],[8],[7]]]

  # Ellipses (3 equivalent operations)
  print(foo[tf.newaxis, :, :].eval()) # => [[[3,2,1], [9,8,7]]]
  print(foo[tf.newaxis, ...].eval()) # => [[[3,2,1], [9,8,7]]]
  print(foo[tf.newaxis].eval()) # => [[[3,2,1], [9,8,7]]]
  ```

  Notes:
    - `tf.newaxis` is `None` as in NumPy.
    - An implicit ellipsis is placed at the end of the `slice_spec`
    - NumPy advanced indexing is currently not supported.

  Args:
    tensor: An ops.Tensor object.
    slice_spec: The arguments to Tensor.__getitem__.
    var: In the case of variable slice assignment, the Variable
      object to slice (i.e. tensor is the read-only view of this
      variable).

  Returns:
    The appropriate slice of "tensor", based on "slice_spec".

  Raises:
    ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice, or Ellipsis.
  """
  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]

  begin, end, strides = [], [], []
  index = 0

  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  for s in slice_spec:
    if isinstance(s, _baseslice):
      strides.append(s.step if s.step is not None else 1)
      # python doesn't always use None when constructing ranges
      # for example a[:] gives slice(None,sys.maxsize,None)
      # whereas a[::1] gives slice(None,None,None)
      # Bug fix: compare `start` to sys.maxsize by value (!=) instead of
      # identity (is not), matching the `stop` check below; the identity
      # test only held when the slice carried the interned sys.maxsize
      # object itself.
      if s.start is not None and s.start != sys.maxsize:
        begin.append(s.start)
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None and s.stop != sys.maxsize:
        end.append(s.stop)
      else:
        end.append(0)
        end_mask |= (1 << index)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= (1 << index)
    elif s is newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= (1 << index)
    else:
      # A bare index: take exactly one element and drop the dimension.
      begin.append(s)
      end.append(s + 1)
      strides.append(1)
      shrink_axis_mask |= (1 << index)
    index += 1

  # stack possibly involves no tensors, so we must use op_scope correct graph.
  with ops.name_scope(None, "strided_slice",
                      [tensor] + begin + end + strides) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (
          stack(begin), stack(end), stack(strides))
    else:
      var_empty = constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    return strided_slice(
        tensor,
        packed_begin,
        packed_end,
        packed_strides,
        begin_mask=begin_mask,
        end_mask=end_mask,
        shrink_axis_mask=shrink_axis_mask,
        new_axis_mask=new_axis_mask,
        ellipsis_mask=ellipsis_mask,
        var=var,
        name=name)
# pylint: disable=undefined-variable,protected-access
def slice(input_, begin, size, name=None):
  # pylint: disable=redefined-builtin
  """Extracts a slice of shape `size` from `input_` starting at `begin`.

  `begin[i]` is the zero-based offset into dimension i of `input_`, and
  `size[i]` is the number of elements taken from that dimension.  A
  `size[i]` of -1 means "all remaining elements of dimension i", i.e. it
  is equivalent to setting `size[i] = input.dim_size(i) - begin[i]`.

  This operation requires that:

  `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`

  For example:

  ```python
  # 'input' is [[[1, 1, 1], [2, 2, 2]],
  #             [[3, 3, 3], [4, 4, 4]],
  #             [[5, 5, 5], [6, 6, 6]]]
  tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
  tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
                                              [4, 4, 4]]]
  tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
                                             [[5, 5, 5]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    size: An `int32` or `int64` `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input`.
  """
  sliced = gen_array_ops._slice(input_, begin, size, name=name)
  return sliced
# pylint: disable=invalid-name
def strided_slice(input_,
                  begin,
                  end,
                  strides=None,
                  begin_mask=0,
                  end_mask=0,
                  ellipsis_mask=0,
                  new_axis_mask=0,
                  shrink_axis_mask=0,
                  var=None,
                  name=None):
  """Extracts a strided slice from a tensor.

  To a first order, this operation extracts a slice of size `end - begin`
  from a tensor `input`
  starting at the location specified by `begin`. The slice continues by adding
  `stride` to the `begin` index until all dimensions are not less than `end`.
  Note that components of stride can be negative, which causes a reverse
  slice.

  This operation can be thought of an encoding of a numpy style sliced
  range. Given a python slice input[<spec0>, <spec1>, ..., <specn>]
  this function will be called as follows.

  `begin`, `end`, and `strides` will be all length n. n is in general
  not the same dimensionality as `input`.

  For the ith spec,
  `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`,
  and `shrink_axis_mask` will have the ith bit corresponding to
  the ith spec.

  If the ith bit of `begin_mask` is non-zero, `begin[i]` is ignored and
  the fullest possible range in that dimension is used instead.
  `end_mask` works analogously, except with the end range.

  `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
  `foo[::-1]` reverses a tensor with shape 8.

  If the ith bit of `ellipsis_mask` is non-zero, as many unspecified dimensions
  as needed will be inserted between other dimensions. Only one
  non-zero bit is allowed in `ellipsis_mask`.

  For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
  equivalent to `foo[3:5,:,:,4:5]` and
  `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.

  If the ith bit of `new_axis_mask` is one, then `begin`,
  `end`, and `stride` are ignored and a new length 1 dimension is
  added at this point in the output tensor.

  For example `foo[3:5,4]` on a 10x8 tensor produces a shape 2 tensor
  whereas `foo[3:5,4:5]` produces a shape 2x1 tensor with shrink_mask
  being 1<<1 == 2.

  If the ith bit of `shrink_axis_mask` is one, then `begin`,
  `end[i]`, and `stride[i]` are used to do a slice in the appropriate
  dimension, but the output tensor will be reduced in dimensionality
  by one. This is only valid if the ith entry of slice[i]==1.

  NOTE: `begin` and `end` are zero-indexed`.
  `strides` entries must be non-zero.

  ```python
  # 'input' is [[[1, 1, 1], [2, 2, 2]],
  #             [[3, 3, 3], [4, 4, 4]],
  #             [[5, 5, 5], [6, 6, 6]]]
  tf.strided_slice(input, [1, 0, 0], [2, 1, 3], [1, 1, 1]) ==> [[[3, 3, 3]]]
  tf.strided_slice(input, [1, 0, 0], [2, 2, 3], [1, 1, 1]) ==> [[[3, 3, 3],
                                                                 [4, 4, 4]]]
  tf.strided_slice(input, [1, -1, 0], [2, -3, 3], [1, -1, 1]) ==>[[[4, 4, 4],
                                                                   [3, 3, 3]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    end: An `int32` or `int64` `Tensor`.
    strides: An `int32` or `int64` `Tensor`.
    begin_mask: An `int32` mask.
    end_mask: An `int32` mask.
    ellipsis_mask: An `int32` mask.
    new_axis_mask: An `int32` mask.
    shrink_axis_mask: An `int32` mask.
    var: The variable corresponding to `input_` or None
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input`.
  """
  if strides is None:
    # Default to a stride of 1 in every sliced dimension.
    strides = ones_like(begin)

  op = gen_array_ops.strided_slice(
      input=input_,
      begin=begin,
      end=end,
      strides=strides,
      name=name,
      begin_mask=begin_mask,
      end_mask=end_mask,
      ellipsis_mask=ellipsis_mask,
      new_axis_mask=new_axis_mask,
      shrink_axis_mask=shrink_axis_mask)

  def assign(val):
    """Closure that holds all the arguments to create an assignment."""
    if var is None:
      raise ValueError("Sliced assignment is only supported for variables")
    # Bug fix: `name` may be None when the caller did not supply one, and
    # `None + "_assign"` raises TypeError. Only derive the assignment name
    # when an explicit name was given.
    assign_name = name + "_assign" if name is not None else None
    return gen_array_ops.strided_slice_assign(
        ref=var,
        begin=begin,
        end=end,
        strides=strides,
        value=val,
        name=assign_name,
        begin_mask=begin_mask,
        end_mask=end_mask,
        ellipsis_mask=ellipsis_mask,
        new_axis_mask=new_axis_mask,
        shrink_axis_mask=shrink_axis_mask)

  # Attach the assignment closure so variable views support `.assign(v)`.
  op.assign = assign
  return op
def _SliceHelperVar(var, slice_spec):
  """Slices (and optionally assigns to) part of a variable.

  Produces the same read view as `Tensor.__getitem__` (see
  ${tf.Tensor$`Tensor.__getitem__`} for detailed slicing examples), but
  because the variable is threaded through, the returned operator also
  exposes an `assign()` method for writing back into the sliced range --
  similar to Python's `__setitem__`, with explicit syntax so the
  assignment op can be captured for grouping or `sess.run()`.

  For example,

  ```prettyprint
  import tensorflow as tf
  A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print sess.run(A[:2, :2]) # => [[1,2], [4,5]]

    op = A[:2,:2].assign(22. * tf.ones((2, 2)))
    print sess.run(op) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
  ```

  Note that assignments currently do not support NumPy broadcasting
  semantics.

  Args:
    var: An `ops.Variable` object.
    slice_spec: The arguments to `Tensor.__getitem__`.

  Returns:
    The appropriate slice of "tensor", based on "slice_spec", as an
    operator. The operator also has an `assign()` method that can be
    used to generate an assignment operator.

  Raises:
    ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice, or Ellipsis.
  """
  read_view = var._AsTensor()
  return _SliceHelper(read_view, slice_spec, var)
# Install _SliceHelper as Tensor.__getitem__ so `t[...]` routes through it.
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def parallel_stack(values, name="parallel_stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.

  Requires that the shape of inputs be known at graph construction time.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the first dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
  tensor will have the shape `(N, A, B, C)`.

  For example:

  ```prettyprint
  # 'x' is [1, 4]
  # 'y' is [2, 5]
  # 'z' is [3, 6]
  parallel_stack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]
  ```

  The difference between stack and parallel_stack is that stack requires all
  of the inputs be computed before the operation will begin but doesn't require
  that the input shapes be known during graph construction. Parallel stack
  will copy pieces of the input into the output as they become available, in
  some situations this can provide a performance benefit.

  This is the opposite of unstack. The numpy equivalent is

      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.
  """
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    # Fix: `value_t` is already a Tensor; the old code redundantly passed
    # it through ops.convert_to_tensor a second time before reading its
    # static shape.
    value_shape = value_t.get_shape()

    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops._parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape)
def stack(values, axis=0, name="stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.

  Packs the tensors in `values` along the `axis` dimension, producing a tensor
  with rank one higher than each input.  Given `N` tensors of shape
  `(A, B, C)`: with `axis == 0` the output has shape `(N, A, B, C)`; with
  `axis == 1` it has shape `(A, N, B, C)`; etc.

  For example:

  ```prettyprint
  # 'x' is [1, 4]
  # 'y' is [2, 5]
  # 'z' is [3, 6]
  stack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
  stack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
  ```

  This is the opposite of unstack.  The numpy equivalent is

      tf.stack([x, y, z]) = np.asarray([x, y, z])

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    axis: An `int`. The axis to stack along. Defaults to the first dimension.
      Supports negative indexes.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.

  Raises:
    ValueError: If `axis` is out of the range [-(R+1), R+1).
  """
  if axis == 0:
    try:
      # A list of constants can be folded directly into a single constant op.
      return ops.convert_to_tensor(values, name=name)
    except (TypeError, ValueError):
      pass  # The list contains tensors that are not constants.
  value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
  if value_shape.ndims is not None:
    expanded_num_dims = value_shape.ndims + 1
    if not -expanded_num_dims <= axis < expanded_num_dims:
      raise ValueError("axis = %d not in [%d, %d)" %
                       (axis, -expanded_num_dims, expanded_num_dims))
  return gen_array_ops._pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
  """Converts the given list or tuple to a tensor by packing.

  Args:
    list_or_tuple: A (possibly nested) list or tuple containing a tensor.
    dtype: The element type of the returned tensor.
    name: A name for the returned tensor.

  Returns:
    A `tf.Tensor` with value equivalent to `list_or_tuple`.
  """
  found_tensor = False
  elements = []
  with ops.name_scope(name) as scope:
    for index, item in enumerate(list_or_tuple):
      if ops.is_dense_tensor_like(item):
        # A tensor element fixes that the result must be packed, and its
        # dtype must agree with the requested one (when given).
        if dtype is not None and item.dtype.base_dtype != dtype:
          raise TypeError(
              "Cannot convert a list containing a tensor of dtype "
              "%s to %s (Tensor is: %r)" % (item.dtype, dtype, item))
        elements.append(item)
        found_tensor = True
      elif isinstance(item, (list, tuple)):
        # Recurse into nested sequences; if the recursion produced a tensor
        # this level must pack as well.
        packed = _autopacking_helper(item, dtype, str(index))
        if ops.is_dense_tensor_like(packed):
          found_tensor = True
        elements.append(packed)
      else:
        elements.append(item)
    if not found_tensor:
      return elements
    # Convert any remaining non-tensor elements to constants so that the
    # whole list can be packed into a single tensor.
    as_tensors = []
    for index, item in enumerate(elements):
      if ops.is_dense_tensor_like(item):
        as_tensors.append(item)
      else:
        # NOTE(mrry): This is inefficient, but it enables us to
        # handle the case where the list arguments are other
        # convertible-to-tensor types, such as numpy arrays.
        as_tensors.append(
            constant_op.constant(item, dtype=dtype, name=str(index)))
    return gen_array_ops._pack(as_tensors, name=scope)
def _get_dtype_from_nested_lists(list_or_tuple):
  """Returns the dtype of any tensor-like object in `list_or_tuple`, if found.

  Args:
    list_or_tuple: A list or tuple representing an object that can be
      converted to a `tf.Tensor`.

  Returns:
    The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
    such object exists.
  """
  for item in list_or_tuple:
    if ops.is_dense_tensor_like(item):
      # The first tensor-like object encountered decides the dtype.
      return item.dtype.base_dtype
    if isinstance(item, (list, tuple)):
      nested_dtype = _get_dtype_from_nested_lists(item)
      if nested_dtype is not None:
        return nested_dtype
  return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
  """Tensor conversion function that automatically packs arguments."""
  if as_ref:
    return NotImplemented
  inferred = _get_dtype_from_nested_lists(v)
  if inferred is None:
    # No tensor-like objects anywhere in the nested structure; defer to
    # other registered conversion functions.
    return NotImplemented
  if dtype is not None and dtype != inferred:
    # An explicit dtype that disagrees with the tensors also disqualifies
    # this converter.
    return NotImplemented
  return _autopacking_helper(v, inferred, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
# Priority 99 keeps this behind more specific converters but ahead of the
# generic constant conversion, so nested lists containing tensors get packed.
ops.register_tensor_conversion_function(
    (list, tuple), _autopacking_conversion_function, 99)
def unstack(value, num=None, axis=0, name="unstack"):
  """Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.

  Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
  If `num` is not specified (the default), it is inferred from `value`'s shape;
  if `value.shape[axis]` is not known, `ValueError` is raised.

  For example, given a tensor of shape `(A, B, C, D)`: with `axis == 0` the
  i'th output is the slice `value[i, :, :, :]` of shape `(B, C, D)`; with
  `axis == 1` it is `value[:, i, :, :]` of shape `(A, C, D)`; etc.  (The
  unpacked dimension is gone, unlike `split`.)

  This is the opposite of pack.  The numpy equivalent is

      tf.unstack(x, n) = list(x)

  Args:
    value: A rank `R > 0` `Tensor` to be unstacked.
    num: An `int`. The length of the dimension `axis`. Automatically inferred
      if `None` (the default).
    axis: An `int`. The axis to unstack along. Defaults to the first
      dimension. Supports negative indexes.
    name: A name for the operation (optional).

  Returns:
    The list of `Tensor` objects unstacked from `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
    ValueError: If `axis` is out of the range [-R, R).
  """
  if num is None:
    # Infer `num` from the static shape of `value` along `axis`.
    value = ops.convert_to_tensor(value)
    static_shape = value.get_shape()
    if static_shape.ndims is not None:
      if not -static_shape.ndims <= axis < static_shape.ndims:
        raise ValueError("axis = %d not in [%d, %d)" %
                         (axis, -static_shape.ndims, static_shape.ndims))
      num = static_shape[axis].value
    if num is None:
      raise ValueError("Cannot infer num from shape %s" % static_shape)
  return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
def concat(values, axis, name="concat"):
  """Concatenates tensors along one dimension.

  Concatenates the list of tensors `values` along dimension `axis`.  If
  `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
  result has shape

      [D0, D1, ... Raxis, ...Dn]

  where `Raxis = sum(Daxis(i))`.  The number of dimensions of the input
  tensors must match, and all dimensions except `axis` must be equal.

  For example:

  ```python
  t1 = [[1, 2, 3], [4, 5, 6]]
  t2 = [[7, 8, 9], [10, 11, 12]]
  tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
  tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
  ```

  Note: If you are concatenating along a new axis consider using stack;

      tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)

  can be rewritten as

      tf.stack(tensors, axis=axis)

  Args:
    values: A list of `Tensor` objects or a single `Tensor`.
    axis: 0-D `int32` `Tensor`.  Dimension along which to concatenate.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` resulting from concatenation of the input tensors.
  """
  if not isinstance(values, (list, tuple)):
    values = [values]
  # TODO(mrry): Change to return values?
  if len(values) == 1:  # Degenerate case of one tensor.
    # Make a throwaway call to convert_to_tensor to make sure
    # that axis is of the correct type, and make sure that
    # the returned tensor is a scalar.
    # TODO(keveman): Implement a standalone type and shape checker.
    with ops.name_scope(name) as scope:
      axis_t = ops.convert_to_tensor(axis, name="concat_dim",
                                     dtype=dtypes.int32)
      axis_t.get_shape().assert_is_compatible_with(tensor_shape.scalar())
      return identity(values[0], name=scope)
  return gen_array_ops._concat_v2(values=values, axis=axis, name=name)
def boolean_mask(tensor, mask, name="boolean_mask"):
  """Apply boolean mask to tensor.  Numpy equivalent is `tensor[mask]`.

  ```python
  # 1-D example
  tensor = [0, 1, 2, 3]
  mask = np.array([True, False, True, False])
  boolean_mask(tensor, mask) ==> [0, 2]
  ```

  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
  the first K dimensions of `tensor`'s shape.  We then have:
    `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).

  Args:
    tensor:  N-D tensor.
    mask:  K-D boolean tensor, K <= N and K must be known statically.
    name:  A name for this operation (optional).

  Returns:
    (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
    to `True` values in `mask`.

  Raises:
    ValueError:  If shapes do not conform.

  Examples:

  ```python
  # 2-D example
  tensor = [[1, 2], [3, 4], [5, 6]]
  mask = np.array([True, False, True])
  boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]
  ```
  """
  def _apply_mask_1d(reshaped_tensor, mask):
    """Mask tensor along dimension 0 with a 1-D mask."""
    # where() yields the [i, 0]-shaped indices of True entries; squeeze the
    # trailing axis so gather() receives a 1-D index vector.
    indices = squeeze(where(mask), squeeze_dims=[1])
    return gather(reshaped_tensor, indices)
  with ops.name_scope(name, values=[tensor, mask]):
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    mask = ops.convert_to_tensor(mask, name="mask")
    shape_mask = mask.get_shape()
    ndims_mask = shape_mask.ndims
    shape_tensor = tensor.get_shape()
    if ndims_mask == 0:
      raise ValueError("mask cannot be scalar.")
    if ndims_mask is None:
      raise ValueError(
          "Number of mask dimensions must be specified, even if some dimensions"
          " are None. E.g. shape=[None] is ok, but shape=None is not.")
    # Statically validate that mask's shape matches tensor's leading K dims.
    shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)
    # Collapse the first K dimensions of `tensor` into one, so the problem
    # reduces to masking along dimension 0 with a flattened 1-D mask.
    leading_size = gen_math_ops._prod(shape(tensor)[:ndims_mask], [0])
    tensor = reshape(
        tensor,
        concat([[leading_size], shape(tensor)[ndims_mask:]], 0))
    # Re-attach whatever static shape information survives the reshape.
    first_dim = shape_tensor[:ndims_mask].num_elements()
    tensor.set_shape(
        tensor_shape.as_shape([first_dim])
        .concatenate(shape_tensor[ndims_mask:]))
    mask = reshape(mask, [-1])
    return _apply_mask_1d(tensor, mask)
def sparse_mask(a, mask_indices, name=None):
  """Masks elements of `IndexedSlices`.

  Given an `IndexedSlices` instance `a`, returns another `IndexedSlices`
  containing only the slices of `a` whose indices are *not* listed in
  `mask_indices`.  This is useful when you need to extract a subset of slices
  from an `IndexedSlices` object.

  For example:

  ```python
  # `a` contains slices at indices [12, 26, 37, 45] from a large tensor
  # with shape [1000, 10]
  a.indices => [12, 26, 37, 45]
  tf.shape(a.values) => [4, 10]
  # `b` will be the subset of `a` slices at its second and third indices, so
  # we want to mask its first and last indices (which are at absolute
  # indices 12, 45)
  b = tf.sparse_mask(a, [12, 45])
  b.indices => [26, 37]
  tf.shape(b.values) => [2, 10]
  ```

  Args:
    a: An `IndexedSlices` instance.
    mask_indices: Indices of elements to mask.
    name: A name for the operation (optional).

  Returns:
    The masked `IndexedSlices` instance.
  """
  with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
    # setdiff1d returns the surviving indices plus the positions at which
    # they occur in `a.indices`, which lets us gather the matching values.
    kept_indices, gather_positions = setdiff1d(a.indices, mask_indices)
    kept_values = gather(a.values, gather_positions, name=name)
    return ops.IndexedSlices(kept_values, kept_indices, a.dense_shape)
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
  """Splits a tensor into sub tensors.

  If `num_or_size_splits` is a scalar, `num_split`, then splits `value` along
  dimension `axis` into `num_split` smaller tensors.
  Requires that `num_split` evenly divides `value.shape[axis]`.

  If `num_or_size_splits` is a tensor, `size_splits`, then splits `value` into
  `len(size_splits)` pieces. The shape of the `i`-th piece has the same size as
  the `value` except along dimension `axis` where the size is `size_splits[i]`.

  For example:

  ```python
  # 'value' is a tensor with shape [5, 30]
  # Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
  split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
  tf.shape(split0) ==> [5, 4]
  tf.shape(split1) ==> [5, 15]
  tf.shape(split2) ==> [5, 11]
  # Split 'value' into 3 tensors along dimension 1
  split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
  tf.shape(split0) ==> [5, 10]
  ```

  Args:
    value: The `Tensor` to split.
    num_or_size_splits: Either an integer indicating the number of splits along
      split_dim or a 1-D Tensor containing the sizes of each output tensor
      along split_dim. If an integer then it must evenly divide
      `value.shape[axis]`; otherwise the sum of sizes along the split
      dimension must match that of the `value`.
    axis: A 0-D `int32` `Tensor`. The dimension along which to split.
      Must be in the range `[0, rank(value))`. Defaults to 0.
    num: Optional, used to specify the number of outputs when it cannot be
      inferred from the shape of `size_splits`.
    name: A name for the operation (optional).

  Returns:
    if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
    objects; if `num_or_size_splits` is a 1-D Tensor returns
    `num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
    `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
  """
  if isinstance(num_or_size_splits, six.integer_types):
    return gen_array_ops._split(
        split_dim=axis, num_split=num_or_size_splits, value=value, name=name)
  else:
    size_splits = ops.convert_to_tensor(num_or_size_splits)
    if num is None:
      size_splits_shape = size_splits.get_shape()
      num = size_splits_shape.dims[0]
      # Use the public `Dimension.value` accessor (rather than the private
      # `_value` attribute) to check whether the size is statically known.
      if num.value is None:
        raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
    return gen_array_ops._split_v(
        value=value,
        size_splits=size_splits,
        split_dim=axis,
        num_split=num,
        name=name)
def transpose(a, perm=None, name="transpose"):
  """Transposes `a`.  Permutes the dimensions according to `perm`.

  The returned tensor's dimension i will correspond to the input dimension
  `perm[i]`.  If `perm` is not given, it is set to (n-1...0), where n is the
  rank of the input tensor.  Hence by default, this operation performs a
  regular matrix transpose on 2-D input Tensors.

  For example:

  ```python
  # 'x' is [[1 2 3]
  #         [4 5 6]]
  tf.transpose(x) ==> [[1 4]
                       [2 5]
                       [3 6]]
  # Equivalently
  tf.transpose(x, perm=[1, 0]) ==> [[1 4]
                                    [2 5]
                                    [3 6]]
  # 'perm' is more useful for n-dimensional tensors, for n > 2:
  # taking the transpose of the matrices in dimension-0 of a [2, 2, 3]
  # tensor is tf.transpose(x, perm=[0, 2, 1]).
  ```

  Args:
    a: A `Tensor`.
    perm: A permutation of the dimensions of `a`.
    name: A name for the operation (optional).

  Returns:
    A transposed `Tensor`.
  """
  with ops.name_scope(name, "transpose", [a]) as name:
    if perm is not None:
      return gen_array_ops.transpose(a, perm, name=name)
    # Default permutation: reverse every dimension, i.e. (n-1, ..., 1, 0).
    rank_of_a = gen_array_ops.rank(a)
    perm = (rank_of_a - 1) - gen_math_ops._range(0, rank_of_a, 1)
    ret = gen_array_ops.transpose(a, perm, name=name)
    # NOTE(mrry): Setting the shape explicitly because
    # reverse is not handled by the shape function.
    input_shape = ret.op.inputs[0].get_shape().dims
    if input_shape is not None:
      ret.set_shape(input_shape[::-1])
    return ret
# pylint: disable=invalid-name
def matrix_transpose(a, name="matrix_transpose"):
  """Transposes last two dimensions of tensor `a`.

  For example:

  ```python
  # Matrix with no batch dimension.
  # 'x' is [[1 2 3]
  #         [4 5 6]]
  tf.matrix_transpose(x) ==> [[1 4]
                              [2 5]
                              [3 6]]
  # Matrix with two batch dimensions.
  # x.shape is [1, 2, 3, 4]
  # tf.matrix_transpose(x) is shape [1, 2, 4, 3]
  ```

  Args:
    a: A `Tensor` with `rank >= 2`.
    name: A name for the operation (optional).

  Returns:
    A transposed batch matrix `Tensor`.

  Raises:
    ValueError:  If `a` is determined statically to have `rank < 2`.
  """
  with ops.name_scope(name, values=[a]):
    a = ops.convert_to_tensor(a, name="a")
    a_shape = a.get_shape()
    ndims = a_shape.ndims
    if ndims is None:
      # Rank is only known at runtime: build `perm` with tensor ops.
      a_rank = rank(a)
      perm = concat(
          (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
    else:
      # Statically-known rank lets us (1) validate that `a` is at least a
      # matrix, and (2) build `perm` as a python list, which preserves static
      # shape information and avoids extra computations.
      if ndims < 2:
        raise ValueError(
            "Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
            "%s" % a_shape)
      perm = list(range(ndims - 2)) + [ndims - 1, ndims - 2]
    return transpose(a, perm=perm)
# pylint: enable=invalid-name
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  For example:

  ```python
  tf.zeros([3, 4], tf.int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    # Pick the "zero" element appropriate for the requested dtype.
    zero = False if dtype == dtypes.bool else (
        "" if dtype == dtypes.string else 0)
    try:
      # Fast path: a statically-known shape becomes a single constant op.
      shape = tensor_shape.as_shape(shape)
      out = constant(zero, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      # Dynamic shape: fall back to fill().
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      out = fill(shape, constant(zero, dtype=dtype), name=name)
  assert out.dtype.base_dtype == dtype
  return out
def zeros_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor with all elements set to zero.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero.  Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
    name: A name for the operation (optional).
    optimize: if true, attempt to statically determine the shape of 'tensor'
      and encode it as a constant.

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.name_scope(name, "zeros_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    if dtype is None or tensor.dtype == dtype:
      # Same dtype: the generated op preserves shape and type directly.
      return gen_array_ops._zeros_like(tensor, name=name)
    # Different dtype: build zeros of the requested type with the same shape.
    ret = zeros(shape_internal(tensor, optimize=optimize), dtype, name=name)
    ret.set_shape(tensor.get_shape())
    return ret
def ones_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor with all elements set to 1.

  Given a single tensor (`tensor`), this operation returns a tensor of the same
  type and shape as `tensor` with all elements set to 1.  Optionally, you can
  specify a new type (`dtype`) for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, `complex128` or
      `bool`.
    name: A name for the operation (optional).
    optimize: if true, attempt to statically determine the shape of 'tensor'
      and encode it as a constant.

  Returns:
    A `Tensor` with all elements set to 1.
  """
  with ops.name_scope(name, "ones_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    ones_shape = shape_internal(tensor, optimize=optimize)
    # Default to the input's own dtype when none was requested.
    out_dtype = tensor.dtype if dtype is None else dtype
    ret = ones(ones_shape, dtype=out_dtype, name=name)
    ret.set_shape(tensor.get_shape())
    return ret
def ones(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to 1.

  This operation returns a tensor of type `dtype` with shape `shape` and all
  elements set to 1.

  For example:

  ```python
  tf.ones([2, 3], tf.int32) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to 1.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "ones", [shape]) as name:
    # Pick the "one" element appropriate for the requested dtype.
    if dtype == dtypes.bool:
      one = True
    else:
      one = 1
    try:
      # Fast path: a statically-known shape becomes a single constant op.
      shape = tensor_shape.as_shape(shape)
      out = constant(one, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      # Dynamic shape: fall back to fill().
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      out = fill(shape, constant(one, dtype=dtype), name=name)
  assert out.dtype.base_dtype == dtype
  return out
def placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a tensor that will be always fed.

  **Important**: This tensor will produce an error if evaluated.  Its value
  must be fed using the `feed_dict` optional argument to `Session.run()`,
  `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.placeholder(tf.float32, shape=(1024, 1024))
  y = tf.matmul(x, x)
  with tf.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.
    rand_array = np.random.rand(1024, 1024)
    print(sess.run(y, feed_dict={x: rand_array}))  # Will succeed.
  ```

  Args:
    dtype: The type of elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a tensor of any shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  shape = tensor_shape.as_shape(shape)
  # The generated op only accepts a fully-defined shape attr; a partially
  # known shape is passed as [] and re-attached below via set_shape().
  dim_list = shape.as_list() if shape.is_fully_defined() else []
  ret = gen_array_ops._placeholder(dtype=dtype, shape=dim_list, name=name)
  ret.set_shape(shape)
  return ret
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
  """Takes numpy array or Tensor or None and returns either None or Tensor."""
  if shape is None:
    return None
  # A python/numpy shape containing an unknown (None) dimension cannot be
  # converted to a Tensor; signal that by returning None.
  if not isinstance(shape, ops.Tensor) and any(el is None for el in shape):
    return None
  return ops.convert_to_tensor(shape, name=name)
def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a sparse tensor that will be always fed.

  **Important**: This sparse tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.sparse_placeholder(tf.float32)
  y = tf.sparse_reduce_sum(x)
  with tf.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.
    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    shape = np.array([7, 9, 2], dtype=np.int64)
    print(sess.run(y, feed_dict={
      x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
    print(sess.run(y, feed_dict={
      x: (indices, values, shape)}))  # Will succeed.
    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
    sp_value = sp.eval(session=sess)
    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
  ```

  Args:
    dtype: The type of `values` elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a sparse tensor of any shape.
    name: A name for prefixing the operations (optional).

  Returns:
    A `SparseTensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  def _sub_name(suffix):
    # Prefix each component placeholder with `name`, when one was given.
    return (name + "/" + suffix) if name is not None else None
  shape = _normalize_sparse_shape(shape, _sub_name("shape"))
  if shape is None:
    # Unknown dense shape: feed it at run time through its own placeholder.
    shape = placeholder(dtypes.int64, shape=[None], name=_sub_name("shape"))
  return sparse_tensor.SparseTensor(
      values=placeholder(dtype, shape=[None], name=_sub_name("values")),
      indices=placeholder(dtypes.int64, shape=[None, None],
                          name=_sub_name("indices")),
      dense_shape=shape)
# pylint: enable=redefined-outer-name
def pad(tensor, paddings, mode="CONSTANT", name=None):  # pylint: disable=invalid-name
  """Pads a tensor.

  This operation pads a `tensor` according to the `paddings` you specify.
  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
  `tensor`.  For each dimension D of `input`, `paddings[D, 0]` indicates how
  many values to add before the contents of `tensor` in that dimension, and
  `paddings[D, 1]` indicates how many values to add after the contents of
  `tensor` in that dimension.  If `mode` is "REFLECT" then both `paddings[D, 0]`
  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`.  If
  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
  no greater than `tensor.dim_size(D)`.

  The padded size of each dimension D of the output is:

      paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]

  For example:

  ```python
  # 't' is [[1, 2, 3], [4, 5, 6]].
  # 'paddings' is [[1, 1,], [2, 2]].
  # rank of 't' is 2.
  pad(t, paddings, "CONSTANT") ==> [[0, 0, 0, 0, 0, 0, 0],
                                    [0, 0, 1, 2, 3, 0, 0],
                                    [0, 0, 4, 5, 6, 0, 0],
                                    [0, 0, 0, 0, 0, 0, 0]]
  pad(t, paddings, "REFLECT") ==> [[6, 5, 4, 5, 6, 5, 4],
                                   [3, 2, 1, 2, 3, 2, 1],
                                   [6, 5, 4, 5, 6, 5, 4],
                                   [3, 2, 1, 2, 3, 2, 1]]
  pad(t, paddings, "SYMMETRIC") ==> [[2, 1, 1, 2, 3, 3, 2],
                                     [2, 1, 1, 2, 3, 3, 2],
                                     [5, 4, 4, 5, 6, 6, 5],
                                     [5, 4, 4, 5, 6, 6, 5]]
  ```

  Args:
    tensor: A `Tensor`.
    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.

  Raises:
    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
  """
  # Convert lower/mixed case to upper for NumPy compatibility
  # NumPy uses all lower-case modes.
  mode = mode.upper()
  if mode == "CONSTANT":
    return gen_array_ops._pad(tensor, paddings, name=name)
  if mode in ("REFLECT", "SYMMETRIC"):
    # Both reflective modes map onto the same generated op, distinguished
    # only by its `mode` attr.
    return gen_array_ops._mirror_pad(tensor, paddings, mode=mode, name=name)
  raise ValueError("Unknown padding mode: %s" % mode)
def meshgrid(*args, **kwargs):
  """Broadcasts parameters for evaluation on an N-D grid.

  Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
  of N-D coordinate arrays for evaluating expressions on an N-D grid.

  Notes:
  `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
  When the `indexing` argument is set to 'xy' (the default), the broadcasting
  instructions for the first two dimensions are swapped.

  Examples:
  Calling `X, Y = meshgrid(x, y)` with the tensors

  ```prettyprint
  x = [1, 2, 3]
  y = [4, 5, 6]
  ```

  results in

  ```prettyprint
  X = [[1, 1, 1],
       [2, 2, 2],
       [3, 3, 3]]
  Y = [[4, 5, 6],
       [4, 5, 6],
       [4, 5, 6]]
  ```

  Args:
    *args: `Tensor`s with rank 1
    indexing: Either 'xy' or 'ij' (optional, default: 'xy')
    name: A name for the operation (optional).

  Returns:
    outputs: A list of N `Tensor`s with rank N
  """
  indexing = kwargs.pop("indexing", "xy")
  name = kwargs.pop("name", "meshgrid")
  if kwargs:
    key = list(kwargs.keys())[0]
    raise TypeError("'{}' is an invalid keyword argument "
                    "for this function".format(key))
  if indexing not in ("xy", "ij"):
    raise ValueError("indexing parameter must be either 'xy' or 'ij'")
  with ops.name_scope(name, "meshgrid", args) as name:
    ndim = len(args)
    s0 = (1,) * ndim
    # Reshape each input so that axis i carries its data and every other axis
    # has size 1; the multiplication below broadcasts them to the full grid.
    output = [
        reshape(stack(x), s0[:i] + (-1,) + s0[i + 1:])
        for i, x in enumerate(args)
    ]
    # Per-axis sizes used to build the broadcasting multiplier.
    shapes = [size(x) for x in args]
    output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
    if indexing == "xy" and ndim > 1:
      # Cartesian indexing swaps the roles of the first two dimensions.
      output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
      output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
      shapes[0], shapes[1] = shapes[1], shapes[0]
    # TODO: improve performance with a broadcast
    mult_fact = ones(shapes, output_dtype)
    return [x * mult_fact for x in output]
# Sentinel values used by the strided-slice size computation below: an axis
# marked NEW_AXIS is inserted by the slice spec, and one marked SHRINK_AXIS
# is removed (indexed by a scalar).
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
unknown = None # Document what None means here.
use_full_range = None # Document other use of None.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
  # it is a vector of non-negative integers, and (ii) doing so allows
  # us to handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  # TileGrad shrinks each input dimension by its multiple.
  output_dims = [dim // multiple
                 for dim, multiple in zip(input_shape.dims, multiples.dims)]
  return [tensor_shape.TensorShape(output_dims)]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
  """Computes the Levenshtein distance between sequences.

  This operation takes variable-length sequences (`hypothesis` and `truth`),
  each provided as a `SparseTensor`, and computes the Levenshtein distance.
  You can normalize the edit distance by length of `truth` by setting
  `normalize` to true.

  For example, given the following input:

  ```python
  # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
  #   (0,0) = ["a"]
  #   (1,0) = ["b"]
  hypothesis = tf.SparseTensor(
      [[0, 0, 0],
       [1, 0, 0]],
      ["a", "b"],
      (2, 1, 1))
  # 'truth' is a tensor of shape `[2, 2]` with variable-length values:
  #   (0,0) = []
  #   (0,1) = ["a"]
  #   (1,0) = ["b", "c"]
  #   (1,1) = ["a"]
  truth = tf.SparseTensor(
      [[0, 1, 0],
       [1, 0, 0],
       [1, 0, 1],
       [1, 1, 0]],
      ["a", "b", "c", "a"],
      (2, 2, 2))
  normalize = True
  ```

  This operation would return the following:

  ```python
  # 'output' is a tensor of shape `[2, 2]` with edit distances normalized
  # by 'truth' lengths.
  output ==> [[inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
              [0.5, 1.0]]  # (1,0): addition, (1,1): no hypothesis
  ```

  Args:
    hypothesis: A `SparseTensor` containing hypothesis sequences.
    truth: A `SparseTensor` containing truth sequences.
    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
      length of `truth.`
    name: A name for the operation (optional).

  Returns:
    A dense `Tensor` with rank `R - 1`, where R is the rank of the
    `SparseTensor` inputs `hypothesis` and `truth`.

  Raises:
    TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
  """
  # Both inputs must be sparse; SparseTensorValue (the feed-time value form)
  # is accepted as well.
  if not isinstance(
      hypothesis, (sparse_tensor.SparseTensor,
                   sparse_tensor.SparseTensorValue)):
    raise TypeError("Hypothesis must be a SparseTensor.")
  if not isinstance(
      truth, (sparse_tensor.SparseTensor,
              sparse_tensor.SparseTensorValue)):
    raise TypeError("Truth must be a SparseTensor.")
  return gen_array_ops._edit_distance(hypothesis.indices,
                                      hypothesis.values,
                                      hypothesis.dense_shape,
                                      truth.indices,
                                      truth.values,
                                      truth.dense_shape,
                                      normalize=normalize,
                                      name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxArgs op.

  Delegates to the generated gradient kernel, passing the incoming gradient
  and the op's original input tensor.
  """
  return fake_quant_with_min_max_args_gradient(grad, op.inputs[0])
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxVars op.

  Delegates to the generated gradient kernel with the incoming gradient and
  the op's three inputs (the tensor being quantized, min, and max).
  """
  return fake_quant_with_min_max_vars_gradient(grad, op.inputs[0], op.inputs[1],
                                               op.inputs[2])
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxVarsPerChannel op.

  Delegates to the generated gradient kernel with the incoming gradient and
  the op's three inputs (the tensor being quantized, min, and max).
  """
  return fake_quant_with_min_max_vars_per_channel_gradient(grad, op.inputs[0],
                                                           op.inputs[1],
                                                           op.inputs[2])
def required_space_to_batch_paddings(input_shape,
                                     block_shape,
                                     base_paddings=None,
                                     name=None):
  """Calculate padding required to make block_shape divide input_shape.

  This function can be used to calculate a suitable paddings argument for use
  with space_to_batch_nd and batch_to_space_nd.

  Args:
    input_shape: int32 Tensor of shape [N].
    block_shape: int32 Tensor of shape [N].
    base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
      amount of padding to use. All elements must be >= 0. If not specified,
      defaults to 0.
    name: string. Optional name prefix.

  Returns:
    (paddings, crops), where:

    `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
    satisfying:

        paddings[i, 0] = base_paddings[i, 0].
        0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
        (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0

        crops[i, 0] = 0
        crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]

  Raises: ValueError if called with incompatible shapes.
  """
  with ops.name_scope(name, "required_space_to_batch_paddings",
                      [input_shape, block_shape]):
    input_shape = ops.convert_to_tensor(input_shape,
                                        dtype=dtypes.int32,
                                        name="input_shape")
    block_shape = ops.convert_to_tensor(block_shape,
                                        dtype=dtypes.int32,
                                        name="block_shape")

    # The number of block dimensions must be known statically, because the
    # result rows are assembled with a Python loop below.
    block_shape.get_shape().assert_is_fully_defined()
    block_shape.get_shape().assert_has_rank(1)
    num_block_dims = block_shape.get_shape()[0].value
    if num_block_dims == 0:
      # Degenerate case: no block dimensions, so nothing to pad or crop.
      return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)

    input_shape.get_shape().assert_is_compatible_with([num_block_dims])

    if base_paddings is not None:
      base_paddings = ops.convert_to_tensor(base_paddings,
                                            dtype=dtypes.int32,
                                            name="base_paddings")
      base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
    else:
      # No minimum padding requested: default to all-zero base paddings.
      base_paddings = zeros([num_block_dims, 2], dtypes.int32)

    # If every input is statically known, swap in the constant values so the
    # arithmetic below is done on numpy arrays and the result is constant.
    const_block_shape = tensor_util.constant_value(block_shape)
    const_input_shape = tensor_util.constant_value(input_shape)
    const_base_paddings = tensor_util.constant_value(base_paddings)
    if (const_block_shape is not None and const_input_shape is not None and
        const_base_paddings is not None):
      block_shape = const_block_shape
      input_shape = const_input_shape
      base_paddings = const_base_paddings

    # Use same expression for both constant and non-constant case.
    pad_start = base_paddings[:, 0]
    orig_pad_end = base_paddings[:, 1]
    full_input_shape = input_shape + pad_start + orig_pad_end
    # Extra end padding needed so the padded size of each dimension is an
    # exact multiple of the corresponding block dimension.
    pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
    pad_end = orig_pad_end + pad_end_extra

    result_paddings = stack(
        [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
        name="paddings")
    result_crops = stack(
        [[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
    return result_paddings, result_crops
def space_to_batch(input, paddings, block_size, name=None):  # pylint: disable=redefined-builtin
  # 2-D convenience wrapper around the N-D op: use a square block shape of
  # [block_size, block_size], then pin the static rank of the result to 4.
  block_shape = np.array([block_size, block_size], dtype=np.int64)
  output = space_to_batch_nd(input,
                             paddings=paddings,
                             block_shape=block_shape,
                             name=name)
  output.set_shape(output.get_shape().with_rank(4))
  return output


# Reuse the generated op's docstring for the public wrapper.
space_to_batch.__doc__ = gen_array_ops._space_to_batch.__doc__
def batch_to_space(input, crops, block_size, name=None):  # pylint: disable=redefined-builtin
  # 2-D convenience wrapper around the N-D op: use a square block shape of
  # [block_size, block_size], then pin the static rank of the result to 4.
  block_shape = np.array([block_size, block_size], dtype=np.int64)
  output = batch_to_space_nd(input,
                             crops=crops,
                             block_shape=block_shape,
                             name=name)
  output.set_shape(output.get_shape().with_rank(4))
  return output


# Reuse the generated op's docstring for the public wrapper.
batch_to_space.__doc__ = gen_array_ops._batch_to_space.__doc__
def one_hot(indices, depth, on_value=None, off_value=None,
            axis=None, dtype=None, name=None):
  """Returns a one-hot tensor.

  The locations represented by indices in `indices` take value `on_value`,
  while all other locations take value `off_value`.

  `on_value` and `off_value` must have matching data types. If `dtype` is also
  provided, they must be the same data type as specified by `dtype`.

  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`

  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`

  If the input `indices` is rank `N`, the output will have rank `N+1`. The
  new axis is created at dimension `axis` (default: the new axis is appended
  at the end).

  If `indices` is a scalar the output shape will be a vector of length `depth`

  If `indices` is a vector of length `features`, the output shape will be:

  ```
    features x depth if axis == -1
    depth x features if axis == 0
  ```

  If `indices` is a matrix (batch) with shape `[batch, features]`, the output
  shape will be:

  ```
    batch x features x depth if axis == -1
    batch x depth x features if axis == 1
    depth x batch x features if axis == 0
  ```

  If `dtype` is not provided, it will attempt to assume the data type of
  `on_value` or `off_value`, if one or both are passed in. If none of
  `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
  value `tf.float32`.

  Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
  etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.

  Examples
  =========

  Suppose that

  ```python
    indices = [0, 2, -1, 1]
    depth = 3
    on_value = 5.0
    off_value = 0.0
    axis = -1
  ```

  Then output is `[4 x 3]`:

  ```python
    output =
    [5.0 0.0 0.0]  // one_hot(0)
    [0.0 0.0 5.0]  // one_hot(2)
    [0.0 0.0 0.0]  // one_hot(-1)
    [0.0 5.0 0.0]  // one_hot(1)
  ```

  Suppose that

  ```python
    indices = [[0, 2], [1, -1]]
    depth = 3
    on_value = 1.0
    off_value = 0.0
    axis = -1
  ```

  Then output is `[2 x 2 x 3]`:

  ```python
    output =
    [
      [1.0, 0.0, 0.0]  // one_hot(0)
      [0.0, 0.0, 1.0]  // one_hot(2)
    ][
      [0.0, 1.0, 0.0]  // one_hot(1)
      [0.0, 0.0, 0.0]  // one_hot(-1)
    ]
  ```

  Using default values for `on_value` and `off_value`:

  ```python
    indices = [0, 1, 2]
    depth = 3
  ```

  The output will be

  ```python
    output =
    [[1., 0., 0.],
     [0., 1., 0.],
     [0., 0., 1.]]
  ```

  Args:
    indices: A `Tensor` of indices.
    depth: A scalar defining the depth of the one hot dimension.
    on_value: A scalar defining the value to fill in output when `indices[j]
      = i`. (default: 1)
    off_value: A scalar defining the value to fill in output when `indices[j]
      != i`. (default: 0)
    axis: The axis to fill (default: -1, a new inner-most axis).
    dtype: The data type of the output tensor.

  Returns:
    output: The one-hot tensor.

  Raises:
    TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`
    TypeError: If dtype of `on_value` and `off_value` don't match one another
  """
  with ops.name_scope(name, "one_hot", [indices, depth, on_value, off_value,
                                        axis, dtype]) as name:
    on_exists = on_value is not None
    off_exists = off_value is not None

    # Resolve the dtypes of the fill values that were actually provided
    # (None when absent); used below for consistency checks.
    on_dtype = ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists \
        else None
    off_dtype = ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\
        else None

    if on_exists or off_exists:
      if dtype is not None:
        # Ensure provided on_value and/or off_value match dtype
        if (on_exists and on_dtype != dtype):
          raise TypeError("dtype {0} of on_value does not match " \
                          "dtype parameter {1}".format(on_dtype, dtype))
        if (off_exists and off_dtype != dtype):
          raise TypeError("dtype {0} of off_value does not match " \
                          "dtype parameter {1}".format(off_dtype, dtype))
      else:
        # dtype not provided: automatically assign it
        dtype = on_dtype if on_exists else off_dtype
    elif dtype is None:
      # None of on_value, off_value, or dtype provided. Default dtype to float32
      dtype = dtypes.float32

    if not on_exists:
      # on_value not provided: assign to value 1 of type dtype
      on_value = ops.convert_to_tensor(1, dtype, name="on_value")
      on_dtype = dtype
    if not off_exists:
      # off_value not provided: assign to value 0 of type dtype
      off_value = ops.convert_to_tensor(0, dtype, name="off_value")
      off_dtype = dtype

    # Both fill values must share a dtype, whether defaulted or provided.
    if on_dtype != off_dtype:
      raise TypeError("dtype {0} of on_value does not match " \
                      "dtype {1} of off_value".format(on_dtype, off_dtype))

    return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
                                  name)
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
  """Return a mask tensor representing the first N positions of each row.

  Example:

  ```python
  tf.sequence_mask([1, 3, 2], 5) =
    [[True, False, False, False, False],
     [True, True, True, False, False],
     [True, True, False, False, False]]
  ```

  Args:
    lengths: 1D integer tensor, all its values < maxlen.
    maxlen: scalar integer tensor, maximum length of each row. Default: use
      maximum over lengths.
    dtype: output type of the resulting tensor.
    name: name of the op.

  Returns:
    A 2D mask tensor, as shown in the example above, cast to specified dtype.

  Raises:
    ValueError: if the arguments have invalid rank.
  """
  with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
    lengths = ops.convert_to_tensor(lengths)
    if lengths.get_shape().ndims != 1:
      raise ValueError("lengths must be 1D for sequence_mask")

    if maxlen is None:
      # Default the number of mask columns to the largest length.
      maxlen = gen_math_ops._max(lengths, [0])
    else:
      maxlen = ops.convert_to_tensor(maxlen)
    if maxlen.get_shape().ndims != 0:
      raise ValueError("maxlen must be scalar for sequence_mask")

    # The basic idea is to compare a range row vector of size maxlen:
    # [0, 1, 2, 3, 4]
    # to length as a matrix with 1 column: [[1], [3], [2]].
    # Because of broadcasting on both arguments this comparison results
    # in a matrix of size (len(lengths), maxlen)
    row_vector = gen_math_ops._range(constant(0, maxlen.dtype),
                                     maxlen,
                                     constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen as a cast
    # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.
    matrix = gen_math_ops.cast(expand_dims(lengths, 1), maxlen.dtype)
    result = row_vector < matrix

    # Only cast when the caller asked for a dtype other than the op's bool.
    if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
      return result
    else:
      return gen_math_ops.cast(result, dtype)
def squeeze(input, axis=None, name=None, squeeze_dims=None):
  # pylint: disable=redefined-builtin
  """Removes dimensions of size 1 from the shape of a tensor.

  Given a tensor `input`, returns a tensor of the same type with all
  dimensions of size 1 removed, or — when `axis` is given — with only the
  listed size-1 dimensions removed.

  For example:

  ```prettyprint
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  shape(squeeze(t)) ==> [2, 3]
  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
  ```

  Args:
    input: A `Tensor`. The `input` to squeeze.
    axis: An optional list of `ints`. Defaults to `[]`. If specified, only
      squeezes the dimensions listed; the dimension index starts at 0 and
      it is an error to squeeze a dimension that is not 1.
    name: A name for the operation (optional).
    squeeze_dims: Deprecated keyword argument that is now axis.

  Returns:
    A `Tensor` with the same type and data as `input`, but with one or more
    dimensions of size 1 removed.

  Raises:
    ValueError: When both `squeeze_dims` and `axis` are specified.
  """
  # Honor the deprecated alias, but refuse ambiguous calls that set both.
  if squeeze_dims is not None:
    if axis is not None:
      raise ValueError("Cannot specify both 'squeeze_dims' and 'axis'")
    axis = squeeze_dims
  # The generated op expects a list of dims; promote a bare scalar.
  axis = [axis] if np.isscalar(axis) else axis
  return gen_array_ops._squeeze(input, axis, name)
def where(condition, x=None, y=None, name=None):
  """Return the elements, either from `x` or `y`, depending on the `condition`.

  With `x` and `y` both omitted, this returns the coordinates of the true
  elements of `condition` as a 2-D tensor: one row per true element (so the
  first dimension varies with the input's contents) and one column per
  dimension of `condition`, in row-major order.

  With `x` and `y` both supplied, it acts as an element- or row-wise
  selector. `condition` must be a scalar when `x` and `y` are scalars;
  otherwise it must either have the same shape as `x` (element-wise choice)
  or be a vector matching `x`'s first dimension (row-wise choice). Output
  positions where the condition is true are copied from `x`, the rest from
  `y`.

  Args:
    condition: A `Tensor` of type `bool`.
    x: A Tensor which may have the same shape as `condition`. If `condition`
      is rank 1, `x` may have higher rank, but its first dimension must match
      the size of `condition`.
    y: A `tensor` with the same shape and type as `x`.
    name: A name of the operation (optional).

  Returns:
    A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
    Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.

  Raises:
    ValueError: When exactly one of `x` or `y` is non-None.
  """
  # Guard clause: x and y must be supplied together or not at all.
  if (x is None) != (y is None):
    raise ValueError("x and y must both be non-None or both be None.")
  if x is None:
    return gen_array_ops.where(input=condition, name=name)
  return gen_math_ops._select(condition=condition, t=x, e=y, name=name)
def reverse(tensor, axis, name=None):
  # Thin public alias for the v2 reverse op (takes axis indices rather than
  # the v1 op's boolean dims mask).
  return gen_array_ops.reverse_v2(tensor, axis, name)


# Reuse the generated op's docstring so the alias stays in sync with it.
reverse.__doc__ = gen_array_ops.reverse_v2.__doc__
# pylint: disable=redefined-builtin
def reverse_sequence(input,
                     seq_lengths,
                     seq_axis=None,
                     batch_axis=None,
                     name=None,
                     seq_dim=None,
                     batch_dim=None):
  # seq_dim/batch_dim are deprecated aliases of seq_axis/batch_axis; resolve
  # whichever one the caller supplied before delegating to the generated op.
  seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
                                                    "seq_dim", seq_dim)
  batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
                                                      "batch_dim", batch_dim)
  return gen_array_ops.reverse_sequence(
      input=input,
      seq_lengths=seq_lengths,
      seq_dim=seq_axis,
      batch_dim=batch_axis,
      name=name)
# pylint: enable=redefined-builtin


# Expose the generated op's docstring with the deprecated argument names
# rewritten to the new axis-based names.
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
    "seq_dim", "seq_axis")
| apache-2.0 |
ptemplier/ansible | lib/ansible/modules/cloud/amazon/sqs_queue.py | 26 | 10118 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = """
---
module: sqs_queue
short_description: Creates or deletes AWS SQS queues.
description:
- Create or delete AWS SQS queues.
- Update attributes on existing queues.
version_added: "2.0"
author:
- Alan Loi (@loia)
- Fernando Jose Pando (@nand0p)
- Nadir Lloret (@nadirollo)
requirements:
- "boto >= 2.33.0"
options:
state:
description:
- Create or delete the queue
required: false
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the queue.
required: true
default_visibility_timeout:
description:
- The default visibility timeout in seconds.
required: false
default: null
message_retention_period:
description:
- The message retention period in seconds.
required: false
default: null
maximum_message_size:
description:
- The maximum message size in bytes.
required: false
default: null
delivery_delay:
description:
- The delivery delay in seconds.
required: false
default: null
receive_message_wait_time:
description:
- The receive message wait time in seconds.
required: false
default: null
policy:
description:
- The json dict policy to attach to queue
required: false
default: null
version_added: "2.1"
redrive_policy:
description:
- json dict with the redrive_policy (see example)
required: false
default: null
version_added: "2.2"
extends_documentation_fragment:
- aws
- ec2
"""
RETURN = '''
default_visibility_timeout:
description: The default visibility timeout in seconds.
type: int
returned: always
sample: 30
delivery_delay:
description: The delivery delay in seconds.
type: int
returned: always
sample: 0
maximum_message_size:
description: The maximum message size in bytes.
type: int
returned: always
sample: 262144
message_retention_period:
description: The message retention period in seconds.
type: int
returned: always
sample: 345600
name:
description: Name of the SQS Queue
type: string
returned: always
sample: "queuename-987d2de0"
queue_arn:
description: The queue's Amazon resource name (ARN).
type: string
returned: on successful creation or update of the queue
sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
receive_message_wait_time:
description: The receive message wait time in seconds.
type: int
returned: always
sample: 0
region:
description: Region that the queue was created within
type: string
returned: always
sample: 'us-east-1'
'''
EXAMPLES = '''
# Create SQS queue with redrive policy
- sqs_queue:
name: my-queue
region: ap-southeast-2
default_visibility_timeout: 120
message_retention_period: 86400
maximum_message_size: 1024
delivery_delay: 30
receive_message_wait_time: 20
policy: "{{ json_dict }}"
redrive_policy:
maxReceiveCount: 5
deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
# Delete SQS queue
- sqs_queue:
name: my-queue
region: ap-southeast-2
state: absent
'''
import json
import traceback
try:
import boto.sqs
from boto.exception import BotoServerError, NoAuthHandlerFound
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def create_or_update_sqs_queue(connection, module):
    """Ensure the named SQS queue exists with the requested attributes.

    Creates the queue when missing, otherwise updates any attributes that
    differ. Always terminates the module run: exit_json on success (the
    result includes 'changed' and, outside check mode, the queue's effective
    settings) or fail_json on a boto server error.
    """
    queue_name = module.params.get('name')

    queue_attributes = dict(
        default_visibility_timeout=module.params.get('default_visibility_timeout'),
        message_retention_period=module.params.get('message_retention_period'),
        maximum_message_size=module.params.get('maximum_message_size'),
        delivery_delay=module.params.get('delivery_delay'),
        receive_message_wait_time=module.params.get('receive_message_wait_time'),
        policy=module.params.get('policy'),
        redrive_policy=module.params.get('redrive_policy')
    )

    result = dict(
        region=module.params.get('region'),
        name=queue_name,
    )
    result.update(queue_attributes)

    try:
        queue = connection.get_queue(queue_name)
        if queue:
            # Update existing
            result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes)
        else:
            # Create new
            if not module.check_mode:
                queue = connection.create_queue(queue_name)
                update_sqs_queue(queue, **queue_attributes)
            # A missing queue always implies a change, even in check mode.
            result['changed'] = True

        if not module.check_mode:
            # Report the queue's effective settings back to the caller.
            result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn']
            result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout']
            result['message_retention_period'] = queue.get_attributes('MessageRetentionPeriod')['MessageRetentionPeriod']
            result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize']
            result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds']
            result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds']

    except BotoServerError:
        result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def update_sqs_queue(queue,
                     check_mode=False,
                     default_visibility_timeout=None,
                     message_retention_period=None,
                     maximum_message_size=None,
                     delivery_delay=None,
                     receive_message_wait_time=None,
                     policy=None,
                     redrive_policy=None):
    """Apply the requested attribute values to an existing queue.

    Each attribute is only written when it differs from the queue's current
    value; None values are skipped. Returns True if at least one attribute
    was (or, in check mode, would have been) changed.
    """
    attribute_values = (
        ('VisibilityTimeout', default_visibility_timeout),
        ('MessageRetentionPeriod', message_retention_period),
        ('MaximumMessageSize', maximum_message_size),
        ('DelaySeconds', delivery_delay),
        ('ReceiveMessageWaitTimeSeconds', receive_message_wait_time),
        ('Policy', policy),
        ('RedrivePolicy', redrive_policy),
    )
    changed = False
    for attribute, value in attribute_values:
        # Call the setter first so every attribute is still processed even
        # after an earlier one has already flagged a change.
        changed = set_queue_attribute(queue, attribute, value,
                                      check_mode=check_mode) or changed
    return changed
def set_queue_attribute(queue, attribute, value, check_mode=False):
    """Set a single queue attribute if it differs from the current value.

    Args:
        queue: boto SQS Queue (or compatible object) to update.
        attribute: SQS attribute name, e.g. 'VisibilityTimeout'.
        value: Desired value. Falsy values (None, 0, '') are skipped entirely
            to preserve the module's "unset means leave alone" contract.
        check_mode: When True, report whether a change would be made without
            actually writing it.

    Returns:
        True if the attribute was (or, in check mode, would be) changed,
        False otherwise.
    """
    if not value:
        return False

    try:
        existing_value = queue.get_attributes(attributes=attribute)[attribute]
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; treat any lookup failure as "attribute unset".
        existing_value = ''

    # convert dict attributes to JSON strings (sort keys for comparing)
    if attribute in ['Policy', 'RedrivePolicy']:
        value = json.dumps(value, sort_keys=True)
        if existing_value:
            existing_value = json.dumps(json.loads(existing_value), sort_keys=True)

    if str(value) != existing_value:
        if not check_mode:
            queue.set_attribute(attribute, value)
        return True

    return False
def delete_sqs_queue(connection, module):
    """Delete the named SQS queue if it exists.

    Always terminates the module run: exit_json with 'changed' reflecting
    whether a queue was (or, in check mode, would be) removed, or fail_json
    on a boto server error.
    """
    name = module.params.get('name')
    result = dict(
        region=module.params.get('region'),
        name=name,
    )

    try:
        existing = connection.get_queue(name)
        if not existing:
            result['changed'] = False
        else:
            if not module.check_mode:
                connection.delete_queue(existing)
            result['changed'] = True
    except BotoServerError:
        result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def main():
    """Module entry point: parse arguments, connect to SQS, and dispatch.

    Fails early when boto is missing or no region is resolvable, then routes
    to create_or_update_sqs_queue or delete_sqs_queue based on 'state'.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        default_visibility_timeout=dict(type='int'),
        message_retention_period=dict(type='int'),
        maximum_message_size=dict(type='int'),
        delivery_delay=dict(type='int'),
        receive_message_wait_time=dict(type='int'),
        policy=dict(type='dict', required=False),
        redrive_policy=dict(type='dict', required=False),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    # SQS endpoints are regional, so a region is mandatory.
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    if state == 'present':
        create_or_update_sqs_queue(connection, module)
    elif state == 'absent':
        delete_sqs_queue(connection, module)


if __name__ == '__main__':
    main()
| gpl-3.0 |
hackebrot/cookiecutter | tests/test_get_config.py | 2 | 3890 | # -*- coding: utf-8 -*-
import os
import pytest
from cookiecutter import config
from cookiecutter.exceptions import (
ConfigDoesNotExistException, InvalidConfiguration
)
def test_merge_configs():
    """Verify merge_configs merges nested dict settings instead of replacing.

    Top-level scalar settings come from the defaults unless overridden, while
    the nested 'default_context' and 'abbreviations' dicts are merged key by
    key (user values win on conflicts such as 'gl').
    """
    default = {
        'cookiecutters_dir': '/home/example/some-path-to-templates',
        'replay_dir': '/home/example/some-path-to-replay-files',
        'default_context': {},
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
        }
    }

    user_config = {
        'default_context': {
            'full_name': 'Raphael Pierzina',
            'github_username': 'hackebrot',
        },
        'abbreviations': {
            'gl': 'https://gitlab.com/hackebrot/{0}.git',
            'pytest-plugin': 'https://github.com/pytest-dev/pytest-plugin.git',
        }
    }

    expected_config = {
        'cookiecutters_dir': '/home/example/some-path-to-templates',
        'replay_dir': '/home/example/some-path-to-replay-files',
        'default_context': {
            'full_name': 'Raphael Pierzina',
            'github_username': 'hackebrot',
        },
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/hackebrot/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
            'pytest-plugin': 'https://github.com/pytest-dev/pytest-plugin.git',
        }
    }

    assert config.merge_configs(default, user_config) == expected_config
def test_get_config():
    """
    Opening and reading config file

    Loads the valid fixture file and checks every section round-trips.
    """
    conf = config.get_config('tests/test-config/valid-config.yaml')
    expected_conf = {
        'cookiecutters_dir': '/home/example/some-path-to-templates',
        'replay_dir': '/home/example/some-path-to-replay-files',
        'default_context': {
            'full_name': 'Firstname Lastname',
            'email': 'firstname.lastname@gmail.com',
            'github_username': 'example'
        },
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
            'helloworld': 'https://github.com/hackebrot/helloworld'
        }
    }
    assert conf == expected_conf
def test_get_config_does_not_exist():
    """Getting a missing config file must raise ConfigDoesNotExistException."""
    missing_path = 'tests/test-config/this-does-not-exist.yaml'
    with pytest.raises(ConfigDoesNotExistException):
        config.get_config(missing_path)
def test_invalid_config():
    """A config file with malformed YAML must raise InvalidConfiguration."""
    bad_path = 'tests/test-config/invalid-config.yaml'
    with pytest.raises(InvalidConfiguration) as excinfo:
        config.get_config(bad_path)
    # The message should name the offending file; the parser detail after
    # 'Error: ' varies between YAML library versions.
    assert (
        'Unable to parse YAML file '
        'tests/test-config/invalid-config.yaml. '
        'Error: '
    ) in str(excinfo.value)
def test_get_config_with_defaults():
    """
    A config file that overrides 1 of 3 defaults

    The partial fixture sets only 'default_context'/'abbreviations'; the
    directory settings must fall back to the built-in home-dir defaults.
    """
    conf = config.get_config('tests/test-config/valid-partial-config.yaml')
    default_cookiecutters_dir = os.path.expanduser('~/.cookiecutters/')
    default_replay_dir = os.path.expanduser('~/.cookiecutter_replay/')
    expected_conf = {
        'cookiecutters_dir': default_cookiecutters_dir,
        'replay_dir': default_replay_dir,
        'default_context': {
            'full_name': 'Firstname Lastname',
            'email': 'firstname.lastname@gmail.com',
            'github_username': 'example'
        },
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
        }
    }
    assert conf == expected_conf
| bsd-3-clause |
Cryptophobia/ansible | lib/ansible/utils/module_docs_fragments/aws.py | 232 | 3156 | # (c) 2014, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable documentation fragment for AWS modules.

    Modules pull this in via 'extends_documentation_fragment: aws'; Ansible's
    doc tooling splices the DOCUMENTATION YAML below into each module's own
    documentation, so the common credential/connection options are documented
    in one place.
    """

    # AWS only documentation fragment
    DOCUMENTATION = """
options:
  ec2_url:
    description:
      - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Ignored for modules where region is required. Must be specified for all other modules if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
    required: false
    default: null
    aliases: []
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_access_key', 'access_key' ]
  security_token:
    description:
      - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
    required: false
    default: null
    aliases: [ 'access_token' ]
    version_added: "1.6"
  validate_certs:
    description:
      - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
    required: false
    default: "yes"
    choices: ["yes", "no"]
    aliases: []
    version_added: "1.5"
  profile:
    description:
      - uses a boto profile. Only works with boto >= 2.24.0
    required: false
    default: null
    aliases: []
    version_added: "1.6"
requirements:
  - "python >= 2.6"
  - boto
notes:
  - If parameters are not set within the module, the following
    environment variables can be used in decreasing order of precedence
    C(AWS_URL) or C(EC2_URL),
    C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
    C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
    C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
    C(AWS_REGION) or C(EC2_REGION)
  - Ansible uses the boto configuration file (typically ~/.boto) if no
    credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
  - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
    AWS region, when required, but this can also be configured in the boto config file
"""
| gpl-3.0 |
sheridancbio/cbioportal | core/src/test/scripts/system_tests_validate_data.py | 4 | 11621 | #!/usr/bin/env python3
'''
Copyright (c) 2016 The Hyve B.V.
This code is licensed under the GNU Affero General Public License (AGPL),
version 3, or (at your option) any later version.
'''
import unittest
import logging
import tempfile
import os
import shutil
import time
import difflib
from importer import validateData
try:
WindowsError
except NameError:
WindowsError = None
# globals:
PORTAL_INFO_DIR = 'test_data/api_json_system_tests'
class ValidateDataSystemTester(unittest.TestCase):
'''Test cases around running the complete validateData script
(such as "does it return the correct exit status?" or "does it generate
the html report when requested?", etc)
'''
    def setUp(self):
        """Reset shared validator state before each test case runs."""
        _resetClassVars()
        # Prepare global variables related to sample profiled for mutations and gene panels
        # NOTE(review): these instance attributes mirror the validateData
        # module globals reset in tearDown — presumably kept in sync on
        # purpose; confirm against validateData.
        self.mutation_sample_ids = None
        self.mutation_file_sample_ids = set()
        self.fusion_file_sample_ids = set()
    def tearDown(self):
        """Close logging handlers after running validator and remove tmpdir."""
        # restore original function
        validateData.mutation_sample_ids = None
        validateData.mutation_file_sample_ids = set()
        validateData.fusion_file_sample_ids = set()
        # get the logger used in validateData.main_validate()
        validator_logger = logging.getLogger(validateData.__name__)
        # flush and close all handlers of this logger
        for logging_handler in validator_logger.handlers:
            logging_handler.close()
        # remove the handlers from the logger to reset it, so the next test
        # starts with a clean logging configuration
        validator_logger.handlers = []
        super(ValidateDataSystemTester, self).tearDown()
    def assertFileGenerated(self, tmp_file_name, expected_file_name):
        """Assert that a file has been generated with the expected contents.

        Fails with a context diff between the reference file and the
        generated file when they differ; removes the generated file when
        the comparison succeeds.
        """
        self.assertTrue(os.path.exists(tmp_file_name))
        with open(tmp_file_name, 'r') as out_file, \
             open(expected_file_name, 'r') as ref_file:
            base_filename = os.path.basename(tmp_file_name)
            diff_result = difflib.context_diff(
                ref_file.readlines(),
                out_file.readlines(),
                fromfile='Expected {}'.format(base_filename),
                tofile='Generated {}'.format(base_filename))
            diff_line_list = list(diff_result)
            # An empty diff means the generated file matches the reference.
            self.assertEqual(diff_line_list, [],
                             msg='\n' + ''.join(diff_line_list))
        # remove temp file if all is fine:
        try:
            os.remove(tmp_file_name)
        except WindowsError:
            # ignore this Windows specific error...probably happens because of virus scanners scanning the temp file...
            pass
def test_exit_status_success(self):
'''study 0 : no errors, expected exit_status = 0.
If there are errors, the script should return
0: 'succeeded',
1: 'failed',
2: 'not performed as problems occurred',
3: 'succeeded with warnings'
'''
# build up the argument list
print("===study 0")
args = ['--study_directory', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
# execute main function with arguments provided as if from sys.argv
args = validateData.interface(args)
exit_status = validateData.main_validate(args)
self.assertEqual(0, exit_status)
def test_exit_status_failure(self):
'''study 1 : errors, expected exit_status = 1.'''
#Build up arguments and run
print("===study 1")
args = ['--study_directory', 'test_data/study_es_1/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(1, exit_status)
def test_exit_status_invalid(self):
'''test to fail: give wrong hugo file, or let a meta file point to a non-existing data file, expected exit_status = 2.'''
#Build up arguments and run
print("===study invalid")
args = ['--study_directory', 'test_data/study_es_invalid/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(2, exit_status)
def test_exit_status_warnings(self):
'''study 3 : warnings only, expected exit_status = 3.'''
# data_filename: test
#Build up arguments and run
print("===study 3")
args = ['--study_directory', 'test_data/study_es_3/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(3, exit_status)
def test_html_output(self):
'''
Test if html file is correctly generated when 'html_table' is given
'''
#Build up arguments and run
out_file_name = 'test_data/study_es_0/result_report.html~'
args = ['--study_directory', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(0, exit_status)
self.assertFileGenerated(out_file_name,
'test_data/study_es_0/result_report.html')
def test_portal_mismatch(self):
'''Test if validation fails when data contradicts the portal.'''
# build up arguments and run
argv = ['--study_directory', 'test_data/study_portal_mismatch',
'--portal_info_dir', PORTAL_INFO_DIR, '--verbose']
parsed_args = validateData.interface(argv)
exit_status = validateData.main_validate(parsed_args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
# expecting only warnings (about the skipped checks), no errors
self.assertEqual(exit_status, 1)
def test_no_portal_checks(self):
'''Test if validation skips portal-specific checks when instructed.'''
# build up arguments and run
argv = ['--study_directory', 'test_data/study_portal_mismatch',
'--verbose',
'--no_portal_checks']
parsed_args = validateData.interface(argv)
exit_status = validateData.main_validate(parsed_args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
# expecting only warnings (about the skipped checks), no errors
self.assertEqual(exit_status, 3)
def test_problem_in_clinical(self):
'''Test whether the script aborts if the sample file cannot be parsed.
Further files cannot be validated in this case, as all sample IDs will
be undefined. Validate if the script is giving the proper error.
'''
# build the argument list
out_file_name = 'test_data/study_wr_clin/result_report.html~'
print('==test_problem_in_clinical==')
args = ['--study_directory', 'test_data/study_wr_clin/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
# execute main function with arguments provided as if from sys.argv
args = validateData.interface(args)
exit_status = validateData.main_validate(args)
self.assertEqual(1, exit_status)
# TODO - set logger in main_validate and read out buffer here to assert on nr of errors
self.assertFileGenerated(out_file_name,
'test_data/study_wr_clin/result_report.html')
def test_various_issues(self):
'''Test if output is generated for a mix of errors and warnings.
This includes HTML ouput, the error line file and the exit status.
'''
# build the argument list
html_file_name = 'test_data/study_various_issues/result_report.html~'
error_file_name = 'test_data/study_various_issues/error_file.txt~'
args = ['--study_directory', 'test_data/study_various_issues/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', html_file_name,
'--error_file', error_file_name]
args = validateData.interface(args)
# execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
# should fail because of various errors in addition to warnings
self.assertEqual(1, exit_status)
# In MAF files (mutation data) there is a column called
# "Matched_Norm_Sample_Barcode". The respective metadata file supports
# giving a list of sample codes against which this column is validated.
# This and other errors are expected in these output files.
self.assertFileGenerated(
html_file_name,
'test_data/study_various_issues/result_report.html')
self.assertFileGenerated(
error_file_name,
'test_data/study_various_issues/error_file.txt')
def test_files_with_quotes(self):
'''
Tests the scenario where data files contain quotes. This should give errors.
'''
#Build up arguments and run
out_file_name = 'test_data/study_quotes/result_report.html~'
print('==test_files_with_quotes==')
args = ['--study_directory', 'test_data/study_quotes/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
# should fail because of errors with quotes
self.assertEqual(1, exit_status)
self.assertFileGenerated(out_file_name,
'test_data/study_quotes/result_report.html')
def _resetClassVars():
    """Reset class-level state of validators that check multiple files.

    GsvaWiseFileValidator classes verify consistency between multiple data
    files by accumulating information in class variables, which leaks state
    between the simulated studies loaded by these unit tests. To mimic
    real-world behaviour, this must be called before each test that touches
    multi-file consistency checks.
    """
    multi_file_validators = (validateData.GsvaWiseFileValidator,)
    shared_attributes = ('prior_validated_sample_ids',
                         'prior_validated_feature_ids',
                         'prior_validated_header')
    for validator_class in multi_file_validators:
        for attribute in shared_attributes:
            setattr(validator_class, attribute, None)
if __name__ == '__main__':
    # buffer=True captures stdout/stderr during each test and only prints it
    # for failing tests, keeping the validator's console output quiet
    unittest.main(buffer=True)
| agpl-3.0 |
hanselke/erpnext-1 | erpnext/stock/report/itemwise_recommended_reorder_level/itemwise_recommended_reorder_level.py | 52 | 3381 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate, flt
def execute(filters=None):
    """Report entry point: compute a recommended reorder level per item.

    For each item, total outgoing qty = consumed (stock entries with no
    target warehouse) + delivered (delivery notes and POS sales invoices)
    over the filter period; the reorder level is then
    avg daily outgoing * lead_time_days + min_order_qty.

    Returns (columns, data) as expected by Frappe's script report runner.
    """
    if not filters:
        filters = {}
    # BUG FIX: the system default key was misspelled "float_preceision",
    # which always returned None; "float_precision" is the real key.
    float_precision = frappe.db.get_default("float_precision")
    condition = get_condition(filters)

    # inclusive number of days in the reporting period
    diff = ((getdate(filters.get("to_date")) - getdate(filters.get("from_date"))).days) + 1
    if diff <= 0:
        # BUG FIX: the old message ("'From Date' must be after 'To Date'")
        # stated the error condition instead of the requirement
        frappe.throw(_("'To Date' cannot be before 'From Date'"))

    columns = get_columns()
    items = get_item_info()
    consumed_item_map = get_consumed_items(condition)
    delivered_item_map = get_delivered_items(condition)

    data = []
    for item in items:
        total_outgoing = consumed_item_map.get(item.name, 0) + delivered_item_map.get(item.name, 0)
        avg_daily_outgoing = flt(total_outgoing / diff, float_precision)
        reorder_level = (avg_daily_outgoing * flt(item.lead_time_days)) + flt(item.min_order_qty)
        data.append([item.name, item.item_name, item.description, item.min_order_qty,
            item.lead_time_days, consumed_item_map.get(item.name, 0),
            delivered_item_map.get(item.name, 0), total_outgoing,
            avg_daily_outgoing, reorder_level])
    return columns, data
def get_columns():
    """Return the report column definitions in "Label:Fieldtype:Width" form."""
    column_specs = [
        ("Item", "Link/Item", 120),
        ("Item Name", "Data", 120),
        ("Description", "", 160),
        ("Minimum Inventory Level", "Float", 160),
        ("Lead Time Days", "Float", 120),
        ("Consumed", "Float", 120),
        ("Delivered", "Float", 120),
        ("Total Outgoing", "Float", 120),
        ("Avg Daily Outgoing", "Float", 160),
        ("Reorder Level", "Float", 120),
    ]
    # labels go through frappe's translation function, like the original
    # hand-written "Label:Type:Width" strings did
    return ["{0}:{1}:{2}".format(_(label), fieldtype, width)
            for label, fieldtype, width in column_specs]
def get_item_info():
    """Fetch reorder-relevant fields (min order qty, lead time) for all items."""
    return frappe.db.sql("""select name, item_name, description, min_order_qty,
        lead_time_days from tabItem""", as_dict=1)
def get_consumed_items(condition):
    """Map item_code -> qty consumed via submitted stock entries.

    Only rows with no target warehouse are counted (material issues rather
    than transfers). *condition* is the date-range SQL fragment appended to
    the WHERE clause.
    """
    consumed_rows = frappe.db.sql("""select se_item.item_code,
        sum(se_item.actual_qty) as 'consume_qty'
        from `tabStock Entry` se, `tabStock Entry Detail` se_item
        where se.name = se_item.parent and se.docstatus = 1
        and ifnull(se_item.t_warehouse, '') = '' %s
        group by se_item.item_code""" % (condition), as_dict=1)
    # the query groups by item_code, so keys are unique
    return {row.item_code: row.consume_qty for row in consumed_rows}
def get_delivered_items(condition):
    """Map item_code -> qty delivered via Delivery Notes and POS invoices.

    *condition* is the date-range SQL fragment appended to the WHERE clause.
    Quantities from stock-updating POS Sales Invoices are only used for items
    that have no Delivery Note rows (setdefault keeps the first value seen).
    """
    dn_items = frappe.db.sql("""select dn_item.item_code, sum(dn_item.qty) as dn_qty
        from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item
        where dn.name = dn_item.parent and dn.docstatus = 1 %s
        group by dn_item.item_code""" % (condition), as_dict=1)

    # BUG FIX: this query previously selected and grouped on
    # si_item.item_name, but the loop below reads item.item_code -- the
    # attribute did not exist on those rows, so every POS quantity was
    # filed under a None key and never matched a real item.
    si_items = frappe.db.sql("""select si_item.item_code, sum(si_item.qty) as si_qty
        from `tabSales Invoice` si, `tabSales Invoice Item` si_item
        where si.name = si_item.parent and si.docstatus = 1 and
        ifnull(si.update_stock, 0) = 1 and ifnull(si.is_pos, 0) = 1 %s
        group by si_item.item_code""" % (condition), as_dict=1)

    dn_item_map = {}
    for item in dn_items:
        dn_item_map.setdefault(item.item_code, item.dn_qty)
    for item in si_items:
        # setdefault: Delivery Note quantities take precedence over POS ones
        dn_item_map.setdefault(item.item_code, item.si_qty)
    return dn_item_map
def get_condition(filters):
    """Build the SQL fragment restricting posting_date to the filter period.

    Returns a string beginning with " and ..." that callers append verbatim
    to their WHERE clauses; aborts via frappe.throw when either date is
    missing.

    NOTE(review): the filter values are interpolated directly into the SQL
    string. They normally come from the report's date-picker, but this is
    still string-built SQL -- consider normalising with getdate() or moving
    to parameterized queries in the callers.
    """
    conditions = ""
    if filters.get("from_date") and filters.get("to_date"):
        conditions += " and posting_date between '%s' and '%s'" % (filters["from_date"],filters["to_date"])
    else:
        frappe.throw(_("From and To dates required"))
    return conditions
| agpl-3.0 |
nirbheek/cerbero-old | test/test_cerbero_config.py | 27 | 8277 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import sys
import tempfile
import unittest
from cerbero import config as cconfig
from cerbero.enums import Platform
from cerbero.errors import FatalError, ConfigurationError
from cerbero.utils import system_info
Config = cconfig.Config
class LinuxPackagesTest(unittest.TestCase):
    """System tests for cerbero.config.Config loading and defaults.

    NOTE(review): the class name says 'LinuxPackagesTest' but every test
    here exercises Config; the name looks copied from another test module.
    Renaming would be cosmetic but may affect by-name test selection.
    The code is Python 2 (iteritems, assertEquals, failUnlessRaises).
    """

    def setUp(self):
        # run as "uninstalled" so Config resolves paths relative to the
        # source tree; NOTE(review): this env var is not restored in a
        # tearDown, so tests below that delete it mutate global state
        os.environ[cconfig.CERBERO_UNINSTALLED]='1'

    def _checkLoadConfig(self, config, func, filename, properties):
        """Write a config file assigning "test" to every property, load it
        via *func*, and assert all properties took that value."""
        with open(filename, 'w+') as f:
            for p in properties:
                f.write('%s="test"\n' % p)
        func()
        for p in properties:
            self.assertEquals(getattr(config, p), 'test')

    def testAllPropsInitializedNone(self):
        """A fresh Config has every declared property set to None."""
        config = Config()
        for p in config._properties:
            self.assertIsNone(getattr(config, p))

    def testLoadDefaults(self):
        """load_defaults() fills every property with its expected default."""
        config = Config()
        config.load_defaults()
        platform, arch, distro, distro_version, num_of_cpus = system_info()
        data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
        data_dir = os.path.abspath(data_dir)
        # expected default value for every Config property
        props = {
            'platform': platform,
            'target_platform': platform,
            'distro': distro,
            'distro_version': distro_version,
            'target_distro': distro,
            'target_distro_version': distro_version,
            'arch': arch,
            'target_arch': arch,
            'num_of_cpus': num_of_cpus,
            'host': None,
            'build': None,
            'target': None,
            'prefix': None,
            'sources': None,
            'local_sources': None,
            'min_osx_sdk_version': None,
            'lib_suffix': '',
            'cache_file': None,
            'toolchain_prefix': None,
            'install_dir': None,
            'packages_prefix': None,
            'data_dir': data_dir,
            'environ_dir': config._relative_path('config'),
            'recipes_dir': config._relative_path('recipes'),
            'packages_dir': config._relative_path('packages'),
            'git_root': cconfig.DEFAULT_GIT_ROOT,
            'wix_prefix': cconfig.DEFAULT_WIX_PREFIX,
            'packager': cconfig.DEFAULT_PACKAGER,
            'py_prefix': 'lib/python%s.%s' % (sys.version_info[0],
                sys.version_info[1]),
            'allow_parallel_build': cconfig.DEFAULT_ALLOW_PARALLEL_BUILD,
            'use_configure_cache': False,
            'allow_system_libs': True,
            'external_packages': {},
            'external_recipes': {},
            'use_ccache': None,
            'force_git_commit': None,
            'universal_archs': [cconfig.Architecture.X86, cconfig.Architecture.X86_64],
        }
        # the property set must match exactly, and each value its default
        self.assertEquals(sorted(config._properties), sorted(props.keys()))
        for p, v in props.iteritems():
            self.assertEquals(getattr(config, p), v)

    def testLoadMainConfig(self):
        """_load_main_config() reads properties from DEFAULT_CONFIG_FILE."""
        config = Config()
        tmpconfig = tempfile.NamedTemporaryFile()
        # NOTE(review): DEFAULT_CONFIG_FILE is monkeypatched and never
        # restored; later tests may observe the patched value -- confirm
        # this cross-test dependency is intended
        cconfig.DEFAULT_CONFIG_FILE = tmpconfig.name
        config._load_main_config()
        # an empty config file must leave all properties untouched (None)
        for p in config._properties:
            self.assertIsNone(getattr(config, p))
        config.load_defaults()
        self._checkLoadConfig(config, config._load_main_config,
            tmpconfig.name, config._properties)

    def testLoadPlatformConfig(self):
        """_load_platform_config() reads <target_platform>.config from environ_dir."""
        config = Config()
        tmpdir = tempfile.mkdtemp()
        config.environ_dir = tmpdir
        config.load_defaults()
        config._load_platform_config()
        platform_config = os.path.join(tmpdir, '%s.config' %
            config.target_platform)
        config.load_defaults()
        self._checkLoadConfig(config, config._load_platform_config,
            platform_config, config._properties)

    def testFindDataDir(self):
        """load_defaults() raises FatalError when the data dir cannot be found
        in installed mode."""
        config = Config()
        del os.environ[cconfig.CERBERO_UNINSTALLED]
        config._check_uninstalled()
        self.failUnlessRaises(FatalError, config.load_defaults)

    def testCheckUninstalled(self):
        """_check_uninstalled() reflects the CERBERO_UNINSTALLED env var."""
        config = Config()
        del os.environ[cconfig.CERBERO_UNINSTALLED]
        config._check_uninstalled()
        self.assertFalse(config.uninstalled)
        os.environ[cconfig.CERBERO_UNINSTALLED]='1'
        config._check_uninstalled()
        self.assertTrue(config.uninstalled)

    def testSetupEnv(self):
        """do_setup_env() exports exactly the env vars get_env() computes."""
        config = Config()
        tmpdir = tempfile.mkdtemp()
        config.prefix = tmpdir
        config.load_defaults()
        config.do_setup_env()
        env = config.get_env(tmpdir, os.path.join(tmpdir, 'lib'),
            config.py_prefix)
        for k, v in env.iteritems():
            self.assertEquals(os.environ[k], v)

    def testParseBadConfigFile(self):
        """parse() raises ConfigurationError on a file that is not valid Python."""
        config = Config()
        tmpfile = tempfile.NamedTemporaryFile()
        with open(tmpfile.name, 'w') as f:
            f.write('nonsense line')
        self.failUnlessRaises(ConfigurationError, config.parse, tmpfile.name)

    def testJoinPath(self):
        """_join_path() uses ':' on Linux and ';' on Windows."""
        config = Config()
        config.platform = Platform.LINUX
        self.assertEquals(config._join_path('/test1', '/test2'), '/test1:/test2')
        config.platform = Platform.WINDOWS
        self.assertEquals(config._join_path('/test1', '/test2'), '/test1;/test2')

    def testLoadCommandConfig(self):
        """_load_cmd_config() validates the file given on the command line."""
        config = Config()
        config.filename = None
        # None means "no config passed": filename stays unset
        config._load_cmd_config(None)
        self.assertIsNone(config.filename)
        # a non-existing path must be rejected
        self.failUnlessRaises(ConfigurationError, config._load_cmd_config,
            '/foo/bar')
        tmpfile = tempfile.NamedTemporaryFile()
        config._load_cmd_config(tmpfile.name)
        # NOTE(review): this equality presumably relies on how
        # _load_cmd_config normalizes the filename, and possibly on
        # DEFAULT_CONFIG_FILE having been patched by an earlier test --
        # confirm against the Config implementation
        self.assertEquals(config.filename, cconfig.DEFAULT_CONFIG_FILE)

    def testLastDefaults(self):
        """_load_last_defaults() points prefix/sources at ~/cerbero."""
        config = Config()
        config._load_last_defaults()
        cerbero_home = os.path.expanduser('~/cerbero')
        self.assertEquals(config.prefix, os.path.join(cerbero_home, 'dist'))
        self.assertEquals(config.install_dir, config.prefix)
        self.assertEquals(config.sources,
            os.path.join(cerbero_home, 'sources'))
        self.assertEquals(config.local_sources,
            os.path.join(cerbero_home, 'sources', 'local'))

    def testRecipesExternalRepositories(self):
        """get_recipes_repos() merges the default repo with external ones."""
        config = Config()
        config.recipes_dir = 'test'
        config.external_recipes = {'test1': ('/path/to/repo', 1),
            'test2': ('/path/to/other/repo', 2)}
        expected = {'default': ('test', 0),
            'test1': ('/path/to/repo', 1),
            'test2': ('/path/to/other/repo', 2)}
        self.assertEquals(config.get_recipes_repos(), expected)

    def testPakcagesExternalRepositories(self):
        """get_packages_repos() merges the default repo with external ones.

        NOTE(review): the method name misspells 'Packages'; renaming is
        cosmetic but could affect by-name test selection.
        """
        config = Config()
        config.packages_dir = 'test'
        config.external_packages = {'test1': ('/path/to/repo', 1),
            'test2': ('/path/to/other/repo', 2)}
        expected = {'default': ('test', 0),
            'test1': ('/path/to/repo', 1),
            'test2': ('/path/to/other/repo', 2)}
        self.assertEquals(config.get_packages_repos(), expected)
| lgpl-2.1 |
kuropatkin/lte | .waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/winres.py | 21 | 2783 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import re,traceback
from waflib import Task,Logs,Utils
from waflib.TaskGen import extension
from waflib.Tools import c_preproc
@extension('.rc')
def rc_file(self, node):
    """Create a 'winrc' task for a Windows resource (.rc) source file.

    The compiled output is linked like any other object, so the new task is
    registered in the task generator's compiled_tasks list.
    """
    # MSVC's rc.exe (signalled by the '/fo' target flag) produces .res files,
    # while GNU windres produces ordinary object files
    if self.env['WINRC_TGT_F'] == '/fo':
        out_ext = '.res'
    else:
        out_ext = '.rc.o'
    task = self.create_task('winrc', node, node.change_ext(out_ext))
    try:
        self.compiled_tasks.append(task)
    except AttributeError:
        # first compiled task for this generator: create the list
        self.compiled_tasks = [task]
re_lines=re.compile('(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|''(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)',re.IGNORECASE|re.MULTILINE)
class rc_parser(c_preproc.c_parser):
    """Dependency scanner for .rc files, built on waf's C preprocessor parser.

    Reuses c_parser's #include resolution, but additionally treats resource
    statements that reference external files (ICON, BITMAP, ...) as
    'include' directives so those files become task dependencies.
    """
    def filter_comments(self,filepath):
        # Return (directive, argument) pairs for the file, after the same
        # comment/trigraph stripping the C scanner performs.
        code=Utils.readf(filepath)
        if c_preproc.use_trigraphs:
            for(a,b)in c_preproc.trig_def:code=code.split(a).join(b)
        code=c_preproc.re_nl.sub('',code)
        code=c_preproc.re_cpp.sub(c_preproc.repl,code)
        ret=[]
        for m in re.finditer(re_lines,code):
            if m.group(2):
                # first regex alternative: a real preprocessor directive
                ret.append((m.group(2),m.group(3)))
            else:
                # second alternative: resource statement (ICON, BITMAP, ...);
                # expose its file argument as an 'include' dependency
                ret.append(('include',m.group(5)))
        return ret
    def addlines(self,node):
        # Parse *node* (with caching) and push its directive lines onto the
        # parser's work stack, guarding against include recursion.
        self.currentnode_stack.append(node.parent)
        filepath=node.abspath()
        self.count_files+=1
        if self.count_files>c_preproc.recursion_limit:
            raise c_preproc.PreprocError("recursion limit exceeded")
        pc=self.parse_cache
        Logs.debug('preproc: reading file %r',filepath)
        try:
            lns=pc[filepath]
        except KeyError:
            pass
        else:
            # cache hit: reuse the previously parsed lines
            self.lines.extend(lns)
            return
        try:
            lines=self.filter_comments(filepath)
            lines.append((c_preproc.POPFILE,''))
            # lines are consumed from the end of self.lines, hence reversed
            lines.reverse()
            pc[filepath]=lines
            self.lines.extend(lines)
        except IOError:
            raise c_preproc.PreprocError("could not read the file %s"%filepath)
        except Exception:
            # parsing problems are not fatal to the build; only report them
            # in verbose mode
            if Logs.verbose>0:
                Logs.error("parsing %s failed"%filepath)
                traceback.print_exc()
class winrc(Task.Task):
    """Task compiling a .rc file with the configured resource compiler.

    The command line is expanded by waf's Task machinery from run_str,
    using the WINRC* variables set in configure() below.
    """
    run_str='${WINRC} ${WINRCFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}'
    color='BLUE'
    def scan(self):
        # Scan the .rc input for #include and resource-file dependencies,
        # returning (resolved nodes, unresolved names) as waf expects.
        tmp=rc_parser(self.generator.includes_nodes)
        tmp.start(self.inputs[0],self.env)
        nodes=tmp.nodes
        names=tmp.names
        if Logs.verbose:
            Logs.debug('deps: deps for %s: %r; unresolved %r'%(str(self),nodes,names))
        return(nodes,names)
def configure(conf):
    """Detect a Windows resource compiler and set the winrc task variables.

    Uses rc.exe (MSVC) or windres (GNU), unless WINRC is already set in the
    environment; aborts the configuration if no compiler is found.
    """
    env = conf.env
    # GNU windres-style flags by default
    env['WINRC_TGT_F'] = '-o'
    env['WINRC_SRC_F'] = '-i'
    if not env.WINRC:
        if env.CC_NAME == 'msvc':
            conf.find_program('RC', var='WINRC', path_list=env['PATH'])
            # rc.exe expects '/fo <out>' and takes the source with no flag
            env['WINRC_TGT_F'] = '/fo'
            env['WINRC_SRC_F'] = ''
        else:
            conf.find_program('windres', var='WINRC', path_list=env['PATH'])
    if not env.WINRC:
        conf.fatal('winrc was not found!')
    env['WINRCFLAGS'] = []
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.