repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
jwlawson/tensorflow | tensorflow/contrib/factorization/python/kernel_tests/masked_matmul_ops_test.py | 111 | 3662 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for masked_matmul_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-todo, g-import-not-at-top
import numpy as np
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def MakeMask():
  """Builds a fixed sparse mask for the 5x4 product matrix.

  Returns:
    A (indices, shape) pair of int64 numpy arrays: 500 mask coordinates
    (the same 5 positions repeated 100 times) and the dense shape [5, 4].
  """
  base_coords = [[0, 0], [0, 2], [1, 1], [2, 0], [2, 3]]
  indices = np.array(base_coords * 100, dtype=np.int64)
  shape = np.array([5, 4], dtype=np.int64)
  return indices, shape
class MaskedProductOpsTest(test.TestCase):
  """Tests masked_matmul against precomputed dot products.

  The factors are a (5x3) and b (3x4); MakeMask() supplies the mask
  indices and the dense shape of the 5x4 product.
  """

  def setUp(self):
    # Left factor: 5x3.
    a = [
        [0.1, 0.2, 0.3],
        [0.4, 0.5, 0.6],
        [0.7, 0.8, 0.9],
        [1.1, 1.2, 1.3],
        [1.4, 1.5, 1.6],
    ]
    # Right factor: 3x4.
    b = [
        [0.1, 0.4, 0.7, 1.1],
        [0.2, 0.5, 0.8, 1.2],
        [0.3, 0.6, 0.9, 1.3],
    ]
    # Expected values of a * b at the 5 masked positions, repeated to match
    # the 500 mask entries produced by MakeMask(). E.g. (a*b)[0,0] = 0.14.
    self._dot_products = np.array([0.14, 0.5, 0.77, 0.5, 2.9] * 100)
    self._a = np.array(a).astype(np.float32)
    self._b = np.array(b).astype(np.float32)
    self._mask_ind, self._mask_shape = MakeMask()

  def _runTestMaskedProduct(self, transpose_a, transpose_b):
    """Runs masked_matmul with the given transpose flags and checks the result."""
    with ops.Graph().as_default(), self.test_session() as sess:
      # Pre-transpose the inputs so that the op's transpose flags undo it
      # and the expected products stay the same in every test variant.
      a = self._a if not transpose_a else array_ops.transpose(self._a)
      b = self._b if not transpose_b else array_ops.transpose(self._b)

      def AssertClose(sp_x, sp_y):
        # Compares two SparseTensors by their evaluated indices and values.
        x_inds, x_vals, y_inds, y_vals = sess.run(
            [sp_x.indices, sp_x.values,
             sp_y.indices, sp_y.values])
        self.assertAllClose(x_inds, y_inds)
        self.assertAllClose(x_vals, y_vals)

      values = gen_factorization_ops.masked_matmul(
          a, b, self._mask_ind, transpose_a, transpose_b)
      result = sparse_tensor.SparseTensor(
          self._mask_ind, values, self._mask_shape)
      true_result = sparse_tensor.SparseTensor(
          self._mask_ind, self._dot_products, self._mask_shape)
      AssertClose(result, true_result)

  def _runTestEmptyMaskedProduct(self):
    """Checks that an empty mask yields an empty values tensor."""
    with ops.Graph().as_default(), self.test_session() as sess:
      empty_mask = constant_op.constant(0, shape=[0, 2], dtype=dtypes.int64)
      values = gen_factorization_ops.masked_matmul(
          self._a, self._b, empty_mask, False, False)
      self.assertEqual(len(values.eval(session=sess)), 0)

  def testMaskedProduct(self):
    self._runTestMaskedProduct(False, False)

  def testMaskedProductTransposeA(self):
    self._runTestMaskedProduct(True, False)

  def testMaskedProductTransposeB(self):
    self._runTestMaskedProduct(False, True)

  def testMaskedProductTransposeAAndB(self):
    self._runTestMaskedProduct(True, True)

  def testEmptyMaskedProduct(self):
    self._runTestEmptyMaskedProduct()
# Run all tests when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
kidburglar/youtube-dl | youtube_dl/extractor/iqiyi.py | 10 | 13633 | # coding: utf-8
from __future__ import unicode_literals
import hashlib
import itertools
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
clean_html,
decode_packed_codes,
get_element_by_id,
get_element_by_attribute,
ExtractorError,
ohdave_rsa_encrypt,
remove_start,
)
def md5_text(text):
    """Return the hexadecimal MD5 digest of text, encoded as UTF-8."""
    encoded = text.encode('utf-8')
    return hashlib.md5(encoded).hexdigest()
class IqiyiSDK(object):
    """Port of the string-mangling primitives from iqiyi's login 'SDK'.

    Each method transforms self.target in place; IqiyiSDKInterpreter
    decides which methods to call and in which order, so the exact
    statement order inside each method must be preserved.
    """

    def __init__(self, target, ip, timestamp):
        self.target = target        # string being progressively mangled
        self.ip = ip                # dotted-quad IP string
        self.timestamp = timestamp  # integer Unix timestamp

    @staticmethod
    def split_sum(data):
        # Sum of the characters of `data` interpreted as hex digits,
        # returned as a decimal string.
        return compat_str(sum(map(lambda p: int(p, 16), list(data))))

    @staticmethod
    def digit_sum(num):
        # Sum of the decimal digits of `num` (int or str), as a string.
        if isinstance(num, int):
            num = compat_str(num)
        return compat_str(sum(map(int, num)))

    def even_odd(self):
        # Digit sums of the even- and odd-indexed digits of the timestamp.
        even = self.digit_sum(compat_str(self.timestamp)[::2])
        odd = self.digit_sum(compat_str(self.timestamp)[1::2])
        return even, odd

    def preprocess(self, chunksize):
        # MD5 the current target, split the 32-char digest into
        # `chunksize`-sized chunks (plus a remainder chunk if any), and
        # return (chunks, ip_octets_as_ints).
        self.target = md5_text(self.target)
        chunks = []
        for i in range(32 // chunksize):
            chunks.append(self.target[chunksize * i:chunksize * (i + 1)])
        if 32 % chunksize:
            chunks.append(self.target[32 - 32 % chunksize:])
        return chunks, list(map(int, self.ip.split('.')))

    def mod(self, modulus):
        # Digest followed by each IP octet reduced modulo `modulus`.
        chunks, ip = self.preprocess(32)
        self.target = chunks[0] + ''.join(map(lambda p: compat_str(p % modulus), ip))

    def split(self, chunksize):
        # Interleave digest chunks with reduced IP octets; chunk size
        # selects the modulus and whether the octet goes before or after.
        modulus_map = {
            4: 256,
            5: 10,
            8: 100,
        }
        chunks, ip = self.preprocess(chunksize)
        ret = ''
        for i in range(len(chunks)):
            # Only the first 4 chunks get an IP part (one per octet).
            ip_part = compat_str(ip[i] % modulus_map[chunksize]) if i < 4 else ''
            if chunksize == 8:
                ret += ip_part + chunks[i]
            else:
                ret += chunks[i] + ip_part
        self.target = ret

    def handle_input16(self):
        # Wrap the digest in hex-digit sums of its two 16-char halves.
        self.target = md5_text(self.target)
        self.target = self.split_sum(self.target[:16]) + self.target + self.split_sum(self.target[16:])

    def handle_input8(self):
        # Prefix each 8-char quarter of the digest with its hex-digit sum.
        self.target = md5_text(self.target)
        ret = ''
        for i in range(4):
            part = self.target[8 * i:8 * (i + 1)]
            ret += self.split_sum(part) + part
        self.target = ret

    def handleSum(self):
        # Prefix the digest with its hex-digit sum.
        self.target = md5_text(self.target)
        self.target = self.split_sum(self.target) + self.target

    def date(self, scheme):
        # Append year/month/day fields of the timestamp, ordered by the
        # characters of `scheme` (e.g. 'ymd').
        self.target = md5_text(self.target)
        d = time.localtime(self.timestamp)
        strings = {
            'y': compat_str(d.tm_year),
            'm': '%02d' % d.tm_mon,
            'd': '%02d' % d.tm_mday,
        }
        self.target += ''.join(map(lambda c: strings[c], list(scheme)))

    def split_time_even_odd(self):
        # odd-digit sum + digest + even-digit sum.
        even, odd = self.even_odd()
        self.target = odd + md5_text(self.target) + even

    def split_time_odd_even(self):
        # even-digit sum + digest + odd-digit sum.
        even, odd = self.even_odd()
        self.target = even + md5_text(self.target) + odd

    def split_ip_time_sum(self):
        # IP octet sum + digest + timestamp digit sum.
        chunks, ip = self.preprocess(32)
        self.target = compat_str(sum(ip)) + chunks[0] + self.digit_sum(self.timestamp)

    def split_time_ip_sum(self):
        # timestamp digit sum + digest + IP octet sum.
        chunks, ip = self.preprocess(32)
        self.target = self.digit_sum(self.timestamp) + chunks[0] + compat_str(sum(ip))
class IqiyiSDKInterpreter(object):
    """Interprets iqiyi's packed JavaScript login 'SDK'.

    The packed code is unpacked, the sequence of `input=NAME(input...)`
    calls is extracted, and each call is replayed against an IqiyiSDK
    instance to compute the request signature.
    """

    def __init__(self, sdk_code):
        # Packed (obfuscated) JS source as downloaded from the server.
        self.sdk_code = sdk_code

    def run(self, target, ip, timestamp):
        """Replays the SDK's transformation chain and returns the signature.

        Raises ExtractorError if the SDK uses a transformation that is not
        implemented here.
        """
        self.sdk_code = decode_packed_codes(self.sdk_code)

        functions = re.findall(r'input=([a-zA-Z0-9]+)\(input', self.sdk_code)

        sdk = IqiyiSDK(target, ip, timestamp)

        # Fixed-name transformations; parameterized ones (mod*, date*,
        # split*) are dispatched by regex below.
        other_functions = {
            'handleSum': sdk.handleSum,
            'handleInput8': sdk.handle_input8,
            'handleInput16': sdk.handle_input16,
            'splitTimeEvenOdd': sdk.split_time_even_odd,
            'splitTimeOddEven': sdk.split_time_odd_even,
            'splitIpTimeSum': sdk.split_ip_time_sum,
            'splitTimeIpSum': sdk.split_time_ip_sum,
        }
        for function in functions:
            if re.match(r'mod\d+', function):
                sdk.mod(int(function[3:]))
            elif re.match(r'date[ymd]{3}', function):
                sdk.date(function[4:])
            elif re.match(r'split\d+', function):
                sdk.split(int(function[5:]))
            elif function in other_functions:
                other_functions[function]()
            else:
                # Fixed typo in the error message ('funcion' -> 'function').
                raise ExtractorError('Unknown function %s' % function)

        return sdk.target
class IqiyiIE(InfoExtractor):
    """Extractor for iqiyi.com and pps.tv video and playlist pages."""

    IE_NAME = 'iqiyi'
    IE_DESC = '爱奇艺'

    _VALID_URL = r'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html'

    _NETRC_MACHINE = 'iqiyi'

    _TESTS = [{
        'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
        # MD5 checksum differs on my machine and Travis CI
        'info_dict': {
            'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
            'ext': 'mp4',
            'title': '美国德州空中惊现奇异云团 酷似UFO',
        }
    }, {
        'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
        'md5': 'b7dc800a4004b1b57749d9abae0472da',
        'info_dict': {
            'id': 'e3f585b550a280af23c98b6cb2be19fb',
            'ext': 'mp4',
            # This can be either Simplified Chinese or Traditional Chinese
            'title': r're:^(?:名侦探柯南 国语版:第752集 迫近灰原秘密的黑影 下篇|名偵探柯南 國語版:第752集 迫近灰原秘密的黑影 下篇)$',
        },
        'skip': 'Geo-restricted to China',
    }, {
        'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
        'only_matching': True,
    }, {
        'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
        'only_matching': True,
    }, {
        'url': 'http://yule.iqiyi.com/pcb.html',
        'info_dict': {
            'id': '4a0af228fddb55ec96398a364248ed7f',
            'ext': 'mp4',
            'title': '第2017-04-21期 女艺人频遭极端粉丝骚扰',
        },
    }, {
        # VIP-only video. The first 2 parts (6 minutes) are available without login
        # MD5 sums omitted as values are different on Travis CI and my machine
        'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
        'info_dict': {
            'id': 'f3cf468b39dddb30d676f89a91200dc1',
            'ext': 'mp4',
            'title': '泰坦尼克号',
        },
        'skip': 'Geo-restricted to China',
    }, {
        'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
        'info_dict': {
            'id': '202918101',
            'title': '灌篮高手 国语版',
        },
        'playlist_count': 101,
    }, {
        'url': 'http://www.pps.tv/w_19rrbav0ph.html',
        'only_matching': True,
    }]

    # Maps the site's stream quality ids to a youtube-dl preference value
    # (higher is better).
    _FORMATS_MAP = {
        '96': 1,    # 216p, 240p
        '1': 2,     # 336p, 360p
        '2': 3,     # 480p, 504p
        '21': 4,    # 504p
        '4': 5,     # 720p
        '17': 5,    # 720p
        '5': 6,     # 1072p, 1080p
        '18': 7,    # 1080p
    }

    def _real_initialize(self):
        self._login()

    @staticmethod
    def _rsa_fun(data):
        """RSA-encrypts `data` (bytes) with the site's public key."""
        # public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
        N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
        e = 65537

        return ohdave_rsa_encrypt(data, e, N)

    def _login(self):
        """Logs in with the configured credentials.

        Returns True when login succeeds or no credentials are configured,
        False (after a warning) when the server rejects them.
        """
        username, password = self._get_login_info()

        # No authentication to be performed
        if not username:
            return True

        data = self._download_json(
            'http://kylin.iqiyi.com/get_token', None,
            note='Get token for logging', errnote='Unable to get token for logging')
        sdk = data['sdk']
        timestamp = int(time.time())
        target = '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
            username, self._rsa_fun(password.encode('utf-8')))

        # The request must be signed by replaying the site's JS SDK.
        interp = IqiyiSDKInterpreter(sdk)
        sign = interp.run(target, data['ip'], timestamp)

        validation_params = {
            'target': target,
            'server': 'BEA3AA1908656AABCCFF76582C4C6660',
            'token': data['token'],
            'bird_src': 'f8d91d57af224da7893dd397d52d811a',
            'sign': sign,
            'bird_t': timestamp,
        }
        validation_result = self._download_json(
            'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None,
            note='Validate credentials', errnote='Unable to validate credentials')

        MSG_MAP = {
            'P00107': 'please login via the web interface and enter the CAPTCHA code',
            'P00117': 'bad username or password',
        }

        code = validation_result['code']
        if code != 'A00000':
            msg = MSG_MAP.get(code)
            if not msg:
                msg = 'error %s' % code
                if validation_result.get('msg'):
                    msg += ': ' + validation_result['msg']
            self._downloader.report_warning('unable to log in: ' + msg)
            return False

        return True

    def get_raw_data(self, tvid, video_id):
        """Downloads the signed stream-info JSON for a tvid/video_id pair."""
        tm = int(time.time() * 1000)

        key = 'd5fb4bd9d50c4be6948c97edd7254b0e'
        sc = md5_text(compat_str(tm) + key + tvid)
        params = {
            'tvid': tvid,
            'vid': video_id,
            'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
            'sc': sc,
            't': tm,
        }

        return self._download_json(
            'http://cache.m.iqiyi.com/jp/tmts/%s/%s/' % (tvid, video_id),
            video_id, transform_source=lambda s: remove_start(s, 'var tvInfoJs='),
            query=params, headers=self.geo_verification_headers())

    def _extract_playlist(self, webpage):
        """Extracts an album playlist from `webpage`; returns None if the
        page does not contain playlist links."""
        PAGE_SIZE = 50

        links = re.findall(
            r'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
            webpage)
        if not links:
            return

        album_id = self._search_regex(
            r'albumId\s*:\s*(\d+),', webpage, 'album ID')
        album_title = self._search_regex(
            r'data-share-title="([^"]+)"', webpage, 'album title', fatal=False)

        entries = list(map(self.url_result, links))

        # Start from 2 because links in the first page are already on webpage
        for page_num in itertools.count(2):
            pagelist_page = self._download_webpage(
                'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id, page_num, PAGE_SIZE),
                album_id,
                note='Download playlist page %d' % page_num,
                errnote='Failed to download playlist page %d' % page_num)
            pagelist = self._parse_json(
                remove_start(pagelist_page, 'var tvInfoJs='), album_id)
            vlist = pagelist['data']['vlist']
            for item in vlist:
                entries.append(self.url_result(item['vurl']))
            # A short page means this was the last one.
            if len(vlist) < PAGE_SIZE:
                break

        return self.playlist_result(entries, album_id, album_title)

    def _real_extract(self, url):
        webpage = self._download_webpage(
            url, 'temp_id', note='download video page')

        # There's no simple way to determine whether an URL is a playlist or not
        # Sometimes there are playlist links in individual videos, so treat it
        # as a single video first
        tvid = self._search_regex(
            r'data-(?:player|shareplattrigger)-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid', default=None)
        if tvid is None:
            playlist_result = self._extract_playlist(webpage)
            if playlist_result:
                return playlist_result
            raise ExtractorError('Can\'t find any video')

        video_id = self._search_regex(
            r'data-(?:player|shareplattrigger)-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')

        formats = []
        # The stream-info endpoint is flaky; retry a few times with a delay.
        for _ in range(5):
            raw_data = self.get_raw_data(tvid, video_id)

            if raw_data['code'] != 'A00000':
                if raw_data['code'] == 'A00111':
                    self.raise_geo_restricted()
                raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])

            data = raw_data['data']

            for stream in data['vidl']:
                if 'm3utx' not in stream:
                    continue
                vd = compat_str(stream['vd'])
                formats.append({
                    'url': stream['m3utx'],
                    'format_id': vd,
                    'ext': 'mp4',
                    'preference': self._FORMATS_MAP.get(vd, -1),
                    'protocol': 'm3u8_native',
                })

            if formats:
                break

            self._sleep(5, video_id)

        self._sort_formats(formats)
        title = (get_element_by_id('widget-videotitle', webpage) or
                 clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage)) or
                 self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
        }
| unlicense |
mxia/engine | third_party/libxml/src/xstc/xstc.py | 165 | 21006 | #!/usr/bin/env python
#
# This is the MS subset of the W3C test suite for XML Schemas.
# This file is generated from the MS W3c test suite description file.
#
import sys, os
import exceptions, optparse
import libxml2
# Command-line interface of the test runner.
opa = optparse.OptionParser()
opa.add_option("-b", "--base", action="store", type="string", dest="baseDir",
               default="",
               help="""The base directory; i.e. the parent folder of the
"nisttest", "suntest" and "msxsdtest" directories.""")
opa.add_option("-o", "--out", action="store", type="string", dest="logFile",
               default="test.log",
               help="The filepath of the log file to be created")
opa.add_option("--log", action="store_true", dest="enableLog",
               default=False,
               help="Create the log file")
opa.add_option("--no-test-out", action="store_true", dest="disableTestStdOut",
               default=False,
               help="Don't output test results")
opa.add_option("-s", "--silent", action="store_true", dest="silent", default=False,
               help="Disables display of all tests")
opa.add_option("-v", "--verbose", action="store_true", dest="verbose",
               default=False,
               help="Displays all tests (only if --silent is not set)")
opa.add_option("-x", "--max", type="int", dest="maxTestCount",
               default="-1",
               help="The maximum number of tests to be run")
opa.add_option("-t", "--test", type="string", dest="singleTest",
               default=None,
               help="Runs the specified test only")
opa.add_option("--tsw", "--test-starts-with", type="string", dest="testStartsWith",
               default=None,
               help="Runs the specified test(s), starting with the given string")
opa.add_option("--rieo", "--report-internal-errors-only", action="store_true",
               dest="reportInternalErrOnly", default=False,
               help="Display erroneous tests of type 'internal' only")
opa.add_option("--rueo", "--report-unimplemented-errors-only", action="store_true",
               dest="reportUnimplErrOnly", default=False,
               help="Display erroneous tests of type 'unimplemented' only")
opa.add_option("--rmleo", "--report-mem-leak-errors-only", action="store_true",
               dest="reportMemLeakErrOnly", default=False,
               help="Display erroneous tests of type 'memory leak' only")
opa.add_option("-c", "--combines", type="string", dest="combines",
               default=None,
               help="Combines to be run (all if omitted)")
opa.add_option("--csw", "--csw", type="string", dest="combineStartsWith",
               default=None,
               help="Combines to be run (all if omitted)")
opa.add_option("--rc", "--report-combines", action="store_true",
               dest="reportCombines", default=False,
               help="Display combine reports")
opa.add_option("--rec", "--report-err-combines", action="store_true",
               dest="reportErrCombines", default=False,
               help="Display erroneous combine reports only")
opa.add_option("--debug", action="store_true",
               dest="debugEnabled", default=False,
               help="Displays debug messages")
opa.add_option("--info", action="store_true",
               dest="info", default=False,
               help="Displays info on the suite only. Does not run any test.")
opa.add_option("--sax", action="store_true",
               dest="validationSAX", default=False,
               help="Use SAX2-driven validation.")
opa.add_option("--tn", action="store_true",
               dest="displayTestName", default=False,
               help="Display the test name in every case.")
(options, args) = opa.parse_args()

# --combines is given as one space-separated string; split it into a list.
if options.combines is not None:
    options.combines = options.combines.split()

################################################
# The vars below are not intended to be changed.
#
msgSchemaNotValidButShould = "The schema should be valid."
msgSchemaValidButShouldNot = "The schema should be invalid."
msgInstanceNotValidButShould = "The instance should be valid."
msgInstanceValidButShouldNot = "The instance should be invalid."

# Vendor tags of the test-suite subsets; `vendor` is set by generated code.
vendorNIST = "NIST"
vendorNIST_2 = "NIST-2"
vendorSUN = "SUN"
vendorMS = "MS"

###################
# Helper functions.
#
vendor = None
def handleError(test, msg):
    """libxml2 error callback: records the message on the current test and
    classifies 'Unimplemented'/'Internal' library errors."""
    global options
    if not options.silent:
        test.addLibLog("'%s' LIB: %s" % (test.name, msg))
    if "Unimplemented" in msg:
        test.failUnimplemented()
    elif "Internal" in msg:
        test.failInternal()
def fixFileNames(fileName):
    """Maps a suite file path onto the local "./Tests" directory layout.

    Empty or None input yields "". The first path component is always
    dropped; when the second component is not "Tests" the remainder is
    re-rooted under "./Tests", otherwise it is appended to the original
    path unchanged (as in the generated suite data).
    NOTE(review): assumes fileName contains at least one "/" — a bare
    name raises IndexError, exactly like the original code.
    """
    if (fileName is None) or (fileName == ""):
        return ""
    parts = fileName.split("/")
    if parts[1] != "Tests":
        result = os.path.join(".", "Tests")
    else:
        result = fileName
    for part in parts[1:]:
        result = os.path.join(result, part)
    return result
class XSTCTestGroup:
    """A group of test cases sharing one main schema document."""

    def __init__(self, name, schemaFileName, descr):
        global vendor, vendorNIST_2
        self.name = name
        self.descr = descr
        # True until the first (main) schema of the group is registered.
        self.mainSchema = True
        self.schemaFileName = fixFileNames(schemaFileName)
        # Whether the main schema parsed successfully / was attempted.
        self.schemaParsed = False
        self.schemaTried = False

    def setSchema(self, schemaFileName, parsed):
        # Record the parse outcome of the main schema only; subsequent
        # calls for further schemata of the group are ignored.
        if not self.mainSchema:
            return
        self.mainSchema = False
        self.schemaParsed = parsed
        self.schemaTried = True
class XSTCTestCase:
    """Base class for a single schema/instance test of the XSTC suite.

    Subclasses implement validate(); this class carries the bookkeeping
    (logs, failure flags, memory accounting) and the libxml2 setup.
    """

    # <!-- groupName, Name, Accepted, File, Val, Descr
    def __init__(self, isSchema, groupName, name, accepted, file, val, descr):
        global options
        #
        # Constructor.
        #
        self.testRunner = None
        self.isSchema = isSchema
        self.groupName = groupName
        self.name = name
        self.accepted = accepted
        self.fileName = fixFileNames(file)
        self.val = val          # expected validity: 1 = valid, 0 = invalid
        self.descr = descr
        self.combineName = None
        self.log = []
        self.libLog = []
        self.initialMemUsed = 0
        self.memLeak = 0
        self.excepted = False
        self.bad = False
        self.unimplemented = False
        self.internalErr = False
        self.noSchemaErr = False
        # Removed a duplicate `self.failed = False` assignment.
        self.failed = False
        #
        # Init the log.
        #
        if not options.silent:
            if self.descr is not None:
                self.log.append("'%s' descr: %s\n" % (self.name, self.descr))
            self.log.append("'%s' exp validity: %d\n" % (self.name, self.val))

    def initTest(self, runner):
        """Binds the test to its runner/group and derives the combine name."""
        global vendorNIST, vendorSUN, vendorMS, vendorNIST_2, options, vendor
        #
        # Get the test-group.
        #
        self.runner = runner
        self.group = runner.getGroup(self.groupName)
        if vendor == vendorMS or vendor == vendorSUN:
            #
            # Use the last given directory for the combine name.
            #
            dirs = self.fileName.split("/")
            self.combineName = dirs[len(dirs) -2]
        elif vendor == vendorNIST:
            #
            # NIST files are named in the following form:
            # "NISTSchema-short-pattern-1.xsd"
            #
            tokens = self.name.split("-")
            self.combineName = tokens[1]
        elif vendor == vendorNIST_2:
            #
            # Group-names have the form: "atomic-normalizedString-length-1"
            #
            tokens = self.groupName.split("-")
            self.combineName = "%s-%s" % (tokens[0], tokens[1])
        else:
            self.combineName = "unkown"
            raise Exception("Could not compute the combine name of a test.")
        if (not options.silent) and (self.group.descr is not None):
            self.log.append("'%s' group-descr: %s\n" % (self.name, self.group.descr))

    def addLibLog(self, msg):
        """This one is intended to be used by the error handler
        function"""
        global options
        if not options.silent:
            self.libLog.append(msg)

    def fail(self, msg):
        """Marks the test as failed with a plain validation mismatch."""
        global options
        self.failed = True
        if not options.silent:
            self.log.append("'%s' ( FAILED: %s\n" % (self.name, msg))

    def failNoSchema(self):
        """Marks the test as skipped because its schema did not parse."""
        global options
        self.failed = True
        self.noSchemaErr = True
        if not options.silent:
            self.log.append("'%s' X NO-SCHEMA\n" % (self.name))

    def failInternal(self):
        """Marks the test as failed due to an internal libxml2 error."""
        global options
        self.failed = True
        self.internalErr = True
        if not options.silent:
            self.log.append("'%s' * INTERNAL\n" % self.name)

    def failUnimplemented(self):
        """Marks the test as failed due to an unimplemented feature."""
        global options
        self.failed = True
        self.unimplemented = True
        if not options.silent:
            self.log.append("'%s' ? UNIMPLEMENTED\n" % self.name)

    def failCritical(self, msg):
        """Marks the test as failed for an unexpected ('bad') reason."""
        global options
        self.failed = True
        self.bad = True
        if not options.silent:
            self.log.append("'%s' ! BAD: %s\n" % (self.name, msg))

    def failExcept(self, e):
        """Marks the test as failed because an exception was raised."""
        global options
        self.failed = True
        self.excepted = True
        if not options.silent:
            self.log.append("'%s' # EXCEPTION: %s\n" % (self.name, e.__str__()))

    def setUp(self):
        #
        # Set up Libxml2.
        #
        self.initialMemUsed = libxml2.debugMemory(1)
        libxml2.initParser()
        libxml2.lineNumbersDefault(1)
        libxml2.registerErrorHandler(handleError, self)

    def tearDown(self):
        # Clean up and compute the per-test memory delta (leak detection).
        libxml2.schemaCleanupTypes()
        libxml2.cleanupParser()
        self.memLeak = libxml2.debugMemory(1) - self.initialMemUsed

    def isIOError(self, file, docType):
        """Reports a critical failure if the last libxml2 error was an
        I/O error for the given document; returns False otherwise."""
        err = None
        try:
            err = libxml2.lastError()
        except:
            # Suppress exceptions.
            pass
        if (err is None):
            return False
        if err.domain() == libxml2.XML_FROM_IO:
            self.failCritical("failed to access the %s resource '%s'\n" % (docType, file))

    def debugMsg(self, msg):
        global options
        if options.debugEnabled:
            sys.stdout.write("'%s' DEBUG: %s\n" % (self.name, msg))

    def finalize(self):
        """Adds additional info to the log."""
        global options
        #
        # Add libxml2 messages.
        #
        if not options.silent:
            self.log.extend(self.libLog)
        #
        # Add memory leaks.
        #
        if self.memLeak != 0:
            self.log.append("%s + memory leak: %d bytes\n" % (self.name, self.memLeak))

    def run(self):
        """Runs a test."""
        global options
        ##filePath = os.path.join(options.baseDir, self.fileName)
        # filePath = "%s/%s/%s/%s" % (options.baseDir, self.test_Folder, self.schema_Folder, self.schema_File)
        if options.displayTestName:
            sys.stdout.write("'%s'\n" % self.name)
        try:
            self.validate()
        # Modernized Python-2-only `except ..., e` to `except ... as e`
        # (works on Python 2.6+ and Python 3).
        except (Exception, libxml2.parserError, libxml2.treeError) as e:
            self.failExcept(e)
def parseSchema(fileName):
    """Parses the schema document at fileName.

    Returns the schema object, or None when parsing fails; the parser
    context is always released.
    """
    parserCtxt = libxml2.schemaNewParserCtxt(fileName)
    try:
        try:
            return parserCtxt.schemaParse()
        except:
            return None
    finally:
        del parserCtxt
class XSTCSchemaTest(XSTCTestCase):
    """A test that parses a schema document and checks its validity
    against the expected outcome (self.val)."""

    def __init__(self, groupName, name, accepted, file, val, descr):
        XSTCTestCase.__init__(self, 1, groupName, name, accepted, file, val, descr)

    def validate(self):
        global msgSchemaNotValidButShould, msgSchemaValidButShouldNot
        schema = None
        filePath = self.fileName
        # os.path.join(options.baseDir, self.fileName)
        try:
            #
            # Parse the schema.
            #
            self.debugMsg("loading schema: %s" % filePath)
            schema = parseSchema(filePath)
            self.debugMsg("after loading schema")
            if schema is None:
                self.debugMsg("schema is None")
                self.debugMsg("checking for IO errors...")
                # BUGFIX: was `self.isIOError(file, "schema")`, passing the
                # builtin `file` instead of the actual path.
                if self.isIOError(filePath, "schema"):
                    return
            self.debugMsg("checking schema result")
            if (schema is None and self.val) or (schema is not None and self.val == 0):
                self.debugMsg("schema result is BAD")
                if (schema is None):
                    self.fail(msgSchemaNotValidButShould)
                else:
                    self.fail(msgSchemaValidButShouldNot)
            else:
                self.debugMsg("schema result is OK")
        finally:
            # Record the outcome on the group so instance tests can skip
            # when the schema did not parse, then drop the reference.
            self.group.setSchema(self.fileName, schema is not None)
            del schema
class XSTCInstanceTest(XSTCTestCase):
    """A test that validates an instance document against its group's
    schema and checks the result against the expected validity."""

    def __init__(self, groupName, name, accepted, file, val, descr):
        XSTCTestCase.__init__(self, 0, groupName, name, accepted, file, val, descr)

    def validate(self):
        instance = None
        schema = None
        filePath = self.fileName
        # os.path.join(options.baseDir, self.fileName)
        # If the group's schema was tried and failed, the instance cannot
        # be validated at all.
        if not self.group.schemaParsed and self.group.schemaTried:
            self.failNoSchema()
            return
        self.debugMsg("loading instance: %s" % filePath)
        parserCtxt = libxml2.newParserCtxt()
        if (parserCtxt is None):
            # TODO: Is this one necessary, or will an exception
            # be already raised?
            raise Exception("Could not create the instance parser context.")
        # In DOM mode the instance is parsed up front; in SAX mode
        # validation reads the file directly (see below).
        if not options.validationSAX:
            try:
                try:
                    instance = parserCtxt.ctxtReadFile(filePath, None, libxml2.XML_PARSE_NOWARNING)
                except:
                    # Suppress exceptions.
                    pass
            finally:
                del parserCtxt
            self.debugMsg("after loading instance")
            if instance is None:
                self.debugMsg("instance is None")
                self.failCritical("Failed to parse the instance for unknown reasons.")
                return
        try:
            #
            # Validate the instance.
            #
            self.debugMsg("loading schema: %s" % self.group.schemaFileName)
            schema = parseSchema(self.group.schemaFileName)
            try:
                validationCtxt = schema.schemaNewValidCtxt()
                #validationCtxt = libxml2.schemaNewValidCtxt(None)
                if (validationCtxt is None):
                    self.failCritical("Could not create the validation context.")
                    return
                try:
                    self.debugMsg("validating instance")
                    if options.validationSAX:
                        instance_Err = validationCtxt.schemaValidateFile(filePath, 0)
                    else:
                        instance_Err = validationCtxt.schemaValidateDoc(instance)
                    self.debugMsg("after instance validation")
                    self.debugMsg("instance-err: %d" % instance_Err)
                    # 0 means valid; compare against the expected validity.
                    if (instance_Err != 0 and self.val == 1) or (instance_Err == 0 and self.val == 0):
                        self.debugMsg("instance result is BAD")
                        if (instance_Err != 0):
                            self.fail(msgInstanceNotValidButShould)
                        else:
                            self.fail(msgInstanceValidButShouldNot)
                    else:
                        self.debugMsg("instance result is OK")
                finally:
                    del validationCtxt
            finally:
                del schema
        finally:
            if instance is not None:
                instance.freeDoc()
####################
# Test runner class.
#
class XSTCTestRunner:
    """Runs the registered tests, maintains result counters and produces
    the per-combine and overall reports."""

    # Counter keys for the result dictionaries.
    CNT_TOTAL = 0
    CNT_RAN = 1
    CNT_SUCCEEDED = 2
    CNT_FAILED = 3
    CNT_UNIMPLEMENTED = 4
    CNT_INTERNAL = 5
    CNT_BAD = 6
    CNT_EXCEPTED = 7
    CNT_MEMLEAK = 8
    CNT_NOSCHEMA = 9
    CNT_NOTACCEPTED = 10
    CNT_SCHEMA_TEST = 11

    def __init__(self):
        self.logFile = None
        self.counters = self.createCounters()
        self.testList = []
        self.combinesRan = {}   # combine name -> list of tests run
        self.groups = {}        # group name -> XSTCTestGroup
        self.curGroup = None

    def createCounters(self):
        """Returns a fresh all-zero counter dictionary."""
        counters = {self.CNT_TOTAL:0, self.CNT_RAN:0, self.CNT_SUCCEEDED:0,
                    self.CNT_FAILED:0, self.CNT_UNIMPLEMENTED:0, self.CNT_INTERNAL:0, self.CNT_BAD:0,
                    self.CNT_EXCEPTED:0, self.CNT_MEMLEAK:0, self.CNT_NOSCHEMA:0, self.CNT_NOTACCEPTED:0,
                    self.CNT_SCHEMA_TEST:0}
        return counters

    def addTest(self, test):
        self.testList.append(test)
        test.initTest(self)

    def getGroup(self, groupName):
        return self.groups[groupName]

    def addGroup(self, group):
        self.groups[group.name] = group

    def updateCounters(self, test, counters):
        """Folds one finished test's flags into `counters` and returns it."""
        if test.memLeak != 0:
            counters[self.CNT_MEMLEAK] += 1
        if not test.failed:
            counters[self.CNT_SUCCEEDED] +=1
        if test.failed:
            counters[self.CNT_FAILED] += 1
        if test.bad:
            counters[self.CNT_BAD] += 1
        if test.unimplemented:
            counters[self.CNT_UNIMPLEMENTED] += 1
        if test.internalErr:
            counters[self.CNT_INTERNAL] += 1
        if test.noSchemaErr:
            counters[self.CNT_NOSCHEMA] += 1
        if test.excepted:
            counters[self.CNT_EXCEPTED] += 1
        if not test.accepted:
            counters[self.CNT_NOTACCEPTED] += 1
        if test.isSchema:
            counters[self.CNT_SCHEMA_TEST] += 1
        return counters

    def displayResults(self, out, all, combName, counters):
        """Writes the detailed (multi-line) report to `out`."""
        out.write("\n")
        if all:
            if options.combines is not None:
                out.write("combine(s): %s\n" % str(options.combines))
        elif combName is not None:
            out.write("combine : %s\n" % combName)
        out.write(" total : %d\n" % counters[self.CNT_TOTAL])
        if all or options.combines is not None:
            out.write(" ran : %d\n" % counters[self.CNT_RAN])
            out.write(" (schemata) : %d\n" % counters[self.CNT_SCHEMA_TEST])
        # out.write(" succeeded : %d\n" % counters[self.CNT_SUCCEEDED])
        out.write(" not accepted : %d\n" % counters[self.CNT_NOTACCEPTED])
        if counters[self.CNT_FAILED] > 0:
            out.write(" failed : %d\n" % counters[self.CNT_FAILED])
            out.write(" -> internal : %d\n" % counters[self.CNT_INTERNAL])
            out.write(" -> unimpl. : %d\n" % counters[self.CNT_UNIMPLEMENTED])
            out.write(" -> skip-invalid-schema : %d\n" % counters[self.CNT_NOSCHEMA])
            out.write(" -> bad : %d\n" % counters[self.CNT_BAD])
            out.write(" -> exceptions : %d\n" % counters[self.CNT_EXCEPTED])
        out.write(" memory leaks : %d\n" % counters[self.CNT_MEMLEAK])

    def displayShortResults(self, out, all, combName, counters):
        """Writes the one-line summary report to `out`."""
        out.write("Ran %d of %d tests (%d schemata):" % (counters[self.CNT_RAN],
                  counters[self.CNT_TOTAL], counters[self.CNT_SCHEMA_TEST]))
        # out.write(" succeeded : %d\n" % counters[self.CNT_SUCCEEDED])
        if counters[self.CNT_NOTACCEPTED] > 0:
            out.write(" %d not accepted" % (counters[self.CNT_NOTACCEPTED]))
        if counters[self.CNT_FAILED] > 0 or counters[self.CNT_MEMLEAK] > 0:
            if counters[self.CNT_FAILED] > 0:
                out.write(" %d failed" % (counters[self.CNT_FAILED]))
                out.write(" (")
                if counters[self.CNT_INTERNAL] > 0:
                    out.write(" %d internal" % (counters[self.CNT_INTERNAL]))
                if counters[self.CNT_UNIMPLEMENTED] > 0:
                    out.write(" %d unimplemented" % (counters[self.CNT_UNIMPLEMENTED]))
                if counters[self.CNT_NOSCHEMA] > 0:
                    out.write(" %d skip-invalid-schema" % (counters[self.CNT_NOSCHEMA]))
                if counters[self.CNT_BAD] > 0:
                    out.write(" %d bad" % (counters[self.CNT_BAD]))
                if counters[self.CNT_EXCEPTED] > 0:
                    out.write(" %d exception" % (counters[self.CNT_EXCEPTED]))
                out.write(" )")
            if counters[self.CNT_MEMLEAK] > 0:
                out.write(" %d leaks" % (counters[self.CNT_MEMLEAK]))
            out.write("\n")
        else:
            out.write(" all passed\n")

    def reportCombine(self, combName):
        """Aggregates and displays the results of one combine."""
        global options
        counters = self.createCounters()
        #
        # Compute evaluation counters.
        #
        for test in self.combinesRan[combName]:
            counters[self.CNT_TOTAL] += 1
            counters[self.CNT_RAN] += 1
            counters = self.updateCounters(test, counters)
        if options.reportErrCombines and (counters[self.CNT_FAILED] == 0) and (counters[self.CNT_MEMLEAK] == 0):
            pass
        else:
            if options.enableLog:
                self.displayResults(self.logFile, False, combName, counters)
            self.displayResults(sys.stdout, False, combName, counters)

    def displayTestLog(self, test):
        sys.stdout.writelines(test.log)
        sys.stdout.write("~~~~~~~~~~\n")

    def reportTest(self, test):
        """Writes one test's log to the log file and/or stdout according
        to the reporting options."""
        global options
        error = test.failed or test.memLeak != 0
        #
        # Only erroneous tests will be written to the log,
        # except @verbose is switched on.
        #
        if options.enableLog and (options.verbose or error):
            self.logFile.writelines(test.log)
            self.logFile.write("~~~~~~~~~~\n")
        #
        # if not @silent, only erroneous tests will be
        # written to stdout, except @verbose is switched on.
        #
        if not options.silent:
            if options.reportInternalErrOnly and test.internalErr:
                self.displayTestLog(test)
            if options.reportMemLeakErrOnly and test.memLeak != 0:
                self.displayTestLog(test)
            if options.reportUnimplErrOnly and test.unimplemented:
                self.displayTestLog(test)
            if (options.verbose or error) and (not options.reportInternalErrOnly) and (not options.reportMemLeakErrOnly) and (not options.reportUnimplErrOnly):
                self.displayTestLog(test)

    def addToCombines(self, test):
        # Modernized `has_key` to `in` (works on Python 2 and 3) and
        # dropped an unused local (`found`).
        if test.combineName in self.combinesRan:
            self.combinesRan[test.combineName].append(test)
        else:
            self.combinesRan[test.combineName] = [test]

    def run(self):
        """Runs all registered tests, applying the CLI filters, then
        emits the combine and final reports."""
        global options
        if options.info:
            for test in self.testList:
                self.addToCombines(test)
            sys.stdout.write("Combines: %d\n" % len(self.combinesRan))
            sys.stdout.write("%s\n" % list(self.combinesRan.keys()))
            return
        if options.enableLog:
            self.logFile = open(options.logFile, "w")
        try:
            for test in self.testList:
                self.counters[self.CNT_TOTAL] += 1
                #
                # Filter tests.
                #
                if options.singleTest is not None and options.singleTest != "":
                    if (test.name != options.singleTest):
                        continue
                elif options.combines is not None:
                    if not options.combines.__contains__(test.combineName):
                        continue
                elif options.testStartsWith is not None:
                    if not test.name.startswith(options.testStartsWith):
                        continue
                elif options.combineStartsWith is not None:
                    if not test.combineName.startswith(options.combineStartsWith):
                        continue
                if options.maxTestCount != -1 and self.counters[self.CNT_RAN] >= options.maxTestCount:
                    break
                self.counters[self.CNT_RAN] += 1
                #
                # Run the thing, dammit.
                #
                try:
                    test.setUp()
                    try:
                        test.run()
                    finally:
                        test.tearDown()
                finally:
                    #
                    # Evaluate.
                    #
                    test.finalize()
                    self.reportTest(test)
                    if options.reportCombines or options.reportErrCombines:
                        self.addToCombines(test)
                    self.counters = self.updateCounters(test, self.counters)
        finally:
            if options.reportCombines or options.reportErrCombines:
                #
                # Build a report for every single combine.
                #
                # BUGFIX: `self.combinesRan.keys().sort(None)` sorted a
                # temporary list and discarded it; iterate a sorted copy.
                #
                for key in sorted(self.combinesRan.keys()):
                    self.reportCombine(key)
            #
            # Display the final report.
            #
            if options.silent:
                self.displayShortResults(sys.stdout, True, None, self.counters)
            else:
                sys.stdout.write("===========================\n")
                self.displayResults(sys.stdout, True, None, self.counters)
            # Avoid leaking the log file handle.
            if self.logFile is not None:
                self.logFile.close()
                self.logFile = None
| bsd-3-clause |
gauravjns/taiga-back | taiga/projects/attachments/models.py | 10 | 3995 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import os
import os.path as path
from unidecode import unidecode
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils import timezone
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_valid_filename
from taiga.base.utils.iterators import split_by_n
def get_attachment_file_path(instance, filename):
    """Build a randomized storage path for an uploaded attachment.

    The file lands under attachments/<a>/<b>/<c>/<d>/<rest>/<name>, where
    the directory components come from a SHA-256 digest of the current
    timestamp plus 1024 random bytes, so paths never collide and cannot
    be guessed from the file name alone.
    """
    safe_name = get_valid_filename(path.basename(filename))
    digest = hashlib.sha256()
    digest.update(force_bytes(timezone.now().isoformat()))
    digest.update(os.urandom(1024))
    # First four hex characters become single-character directories; the
    # remainder becomes one final directory component.
    first, second, third, fourth, *tail = split_by_n(digest.hexdigest(), 1)
    bucket = path.join(first, second, third, fourth, "".join(tail))
    return path.join("attachments", bucket, safe_name)
class Attachment(models.Model):
    """A file attached to any project object via a generic foreign key."""
    # Uploader; kept nullable so attachments survive user deletion.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
                              related_name="change_attachments",
                              verbose_name=_("owner"))
    project = models.ForeignKey("projects.Project", null=False, blank=False,
                                related_name="attachments", verbose_name=_("project"))
    # Generic relation: (content_type, object_id) resolve to content_object.
    content_type = models.ForeignKey(ContentType, null=False, blank=False,
                                     verbose_name=_("content type"))
    object_id = models.PositiveIntegerField(null=False, blank=False,
                                            verbose_name=_("object id"))
    content_object = generic.GenericForeignKey("content_type", "object_id")
    created_date = models.DateTimeField(null=False, blank=False,
                                        verbose_name=_("created date"),
                                        default=timezone.now)
    # Maintained automatically in save() unless importing (see below).
    modified_date = models.DateTimeField(null=False, blank=False,
                                         verbose_name=_("modified date"))
    name = models.CharField(blank=True, default="", max_length=500)
    # File size in bytes; None until filled in elsewhere (not editable).
    size = models.IntegerField(null=True, blank=True, editable=False, default=None)
    attached_file = models.FileField(max_length=500, null=True, blank=True,
                                     upload_to=get_attachment_file_path,
                                     verbose_name=_("attached file"))
    is_deprecated = models.BooleanField(default=False, verbose_name=_("is deprecated"))
    description = models.TextField(null=False, blank=True, verbose_name=_("description"))
    order = models.IntegerField(default=0, null=False, blank=False, verbose_name=_("order"))
    # Presumably set truthy by import code so imported timestamps are kept
    # intact (see save()) -- confirm against the importer.
    _importing = None
    class Meta:
        verbose_name = "attachment"
        verbose_name_plural = "attachments"
        ordering = ["project", "created_date", "id"]
        permissions = (
            ("view_attachment", "Can view attachment"),
        )
    def save(self, *args, **kwargs):
        # Refresh modified_date on every normal save; while importing, an
        # already-provided timestamp is preserved.
        if not self._importing or not self.modified_date:
            self.modified_date = timezone.now()
        return super().save(*args, **kwargs)
    def __str__(self):
        return "Attachment: {}".format(self.id)
| agpl-3.0 |
JSchwehn/ahmia | tools/test_hidden_services.py | 6 | 9303 | # -*- coding: utf-8 -*-
"""Test all hidden service's: HTTP GET tells if the service is online."""
import httplib
import signal # To timeout the TCP/HTTP connection
import socket
import urllib2
from urllib2 import Request
import requests
import simplejson
import socks
from bs4 import BeautifulSoup # To parse HTML
socket.setdefaulttimeout(80) # Timeout after 1min 20s
class MyHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that does not follow redirects.

    Instead of returning a response object, http_error_302() returns the
    plain string "<code>:::<location>"; callers look for the ":::" marker
    to detect that a redirect happened (see hs_downloader()).
    """
    def http_error_302(self, req, fp, code, msg, headers):
        # Short-circuit: report the redirect instead of following it.
        return str(code) + ":::" + headers['Location']
        #return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
    # Treat all other redirect status codes the same way.
    http_error_301 = http_error_303 = http_error_307 = http_error_302
class SocksiPyConnection_SSL(httplib.HTTPSConnection):
    """Socks connection for HTTPS: an HTTPSConnection whose socket is a
    SocksiPy socket, so traffic is tunnelled through a SOCKS proxy.

    NOTE(review): this connect() does not SSL-wrap the socket the way
    HTTPSConnection.connect() normally would -- confirm that TLS is
    actually negotiated on this path.
    """
    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True,
                 username=None, password=None, *args, **kwargs):
        # Remember the proxy settings; they are applied on every connect().
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns,
                          username, password)
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)
    def connect(self):
        self.sock = socks.socksocket()
        self.sock.setproxy(*self.proxyargs)
        if isinstance(self.timeout, float):
            self.sock.settimeout(self.timeout)
        self.sock.connect((self.host, self.port))
class SocksiPyHandler_SSL(urllib2.HTTPSHandler):
    """urllib2 handler that opens HTTPS requests through the SOCKS proxy.

    BUG FIX: this class previously overrode http_open(), so https://
    requests were dispatched by urllib2 to the inherited
    HTTPSHandler.https_open() and bypassed the SOCKS proxy completely
    (a direct, non-Tor connection).  The override must be https_open().
    """
    def __init__(self, *args, **kwargs):
        # Proxy parameters forwarded to every SocksiPyConnection_SSL.
        self.args = args
        self.kw = kwargs
        urllib2.HTTPSHandler.__init__(self)
    def https_open(self, req):
        def build(host, port=None, strict=None, timeout=0):
            """Build a proxied HTTPS connection."""
            conn = SocksiPyConnection_SSL(*self.args, host=host, port=port,
                                          strict=strict, timeout=timeout, **self.kw)
            return conn
        return self.do_open(build, req)
class SocksiPyConnection(httplib.HTTPConnection):
    """Socks connection for HTTP: an HTTPConnection whose socket is a
    SocksiPy socket, so traffic is tunnelled through a SOCKS proxy."""
    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True,
                 username=None, password=None, *args, **kwargs):
        # Remember the proxy settings for connect().
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns,
                          username, password)
        httplib.HTTPConnection.__init__(self, *args, **kwargs)
    def connect(self):
        self.sock = socks.socksocket()
        self.sock.setproxy(*self.proxyargs)
        if isinstance(self.timeout, float):
            self.sock.settimeout(self.timeout)
        self.sock.connect((self.host, self.port))
class SocksiPyHandler(urllib2.HTTPHandler):
    """urllib2 handler that opens HTTP requests through SocksiPyConnection."""
    def __init__(self, *args, **kwargs):
        # Proxy parameters forwarded to every SocksiPyConnection.
        self.args = args
        self.kw = kwargs
        urllib2.HTTPHandler.__init__(self)
    def http_open(self, req):
        def build(host, port=None, strict=None, timeout=0):
            """Build a proxied HTTP connection."""
            conn = SocksiPyConnection(*self.args, host=host, port=port,
                                      strict=strict, timeout=timeout, **self.kw)
            return conn
        return self.do_open(build, req)
class Timeout(object):
    """Context manager that raises Timeout.Timeout after *sec* seconds.

    Implemented with SIGALRM, so it only works on Unix and only in the
    main thread.
    """
    class Timeout(Exception):
        """Raised when the alarm fires."""
    def __init__(self, sec):
        self.sec = sec
    def __enter__(self):
        # Arm the alarm on entry.
        signal.signal(signal.SIGALRM, self.raise_timeout)
        signal.alarm(self.sec)
    def __exit__(self, *args):
        # Disarm the alarm on exit, whatever happened inside the block.
        signal.alarm(0)
    def raise_timeout(self, *args):
        raise Timeout.Timeout()
def open_req(req):
    """Open *req* with a hard 60-second wall-clock limit.

    Returns True when the server answered (200 prints the body, any
    other code prints the status); returns False on timeout or error.
    """
    try:
        # Run block of code with timeouts
        with Timeout(60):
            handle = urllib2.urlopen(req)
            if handle.getcode() != 200:
                print handle.getcode()
                handle.close()
            else:
                print handle.read()
                handle.close()
            return True
    except Timeout.Timeout:
        print "Timeout"
    except urllib2.HTTPError, error:
        print 'HTTPError = ' + str(error.code)
    except urllib2.URLError, error:
        print 'URLError = ' + str(error.reason)
    except Exception:
        # Last-resort catch-all: log the traceback and report failure.
        import traceback
        print 'generic exception: ' + traceback.format_exc()
    return False
def send_put(url, data):
    """Send an HTTP PUT request with *data* as the body.

    (The previous docstring said POST, but get_method forces PUT.)
    Failures are printed, not raised.
    """
    req = Request(url)
    req.add_data(data)
    # Force the verb to PUT; urllib2 would otherwise use POST when a
    # body is present.
    req.get_method = lambda: 'PUT'
    if not open_req(req):
        print "Updating failed:"
        print url
        print data
def main():
    """Test each hidden service with HTTP GET.

    Fetches the list of all known onion links from the local backend,
    probes each over Tor, and PUTs the result back to the backend's
    /address/<id>/status endpoint.
    """
    urldomains = 'https://127.0.0.1:45454/alldomains'
    links = get2txt(urldomains).split('\n')
    for link in links:
        if not link:
            continue
        put_url = 'https://127.0.0.1:45454/address/'
        # Strip the 7-char "http://" prefix and 7-char ".onion/" suffix
        # to isolate the hidden-service id.
        hs_id = link[7:-7]
        put_url = put_url + hs_id + "/status"
        data = hs_online_check('http://'+str(hs_id)+'.onion/', put_url)
        send_put(put_url, data)
        #data = hs_online_check('https://'+str(hs_id)+'.onion/', put_url)
        #send_put(put_url, data)
def get2txt(url):
    """Read from URL and return the response body text ("" on failure).

    NOTE(review): verify=False disables TLS certificate checking --
    presumably intentional for the self-signed localhost backend.
    NOTE(review): requests does not raise urllib2.HTTPError, so this
    handler likely never fires; confirm which exceptions can escape.
    """
    txt = ""
    try:
        txt = requests.get(url, verify=False).text
        return txt
    except urllib2.HTTPError as error:
        print error
    return txt
def hs_online_check(onion, put_url):
    """Online check for one hidden service.

    Wraps hs_http_checker(); any exception is treated as "no data"
    and yields "".
    """
    try:
        print onion
        return hs_http_checker(onion, put_url)
    except Exception as error:
        print "Returned nothing."
        print error
        return ""
def hs_http_checker(onion, put_url):
    """Socks connection to the Tor network. Try to download an onion.

    Chooses the plain or SSL SOCKS handler from the URL scheme and
    opens the connection through the local Tor SOCKS proxy
    (127.0.0.1:9050) with a browser-like User-Agent.
    """
    if onion[:5] == "https":
        socks_con = SocksiPyHandler_SSL(socks.PROXY_TYPE_SOCKS4, '127.0.0.1', 9050)
    else:
        socks_con = SocksiPyHandler(socks.PROXY_TYPE_SOCKS4, '127.0.0.1', 9050)
    opener = urllib2.build_opener(MyHTTPRedirectHandler, socks_con)
    header = "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0"
    opener.addheaders = [('User-agent', header)]
    return hs_downloader(opener, onion, put_url)
def hs_downloader(opener, onion, put_url):
    """Try to download the front page and description.json.

    Returns a JSON description string for an online service, "" for a
    404 response.
    """
    test_handle = opener.open(onion)
    # MyHTTPRedirectHandler returns the plain string "<code>:::<location>"
    # instead of a response object when the server redirected; in that
    # case record the redirect and fetch the target instead.
    # NOTE(review): the ":::" membership test also iterates a real
    # response object line by line -- confirm this cannot consume part
    # of the body before read() below.
    if ":::" in test_handle:
        print "Redirect..."
        redirect_json = build_json_answer(onion)
        send_put(put_url, redirect_json)
        handle = opener.open(test_handle.split(":::")[1])
    else:
        handle = test_handle
    code = handle.getcode()
    print "Site answers to the online check with code %d." % code
    if code != 404: # It is up
        json_html = analyze_front_page(handle.read())
        json_official = hs_download_description(opener, onion)
        # Prefer the service's own description.json, then scraped HTML
        # metadata, then a bare placeholder.
        if json_official:
            json_data = json_official
        elif json_html:
            json_data = json_html
        else:
            json_data = build_json_answer(onion)
        return json_data
    else:
        return ""
def build_json_answer(onion):
    """Build the minimal placeholder description JSON for *onion*.

    Only "not_official" and "title" carry data; every other field is
    left empty for the backend to fill in later.
    """
    pattern = ('{"not_official": 1, "title": "%s", "description": "'
               '", "relation": "","keywords": "'
               '", "type": "", "language": "",'
               '"contactInformation": "" }')
    return pattern % str(onion)
def analyze_front_page(raw_html):
    """Analyze a raw HTML front page.

    Extracts <title>, meta description and meta keywords (falling back
    to the first <h1> for the title) and packs them, truncated, into
    the same JSON shape as build_json_answer().  Returns "" when the
    page yields nothing usable or parsing fails.
    """
    try:
        soup = BeautifulSoup(raw_html)
        title_element = soup.find('title')
        desc_element = soup.find(attrs={"name":"description"})
        keywords_element = soup.find(attrs={"name":"keywords"})
        title = ""
        keywords = ""
        description = ""
        h1_element = soup.find('h1')
        if title_element:
            title = title_element.string.encode('utf-8')
        if desc_element and desc_element['content']:
            description = desc_element['content'].encode('utf-8')
        if keywords_element and keywords_element['content']:
            keywords = keywords_element['content'].encode('utf-8')
        # Fall back to the first heading when there is no <title>.
        if not title and h1_element:
            title = h1_element.string.encode('utf-8')
        if title or keywords or description:
            # Truncate each field to keep the PUT payload small.
            # NOTE(review): the values are not JSON-escaped; a quote in
            # a page title produces invalid JSON.
            json_data = '{"not_official": 1, "title": "'
            json_data = json_data + title[:100] + '", "description": "'
            json_data = json_data + description[:500] + '", "relation": "",'
            json_data = json_data + '"keywords": "' + keywords[:200]
            json_data = json_data + '", "type": "", "language": "",'
            json_data = json_data + '"contactInformation": "" }'
            return json_data
        else:
            return ""
    except Exception as error:
        print error
        return ""
def hs_download_description(opener, onion):
    """Try to download and validate <onion>/description.json.

    Returns the JSON text (newlines stripped) when it parses and is
    under 5000 bytes; "" on any error.  NOTE(review): an oversized but
    error-free description falls through and returns None -- callers
    only truth-test the result, so this is currently harmless.
    """
    try:
        dec_url = str(onion)+'description.json'
        handle = opener.open(dec_url)
        descr = handle.read()
        # There cannot be that big descriptions
        if len(descr) < 5000:
            descr = descr.replace('\r', '')
            descr = descr.replace('\n', '')
            # Parse only to validate; the raw text is what gets returned.
            simplejson.loads(descr)
            return descr
    except Exception as error:
        print error
        return ""
# Script entry point: probe every known hidden service.
if __name__ == '__main__':
    main()
| bsd-3-clause |
purism/pdak | daklib/formats.py | 10 | 2635 | #!/usr/bin/python
""" Helper functions for the various changes formats
@contact: Debian FTPMaster <ftpmaster@debian.org>
@copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
@copyright: 2009 Chris Lamb <lamby@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
# <mhy> !!!!11111iiiiiioneoneoneone
# <dak> mhy: Error: "!!!11111iiiiiioneoneoneone" is not a valid command.
# <mhy> dak: oh shut up
# <dak> mhy: Error: "oh" is not a valid command.
################################################################################
from regexes import re_verwithext
from dak_exceptions import UnknownFormatError
def parse_format(txt):
    """
    Parse a .changes Format string into a tuple representation for easy
    comparison.

    >>> parse_format('1.0')
    (1, 0)
    >>> parse_format('8.4 (hardy)')
    (8, 4, 'hardy')

    If the format doesn't match these forms, raises UnknownFormatError.

    @type txt: string
    @param txt: Format string to parse

    @rtype: tuple
    @return: Parsed format

    @raise UnknownFormatError: Unknown Format: line
    """
    match = re_verwithext.search(txt)
    if match is None:
        raise UnknownFormatError(txt)
    major, minor, suffix = match.groups()
    if minor is None:
        # "1.0"-style strings carry the whole version in the first group.
        parsed = (int(float(major)), 0, suffix)
    else:
        parsed = (int(major), int(minor), suffix)
    # Drop a missing suffix so plain versions compare as two-tuples.
    if parsed[2] is None:
        parsed = parsed[:2]
    return parsed
def validate_changes_format(format, field):
    """
    Validate a tuple-representation of a .changes Format: field. Raises
    UnknownFormatError if the field is invalid, otherwise return type is
    undefined.
    """
    # Accept only format versions 1.5 through 1.8 inclusive.
    if not ((1, 5) <= format <= (1, 8)):
        raise UnknownFormatError(repr(format))
    # Before 1.8 only the 'files' field may carry a Format: value.
    if format < (1, 8) and field != 'files':
        raise UnknownFormatError(repr(format))
| gpl-2.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/plat-sunos5/TYPES.py | 75 | 5806 | # Generated by h2py from /usr/include/sys/types.h
# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
# Included from sys/feature_tests.h
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506L
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
# Included from sys/machtypes.h
# Included from sys/inttypes.h
# Included from sys/int_types.h
# Included from sys/int_limits.h
INT8_MAX = (127)
INT16_MAX = (32767)
INT32_MAX = (2147483647)
INTMAX_MAX = INT32_MAX
INT_LEAST8_MAX = INT8_MAX
INT_LEAST16_MAX = INT16_MAX
INT_LEAST32_MAX = INT32_MAX
INT8_MIN = (-128)
INT16_MIN = (-32767-1)
INT32_MIN = (-2147483647-1)
INTMAX_MIN = INT32_MIN
INT_LEAST8_MIN = INT8_MIN
INT_LEAST16_MIN = INT16_MIN
INT_LEAST32_MIN = INT32_MIN
# Included from sys/int_const.h
def INT8_C(c): return (c)
def INT16_C(c): return (c)
def INT32_C(c): return (c)
def INT64_C(c): return __CONCAT__(c,l)
def INT64_C(c): return __CONCAT__(c,ll)
def UINT8_C(c): return __CONCAT__(c,u)
def UINT16_C(c): return __CONCAT__(c,u)
def UINT32_C(c): return __CONCAT__(c,u)
def UINT64_C(c): return __CONCAT__(c,ul)
def UINT64_C(c): return __CONCAT__(c,ull)
def INTMAX_C(c): return __CONCAT__(c,l)
def UINTMAX_C(c): return __CONCAT__(c,ul)
def INTMAX_C(c): return __CONCAT__(c,ll)
def UINTMAX_C(c): return __CONCAT__(c,ull)
def INTMAX_C(c): return (c)
def UINTMAX_C(c): return (c)
# Included from sys/int_fmtio.h
PRId8 = "d"
PRId16 = "d"
PRId32 = "d"
PRId64 = "ld"
PRId64 = "lld"
PRIdLEAST8 = "d"
PRIdLEAST16 = "d"
PRIdLEAST32 = "d"
PRIdLEAST64 = "ld"
PRIdLEAST64 = "lld"
PRIi8 = "i"
PRIi16 = "i"
PRIi32 = "i"
PRIi64 = "li"
PRIi64 = "lli"
PRIiLEAST8 = "i"
PRIiLEAST16 = "i"
PRIiLEAST32 = "i"
PRIiLEAST64 = "li"
PRIiLEAST64 = "lli"
PRIo8 = "o"
PRIo16 = "o"
PRIo32 = "o"
PRIo64 = "lo"
PRIo64 = "llo"
PRIoLEAST8 = "o"
PRIoLEAST16 = "o"
PRIoLEAST32 = "o"
PRIoLEAST64 = "lo"
PRIoLEAST64 = "llo"
PRIx8 = "x"
PRIx16 = "x"
PRIx32 = "x"
PRIx64 = "lx"
PRIx64 = "llx"
PRIxLEAST8 = "x"
PRIxLEAST16 = "x"
PRIxLEAST32 = "x"
PRIxLEAST64 = "lx"
PRIxLEAST64 = "llx"
PRIX8 = "X"
PRIX16 = "X"
PRIX32 = "X"
PRIX64 = "lX"
PRIX64 = "llX"
PRIXLEAST8 = "X"
PRIXLEAST16 = "X"
PRIXLEAST32 = "X"
PRIXLEAST64 = "lX"
PRIXLEAST64 = "llX"
PRIu8 = "u"
PRIu16 = "u"
PRIu32 = "u"
PRIu64 = "lu"
PRIu64 = "llu"
PRIuLEAST8 = "u"
PRIuLEAST16 = "u"
PRIuLEAST32 = "u"
PRIuLEAST64 = "lu"
PRIuLEAST64 = "llu"
SCNd16 = "hd"
SCNd32 = "d"
SCNd64 = "ld"
SCNd64 = "lld"
SCNi16 = "hi"
SCNi32 = "i"
SCNi64 = "li"
SCNi64 = "lli"
SCNo16 = "ho"
SCNo32 = "o"
SCNo64 = "lo"
SCNo64 = "llo"
SCNu16 = "hu"
SCNu32 = "u"
SCNu64 = "lu"
SCNu64 = "llu"
SCNx16 = "hx"
SCNx32 = "x"
SCNx64 = "lx"
SCNx64 = "llx"
PRIdMAX = "ld"
PRIoMAX = "lo"
PRIxMAX = "lx"
PRIuMAX = "lu"
PRIdMAX = "lld"
PRIoMAX = "llo"
PRIxMAX = "llx"
PRIuMAX = "llu"
PRIdMAX = "d"
PRIoMAX = "o"
PRIxMAX = "x"
PRIuMAX = "u"
SCNiMAX = "li"
SCNdMAX = "ld"
SCNoMAX = "lo"
SCNxMAX = "lx"
SCNiMAX = "lli"
SCNdMAX = "lld"
SCNoMAX = "llo"
SCNxMAX = "llx"
SCNiMAX = "i"
SCNdMAX = "d"
SCNoMAX = "o"
SCNxMAX = "x"
# Included from sys/types32.h
SHRT_MIN = (-32768)
SHRT_MAX = 32767
USHRT_MAX = 65535
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-9223372036854775807L-1L)
LONG_MAX = 9223372036854775807L
LONG_MIN = (-2147483647L-1L)
LONG_MAX = 2147483647L
P_MYID = (-1)
# Included from sys/select.h
# Included from sys/time.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return \
from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return \
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000
# Included from sys/time_impl.h
def TIMESPEC_OVERFLOW(ts): return \
def ITIMERSPEC_OVERFLOW(it): return \
__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_HIGHRES = 4
CLOCK_MAX = 5
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))
def TICK_TO_SEC(tick): return ((tick) / hz)
def SEC_TO_TICK(sec): return ((sec) * hz)
def TICK_TO_MSEC(tick): return \
def MSEC_TO_TICK(msec): return \
def MSEC_TO_TICK_ROUNDUP(msec): return \
def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
def USEC_TO_TICK_ROUNDUP(usec): return \
def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
def NSEC_TO_TICK_ROUNDUP(nsec): return \
def TIMEVAL_TO_TICK(tvp): return \
def TIMESTRUC_TO_TICK(tsp): return \
# Included from time.h
from TYPES import *
# Included from iso/time_iso.h
NULL = 0L
NULL = 0
CLOCKS_PER_SEC = 1000000
FD_SETSIZE = 65536
FD_SETSIZE = 1024
_NBBY = 8
NBBY = _NBBY
def FD_ZERO(p): return bzero((p), sizeof (*(p)))
| apache-2.0 |
smikes/depot_tools | gn.py | 4 | 1185 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is a wrapper around the GN binary that is pulled from Google
Cloud Storage when you sync Chrome. The binaries go into platform-specific
subdirectories in the source tree.
This script makes there be one place for forwarding to the correct platform's
binary. It will also automatically try to find the gn binary when run inside
the chrome source tree, so users can just type "gn" on the command line
(normally depot_tools is on the path)."""
import gclient_utils
import os
import subprocess
import sys
def main(args):
    """Locate the platform-specific gn binary and run it.

    Returns gn's exit status; exits with status 1 when no buildtools
    checkout can be found.  NOTE(review): *args* is ignored -- the call
    below forwards sys.argv[1:] directly.
    """
    bin_path = gclient_utils.GetBuildtoolsPlatformBinaryPath()
    if not bin_path:
        print >> sys.stderr, ('gn.py: Could not find checkout in any parent of '
                          'the current path.\nThis must be run inside a '
                          'checkout.')
        sys.exit(1)
    gn_path = os.path.join(bin_path, 'gn' + gclient_utils.GetExeSuffix())
    return subprocess.call([gn_path] + sys.argv[1:])
# Forward gn's exit status when run as a script.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| bsd-3-clause |
bm5w/second_dataS | test_quicksort.py | 1 | 1531 | import pytest
from quicksort import quicksort
def test_quicksort_simple():
    """Three elements out of order."""
    data = [1, 3, 2]  # renamed from 'input': do not shadow the builtin
    assert quicksort(data) == [1, 2, 3]
def test_quicksort():
    """Unsorted list of distinct values."""
    data = [54, 26, 93, 17, 71, 31, 44, 55, 20]  # was 'input' (shadows builtin)
    assert quicksort(data) == [17, 20, 26, 31, 44, 54, 55, 71, 93]
def test_quicksort_duplicate():
    """One duplicated value."""
    data = [54, 26, 93, 17, 71, 31, 44, 55, 20, 20]  # was 'input' (shadows builtin)
    assert quicksort(data) == [17, 20, 20, 26, 31, 44, 54, 55, 71, 93]
def test_quicksort_more_duplicates():
    """Several copies of the same value."""
    data = [54, 26, 93, 17, 71, 31, 44, 55, 20, 20, 20, 20]  # was 'input' (shadows builtin)
    assert quicksort(data) == [17, 20, 20, 20, 20, 26, 31, 44, 54, 55, 71, 93]
def test_quicksort_large():
    """Already-sorted large input (Python 2: range() returns a list)."""
    data = range(10000)  # was 'input' (shadows builtin)
    assert quicksort(data) == range(10000)
def test_quicksort_big_floats():
    """Sorted float input stays sorted."""
    data = [x*0.01 for x in range(0, 1000)]  # was 'input' (shadows builtin)
    assert quicksort(data) == [x*0.01 for x in range(0, 1000)]
def test_wrong_type():
    """Unsupported input type raises TypeError."""
    data = 'x'  # was 'input' (shadows builtin)
    with pytest.raises(TypeError):
        quicksort(data)
def test_quicksort_big_reverse():
    """Worst-case style input: strictly descending."""
    data = range(1000)[::-1]  # was 'input' (shadows builtin)
    assert quicksort(data) == range(1000)
def test_quicksort_big_increase_decrease():
    """Ascending run followed by a descending run."""
    data = range(500)+range(500)[::-1]  # was 'input' (shadows builtin)
    expected = range(500)*2
    expected.sort()
    assert quicksort(data) == expected
def test_quicksort_duplicates():
    """Every value appears twice, reversed on input."""
    data = (range(100)+range(100))[::-1]  # was 'input' (shadows builtin)
    expected = range(100)+range(100)
    expected.sort()
    assert quicksort(data) == expected
def test_quicksort_all_duplicates():
    """Every element identical."""
    data = [100]*20  # was 'input' (shadows builtin)
    assert quicksort(data) == [100]*20
| mit |
mialwe/mngb | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Convert the packed list of little-endian u32 values on stdin into
# space-separated "<hex index>=<decimal value>" pairs on stdout.
i = 0
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF: all records consumed.
        break
    elif len(buf) != 4:
        # Truncated record: terminate the output line and abort.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
avsm/lifedb-server | client/python/lifedb/tests/client.py | 1 | 5909 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import doctest
import os
import unittest
import StringIO
import time
import tempfile
from lifedb import client
import httplib2
httplib2.debuglevel = 0
class BaseTestCase(unittest.TestCase):
    """Shared fixture: read server location and credentials from the
    environment, with defaults suitable for a local dev server."""
    def setUp(self):
        env = os.environ
        self.uri = env.get('LIFEDB_URI', client.DEFAULT_BASE_URI)
        self.username = env.get('LIFEDB_TEST_USERNAME', 'foo')
        self.password = env.get('LIFEDB_TEST_PASSWORD', 'bar')
    def tearDown(self):
        """Nothing to clean up."""
class LoginOKTestCase(BaseTestCase):
    """Base for tests that need an authenticated server connection."""
    def setUp(self):
        BaseTestCase.setUp(self)
        self.server = client.Server(self.username, self.password, uri=self.uri)
class LoginBadTestCase(BaseTestCase):
    """Base for tests that must be rejected: deliberately wrong credentials."""
    def setUp(self):
        BaseTestCase.setUp(self)
        self.server = client.Server("BADUSERNAME", "BADPASSWD", uri=self.uri)
class BasicPassTestCase(LoginOKTestCase):
    """Smoke test: an authenticated client can ping the server."""
    def test_logged_in_ping(self):
        data = self.server.ping()
        self.assertEquals(data, "pong")
class BasicFailTestCase(LoginBadTestCase):
    """Smoke test: bad credentials are refused with Forbidden."""
    def test_not_logged_in_ping(self):
        self.assertRaises(client.ResourceForbidden, self.server.ping)
class TasksPassTestCase(LoginOKTestCase):
def test_task_create(self):
self.server.task_create("foo","Dummy","single","dummy", args={'WANTSLEEP':'100'})
tasks = self.server.task_list()
self.assert_('foo' in tasks)
self.destroy_and_check("foo")
def destroy_and_check(self, name):
self.server.task_destroy(name)
tasks = self.server.task_list ()
self.assert_('foo' not in tasks)
def long_test_task_periodic_create(self):
period=3
tmp = tempfile.NamedTemporaryFile()
args = { 'TMPFILELOC' : tmp.name }
cmd="echo foo >> %s" % tmp.name
self.server.task_create("bar","Dummy","periodic","",period=period, args=args)
tasks = self.server.task_list()
self.assert_('bar' in tasks)
time.sleep(period*4+1)
self.destroy_and_check("bar")
f = open(tmp.name, 'r')
lines = map(str.strip, f.readlines())
f.close()
tmp.close()
print lines
self.assertEquals(lines,['foo','foo','foo','foo'])
def test_task_constant_create(self):
self.server.task_create("foo","Dummy","constant","",args={'WANTSLEEP':'100'})
tasks = self.server.task_list()
self.assert_('foo' in tasks)
self.assertEquals(tasks['foo']['info']['mode'], 'constant')
self.destroy_and_check('foo')
def test_task_get(self):
self.server.task_create("xxx", "Dummy", "single", "", args={'WANTSLEEP':'50'})
task = self.server.task_get("xxx")
self.assertEquals(task['info']['plugin'], 'Dummy')
self.destroy_and_check('xxx')
def test_task_negative_get(self):
self.assertRaises(client.ResourceNotFound, self.server.task_get, "nonexistent")
def test_task_create_invalid(self):
self.assertRaises(client.ServerError, self.server.task_create,
'invalid', 'xxx', 'yyy', '')
def long_test_task_overload(self):
max_tasks = 10
for t in range(max_tasks):
self.server.task_create("foo%d" % t, "Dummy", "single","", args={'WANTSLEEP':'100000'})
for t in range(5):
self.assertRaises(client.ServerError, self.server.task_create,
"bar", "Dummy", "single", "")
for t in range(max_tasks):
self.server.task_destroy("foo%d" % t)
def very_long_test_task_fd_leak(self):
for t in range(2000):
self.server.task_create("foo", "Dummy", "single", "", args={'WANTSLEEP':'100'})
self.server.task_create("bar", "Dummy", "single", "", args={'WANTSLEEP':'100'})
self.server.task_destroy("foo")
self.server.task_destroy("bar")
class TasksFailTestCase(LoginBadTestCase):
    """Task endpoints must refuse unauthenticated clients."""
    def test_task_create_not_logged_in(self):
        self.assertRaises(client.ResourceForbidden, self.server.task_create,
            "foo", "Dummy", "single", "")
    def test_task_get_not_logged_in(self):
        self.assertRaises(client.ResourceForbidden, self.server.task_get, "nonexistent")
class PasswordPassTestCase(LoginOKTestCase):
    """Password-store round trip: create, read back, delete, verify gone."""
    def test_passwd_create(self):
        username="notsecret"
        password="verysecret"
        service="arandomwebsite"
        self.server.password_create(service, username, password)
        rpass = self.server.password_get(service, username)
        self.assertEqual(rpass, password)
        self.server.password_delete(service, username)
        # BUG FIX: password_get takes (service, username) -- see the
        # lookup above.  The old assertion passed (username, password)
        # and therefore checked a key that never existed, so the delete
        # was not actually verified.
        self.assertRaises(client.ResourceNotFound, self.server.password_get, service, username)
class PasswordFailTestCase(LoginBadTestCase):
    """Password endpoints must refuse unauthenticated clients."""
    def test_passwd_create(self):
        self.assertRaises(client.ResourceForbidden, self.server.password_create, 'x','x','x')
        self.assertRaises(client.ResourceForbidden, self.server.password_get, 'x', 'x')
        self.assertRaises(client.ResourceForbidden, self.server.password_delete, 'x', 'x')
def suite():
    """Assemble the default test suite, in the historical order:
    password, basic and task tests, plus the long-running task tests."""
    cases = [
        (PasswordPassTestCase, 'test'),
        (PasswordFailTestCase, 'test'),
        (BasicPassTestCase, 'test'),
        (BasicFailTestCase, 'test'),
        (TasksPassTestCase, 'test'),
        (TasksFailTestCase, 'test'),
        (TasksPassTestCase, 'long_test'),
        # (TasksPassTestCase, 'very_long_test'),  # disabled: extremely slow
    ]
    result = unittest.TestSuite()
    for case, prefix in cases:
        result.addTest(unittest.makeSuite(case, prefix))
    return result
# Run the assembled suite when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| gpl-2.0 |
brahimalaya/K-OS--Konnex-Operating-System | kosek/Error.py | 2 | 3326 | # -*- coding: utf-8 -*-
__version__ = "0.9.0"
__copyright__ = """
k_os (Konnex Operating-System based on the OSEK/VDX-Standard).
(C) 2007-2013 by Christoph Schueler <github.com/Christoph2,
cpu12.gems@googlemail.com>
All Rights Reserved
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
s. FLOSS-EXCEPTION.txt
"""
import logging
import sys
def createLogger(level, name, fmt):
    """Return the logger registered under *name*, configured for *level*.

    A single StreamHandler (stderr) is attached, also set to *level*,
    rendering records with the format string *fmt*.
    """
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(fmt))
    result = logging.getLogger(name)
    result.setLevel(level)
    result.addHandler(handler)
    return result
class OILError(object):
    """Singleton collector/reporter for OIL translation diagnostics.

    Routes each message to one of three loggers depending on how much
    source-location detail is available, and keeps running counts per
    severity.  All state lives on the class object, so every
    instantiation shares it.
    """

    inst = None  # the one shared instance, created lazily in __new__

    def __new__(cls):
        if cls.inst is None:
            # Make it a Singleton.
            cls.inst=super(OILError, cls).__new__(cls)
            # Severity counters, stored on the class so they are shared.
            cls.fatalErrorCounter = 0
            cls.errorCounter = 0
            cls.warningCounter = 0
            cls.informationCounter = 0
            # Three loggers with increasing location detail in the format:
            # message only, message+file, message+file+line.
            cls.loggerMessageonly = createLogger(logging.NOTSET, "kos.oil.logger.messageonly",
                "[%(levelname)s] - %(message)s"
            )
            cls.loggerFilename = createLogger(logging.NOTSET, "kos.oil.logger.filename",
                "[%(levelname)s]:%(fname)s - %(message)s"
            )
            cls.loggerFull = createLogger(logging.NOTSET, "kos.oil.logger.full",
                "[%(levelname)s]:%(fname)s:%(lno)s - %(message)s"
            )
        return cls.inst

    def logMessage(self, level, message, lineno = None, filename = None, code = None):
        """Dispatch *message* at *level* to the most specific logger.

        NOTE(review): the *code* argument is accepted (and prefixed by
        the callers below) but never emitted in the output -- confirm
        whether diagnostic codes should appear in the log format.
        """
        if lineno and filename:
            self.loggerFull.log(level, message, extra = {'lno': lineno, 'fname': filename})
        elif filename:
            self.loggerFilename.log(level, message, extra={'fname': filename})
        else:
            self.loggerMessageonly.log(level, message)

    def fatalError(self, message, lineno = None, filename = None, code = ''):
        """Log a critical diagnostic and abort the whole run."""
        self.logMessage(logging.CRITICAL, message, lineno, filename, "F-" + code)
        self.fatalErrorCounter += 1
        # A fatal error terminates the translator immediately.
        sys.exit(1)

    def error(self, message, lineno = None, filename = None, code = ''):
        """Log a (non-fatal) error diagnostic."""
        self.logMessage(logging.ERROR, message, lineno, filename, "E-" + code)
        self.errorCounter += 1

    def warning(self, message, lineno = None, filename = None, code = ''):
        """Log a warning diagnostic."""
        self.logMessage(logging.WARNING, message, lineno, filename, "W-" + code)
        self.warningCounter += 1

    def information(self, message, lineno = None, filename = None, code = ''):
        """Log an informational diagnostic."""
        self.logMessage(logging.INFO, message, lineno, filename, "I-" + code)
        self.informationCounter += 1
| gpl-2.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/db/migrate/versions/010_fix_column_lengths.py | 4 | 2577 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
from migrate import changeset
def upgrade(migrate_engine):
    """Repair column lengths that older (pre-sqlalchemy-migrate)
    migration scripts created incorrectly.

    Called by sqlalchemy-migrate with *migrate_engine* bound to the
    buildbot database being upgraded.
    """
    metadata = sa.MetaData()
    metadata.bind = migrate_engine

    # the old (non-sqlalchemy-migrate) migration scripts messed up the
    # lengths of these columns, so fix them here.
    changeset.alter_column(
        sa.Column('class_name', sa.String(128), nullable=False),
        table="schedulers",
        metadata=metadata,
        engine=migrate_engine)
    changeset.alter_column(
        sa.Column('name', sa.String(128), nullable=False),
        table="schedulers",
        metadata=metadata,
        engine=migrate_engine)

    # sqlalchemy's reflection gets the server_defaults wrong, so this
    # table has to be included here.
    changes = sa.Table('changes', metadata,
        sa.Column('changeid', sa.Integer, primary_key=True),
        sa.Column('author', sa.String(256), nullable=False),
        sa.Column('comments', sa.String(1024), nullable=False),
        sa.Column('is_dir', sa.SmallInteger, nullable=False),
        sa.Column('branch', sa.String(256)),
        sa.Column('revision', sa.String(256)),
        sa.Column('revlink', sa.String(256)),
        sa.Column('when_timestamp', sa.Integer, nullable=False),
        sa.Column('category', sa.String(256)),
        sa.Column('repository', sa.String(length=512), nullable=False,
                  server_default=''),
        sa.Column('project', sa.String(length=512), nullable=False,
                  server_default=''),
    )
    # Passing the explicit Table object (rather than the table name)
    # bypasses reflection and its wrong server_defaults.
    changeset.alter_column(
        sa.Column('author', sa.String(256), nullable=False),
        table=changes,
        metadata=metadata,
        engine=migrate_engine)
    changeset.alter_column(
        sa.Column('branch', sa.String(256)),
        table=changes,
        metadata=metadata,
        engine=migrate_engine)
| gpl-2.0 |
Codefans-fan/odoo | addons/l10n_multilang/__init__.py | 438 | 1082 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
import l10n_multilang
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bergolho1337/URI-Online-Judge | Basicos/Python/1061/main.py | 1 | 1292 | # -*- coding: utf-8 -*-
def converteString(dia, hora):
    """Parse the judge's input strings into [day, hour, minute, second].

    *dia* looks like "Dia 5" (day number starting at column 4) and
    *hora* looks like "08 : 12 : 23" (fields at fixed columns).
    """
    campos = (dia[4:6], hora[0:2], hora[5:7], hora[10:12])
    return [int(campo) for campo in campos]
def calculaDuracao(inicio, fim):
    """Return (days, hours, minutes, seconds) elapsed between two events.

    Each event is a [day, hour, minute, second] list as produced by
    converteString().  The end event is assumed not to precede the start.

    Fix: the original used '/' for the unit breakdown, which is floor
    division only under Python 2; under Python 3's true division it
    yields floats.  divmod() keeps the arithmetic integral on both.
    """
    inicio_seg = (inicio[0]*86400)+(inicio[1]*3600)+(inicio[2]*60)+(inicio[3])
    fim_seg = (fim[0]*86400)+(fim[1]*3600)+(fim[2]*60)+(fim[3])
    duracao_seg = fim_seg - inicio_seg
    # Peel off each unit with divmod instead of repeated subtraction.
    dias, duracao_seg = divmod(duracao_seg, 86400)
    horas, duracao_seg = divmod(duracao_seg, 3600)
    minutos, segundos = divmod(duracao_seg, 60)
    return dias, horas, minutos, segundos
# Read the four input lines: start day, start time, end day, end time.
# NOTE(review): raw_input makes this Python-2-only source.
dia_inicio = raw_input()
hora_inicio = raw_input()
dia_fim = raw_input()
hora_fim = raw_input()
evento_inicio = converteString(dia_inicio,hora_inicio)
evento_fim = converteString(dia_fim,hora_fim)
dias, horas, minutos, segundos = calculaDuracao(evento_inicio,evento_fim)
# Emit the duration in the exact wording the judge expects.
print("%d dia(s)" % dias)
print("%d hora(s)" % horas)
print("%d minuto(s)" % minutos)
print("%d segundo(s)" % segundos) | gpl-2.0 |
daniponi/django | tests/model_package/tests.py | 380 | 2668 | from __future__ import unicode_literals
from django.db import connection, models
from django.db.backends.utils import truncate_name
from django.test import TestCase
from .models.article import Article, Site
from .models.publication import Publication
class Advertisement(models.Model):
    # Regression model for #12245: defined in the tests module itself
    # (not in the app's models/ subpackage) to prove that works.
    customer = models.CharField(max_length=100)
    publications = models.ManyToManyField("model_package.Publication", blank=True)
class ModelPackageTests(TestCase):
    """Regression tests for models defined inside a models/ subpackage."""

    def test_m2m_tables_in_subpackage_models(self):
        """
        Regression for #12168: models split into subpackages still get M2M
        tables.
        """
        p = Publication.objects.create(title="FooBar")
        site = Site.objects.create(name="example.com")

        a = Article.objects.create(headline="a foo headline")
        a.publications.add(p)
        a.sites.add(site)

        a = Article.objects.get(id=a.pk)
        self.assertEqual(a.id, a.pk)
        self.assertEqual(a.sites.count(), 1)

    def test_models_in_the_test_package(self):
        """
        Regression for #12245 - Models can exist in the test package, too.
        """
        p = Publication.objects.create(title="FooBar")
        ad = Advertisement.objects.create(customer="Lawrence Journal-World")
        ad.publications.add(p)

        ad = Advertisement.objects.get(id=ad.pk)
        self.assertEqual(ad.publications.count(), 1)

    def test_automatic_m2m_column_names(self):
        """
        Regression for #12386 - field names on the autogenerated intermediate
        class that are specified as dotted strings don't retain any path
        component for the field or column name.
        """
        # fields[1] and fields[2] are the two FKs of the auto-created
        # "through" model for Article.publications.
        self.assertEqual(
            Article.publications.through._meta.fields[1].name, 'article'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[1].get_attname_column(),
            ('article_id', 'article_id')
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].name, 'publication'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].get_attname_column(),
            ('publication_id', 'publication_id')
        )
        # The auto-generated m2m table name is truncated to the backend's
        # identifier length limit.
        self.assertEqual(
            Article._meta.get_field('publications').m2m_db_table(),
            truncate_name('model_package_article_publications', connection.ops.max_name_length()),
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_column_name(), 'article_id'
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_reverse_name(),
            'publication_id'
        )
| bsd-3-clause |
google/material-design-icons | update/venv/lib/python3.9/site-packages/fontTools/varLib/plot.py | 5 | 4153 | """Visualize DesignSpaceDocument and resulting VariationModel."""
from fontTools.varLib.models import VariationModel, supportScalar
from fontTools.designspaceLib import DesignSpaceDocument
from matplotlib import pyplot
from mpl_toolkits.mplot3d import axes3d
from itertools import cycle
import math
import logging
import sys
log = logging.getLogger(__name__)
def stops(support, count=10):
    """Sample positions across a triangular support (lo, peak, hi).

    Returns *count* evenly spaced values from lo (inclusive) up to peak
    (exclusive), another *count* from peak up to hi (exclusive), and
    finally hi itself -- 2*count+1 values in total.
    """
    lo, peak, hi = support
    ramp_up = [lo + (peak - lo) * i / count for i in range(count)]
    ramp_down = [peak + (hi - peak) * i / count for i in range(count)]
    return ramp_up + ramp_down + [hi]
def _plotLocationsDots(locations, axes, subplot, **kwargs):
    """Mark every master location on *subplot* with a colored dot.

    For one axis the dot is drawn at (value, 1.0); for two axes at
    (value1, value2, 1.0) in 3D.  Colors cycle through the Set1 palette,
    one color per location.
    """
    for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):
        if len(axes) == 1:
            coords = ([loc.get(axes[0], 0)], [1.])
        elif len(axes) == 2:
            coords = ([loc.get(axes[0], 0)], [loc.get(axes[1], 0)], [1.])
        else:
            raise AssertionError(len(axes))
        subplot.plot(*(coords + ('o',)), color=color, **kwargs)
def plotLocations(locations, fig, names=None, **kwargs):
    """Plot every support of the VariationModel built from *locations*
    onto *fig*, choosing a 2D or 3D rendering from the axis count.

    *names* (optional) are per-location titles, given in the caller's
    order; they are remapped below to the model's internal ordering.
    """
    n = len(locations)
    # Grid layout sized for one subplot per location.
    cols = math.ceil(n**.5)
    rows = math.ceil(n / cols)

    if names is None:
        names = [None] * len(locations)

    model = VariationModel(locations)
    # Remap names to match the model's (sorted) master order.
    names = [names[model.reverseMapping[i]] for i in range(len(names))]

    axes = sorted(locations[0].keys())
    # NOTE(review): cols/rows are passed as (cols, rows) to both helpers,
    # but _plotLocations3D declares its parameters as (rows, cols).
    # Harmless today because neither helper uses them -- confirm intent.
    if len(axes) == 1:
        _plotLocations2D(
            model, axes[0], fig, cols, rows, names=names, **kwargs
        )
    elif len(axes) == 2:
        _plotLocations3D(
            model, axes, fig, cols, rows, names=names, **kwargs
        )
    else:
        raise ValueError("Only 1 or 2 axes are supported")
def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs):
    """Draw each support of *model* as a scalar curve over one axis.

    NOTE(review): *cols*/*rows* are accepted but unused; every support
    is drawn into the same single subplot (111).
    """
    subplot = fig.add_subplot(111)
    for i, (support, color, name) in enumerate(
        zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
    ):
        if name is not None:
            subplot.set_title(name)
        subplot.set_xlabel(axis)
        pyplot.xlim(-1.,+1.)

        Xs = support.get(axis, (-1.,0.,+1.))
        X, Y = [], []
        # Sample the tent function at the support's stop positions.
        for x in stops(Xs):
            y = supportScalar({axis:x}, support)
            X.append(x)
            Y.append(y)
        subplot.plot(X, Y, color=color, **kwargs)

    _plotLocationsDots(model.locations, [axis], subplot)
def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs):
    """Draw each support of *model* as a wireframe surface over two axes.

    NOTE(review): *rows*/*cols* are accepted but unused; everything goes
    into one 3D subplot (111).
    """
    ax1, ax2 = axes

    axis3D = fig.add_subplot(111, projection='3d')
    for i, (support, color, name) in enumerate(
        zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
    ):
        if name is not None:
            axis3D.set_title(name)
        axis3D.set_xlabel(ax1)
        axis3D.set_ylabel(ax2)
        pyplot.xlim(-1.,+1.)
        pyplot.ylim(-1.,+1.)

        Xs = support.get(ax1, (-1.,0.,+1.))
        Ys = support.get(ax2, (-1.,0.,+1.))
        # Wireframe: one polyline per sampled x position...
        for x in stops(Xs):
            X, Y, Z = [], [], []
            for y in Ys:
                z = supportScalar({ax1:x, ax2:y}, support)
                X.append(x)
                Y.append(y)
                Z.append(z)
            axis3D.plot(X, Y, Z, color=color, **kwargs)
        # ...and one per sampled y position.
        for y in stops(Ys):
            X, Y, Z = [], [], []
            for x in Xs:
                z = supportScalar({ax1:x, ax2:y}, support)
                X.append(x)
                Y.append(y)
                Z.append(z)
            axis3D.plot(X, Y, Z, color=color, **kwargs)

    _plotLocationsDots(model.locations, [ax1, ax2], axis3D)
def plotDocument(doc, fig, **kwargs):
    """Normalize a DesignSpaceDocument in place, then plot its source
    locations (with their names as titles) onto *fig*."""
    doc.normalize()
    locations = [s.location for s in doc.sources]
    names = [s.name for s in doc.sources]
    plotLocations(locations, fig, names, **kwargs)
def main(args=None):
    """CLI entry point: plot a .designspace file or ad-hoc locations.

    Locations given on the command line are comma-separated numbers;
    they are assigned axis names 'A', 'B', ... positionally.
    """
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    # configure the library logger (for >= WARNING)
    configLogger()
    # comment this out to enable debug messages from logger
    # log.setLevel(logging.DEBUG)

    if len(args) < 1:
        print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)
        print("  or")
        print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)
        sys.exit(1)

    fig = pyplot.figure()
    fig.set_tight_layout(True)

    if len(args) == 1 and args[0].endswith('.designspace'):
        doc = DesignSpaceDocument()
        doc.read(args[0])
        plotDocument(doc, fig)
    else:
        # Ad-hoc mode: each argument is one location, axes named A..Z.
        axes = [chr(c) for c in range(ord('A'), ord('Z')+1)]
        locs = [dict(zip(axes, (float(v) for v in s.split(',')))) for s in args]
        plotLocations(locs, fig)

    pyplot.show()
if __name__ == '__main__':
    # sys is already imported at module level; this re-import is harmless.
    import sys
    # Propagate main()'s exit status to the shell.
    sys.exit(main())
| apache-2.0 |
cauchycui/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300

# generate random sample, two components
np.random.seed(0)

# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])

# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)

# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])

# fit a Gaussian Mixture Model with two components
# NOTE(review): mixture.GMM is the old scikit-learn API; modern releases
# renamed it to mixture.GaussianMixture (whose score_samples returns only
# the densities, without the responsibilities tuple used below).
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)

# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
# score_samples returns (log-likelihoods, responsibilities); negate the
# log-likelihood so the contour levels can use a log-scaled norm.
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)

CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
                 levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)

plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
fernandobt8/thrift | test/crossrunner/prepare.py | 50 | 1686 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import subprocess
from crossrunner.collect import collect_testlibs
def prepare(config_dict, testdir, server_match, client_match):
    """Build every test library needed for the selected cross-test run.

    First runs each library's declared 'prepare' command (sequentially),
    then launches 'make <target>' in parallel for every command binary
    that does not exist yet, waiting for all of them.  Always returns
    True.
    """
    libs, libs2 = collect_testlibs(config_dict, server_match, client_match)
    libs.extend(libs2)

    def prepares():
        # Yield (prepare-command, workdir) for libs that declare one.
        for lib in libs:
            pre = lib.get('prepare')
            if pre:
                yield pre, lib['workdir']

    def files():
        # Yield (directory, filename) for every command executable that
        # is still missing; leading '-' entries are option flags, not files.
        for lib in libs:
            workdir = os.path.join(testdir, lib['workdir'])
            for c in lib['command']:
                if not c.startswith('-'):
                    p = os.path.join(workdir, c)
                    if not os.path.exists(p):
                        yield os.path.split(p)

    def make(p):
        # Start 'make <target>' in the target's directory, stderr silenced.
        d, f = p
        with open(os.devnull, 'w') as devnull:
            return subprocess.Popen(['make', f], cwd=d, stderr=devnull)

    for pre, d in prepares():
        subprocess.Popen(pre, cwd=d).wait()

    # set() de-duplicates targets; all makes run concurrently, then we wait.
    for p in list(map(make, set(files()))):
        p.wait()
    return True
| apache-2.0 |
xubenben/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
# Load only digits 0-5 to keep the embeddings quick to compute.
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30  # neighborhood size shared by the neighbor-based methods
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
    """Scatter-plot a 2D embedding *X*: each sample is drawn as its digit
    label colored by class, plus image thumbnails where supported.

    Relies on the module-level ``digits`` and ``y`` globals.
    """
    # Rescale the embedding into the unit square so text placement works.
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(digits.data.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(imagebox)
    plt.xticks([]), plt.yticks([])  # hide the (meaningless) axis ticks
    if title is not None:
        plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
    # Each digit occupies a 10x10 cell; the 8x8 image is inset by 1px.
    ix = 10 * i + 1
    for j in range(n_img_per_row):
        iy = 10 * j + 1
        img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))

plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')


#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")


#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
               "Principal Components projection of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
# NOTE(review): the ``lda`` module used here is from an old scikit-learn;
# modern releases moved this to sklearn.discriminant_analysis.
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01  # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
               "Linear Discriminant projection of the digits (time %.2fs)" %
               (time() - t0))


#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
               "Isomap projection of the digits (time %.2fs)" %
               (time() - t0))


#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
               "Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))


#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
               "Modified Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))


#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
               "Hessian Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))


#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
               "Local Tangent Space Alignment of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# MDS  embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
               "MDS embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
                                       max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
# The tree embedding is high-dimensional and sparse; reduce it to 2D.
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)

plot_embedding(X_reduced,
               "Random forest embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
                                      eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)

plot_embedding(X_se,
               "Spectral embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
# PCA initialization makes the t-SNE layout reproducible (see docstring).
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)

plot_embedding(X_tsne,
               "t-SNE embedding of the digits (time %.2fs)" %
               (time() - t0))

plt.show()
| bsd-3-clause |
seanwestfall/django | tests/user_commands/tests.py | 205 | 7165 | import os
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import find_command, popen_wrapper
from django.db import connection
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.test.utils import captured_stderr, captured_stdout, extend_sys_path
from django.utils import translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.six import StringIO
# A minimal set of apps to avoid system checks running on all apps.
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
    INSTALLED_APPS=[
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'user_commands',
    ],
)
class CommandTests(SimpleTestCase):
    """Behavioral tests for call_command() and custom command plumbing."""

    def test_command(self):
        # Output written by the command goes to the supplied stream.
        out = StringIO()
        management.call_command('dance', stdout=out)
        self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())

    def test_command_style(self):
        out = StringIO()
        management.call_command('dance', style='Jive', stdout=out)
        self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
        # Passing options as arguments also works (thanks argparse)
        management.call_command('dance', '--style', 'Jive', stdout=out)
        self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())

    def test_language_preserved(self):
        # Running a command must not reset the active translation language.
        out = StringIO()
        with translation.override('fr'):
            management.call_command('dance', stdout=out)
            self.assertEqual(translation.get_language(), 'fr')

    def test_explode(self):
        """ Test that an unknown command raises CommandError """
        self.assertRaises(CommandError, management.call_command, ('explode',))

    def test_system_exit(self):
        """ Exception raised in a command should raise CommandError with
            call_command, but SystemExit when run from command line
        """
        with self.assertRaises(CommandError):
            management.call_command('dance', example="raise")
        with captured_stderr() as stderr, self.assertRaises(SystemExit):
            management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
        self.assertIn("CommandError", stderr.getvalue())

    def test_deactivate_locale_set(self):
        # Deactivate translation when set to true
        out = StringIO()
        with translation.override('pl'):
            management.call_command('leave_locale_alone_false', stdout=out)
            self.assertEqual(out.getvalue(), "")

    def test_configured_locale_preserved(self):
        # Leaves locale from settings when set to false
        out = StringIO()
        with translation.override('pl'):
            management.call_command('leave_locale_alone_true', stdout=out)
            self.assertEqual(out.getvalue(), "pl\n")

    def test_find_command_without_PATH(self):
        """
        find_command should still work when the PATH environment variable
        doesn't exist (#22256).
        """
        current_path = os.environ.pop('PATH', None)

        try:
            self.assertIsNone(find_command('_missing_'))
        finally:
            # Restore PATH even if the assertion above fails.
            if current_path is not None:
                os.environ['PATH'] = current_path

    def test_discover_commands_in_eggs(self):
        """
        Test that management commands can also be loaded from Python eggs.
        """
        egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
        egg_name = '%s/basic.egg' % egg_dir
        with extend_sys_path(egg_name):
            with self.settings(INSTALLED_APPS=['commandegg']):
                cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
        self.assertEqual(cmds, ['eggcommand'])

    def test_call_command_option_parsing(self):
        """
        When passing the long option name to call_command, the available option
        key is the option dest name (#22985).
        """
        out = StringIO()
        management.call_command('dance', stdout=out, opt_3=True)
        self.assertIn("option3", out.getvalue())
        self.assertNotIn("opt_3", out.getvalue())
        self.assertNotIn("opt-3", out.getvalue())

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_optparse_compatibility(self):
        """
        optparse should be supported during Django 1.8/1.9 releases.
        """
        out = StringIO()
        management.call_command('optparse_cmd', stdout=out)
        self.assertEqual(out.getvalue(), "All right, let's dance Rock'n'Roll.\n")

        # Simulate command line execution
        with captured_stdout() as stdout, captured_stderr():
            management.execute_from_command_line(['django-admin', 'optparse_cmd'])
        self.assertEqual(stdout.getvalue(), "All right, let's dance Rock'n'Roll.\n")

    def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self):
        out = StringIO()
        management.call_command('hal', "--empty", stdout=out)
        self.assertIn("Dave, I can't do that.\n", out.getvalue())

    def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
        out = StringIO()
        management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
        self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())

    def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
        out = StringIO()
        management.call_command('hal', "--verbosity", "3", "myapp", stdout=out)
        self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())

    def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
        out = StringIO()
        with self.assertRaises(CommandError):
            management.call_command('hal', stdout=out)

    def test_output_transaction(self):
        # output_transaction commands must wrap their SQL in BEGIN/COMMIT.
        out = StringIO()
        management.call_command('transaction', stdout=out, no_color=True)
        output = out.getvalue().strip()
        self.assertTrue(output.startswith(connection.ops.start_transaction_sql()))
        self.assertTrue(output.endswith(connection.ops.end_transaction_sql()))

    def test_call_command_no_checks(self):
        """
        By default, call_command should not trigger the check framework, unless
        specifically asked.
        """
        self.counter = 0

        def patched_check(self_, **kwargs):
            self.counter = self.counter + 1

        saved_check = BaseCommand.check
        BaseCommand.check = patched_check
        try:
            management.call_command("dance", verbosity=0)
            self.assertEqual(self.counter, 0)
            management.call_command("dance", verbosity=0, skip_checks=False)
            self.assertEqual(self.counter, 1)
        finally:
            # Always restore the real check method.
            BaseCommand.check = saved_check
class UtilsTests(SimpleTestCase):
    """Tests for django.core.management.utils helpers."""

    def test_no_existent_external_program(self):
        # popen_wrapper must surface a missing executable as CommandError.
        self.assertRaises(CommandError, popen_wrapper, ['a_42_command_that_doesnt_exist_42'])
| bsd-3-clause |
foreni-packages/golismero | thirdparty_libs/requests/structures.py | 398 | 3575 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import os
import collections
from itertools import islice
class IteratorProxy(object):
    """Proxy around an iterable that reports a length when one is known.

    ``__len__`` is answered from the wrapped object's own ``__len__``,
    its ``len`` attribute, or the size of its underlying file, in that
    order.  ``read(n)`` consumes up to *n* items and concatenates them
    (the items are assumed to be strings).
    """

    def __init__(self, i):
        self.i = i

    def __iter__(self):
        return self.i

    def __len__(self):
        inner = self.i
        if hasattr(inner, '__len__'):
            return len(inner)
        if hasattr(inner, 'len'):
            return inner.len
        if hasattr(inner, 'fileno'):
            return os.fstat(inner.fileno()).st_size

    def read(self, n):
        # islice consumes from the wrapped iterator, so successive reads
        # continue where the previous one stopped.
        return "".join(islice(self.i, None, n))
try:
    # collections.abc is the canonical home since Python 3.3; the bare
    # ``collections.MutableMapping`` alias was removed in Python 3.10.
    from collections.abc import Mapping as _Mapping, MutableMapping as _MutableMapping
except ImportError:  # Python 2
    from collections import Mapping as _Mapping, MutableMapping as _MutableMapping


class CaseInsensitiveDict(_MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive:

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # _store maps lowercased key -> (original-cased key, value).
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, _Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        # _store.values() yields (cased-key, value) pairs, which the
        # constructor's update() accepts as key/value tuples.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
class LookupDict(dict):
    """Dictionary lookup object.

    Lookups read instance attributes (``self.__dict__``) rather than the
    dict storage itself, and missing keys yield ``None`` instead of
    raising ``KeyError``.
    """

    def __init__(self, name=None):
        # Label used only for the repr.
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        return "<lookup '%s'>" % (self.name)

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| gpl-2.0 |
alon/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/close_wsh.py | 495 | 2835 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
from mod_pywebsocket import common
from mod_pywebsocket import stream
def web_socket_do_extra_handshake(request):
    """pywebsocket handshake hook: accept every connection unconditionally."""
    pass
def web_socket_transfer_data(request):
    """Read ``"<code> <reason>"`` messages and initiate closing handshakes.

    Each received text message is expected to look like ``"1000 done"``.
    The loop ends when the peer stops sending (``receive_message`` returns
    None) or a malformed message (no space separator) arrives.
    """
    while True:
        line = request.ws_stream.receive_message()
        if line is None:
            return
        # str.split never yields None, so the historical
        # ``if code is None or reason is None`` guard was dead code and a
        # message without a space crashed with ValueError.  Check the split
        # result's length instead, which is what that guard intended.
        parts = line.split(' ', 1)
        if len(parts) != 2:
            return
        code, reason = parts
        request.ws_stream.close_connection(int(code), reason)
        # close_connection() initiates closing handshake. It validates code
        # and reason. If you want to send a broken close frame for a test,
        # following code will be useful.
        # > data = struct.pack('!H', int(code)) + reason.encode('UTF-8')
        # > request.connection.write(stream.create_close_frame(data))
        # > # Suppress to re-respond client responding close frame.
        # > raise Exception("customized server initiated closing handshake")
def web_socket_passive_closing_handshake(request):
    """Echo the client's close status code and reason back to it.

    pywebsocket substitutes a pseudo status code when the client's close
    frame had an empty body; translate that back into "no code, empty
    reason" before echoing.
    """
    code = request.ws_close_code
    reason = request.ws_close_reason
    if code == common.STATUS_NO_STATUS_RECEIVED:
        return None, ''
    return code, reason
# vi:sts=4 sw=4 et
| mpl-2.0 |
mnahm5/django-estore | Lib/site-packages/botocore/vendored/requests/adapters.py | 573 | 16810 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
    """The Base Transport Adapter.

    Subclasses must implement ``send`` and ``close``.  The ``send``
    signature here mirrors the one ``Session`` actually calls with (and the
    one :class:`HTTPAdapter` implements); the original bare ``send(self)``
    made every conforming subclass an incompatible override.
    """

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (connect timeout, read
            timeout) tuple.
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        raise NotImplementedError

    def close(self):
        """Cleans up adapter specific items."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.
    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.
    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param int max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server. By default, Requests does not retry failed
        connections. If you need granular control over the conditions under
        which we retry a request, import urllib3's ``Retry`` class and pass
        that instead.
    :param pool_block: Whether the connection pool should block for connections.
    Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']

    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        if max_retries == DEFAULT_RETRIES:
            # Preserve historical behavior: retry connects, never reads.
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)

    def __getstate__(self):
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)

    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)

    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)

    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        """
        # Managers are cached per proxy URL.  (PEP 8: ``not in``, not
        # ``not ... in``.)
        if proxy not in self.proxy_manager:
            proxy_headers = self.proxy_headers(proxy)
            self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)
        return self.proxy_manager[proxy]

    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")
            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
        if cert:
            # A (cert, key) pair, or a single combined PEM path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert

    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response

    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        # Lowercasing the whole URL is safe here: only the scheme is used.
        proxy = proxies.get(urlparse(url.lower()).scheme)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn

    def close(self):
        """Disposes of any internal state.
        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()

    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes to proxy URLs.
        """
        proxies = proxies or {}
        scheme = urlparse(request.url).scheme
        proxy = proxies.get(scheme)
        if proxy and scheme != 'https':
            # HTTPS-over-proxy tunnels (CONNECT), so the path form is used.
            url = urldefragauth(request.url)
        else:
            url = request.path_url
        return url

    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        pass

    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxies: The url of the proxy being used for this request.
        :param kwargs: Optional additional keyword arguments.
        """
        headers = {}
        username, password = get_auth_from_url(proxy)
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)
        return headers

    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (`connect timeout, read
            timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # A body with no Content-Length must be sent chunked.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                # Send the request.
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            else:
                # Chunked transfer: drive the low-level httplib connection
                # directly so each body piece becomes one chunk.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=timeout)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    for i in request.body:
                        # Chunked encoding: hex length, CRLF, data, CRLF.
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    low_conn.send(b'0\r\n\r\n')
                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    # (Bare except is deliberate: always re-raises.)
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| mit |
ShoRit/shipping-costs-sample | v2/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
# Aliases for the text/binary/integer type names that differ between the
# major versions; the rest of six keys off these.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):
            def __len__(self):
                # len() coerces __len__'s result to Py_ssize_t, so this
                # overflows exactly when the platform is 32-bit.
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    # Data-descriptor base that resolves its target lazily on first access.
    # Subclasses must provide _resolve().

    def __init__(self, name):
        # Attribute name this descriptor is installed under.
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Placeholder for a module that lives at a different path on Py2/Py3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        # On Python 3 use the new name (defaulting to *name*); on Python 2
        # fall back to the old location.
        if PY3:
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        module = self._resolve()
        value = getattr(module, attr)
        # Cache the looked-up attribute so later accesses skip __getattr__.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Placeholder for an attribute whose home module differs on Py2/Py3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Precedence: explicit new name, then the old name, then *name*.
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully-qualified names ("<six>.moves.xxx") to module or
        # MovedModule placeholder objects.
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        # Register *mod* under one or more dotted names below the six package.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP 302 finder: only claim modules that were registered with us.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Lazy placeholder: resolve it to the real module now.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code # same as get_code
# Single importer instance registered on sys.meta_path elsewhere in six.
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    __path__ = [] # mark as package
# Table of renames: MovedAttribute(name, py2 module, py3 module[, py2 attr,
# py3 attr]) and MovedModule(name, py2 module[, py3 module]).
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]
# Install every placeholder as a lazy descriptor on _MovedItems, and make
# MovedModules importable as "<six>.moves.<name>".
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
# Attach each moved attribute to the lazy module class and record the full
# list so _LazyModule can enumerate the module's contents.
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = (
    _urllib_response_moved_attributes)

_importer._add_module(
    Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
    "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
# Attach the moved attribute to the lazy module class and record the list
# so _LazyModule can enumerate the module's contents.
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = (
    _urllib_robotparser_moved_attributes)

_importer._add_module(
    Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
    "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""

    __path__ = []  # mark as package
    # Each submodule is one of the lazy modules registered above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        # Only the five submodules are part of the public namespace.
        return ['parse', 'error', 'request', 'response', 'robotparser']


_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # `move` is a MovedAttribute/MovedModule descriptor; hanging it off
    # _MovedItems makes it resolvable as six.moves.<name>.
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not a registered lazy move; it may have been set directly on the
        # moves module object itself.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway base class whose only purpose is to intercept the
    first level of class creation: when the caller's class body executes,
    the shim re-creates that class with *meta* as its real metaclass and
    *bases* as its real bases.
    """
    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            # Discard `this_bases` (which contains the shim) and build the
            # final class directly with the intended metaclass and bases.
            return meta(name, bases, d)

    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Rebuilds the decorated class under *metaclass*, copying its namespace
    minus the slot descriptors and the implicit __dict__/__weakref__
    entries (which would clash with the freshly created class object).
    """
    def wrapper(cls):
        namespace = dict(cls.__dict__)
        slots = namespace.get('__slots__')
        if slots is not None:
            # __slots__ may be a single name or an iterable of names.
            slot_names = [slots] if isinstance(slots, str) else slots
            for slot_name in slot_names:
                namespace.pop(slot_name)
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__
    method returning text and apply this decorator to the class.
    """
    # Guard clause: nothing to do on Python 3, where __str__ returns text.
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # On Python 2, move the text-returning __str__ to __unicode__ and make
    # __str__ return UTF-8 bytes, as Python 2 expects.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| apache-2.0 |
zmanji/ecryptfs | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []


def get_kallsyms_table():
    """Load /proc/kallsyms into the global `kallsyms` table.

    Each entry is an (address, symbol_name) tuple and the table is sorted
    by address so get_sym() can binary-search it. If the file cannot be
    opened (not Linux, insufficient permissions) the table is left empty
    and drops are later reported by raw address.
    """
    global kallsyms

    try:
        f = open("/proc/kallsyms", "r")
    except IOError:
        # Narrowed from a bare `except:`; only an open failure is expected.
        return

    try:
        for line in f:
            fields = line.split()
            # Line format: <address> <type> <name> [module]
            loc = int(fields[0], 16)
            name = fields[2]
            kallsyms.append((loc, name))
    finally:
        # The original leaked the file handle; always close it.
        f.close()
    kallsyms.sort()
def get_sym(sloc):
    """Resolve an address to (symbol_name, offset) using the kallsyms table.

    Returns (None, 0) when the address precedes every known symbol (or the
    table is empty).
    """
    loc = int(sloc)

    # Binary search for the last entry whose address is <= loc.
    # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= lo
    #            kallsyms[i][0] >  loc for all hi <= i < len(kallsyms)
    lo, hi = -1, len(kallsyms)
    while hi != lo + 1:
        mid = (lo + hi) // 2
        if loc < kallsyms[mid][0]:
            hi = mid
        else:
            lo = mid

    # Now (lo == -1 or kallsyms[lo][0] <= loc)
    # and (lo == len(kallsyms) - 1 or loc < kallsyms[lo + 1][0])
    if lo < 0:
        return (None, 0)
    base, name = kallsyms[lo]
    return (name, loc - base)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
    # Callback invoked by perf when tracing starts.
    print "Starting trace (Ctrl-C to dump results)"
def trace_end():
    # Callback invoked by perf when tracing stops: resolve symbol names,
    # then print the accumulated drop counts.
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, location, protocol):
    """Count one dropped skb against its kfree location."""
    slocation = str(location)
    # dict.get replaces the original bare try/except around the first hit,
    # which could mask unrelated errors.
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
mrquim/repository.mrquim | repo/script.module.youtube.dl/lib/youtube_dl/extractor/roxwel.py | 73 | 1970 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate, determine_ext
class RoxwelIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)'

    _TEST = {
        'url': 'http://www.roxwel.com/player/passionpittakeawalklive.html',
        'info_dict': {
            'id': 'passionpittakeawalklive',
            'ext': 'flv',
            'title': 'Take A Walk (live)',
            'uploader': 'Passion Pit',
            'uploader_id': 'passionpit',
            'upload_date': '20120928',
            'description': 'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. ',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        filename = re.match(self._VALID_URL, url).group('filename')

        # Video metadata comes from the site's JSON API.
        info = self._download_json(
            'http://www.roxwel.com/api/videos/%s' % filename, filename)

        # Pick the highest advertised flv_<bitrate> rate.
        flv_rates = sorted(int(r.replace('flv_', ''))
                           for r in info['media_rates']
                           if r.startswith('flv_'))
        best_rate = flv_rates[-1]

        url_page_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (filename, best_rate)
        rtmp_url = self._download_webpage(url_page_url, filename,
                                          'Downloading video url')
        # f4v streams are served through an mp4: rtmp playpath.
        if determine_ext(rtmp_url) == 'f4v':
            rtmp_url = rtmp_url.replace(filename, 'mp4:%s' % filename)

        return {
            'id': filename,
            'title': info['title'],
            'url': rtmp_url,
            'ext': 'flv',
            'description': info['description'],
            'thumbnail': info.get('player_image_url') or info.get('image_url_large'),
            'uploader': info['artist'],
            'uploader_id': info['artistname'],
            'upload_date': unified_strdate(info['dbdate']),
        }
| gpl-2.0 |
inonit/django-chemtrails | tests/testapp/migrations/0005_guild.py | 1 | 1061 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-10 13:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Guild model (with a `contact` FK and a `members` M2M to
    # Author) and a reverse `guilds` M2M from Author to Guild.

    dependencies = [
        ('testapp', '0004_book_view_book_permission'),
    ]

    operations = [
        migrations.CreateModel(
            name='Guild',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='guild_contacts', to='testapp.Author')),
                ('members', models.ManyToManyField(related_name='guild_set', to='testapp.Author', verbose_name='members')),
            ],
        ),
        migrations.AddField(
            model_name='author',
            name='guilds',
            field=models.ManyToManyField(blank=True, to='testapp.Guild'),
        ),
    ]
| mit |
openstack/manila | manila/share/drivers/hitachi/hnas/ssh.py | 2 | 35481 | # Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import paramiko
import six
import os
import time
from manila import exception
from manila.i18n import _
from manila import utils as mutils
LOG = log.getLogger(__name__)
class HNASSSHBackend(object):
    def __init__(self, hnas_ip, hnas_username, hnas_password, ssh_private_key,
                 cluster_admin_ip0, evs_id, evs_ip, fs_name, job_timeout):
        # SSH endpoint and credentials for the HNAS management interface.
        self.ip = hnas_ip
        self.port = 22
        self.user = hnas_username
        self.password = hnas_password
        self.priv_key = ssh_private_key
        # SMU address used with --smuauth when managing a cluster.
        self.admin_ip0 = cluster_admin_ip0
        # EVS (virtual server) context every ssc command runs under.
        self.evs_id = six.text_type(evs_id)
        self.fs_name = fs_name
        self.evs_ip = evs_ip
        # SSH connection pool, created lazily by _execute.
        self.sshpool = None
        # Upper bound (seconds) for long-running tree-clone jobs.
        self.job_timeout = job_timeout
        LOG.debug("Hitachi HNAS Driver using SSH backend.")
def get_stats(self):
"""Get the stats from file-system.
:returns:
fs_capacity.size = Total size from filesystem.
available_space = Free space currently on filesystem.
dedupe = True if dedupe is enabled on filesystem.
"""
command = ['df', '-a', '-f', self.fs_name]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not get HNAS backend stats.")
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
line = output.split('\n')
fs = Filesystem(line[3])
available_space = fs.size - fs.used
return fs.size, available_space, fs.dedupe
def nfs_export_add(self, share_id, snapshot_id=None):
if snapshot_id is not None:
path = os.path.join('/snapshots', share_id, snapshot_id)
name = os.path.join('/snapshots', snapshot_id)
else:
path = name = os.path.join('/shares', share_id)
command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1',
name, self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not create NFS export %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def nfs_export_del(self, share_id=None, snapshot_id=None):
if share_id is not None:
name = os.path.join('/shares', share_id)
elif snapshot_id is not None:
name = os.path.join('/snapshots', snapshot_id)
else:
msg = _("NFS export not specified to delete.")
raise exception.HNASBackendException(msg=msg)
command = ['nfs-export', 'del', name]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'does not exist' in e.stderr:
LOG.warning("Export %s does not exist on "
"backend anymore.", name)
else:
msg = _("Could not delete NFS export %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def cifs_share_add(self, share_id, snapshot_id=None):
if snapshot_id is not None:
path = r'\\snapshots\\' + share_id + r'\\' + snapshot_id
name = snapshot_id
else:
path = r'\\shares\\' + share_id
name = share_id
command = ['cifs-share', 'add', '-S', 'disable', '--enable-abe',
'--nodefaultsaa', name, self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not create CIFS share %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def cifs_share_del(self, name):
command = ['cifs-share', 'del', '--target-label', self.fs_name,
name]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if e.exit_code == 1:
LOG.warning("CIFS share %s does not exist on "
"backend anymore.", name)
else:
msg = _("Could not delete CIFS share %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def get_nfs_host_list(self, share_id):
export = self._get_export(share_id)
return export[0].export_configuration
def update_nfs_access_rule(self, host_list, share_id=None,
snapshot_id=None):
if share_id is not None:
name = os.path.join('/shares', share_id)
elif snapshot_id is not None:
name = os.path.join('/snapshots', snapshot_id)
else:
msg = _("No share/snapshot provided to update NFS rules.")
raise exception.HNASBackendException(msg=msg)
command = ['nfs-export', 'mod', '-c']
if len(host_list) == 0:
command.append('127.0.0.1')
else:
string_command = '"' + six.text_type(host_list[0])
for i in range(1, len(host_list)):
string_command += ',' + (six.text_type(host_list[i]))
string_command += '"'
command.append(string_command)
command.append(name)
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not update access rules for NFS export %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def cifs_allow_access(self, name, user, permission, is_snapshot=False):
command = ['cifs-saa', 'add', '--target-label', self.fs_name,
name, user, permission]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'already listed as a user' in e.stderr:
if is_snapshot:
LOG.debug('User %(user)s already allowed to access '
'snapshot %(snapshot)s.', {
'user': user,
'snapshot': name,
})
else:
self._update_cifs_rule(name, user, permission)
else:
entity_type = "share"
if is_snapshot:
entity_type = "snapshot"
msg = _("Could not add access of user %(user)s to "
"%(entity_type)s %(name)s.") % {
'user': user,
'name': name,
'entity_type': entity_type,
}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def _update_cifs_rule(self, name, user, permission):
LOG.debug('User %(user)s already allowed to access '
'share %(share)s. Updating access level...', {
'user': user,
'share': name,
})
command = ['cifs-saa', 'change', '--target-label', self.fs_name,
name, user, permission]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not update access of user %(user)s to "
"share %(share)s.") % {
'user': user,
'share': name,
}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def cifs_deny_access(self, name, user, is_snapshot=False):
command = ['cifs-saa', 'delete', '--target-label', self.fs_name,
name, user]
entity_type = "share"
if is_snapshot:
entity_type = "snapshot"
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if ('not listed as a user' in e.stderr or
'Could not delete user/group' in e.stderr):
LOG.warning('User %(user)s already not allowed to access '
'%(entity_type)s %(name)s.', {
'entity_type': entity_type,
'user': user,
'name': name
})
else:
msg = _("Could not delete access of user %(user)s to "
"%(entity_type)s %(name)s.") % {
'user': user,
'name': name,
'entity_type': entity_type,
}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def list_cifs_permissions(self, hnas_share_id):
command = ['cifs-saa', 'list', '--target-label', self.fs_name,
hnas_share_id]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError as e:
if 'No entries for this share' in e.stderr:
LOG.debug('Share %(share)s does not have any permission '
'added.', {'share': hnas_share_id})
return []
else:
msg = _("Could not list access of share %s.") % hnas_share_id
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
permissions = CIFSPermissions(output)
return permissions.permission_list
    def tree_clone(self, src_path, dest_path):
        """Clone a directory tree and wait for the clone job to finish.

        Submits a tree-clone job from src_path to dest_path, then polls the
        job status with quadratically growing sleeps while no progress is
        made, aborting the job once self.job_timeout is exceeded.

        :raises HNASNothingToCloneException: if src_path contains no
            clonable files.
        :raises HNASBackendException: on submit failure, job failure or
            timeout.
        """
        command = ['tree-clone-job-submit', '-e', '-f', self.fs_name,
                   src_path, dest_path]
        try:
            output, err = self._execute(command)
        except processutils.ProcessExecutionError as e:
            if ('Cannot find any clonable files in the source directory' in
                    e.stderr):
                msg = _("Source path %s is empty.") % src_path
                LOG.debug(msg)
                raise exception.HNASNothingToCloneException(msg=msg)
            else:
                msg = _("Could not submit tree clone job to clone from %(src)s"
                        " to %(dest)s.") % {'src': src_path, 'dest': dest_path}
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)

        job_submit = JobSubmit(output)
        if job_submit.request_status == 'Request submitted successfully':
            job_id = job_submit.job_id

            job_status = None
            progress = ''
            job_rechecks = 0
            starttime = time.time()
            deadline = starttime + self.job_timeout
            # Poll until the job completes, fails, or hits the deadline.
            while (not job_status or
                   job_status.job_state != "Job was completed"):

                command = ['tree-clone-job-status', job_id]
                output, err = self._execute(command)
                job_status = JobStatus(output)

                if job_status.job_state == 'Job failed':
                    break

                old_progress = progress
                progress = job_status.data_bytes_processed

                if old_progress == progress:
                    # No forward progress since the last poll: back off
                    # quadratically (1s, 4s, 9s, ...) and enforce the
                    # overall deadline.
                    job_rechecks += 1
                    now = time.time()
                    if now > deadline:
                        command = ['tree-clone-job-abort', job_id]
                        self._execute(command)
                        LOG.error("Timeout in snapshot creation from "
                                  "source path %s.", src_path)
                        msg = _("Share snapshot of source path %s "
                                "was not created.") % src_path
                        raise exception.HNASBackendException(msg=msg)
                    else:
                        time.sleep(job_rechecks ** 2)
                else:
                    job_rechecks = 0

            # Success requires completion AND no missing files/directories.
            if (job_status.job_state, job_status.job_status,
                job_status.directories_missing,
                job_status.files_missing) == ("Job was completed",
                                              "Success", '0', '0'):
                LOG.debug("Snapshot of source path %(src)s to destination "
                          "path %(dest)s created successfully.",
                          {'src': src_path,
                           'dest': dest_path})
            else:
                LOG.error('Error creating snapshot of source path %s.',
                          src_path)
                msg = _('Snapshot of source path %s was not '
                        'created.') % src_path
                raise exception.HNASBackendException(msg=msg)
def tree_delete(self, path):
command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name,
path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr:
LOG.warning("Attempted to delete path %s "
"but it does not exist.", path)
else:
msg = _("Could not submit tree delete job to delete path "
"%s.") % path
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def create_directory(self, dest_path):
self._locked_selectfs('create', dest_path)
if not self.check_directory(dest_path):
msg = _("Command to create directory %(path)s was run in another "
"filesystem instead of %(fs)s.") % {
'path': dest_path,
'fs': self.fs_name,
}
LOG.warning(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def delete_directory(self, path):
try:
self._locked_selectfs('delete', path)
except exception.HNASDirectoryNotEmpty:
pass
else:
if self.check_directory(path):
msg = _("Command to delete empty directory %(path)s was run in"
" another filesystem instead of %(fs)s.") % {
'path': path,
'fs': self.fs_name,
}
LOG.debug(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCIsBusy, wait_random=True,
retries=5)
def check_directory(self, path):
command = ['path-to-object-number', '-f', self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'path-to-object-number is currently running' in e.stdout:
msg = (_("SSC command path-to-object-number for path %s "
"is currently busy.") % path)
raise exception.HNASSSCIsBusy(msg=msg)
if 'Unable to locate component:' in e.stdout:
LOG.debug("Cannot find %(path)s: %(out)s",
{'path': path, 'out': e.stdout})
return False
else:
msg = _("Could not check if path %s exists.") % path
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
return True
def check_fs_mounted(self):
command = ['df', '-a', '-f', self.fs_name]
output, err = self._execute(command)
if "not found" in output:
msg = _("Filesystem %s does not exist or it is not available "
"in the current EVS context.") % self.fs_name
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
else:
line = output.split('\n')
fs = Filesystem(line[3])
return fs.mounted
def mount(self):
command = ['mount', self.fs_name]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'file system is already mounted' not in e.stderr:
msg = _("Failed to mount filesystem %s.") % self.fs_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def vvol_create(self, vvol_name):
# create a virtual-volume inside directory
path = '/shares/' + vvol_name
command = ['virtual-volume', 'add', '--ensure', self.fs_name,
vvol_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Failed to create vvol %s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def vvol_delete(self, vvol_name):
path = '/shares/' + vvol_name
# Virtual-volume and quota are deleted together
command = ['tree-delete-job-submit', '--confirm', '-f',
self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr:
LOG.warning("Share %s does not exist.", vvol_name)
else:
msg = _("Failed to delete vvol %s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def quota_add(self, vvol_name, vvol_quota):
str_quota = six.text_type(vvol_quota) + 'G'
command = ['quota', 'add', '--usage-limit',
str_quota, '--usage-hard-limit',
'yes', self.fs_name, vvol_name]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Failed to add %(quota)s quota to vvol "
"%(vvol)s.") % {'quota': str_quota, 'vvol': vvol_name}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def modify_quota(self, vvol_name, new_size):
str_quota = six.text_type(new_size) + 'G'
command = ['quota', 'mod', '--usage-limit', str_quota,
self.fs_name, vvol_name]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Failed to update quota of vvol %(vvol)s to "
"%(quota)s.") % {'quota': str_quota, 'vvol': vvol_name}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def check_vvol(self, vvol_name):
command = ['virtual-volume', 'list', '--verbose', self.fs_name,
vvol_name]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Virtual volume %s does not exist.") % vvol_name
LOG.exception(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def check_quota(self, vvol_name):
command = ['quota', 'list', '--verbose', self.fs_name, vvol_name]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not check quota of vvol %s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
if 'No quotas matching specified filter criteria' in output:
msg = _("Virtual volume %s does not have any"
" quota.") % vvol_name
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def check_export(self, vvol_name, is_snapshot=False):
export = self._get_export(vvol_name, is_snapshot=is_snapshot)
if (vvol_name in export[0].export_name and
self.fs_name in export[0].file_system_label):
return
else:
msg = _("Export %s does not exist.") % export[0].export_name
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def check_cifs(self, vvol_name):
output = self._cifs_list(vvol_name)
cifs_share = CIFSShare(output)
if self.fs_name != cifs_share.fs:
msg = _("CIFS share %(share)s is not located in "
"configured filesystem "
"%(fs)s.") % {'share': vvol_name,
'fs': self.fs_name}
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def is_cifs_in_use(self, vvol_name):
output = self._cifs_list(vvol_name)
cifs_share = CIFSShare(output)
return cifs_share.is_mounted
def _cifs_list(self, vvol_name):
command = ['cifs-share', 'list', vvol_name]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError as e:
if 'does not exist' in e.stderr:
msg = _("CIFS share %(share)s was not found in EVS "
"%(evs_id)s") % {'share': vvol_name,
'evs_id': self.evs_id}
LOG.exception(msg)
raise exception.HNASItemNotFoundException(msg=msg)
else:
msg = _("Could not list CIFS shares by vvol name "
"%s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
return output
def get_share_quota(self, share_id):
    """Return the quota limit of a share, normalized to GB.

    :param share_id: name of the share (virtual volume).
    :returns: quota limit in GB, or None when no limit is configured.
    :raises HNASBackendException: if the quota unit is neither TB nor
        GB (i.e. the configured value is below 1G).
    """
    command = ['quota', 'list', self.fs_name, share_id]
    output, err = self._execute(command)
    quota = Quota(output)
    if quota.limit is None:
        return None
    if quota.limit_unit == 'TB':
        # Normalize terabytes to gigabytes.
        return quota.limit * units.Ki
    elif quota.limit_unit == 'GB':
        return quota.limit
    else:
        msg = _("Share %s does not support quota values "
                "below 1G.") % share_id
        LOG.error(msg)
        raise exception.HNASBackendException(msg=msg)
def get_share_usage(self, share_id):
    """Return the space used by a share, in GB.

    :param share_id: name of the share (virtual volume).
    :returns: usage converted from the reported unit to GB (float).
    :raises HNASItemNotFoundException: if the share has no quota
        (usage is only reported when a quota exists).
    """
    command = ['quota', 'list', self.fs_name, share_id]
    output, err = self._execute(command)
    quota = Quota(output)
    if quota.usage is None:
        msg = _("Virtual volume %s does not have any quota.") % share_id
        LOG.error(msg)
        raise exception.HNASItemNotFoundException(msg=msg)
    else:
        # Combine value and unit (e.g. '1.5' + 'GB') and let strutils
        # turn it into a byte count before normalizing to GB.
        bytes_usage = strutils.string_to_bytes(six.text_type(quota.usage) +
                                               quota.usage_unit)
        return bytes_usage / units.Gi
def _get_export(self, name, is_snapshot=False):
    """Return the list of NFS exports matching *name*.

    :param name: share or snapshot name (the HNAS path prefix is
        prepended here).
    :param is_snapshot: look under /snapshots instead of /shares.
    :returns: list of Export objects parsed from the backend output.
    :raises HNASItemNotFoundException: if the export does not exist.
    :raises HNASBackendException: on any other backend failure.
    """
    if is_snapshot:
        name = '/snapshots/' + name
    else:
        name = '/shares/' + name
    # NOTE: no trailing space in 'list' — the tokens are joined with a
    # single space by _execute, so 'list ' produced a double blank in
    # the remote command line.
    command = ['nfs-export', 'list', name]
    try:
        output, err = self._execute(command)
    except processutils.ProcessExecutionError as e:
        if 'does not exist' in e.stderr:
            msg = _("Export %(name)s was not found in EVS "
                    "%(evs_id)s.") % {
                'name': name,
                'evs_id': self.evs_id,
            }
            LOG.exception(msg)
            raise exception.HNASItemNotFoundException(msg=msg)
        else:
            msg = _("Could not list NFS exports by name %s.") % name
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    items = output.split('Export name')
    # Drop the (possibly empty) header chunk preceding the first
    # 'Export name' marker.  The previous check "items[0][0] == '\n'"
    # raised IndexError when that chunk was the empty string.
    if items and not items[0].strip():
        items.pop(0)
    return [Export(item) for item in items]
@mutils.retry(exception=exception.HNASConnException, wait_random=True)
def _execute(self, commands):
    """Run an HNAS console command over SSH.

    Builds the 'ssc' command line (going through the SMU when
    admin_ip0 is configured), checks it for shell injection and runs
    it through a pooled SSH connection.  Automatically retried on
    HNASConnException by the decorator above.

    :param commands: list of command tokens to run on the console.
    :returns: (stdout, stderr) tuple of the remote command.
    :raises HNASConnException: when the SSC connection cannot be
        established (this triggers the retry decorator).
    """
    command = ['ssc', '127.0.0.1']
    if self.admin_ip0 is not None:
        # Authenticate through the SMU instead of a direct login.
        command = ['ssc', '--smuauth', self.admin_ip0]

    command += ['console-context', '--evs', self.evs_id]
    commands = command + commands

    mutils.check_ssh_injection(commands)
    commands = ' '.join(commands)

    if not self.sshpool:
        # Lazily create the SSH connection pool on first use.
        self.sshpool = mutils.SSHPool(ip=self.ip,
                                      port=self.port,
                                      conn_timeout=None,
                                      login=self.user,
                                      password=self.password,
                                      privatekey=self.priv_key)

    with self.sshpool.item() as ssh:
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            out, err = processutils.ssh_execute(ssh, commands,
                                                check_exit_code=True)
            LOG.debug("Command %(cmd)s result: out = %(out)s - err = "
                      "%(err)s.", {
                          'cmd': commands,
                          'out': out,
                          'err': err,
                      })
            return out, err
        except processutils.ProcessExecutionError as e:
            if 'Failed to establish SSC connection' in e.stderr:
                msg = _("Failed to establish SSC connection.")
                LOG.debug(msg)
                raise exception.HNASConnException(msg=msg)
            else:
                LOG.debug("Error running SSH command. "
                          "Command %(cmd)s result: out = %(out)s - err = "
                          "%(err)s - exit = %(exit)s.", {
                              'cmd': e.cmd,
                              'out': e.stdout,
                              'err': e.stderr,
                              'exit': e.exit_code,
                          })
                raise
@mutils.synchronized("hitachi_hnas_select_fs", external=True)
def _locked_selectfs(self, op, path):
    """Create or delete a directory inside the configured filesystem.

    Serialized with an external lock (decorator above) because the
    selectfs/console-context sequence is sensitive to concurrent
    context changes — see the 'VolumeNotFound' handling below.

    :param op: either 'create' (mkdir -p) or 'delete' (rmdir).
    :param path: directory path to create or remove.
    :raises HNASSSCContextChange: if the console context changed under
        us and the command should be retried by the caller.
    :raises HNASDirectoryNotEmpty: on delete, if snapshots remain.
    :raises HNASBackendException: on any other failure.
    """
    if op == 'create':
        command = ['selectfs', self.fs_name, '\n',
                   'ssc', '127.0.0.1', 'console-context', '--evs',
                   self.evs_id, 'mkdir', '-p', path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if "Current file system invalid: VolumeNotFound" in e.stderr:
                msg = _("Command to create directory %s failed due to "
                        "context change.") % path
                LOG.debug(msg)
                raise exception.HNASSSCContextChange(msg=msg)
            else:
                msg = _("Failed to create directory %s.") % path
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)

    if op == 'delete':
        command = ['selectfs', self.fs_name, '\n',
                   'ssc', '127.0.0.1', 'console-context', '--evs',
                   self.evs_id, 'rmdir', path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'DirectoryNotEmpty' in e.stderr:
                msg = _("Share %s has more snapshots.") % path
                LOG.debug(msg)
                raise exception.HNASDirectoryNotEmpty(msg=msg)
            elif 'cannot remove' in e.stderr and 'NotFound' in e.stderr:
                # Already gone: treat deletion as idempotent, just warn.
                LOG.warning("Attempted to delete path %s but it does "
                            "not exist.", path)
            elif 'Current file system invalid: VolumeNotFound' in e.stderr:
                msg = _("Command to delete empty directory %s failed due "
                        "to context change.") % path
                LOG.debug(msg)
                raise exception.HNASSSCContextChange(msg=msg)
            else:
                msg = _("Failed to delete directory %s.") % path
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
class Export(object):
    """Parsed representation of one 'nfs-export list' entry.

    The parser relies on the fixed line layout of the backend output;
    attributes are picked by line index.  Attributes are only set when
    *data* is non-empty.
    """

    def __init__(self, data):
        if data:
            # The 'Export configuration:' header separates the scalar
            # fields from the free-form configuration lines.
            split_data = data.split('Export configuration:\n')
            items = split_data[0].split('\n')
            self.export_name = items[0].split(':')[1].strip()
            self.export_path = items[1].split(':')[1].strip()
            if '*** not available ***' in items[2]:
                # Filesystem details are unavailable; the remaining
                # fields start 7 lines earlier than in the full layout.
                self.file_system_info = items[2].split(':')[1].strip()
                index = 0
            else:
                self.file_system_label = items[2].split(':')[1].strip()
                self.file_system_size = items[3].split(':')[1].strip()
                self.file_system_free_space = items[4].split(':')[1].strip()
                self.file_system_state = items[5].split(':')[1]
                self.formatted = items[6].split('=')[1].strip()
                self.mounted = items[7].split('=')[1].strip()
                self.failed = items[8].split('=')[1].strip()
                self.thin_provisioned = items[9].split('=')[1].strip()
                index = 7
            # Offset-adjusted fields shared by both layouts.
            self.access_snapshots = items[3 + index].split(':')[1].strip()
            self.display_snapshots = items[4 + index].split(':')[1].strip()
            self.read_caching = items[5 + index].split(':')[1].strip()
            self.disaster_recovery_setting = items[6 + index].split(':')[1]
            self.recovered = items[7 + index].split('=')[1].strip()
            self.transfer_setting = items[8 + index].split('=')[1].strip()

            self.export_configuration = []
            export_config = split_data[1].split('\n')
            for i in range(0, len(export_config)):
                # Keep only configuration lines with real content
                # (at least one letter or digit).
                if any(j.isdigit() or j.isalpha() for j in export_config[i]):
                    self.export_configuration.append(export_config[i])
class JobStatus(object):
    """Parsed output of an HNAS tree-clone job status report.

    Every attribute is extracted by fixed line and word index, so this
    parser is tightly coupled to the backend's output format.
    Attributes are only set when *data* is non-empty.
    """

    def __init__(self, data):
        if data:
            lines = data.split("\n")
            self.job_id = lines[0].split()[3]
            self.physical_node = lines[2].split()[3]
            self.evs = lines[3].split()[2]
            self.volume_number = lines[4].split()[3]
            self.fs_id = lines[5].split()[4]
            self.fs_name = lines[6].split()[4]
            self.source_path = lines[7].split()[3]
            self.creation_time = " ".join(lines[8].split()[3:5])
            self.destination_path = lines[9].split()[3]
            self.ensure_path_exists = lines[10].split()[5]
            self.job_state = " ".join(lines[12].split()[3:])
            self.job_started = " ".join(lines[14].split()[2:4])
            self.job_ended = " ".join(lines[15].split()[2:4])
            self.job_status = lines[16].split()[2]

            # The error line only carries details past the third word.
            error_details_line = lines[17].split()
            if len(error_details_line) > 3:
                self.error_details = " ".join(error_details_line[3:])
            else:
                self.error_details = None

            self.directories_processed = lines[18].split()[3]
            self.files_processed = lines[19].split()[3]
            self.data_bytes_processed = lines[20].split()[4]
            self.directories_missing = lines[21].split()[4]
            self.files_missing = lines[22].split()[4]
            self.files_skipped = lines[23].split()[4]

            # Same pattern as the error line above.
            skipping_details_line = lines[24].split()
            if len(skipping_details_line) > 3:
                self.skipping_details = " ".join(skipping_details_line[3:])
            else:
                self.skipping_details = None
class JobSubmit(object):
    """Parsed acknowledgement of a job submission on HNAS.

    Attributes are only set when *data* is non-empty.
    """

    def __init__(self, data):
        if data:
            # Strip periods so word positions are stable, then split.
            tokens = data.replace(".", "").split()
            # Words 1..3 describe whether the request was accepted.
            self.request_status = " ".join(tokens[1:4])
            # Word 8 carries the identifier of the submitted job.
            self.job_id = tokens[8]
class Filesystem(object):
    """Parsed output of one HNAS filesystem listing row.

    Sizes are normalized to GB (TB values are multiplied by 1024).
    NOTE(review): 'used' and 'used_measure' are only set when the
    filesystem is mounted — callers should check 'mounted' first.
    Attributes are only set when *data* is non-empty.
    """

    def __init__(self, data):
        if data:
            items = data.split()
            self.id = items[0]
            self.label = items[1]
            self.evs = items[2]
            self.size = float(items[3])
            self.size_measure = items[4]
            if self.size_measure == 'TB':
                # Normalize terabytes to gigabytes.
                self.size = self.size * units.Ki
            if items[5:7] == ["Not", "mounted"]:
                self.mounted = False
            else:
                self.mounted = True
                self.used = float(items[5])
                self.used_measure = items[6]
                if self.used_measure == 'TB':
                    self.used = self.used * units.Ki
            self.dedupe = 'dedupe enabled' in data
class Quota(object):
    """Parsed output of an HNAS 'quota list' command.

    When no quota matches, every field is exposed explicitly as None.
    Attributes are only set when *data* is non-empty.
    """

    def __init__(self, data):
        if not data:
            return
        if 'No quotas matching' in data:
            # No quota configured: expose empty fields explicitly.
            self.type = None
            self.target = None
            self.usage = None
            self.usage_unit = None
            self.limit = None
            self.limit_unit = None
            return
        tokens = data.split()
        self.type = tokens[2]
        self.target = tokens[6]
        self.usage = tokens[9]
        self.usage_unit = tokens[10]
        if tokens[13] == 'Unset':
            self.limit = None
        else:
            self.limit = float(tokens[13])
            self.limit_unit = tokens[14]
class CIFSPermissions(object):
    """Extract (user, permission-code) pairs from CIFS listing output."""

    # Human-readable permission labels shown by HNAS and the short
    # codes used internally.
    HNAS_PERMISSIONS = (('Allow Read', 'ar'),
                        ('Allow Change & Read', 'acr'),
                        ('Allow Full Control', 'af'),
                        ('Deny Read', 'dr'),
                        ('Deny Change & Read', 'dcr'),
                        ('Deny Full Control', 'df'))

    def __init__(self, data):
        self.permission_list = []
        for line in data.split('\n'):
            matches = [entry for entry in self.HNAS_PERMISSIONS
                       if entry[0] in line]
            # Only lines matching exactly one label are taken; the user
            # name is whatever follows the label on that line.
            if len(matches) == 1:
                token, permission = matches[0]
                user = line.split(token)[1].strip()
                self.permission_list.append((user, permission))
class CIFSShare(object):
    """Parsed output of an HNAS 'cifs-share list' command.

    Exposes the backing filesystem label ('fs') and whether any users
    are connected ('is_mounted').
    """

    def __init__(self, data):
        for line in data.split('\n'):
            if 'File system label' in line:
                # Filesystem that backs this share.
                self.fs = line.split(': ')[1]
            elif 'Share users' in line:
                # A non-zero user count means the share is in use.
                self.is_mounted = line.split(': ')[1] != '0'
| apache-2.0 |
carolFrohlich/nipype | nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py | 2 | 1485 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import ApplyInverseDeformation
def test_ApplyInverseDeformation_inputs():
    """Auto-generated check that input trait metadata matches the spec."""
    # Expected trait metadata for every input of ApplyInverseDeformation.
    input_map = dict(bounding_box=dict(field='comp{1}.inv.comp{1}.sn2def.bb',
                                       ),
                     deformation=dict(field='comp{1}.inv.comp{1}.sn2def.matname',
                                      xor=[u'deformation_field'],
                                      ),
                     deformation_field=dict(field='comp{1}.inv.comp{1}.def',
                                            xor=[u'deformation'],
                                            ),
                     ignore_exception=dict(nohash=True,
                                           usedefault=True,
                                           ),
                     in_files=dict(field='fnames',
                                   mandatory=True,
                                   ),
                     interpolation=dict(field='interp',
                                        ),
                     matlab_cmd=dict(),
                     mfile=dict(usedefault=True,
                                ),
                     paths=dict(),
                     target=dict(field='comp{1}.inv.space',
                                 ),
                     use_mcr=dict(),
                     use_v8struct=dict(min_ver='8',
                                       usedefault=True,
                                       ),
                     voxel_sizes=dict(field='comp{1}.inv.comp{1}.sn2def.vox',
                                      ),
                     )
    inputs = ApplyInverseDeformation.input_spec()

    # One generated sub-test per (trait, metadata key) pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyInverseDeformation_outputs():
    """Auto-generated check that output trait metadata matches the spec."""
    output_map = dict(out_files=dict(),
                      )
    outputs = ApplyInverseDeformation.output_spec()

    # One generated sub-test per (trait, metadata key) pair.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
sbuss/voteswap | lib/social/backends/classlink.py | 4 | 1487 | from social.backends.oauth import BaseOAuth2
class ClasslinkOAuth(BaseOAuth2):
    """
    Classlink OAuth authentication backend.
    Docs: https://developer.classlink.com/docs/oauth2-workflow
    """
    name = 'classlink'
    AUTHORIZATION_URL = 'https://launchpad.classlink.com/oauth2/v2/auth'
    ACCESS_TOKEN_URL = 'https://launchpad.classlink.com/oauth2/v2/token'
    ACCESS_TOKEN_METHOD = 'POST'
    DEFAULT_SCOPE = ['profile']
    REDIRECT_STATE = False
    SCOPE_SEPARATOR = ' '

    def get_user_id(self, details, response):
        """Return user unique id provided by service"""
        return response['UserId']

    def get_user_details(self, response):
        """Return user details from Classlink account"""
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('FirstName'),
            last_name=response.get('LastName')
        )
        # Fall back to the login id when the account has no email.
        return {
            'username': response.get('Email') or response.get('LoginId'),
            'email': response.get('Email'),
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
        }

    def user_data(self, token, *args, **kwargs):
        """Loads user data from service"""
        url = 'https://nodeapi.classlink.com/v2/my/info'
        auth_header = {"Authorization": "Bearer %s" % token}
        try:
            return self.get_json(url, headers=auth_header)
        except ValueError:
            # Response body was not valid JSON.
            return None
dvliman/jaikuengine | .google_appengine/lib/django-1.4/tests/regressiontests/admin_custom_urls/models.py | 42 | 1467 | from functools import update_wrapper
from django.contrib import admin
from django.db import models
class Action(models.Model):
    """Model with a CharField primary key, used to exercise admin URLs
    that embed string PKs."""
    name = models.CharField(max_length=50, primary_key=True)
    description = models.CharField(max_length=70)

    def __unicode__(self):
        return self.name
class ActionAdmin(admin.ModelAdmin):
    """
    A ModelAdmin for the Action model that changes the URL of the add_view
    to '<app name>/<model name>/!add/'
    The Action model has a CharField PK.
    """
    list_display = ('name', 'description')

    def remove_url(self, name):
        """
        Remove all entries named 'name' from the ModelAdmin instance URL
        patterns list
        """
        return filter(lambda e: e.name != name, super(ActionAdmin, self).get_urls())

    def get_urls(self):
        # Add the URL of our custom 'add_view' view to the front of the URLs
        # list. Remove the existing one(s) first
        from django.conf.urls import patterns, url

        def wrap(view):
            # Wrap the view so admin_view's permission checks apply.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        info = self.model._meta.app_label, self.model._meta.module_name
        view_name = '%s_%s_add' % info

        return patterns('',
            url(r'^!add/$', wrap(self.add_view), name=view_name),
        ) + self.remove_url(view_name)


admin.site.register(Action, ActionAdmin)
| apache-2.0 |
defionscode/ansible | lib/ansible/modules/cloud/amazon/efs_facts.py | 47 | 13236 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- This module can be used to search Amazon EFS file systems.
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
aliases: [ creation_token ]
id:
description:
- ID of Amazon EFS.
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary.
targets:
description:
- List of targets on which to filter the returned results.
- Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Find all existing efs
efs_facts:
register: result
- name: Find efs using id
efs_facts:
id: fs-1234abcd
- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system with leading dot from the time AWS EFS required to add network suffix to EFS address
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
filesystem_address:
description: url of file system
returned: always
type: str
sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
throughput_mode:
description: mode of throughput for the file system
returned: when botocore >= 1.10.57
type: str
sample: "bursting"
provisioned_throughput_in_mibps:
description: throughput provisioned in Mibps
returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned"
type: float
sample: 15.0
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from collections import defaultdict
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
from ansible.module_utils._text import to_native
class EFSConnection(object):
    """Thin wrapper around the boto3 EFS client used by this module."""

    # File system / mount target life cycle states reported by EFS.
    STATE_CREATING = 'creating'
    STATE_AVAILABLE = 'available'
    STATE_DELETING = 'deleting'
    STATE_DELETED = 'deleted'

    def __init__(self, module, region, **aws_connect_params):
        try:
            self.connection = boto3_conn(module, conn_type='client',
                                         resource='efs', region=region,
                                         **aws_connect_params)
            self.module = module
        except Exception as e:
            module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
        self.region = region

    @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
    def list_file_systems(self, **kwargs):
        """
        Returns generator of file systems including all attributes of FS
        """
        paginator = self.connection.get_paginator('describe_file_systems')
        return paginator.paginate(**kwargs).build_full_result()['FileSystems']

    @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
    def get_tags(self, file_system_id):
        """
        Returns tag list for selected instance of EFS
        """
        paginator = self.connection.get_paginator('describe_tags')
        return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])

    @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
    def get_mount_targets(self, file_system_id):
        """
        Returns mount targets for selected instance of EFS
        """
        paginator = self.connection.get_paginator('describe_mount_targets')
        return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']

    @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
    def get_security_groups(self, mount_target_id):
        """
        Returns security groups for selected instance of EFS
        """
        return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']

    def get_mount_targets_data(self, file_systems):
        """Populate 'mount_targets' on every available file system."""
        for item in file_systems:
            if item['life_cycle_state'] == self.STATE_AVAILABLE:
                try:
                    mount_targets = self.get_mount_targets(item['file_system_id'])
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
                for mt in mount_targets:
                    item['mount_targets'].append(camel_dict_to_snake_dict(mt))
        return file_systems

    def get_security_groups_data(self, file_systems):
        """Populate 'security_groups' on every available mount target."""
        for item in file_systems:
            if item['life_cycle_state'] == self.STATE_AVAILABLE:
                for target in item['mount_targets']:
                    if target['life_cycle_state'] == self.STATE_AVAILABLE:
                        try:
                            target['security_groups'] = self.get_security_groups(target['mount_target_id'])
                        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                            self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
                    else:
                        target['security_groups'] = []
            else:
                # Non-available file systems expose empty tag/target data.
                item['tags'] = {}
                item['mount_targets'] = []
        return file_systems

    def get_file_systems(self, file_system_id=None, creation_token=None):
        """Return snake_cased file system dicts, optionally filtered by
        file system id and/or creation token."""
        kwargs = dict()
        if file_system_id:
            kwargs['FileSystemId'] = file_system_id
        if creation_token:
            kwargs['CreationToken'] = creation_token
        try:
            file_systems = self.list_file_systems(**kwargs)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")

        results = list()
        for item in file_systems:
            item['CreationTime'] = str(item['CreationTime'])
            """
            In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
            AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
            And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount)
            AWS documentation is available here:
            U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
            """
            item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
            item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
            if 'Timestamp' in item['SizeInBytes']:
                item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
            result = camel_dict_to_snake_dict(item)
            result['tags'] = {}
            result['mount_targets'] = []
            # Set tags *after* doing camel to snake
            if result['life_cycle_state'] == self.STATE_AVAILABLE:
                try:
                    result['tags'] = self.get_tags(result['file_system_id'])
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
            results.append(result)
        return results
def prefix_to_attr(attr_id):
    """
    Helper method to convert ID prefix to mount target attribute
    """
    attr_by_prefix = {
        'fsmt-': 'mount_target_id',
        'subnet-': 'subnet_id',
        'eni-': 'network_interface_id',
        'sg-': 'security_groups'
    }
    # Anything without a known prefix is treated as an IP address.
    return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
                             if str(attr_id).startswith(prefix)], 'ip_address')
def first_or_default(items, default=None):
    """
    Helper method to fetch first element of an iterable (if it exists),
    returning *default* when the iterable is empty.
    """
    return next(iter(items), default)
def has_tags(available, required):
    """
    Helper method to determine whether every required tag is already
    present in *available* with exactly the same value.
    """
    return all(key in available and available[key] == value
               for key, value in required.items())
def has_targets(available, required):
    """
    Helper method to determine if mount target requested already exists
    """
    grouped = group_list_of_dict(available)
    # Each requirement is a (value, field) pair; all must be satisfied
    # by the grouped mount target attributes.
    for (value, field) in required:
        if field not in grouped or value not in grouped[field]:
            return False
    return True
def group_list_of_dict(array):
    """
    Helper method to merge a list of dicts into a single dict mapping
    each key to the list of all values seen for it (list values are
    flattened into the accumulated list).
    """
    grouped = defaultdict(list)
    for entry in array:
        for key, value in entry.items():
            if isinstance(value, list):
                grouped[key].extend(value)
            else:
                grouped[key].append(value)
    return grouped
def main():
    """
    Module action handler
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        id=dict(),
        name=dict(aliases=['creation_token']),
        tags=dict(type="dict", default={}),
        targets=dict(type="list", default=[])
    ))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = EFSConnection(module, region, **aws_connect_params)

    name = module.params.get('name')
    fs_id = module.params.get('id')
    tags = module.params.get('tags')
    targets = module.params.get('targets')

    file_systems_info = connection.get_file_systems(fs_id, name)

    # Filter by tags before the (more expensive) mount target lookups.
    if tags:
        file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]

    file_systems_info = connection.get_mount_targets_data(file_systems_info)
    file_systems_info = connection.get_security_groups_data(file_systems_info)

    if targets:
        # Resolve each raw target id to the mount target attribute it
        # filters on (subnet, sg, eni, mount target id or IP address).
        targets = [(item, prefix_to_attr(item)) for item in targets]
        file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]

    module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})


if __name__ == '__main__':
    main()
| gpl-3.0 |
ayesandarmoe/microblog_flask_tutorial | flask/lib/python2.7/site-packages/babel/messages/plurals.py | 29 | 6664 | # -*- coding: utf-8 -*-
"""
babel.messages.plurals
~~~~~~~~~~~~~~~~~~~~~~
Plural form definitions.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from babel.core import default_locale, Locale
from operator import itemgetter
# XXX: remove this file, duplication with babel.plural
LC_CTYPE = default_locale('LC_CTYPE')
PLURALS = {
# Afar
# 'aa': (),
# Abkhazian
# 'ab': (),
# Avestan
# 'ae': (),
# Afrikaans - From Pootle's PO's
'af': (2, '(n != 1)'),
# Akan
# 'ak': (),
# Amharic
# 'am': (),
# Aragonese
# 'an': (),
# Arabic - From Pootle's PO's
'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n>=3 && n<=10 ? 3 : n>=11 && n<=99 ? 4 : 5)'),
# Assamese
# 'as': (),
# Avaric
# 'av': (),
# Aymara
# 'ay': (),
# Azerbaijani
# 'az': (),
# Bashkir
# 'ba': (),
# Belarusian
# 'be': (),
# Bulgarian - From Pootle's PO's
'bg': (2, '(n != 1)'),
# Bihari
# 'bh': (),
# Bislama
# 'bi': (),
# Bambara
# 'bm': (),
# Bengali - From Pootle's PO's
'bn': (2, '(n != 1)'),
# Tibetan - as discussed in private with Andrew West
'bo': (1, '0'),
# Breton
# 'br': (),
# Bosnian
# 'bs': (),
# Catalan - From Pootle's PO's
'ca': (2, '(n != 1)'),
# Chechen
# 'ce': (),
# Chamorro
# 'ch': (),
# Corsican
# 'co': (),
# Cree
# 'cr': (),
# Czech
'cs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Church Slavic
# 'cu': (),
# Chuvash
'cv': (1, '0'),
# Welsh
'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),
# Danish
'da': (2, '(n != 1)'),
# German
'de': (2, '(n != 1)'),
# Divehi
# 'dv': (),
# Dzongkha
'dz': (1, '0'),
# Greek
'el': (2, '(n != 1)'),
# English
'en': (2, '(n != 1)'),
# Esperanto
'eo': (2, '(n != 1)'),
# Spanish
'es': (2, '(n != 1)'),
# Estonian
'et': (2, '(n != 1)'),
# Basque - From Pootle's PO's
'eu': (2, '(n != 1)'),
# Persian - From Pootle's PO's
'fa': (1, '0'),
# Finnish
'fi': (2, '(n != 1)'),
# French
'fr': (2, '(n > 1)'),
# Friulian - From Pootle's PO's
'fur': (2, '(n > 1)'),
# Irish
'ga': (3, '(n==1 ? 0 : n==2 ? 1 : 2)'),
# Galician - From Pootle's PO's
'gl': (2, '(n != 1)'),
# Hausa - From Pootle's PO's
'ha': (2, '(n != 1)'),
# Hebrew
'he': (2, '(n != 1)'),
# Hindi - From Pootle's PO's
'hi': (2, '(n != 1)'),
# Croatian
'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Hungarian
'hu': (1, '0'),
# Armenian - From Pootle's PO's
'hy': (1, '0'),
# Icelandic - From Pootle's PO's
'is': (2, '(n != 1)'),
# Italian
'it': (2, '(n != 1)'),
# Japanese
'ja': (1, '0'),
# Georgian - From Pootle's PO's
'ka': (1, '0'),
# Kongo - From Pootle's PO's
'kg': (2, '(n != 1)'),
# Khmer - From Pootle's PO's
'km': (1, '0'),
# Korean
'ko': (1, '0'),
# Kurdish - From Pootle's PO's
'ku': (2, '(n != 1)'),
# Lao - Another member of the Tai language family, like Thai.
'lo': (1, '0'),
# Lithuanian
'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Latvian
'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
# Maltese - From Pootle's PO's
'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
# Norwegian Bokmål
'nb': (2, '(n != 1)'),
# Dutch
'nl': (2, '(n != 1)'),
# Norwegian Nynorsk
'nn': (2, '(n != 1)'),
# Norwegian
'no': (2, '(n != 1)'),
# Punjabi - From Pootle's PO's
'pa': (2, '(n != 1)'),
# Polish
'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Portuguese
'pt': (2, '(n != 1)'),
# Brazilian
'pt_BR': (2, '(n > 1)'),
# Romanian - From Pootle's PO's
'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
# Russian
'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Slovak
'sk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Slovenian
'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
# Serbian - From Pootle's PO's
'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Southern Sotho - From Pootle's PO's
'st': (2, '(n != 1)'),
# Swedish
'sv': (2, '(n != 1)'),
# Thai
'th': (1, '0'),
# Turkish
'tr': (1, '0'),
# Ukrainian
'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Venda - From Pootle's PO's
've': (2, '(n != 1)'),
# Vietnamese - From Pootle's PO's
'vi': (1, '0'),
# Xhosa - From Pootle's PO's
'xh': (2, '(n != 1)'),
# Chinese - From Pootle's PO's (modified)
'zh': (1, '0'),
}
DEFAULT_PLURAL = (2, '(n != 1)')
class _PluralTuple(tuple):
"""A tuple with plural information."""
__slots__ = ()
num_plurals = property(itemgetter(0), doc="""
The number of plurals used by the locale.""")
plural_expr = property(itemgetter(1), doc="""
The plural expression used by the locale.""")
plural_forms = property(lambda x: 'nplurals=%s; plural=%s;' % x, doc="""
The plural expression used by the catalog or locale.""")
def __str__(self):
return self.plural_forms
def get_plural(locale=LC_CTYPE):
    """A tuple with the information catalogs need to perform proper
    pluralization. The first item of the tuple is the number of plural
    forms, the second the plural expression.

    >>> get_plural(locale='en')
    (2, '(n != 1)')
    >>> get_plural(locale='ga')
    (3, '(n==1 ? 0 : n==2 ? 1 : 2)')

    The object returned is a special tuple with additional members:

    >>> tup = get_plural("ja")
    >>> tup.num_plurals
    1
    >>> tup.plural_expr
    '0'
    >>> tup.plural_forms
    'nplurals=1; plural=0;'

    Converting the tuple into a string prints the plural forms for a
    gettext catalog:

    >>> str(tup)
    'nplurals=1; plural=0;'
    """
    locale = Locale.parse(locale)
    try:
        # Prefer an exact match on the full locale identifier.
        tup = PLURALS[str(locale)]
    except KeyError:
        try:
            # Fall back to the bare language code.
            tup = PLURALS[locale.language]
        except KeyError:
            tup = DEFAULT_PLURAL
    return _PluralTuple(tup)
| gpl-2.0 |
reinaH/osf.io | scripts/tests/test_glacier_audit.py | 54 | 2368 | # -*- coding: utf-8 -*-
from nose.tools import * # noqa
from tests.base import OsfTestCase
from website.addons.osfstorage.tests.factories import FileVersionFactory
from website.addons.osfstorage import model
from scripts.osfstorage import glacier_audit
# Canned Glacier archive listing used as the fixture for the tests below.
mock_output = {
    'ArchiveList': [
        {
            'ArchiveDescription': 'abcdef',
            'ArchiveId': '123456',
            'Size': 24601,
        },
    ],
}

# Index the archives by description, mirroring the inventory mapping the
# audit script consumes.
mock_inventory = {
    each['ArchiveDescription']: each
    for each in mock_output['ArchiveList']
}
class TestGlacierInventory(OsfTestCase):
    """Checks that check_glacier_version validates a file version against
    the mocked Glacier inventory and raises on each kind of mismatch."""

    def tearDown(self):
        super(TestGlacierInventory, self).tearDown()
        # Drop versions created by the test to keep the database clean.
        model.OsfStorageFileVersion.remove()

    def test_inventory(self):
        # Object name, archive id and size all match: must not raise.
        version = FileVersionFactory(
            size=24601,
            metadata={'archive': '123456'},
            location={
                'service': 'cloud',
                'container': 'cloud',
                'object': 'abcdef',
            },
        )
        glacier_audit.check_glacier_version(version, mock_inventory)

    def test_inventory_not_found(self):
        # Object name 'abcdefg' is absent from the inventory.
        version = FileVersionFactory(
            size=24601,
            metadata={'archive': '123456'},
            location={
                'service': 'cloud',
                'container': 'cloud',
                'object': 'abcdefg',
            },
        )
        with assert_raises(glacier_audit.NotFound):
            glacier_audit.check_glacier_version(version, mock_inventory)

    def test_inventory_wrong_archive_id(self):
        # Archive id differs from the inventory entry.
        version = FileVersionFactory(
            size=24601,
            metadata={'archive': '1234567'},
            location={
                'service': 'cloud',
                'container': 'cloud',
                'object': 'abcdef',
            },
        )
        with assert_raises(glacier_audit.BadArchiveId):
            glacier_audit.check_glacier_version(version, mock_inventory)

    def test_inventory_wrong_size(self):
        # Size differs from the inventory entry.
        version = FileVersionFactory(
            size=24602,
            metadata={'archive': '123456'},
            location={
                'service': 'cloud',
                'container': 'cloud',
                'object': 'abcdef',
            },
        )
        with assert_raises(glacier_audit.BadSize):
            glacier_audit.check_glacier_version(version, mock_inventory)
| apache-2.0 |
samuelctabor/ardupilot | Tools/scripts/uploader.py | 11 | 39942 | #!/usr/bin/env python
############################################################################
#
# Copyright (c) 2012-2017 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# Serial firmware uploader for the PX4FMU bootloader
#
# The PX4 firmware file is a JSON-encoded Python object, containing
# metadata fields and a zlib-compressed base64-encoded firmware image.
#
# The uploader uses the following fields from the firmware file:
#
# image
# The firmware that will be uploaded.
# image_size
# The size of the firmware in bytes.
# board_id
# The board for which the firmware is intended.
# board_revision
# Currently only used for informational purposes.
#
# for python2.7 compatibility
from __future__ import print_function
import sys
import argparse
import binascii
import serial
import struct
import json
import zlib
import base64
import time
import array
import os
import platform
import re
from sys import platform as _platform
# Windows Subsystem for Linux reports "Microsoft" in its kernel release string.
is_WSL = bool("Microsoft" in platform.uname()[2])

# default list of port names to look for autopilots
default_ports = ['/dev/serial/by-id/usb-Ardu*',
                 '/dev/serial/by-id/usb-3D*',
                 '/dev/serial/by-id/usb-APM*',
                 '/dev/serial/by-id/usb-Radio*',
                 '/dev/serial/by-id/usb-*_3DR_*',
                 '/dev/serial/by-id/usb-Hex_Technology_Limited*',
                 '/dev/serial/by-id/usb-Hex_ProfiCNC*',
                 '/dev/serial/by-id/usb-Holybro*',
                 '/dev/serial/by-id/usb-mRo*',
                 '/dev/tty.usbmodem*']

# Cygwin and WSL expose serial devices as ttyS* rather than by-id symlinks.
if "cygwin" in _platform or is_WSL:
    default_ports += ['/dev/ttyS*']

# Detect python version
if sys.version_info[0] < 3:
    runningPython3 = False
else:
    runningPython3 = True

# dictionary of bootloader {boardID: (firmware boardID, boardname), ...}
# designating firmware builds compatible with multiple boardIDs
compatible_IDs = {33: (9, 'AUAVX2.1')}
# CRC equivalent to crc_crc32() in AP_Math/crc.cpp
crctab = array.array('I', [
    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
    0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
    0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
    0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
    0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
    0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
    0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
    0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
    0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
    0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
    0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
    0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
    0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
    0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
    0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
    0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
    0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
    0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
    0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
    0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
    0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
    0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
    0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
    0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
    0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
    0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
    0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
    0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
    0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
    0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d])


def crc32(bytes, state=0):
    '''crc32 exposed for use by chibios.py

    Table-driven, reflected CRC-32 over *bytes*, continuing from *state*.
    Note that no initial or final XOR is applied here; the caller owns the
    running state (zlib.crc32(data) == crc32(data, 0xFFFFFFFF) ^ 0xFFFFFFFF).
    '''
    for octet in bytes:
        state = crctab[(state ^ octet) & 0xff] ^ (state >> 8)
    return state
class firmware(object):
    '''Loads a firmware file.

    The file is a JSON document (see the module header) whose 'image' field
    holds a zlib-compressed, base64-encoded firmware image.  After loading,
    `self.image` is the decompressed image padded with 0xff to a multiple of
    4 bytes, and `self.desc` holds the remaining metadata fields.
    '''

    # class-level defaults; __init__ replaces desc and image per instance
    desc = {}
    image = bytes()
    crcpad = bytearray(b'\xff\xff\xff\xff')

    def __init__(self, path):
        # read the file; `with` guarantees the handle is closed even if
        # json.load raises on a malformed firmware file
        with open(path, "r") as f:
            self.desc = json.load(f)

        self.image = bytearray(zlib.decompress(base64.b64decode(self.desc['image'])))

        # pad image to 4-byte length with 0xff (erased-flash value).
        # BUGFIX: bytearray.append() requires an int; appending the
        # one-character string '\xff' raises TypeError on Python 3.
        while ((len(self.image) % 4) != 0):
            self.image.append(0xff)

    def property(self, propname):
        '''Return a metadata field (e.g. 'board_id', 'image_size') from the file.'''
        return self.desc[propname]

    def crc(self, padlen):
        '''Return the CRC of the image as if padded with 0xff up to padlen bytes.'''
        state = crc32(self.image, int(0))
        for i in range(len(self.image), (padlen - 1), 4):
            state = crc32(self.crcpad, state)
        return state
class uploader(object):
    '''Uploads a firmware file to the PX FMU bootloader.

    Implements the PX4 bootloader serial protocol: every command is a
    command byte (plus arguments) terminated by EOC, and every reply ends
    with INSYNC followed by a status byte (OK/FAILED/INVALID/...).
    '''

    # protocol bytes
    INSYNC = b'\x12'
    EOC = b'\x20'

    # reply bytes
    OK = b'\x10'
    FAILED = b'\x11'
    INVALID = b'\x13'  # rev3+
    BAD_SILICON_REV = b'\x14'  # rev5+

    # command bytes
    NOP = b'\x00'  # guaranteed to be discarded by the bootloader
    GET_SYNC = b'\x21'
    GET_DEVICE = b'\x22'
    CHIP_ERASE = b'\x23'
    CHIP_VERIFY = b'\x24'  # rev2 only
    PROG_MULTI = b'\x27'
    READ_MULTI = b'\x28'  # rev2 only
    GET_CRC = b'\x29'  # rev3+
    GET_OTP = b'\x2a'  # rev4+ , get a word from OTP area
    GET_SN = b'\x2b'  # rev4+ , get a word from SN area
    GET_CHIP = b'\x2c'  # rev5+ , get chip version
    SET_BOOT_DELAY = b'\x2d'  # rev5+ , set boot delay
    GET_CHIP_DES = b'\x2e'  # rev5+ , get chip description in ASCII
    MAX_DES_LENGTH = 20
    REBOOT = b'\x30'
    SET_BAUD = b'\x33'  # set baud

    INFO_BL_REV = b'\x01'  # bootloader protocol revision
    BL_REV_MIN = 2  # minimum supported bootloader protocol
    BL_REV_MAX = 5  # maximum supported bootloader protocol
    INFO_BOARD_ID = b'\x02'  # board type
    INFO_BOARD_REV = b'\x03'  # board revision
    INFO_FLASH_SIZE = b'\x04'  # max firmware size in bytes

    PROG_MULTI_MAX = 252  # protocol max is 255, must be multiple of 4
    READ_MULTI_MAX = 252  # protocol max is 255

    # byte sequences used to coax a running NSH shell into the bootloader
    NSH_INIT = bytearray(b'\x0d\x0d\x0d')
    NSH_REBOOT_BL = b"reboot -b\n"
    NSH_REBOOT = b"reboot\n"

    def __init__(self, portname, baudrate_bootloader, baudrate_flightstack, baudrate_bootloader_flash=None, target_system=None, target_component=None, source_system=None, source_component=None):
        '''Open *portname* at the bootloader baudrate and prepare the reboot
        messages.  baudrate_flightstack is a list of baudrates tried in turn
        by send_reboot(); a pymavlink reboot message is built only when
        target_system is given, otherwise canned packets are used.'''
        # canned MAVLink COMMAND_LONG reboot-to-bootloader packets for
        # target system 1 and 0 respectively (used when pymavlink is absent)
        self.MAVLINK_REBOOT_ID1 = bytearray(b'\xfe\x21\x72\xff\x00\x4c\x00\x00\x40\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x01\x00\x00\x53\x6b')
        self.MAVLINK_REBOOT_ID0 = bytearray(b'\xfe\x21\x45\xff\x00\x4c\x00\x00\x40\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x00\x00\x00\xcc\x37')
        if target_component is None:
            target_component = 1
        if source_system is None:
            source_system = 255
        if source_component is None:
            source_component = 1

        # open the port, keep the default timeout short so we can poll quickly
        self.port = serial.Serial(portname, baudrate_bootloader, timeout=1.0)
        self.baudrate_bootloader = baudrate_bootloader
        if baudrate_bootloader_flash is not None:
            self.baudrate_bootloader_flash = baudrate_bootloader_flash
        else:
            self.baudrate_bootloader_flash = self.baudrate_bootloader
        self.baudrate_flightstack = baudrate_flightstack
        self.baudrate_flightstack_idx = -1

        # generate mavlink reboot message:
        if target_system is not None:
            from pymavlink import mavutil
            m = mavutil.mavlink.MAVLink_command_long_message(
                target_system,
                target_component,
                mavutil.mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN,
                1,  # confirmation
                3,  # remain in bootloader
                0,
                0,
                0,
                0,
                0,
                0)
            mav = mavutil.mavlink.MAVLink(self,
                                          srcSystem=source_system,
                                          srcComponent=source_component)
            self.MAVLINK_REBOOT_ID1 = m.pack(mav)
            self.MAVLINK_REBOOT_ID0 = None

    def close(self):
        '''Close the serial port if it is open.'''
        if self.port is not None:
            self.port.close()

    def open(self):
        '''(Re)open the serial port, retrying for up to 0.2s.'''
        timeout = time.time() + 0.2

        # Attempt to open the port while it exists and until timeout occurs
        while self.port is not None:
            portopen = True
            try:
                # pyserial >= 3.0 uses the is_open property...
                portopen = self.port.is_open
            except AttributeError:
                # ...older pyserial uses the isOpen() method
                portopen = self.port.isOpen()

            if not portopen and time.time() < timeout:
                try:
                    self.port.open()
                except OSError:
                    # wait for the port to be ready
                    time.sleep(0.04)
                except serial.SerialException:
                    # if open fails, try again later
                    time.sleep(0.04)
            else:
                break

    def __send(self, c):
        # write raw bytes to the bootloader
        self.port.write(c)

    def __recv(self, count=1):
        # read exactly *count* bytes or raise on the 1s serial timeout
        c = self.port.read(count)
        if len(c) < 1:
            raise RuntimeError("timeout waiting for data (%u bytes)" % count)
        # print("recv " + binascii.hexlify(c))
        return c

    def __recv_int(self):
        # receive a little-endian unsigned 32-bit integer
        raw = self.__recv(4)
        val = struct.unpack("<I", raw)
        return val[0]

    def __getSync(self):
        '''Consume and validate the INSYNC/OK reply trailer; raise on error status.'''
        self.port.flush()
        c = bytes(self.__recv())
        if c != self.INSYNC:
            raise RuntimeError("unexpected %s instead of INSYNC" % c)
        c = self.__recv()
        if c == self.INVALID:
            raise RuntimeError("bootloader reports INVALID OPERATION")
        if c == self.FAILED:
            raise RuntimeError("bootloader reports OPERATION FAILED")
        if c != self.OK:
            raise RuntimeError("unexpected response 0x%x instead of OK" % ord(c))

    # attempt to get back into sync with the bootloader
    def __sync(self):
        # send a stream of ignored bytes longer than the longest possible conversation
        # that we might still have in progress
        # self.__send(uploader.NOP * (uploader.PROG_MULTI_MAX + 2))
        self.port.flushInput()
        self.__send(uploader.GET_SYNC +
                    uploader.EOC)
        self.__getSync()

    def __trySync(self):
        '''Like __getSync but non-fatal: returns True/False instead of raising
        on protocol errors (still raises for unprogrammable silicon).'''
        try:
            self.port.flush()
            if (self.__recv() != self.INSYNC):
                # print("unexpected 0x%x instead of INSYNC" % ord(c))
                return False
            c = self.__recv()
            if (c == self.BAD_SILICON_REV):
                raise NotImplementedError()
            if (c != self.OK):
                # print("unexpected 0x%x instead of OK" % ord(c))
                return False
            return True

        except NotImplementedError:
            raise RuntimeError("Programing not supported for this version of silicon!\n"
                               "See https://pixhawk.org/help/errata")
        except RuntimeError:
            # timeout, no response yet
            return False

    # send the GET_DEVICE command and wait for an info parameter
    def __getInfo(self, param):
        self.__send(uploader.GET_DEVICE + param + uploader.EOC)
        value = self.__recv_int()
        self.__getSync()
        return value

    # send the GET_OTP command and wait for an info parameter
    def __getOTP(self, param):
        t = struct.pack("I", param)  # int param as 32bit ( 4 byte ) char array.
        self.__send(uploader.GET_OTP + t + uploader.EOC)
        value = self.__recv(4)
        self.__getSync()
        return value

    # send the GET_SN command and wait for an info parameter
    def __getSN(self, param):
        t = struct.pack("I", param)  # int param as 32bit ( 4 byte ) char array.
        self.__send(uploader.GET_SN + t + uploader.EOC)
        value = self.__recv(4)
        self.__getSync()
        return value

    # send the GET_CHIP command
    def __getCHIP(self):
        self.__send(uploader.GET_CHIP + uploader.EOC)
        value = self.__recv_int()
        self.__getSync()
        return value

    # send the GET_CHIP command
    def __getCHIPDes(self):
        # returns [family, revision] as decoded ASCII strings
        self.__send(uploader.GET_CHIP_DES + uploader.EOC)
        length = self.__recv_int()
        value = self.__recv(length)
        self.__getSync()
        if runningPython3:
            value = value.decode('ascii')
        peices = value.split(",")
        return peices

    def __drawProgressBar(self, label, progress, maxVal):
        # render a 20-character in-place progress bar on stdout
        if maxVal < progress:
            progress = maxVal

        percent = (float(progress) / float(maxVal)) * 100.0

        sys.stdout.write("\r%s: [%-20s] %.1f%%" % (label, '='*int(percent/5.0), percent))
        sys.stdout.flush()

    # send the CHIP_ERASE command and wait for the bootloader to become ready
    def __erase(self, label):
        print("\n", end='')
        self.__send(uploader.CHIP_ERASE +
                    uploader.EOC)

        # erase is very slow, give it 20s
        deadline = time.time() + 20.0
        while time.time() < deadline:

            # Draw progress bar (erase usually takes about 9 seconds to complete)
            estimatedTimeRemaining = deadline-time.time()
            if estimatedTimeRemaining >= 9.0:
                self.__drawProgressBar(label, 20.0-estimatedTimeRemaining, 9.0)
            else:
                self.__drawProgressBar(label, 10.0, 10.0)
                sys.stdout.write(" (timeout: %d seconds) " % int(deadline-time.time()))
                sys.stdout.flush()

            if self.__trySync():
                self.__drawProgressBar(label, 10.0, 10.0)
                return

        raise RuntimeError("timed out waiting for erase")

    # send a PROG_MULTI command to write a collection of bytes
    def __program_multi(self, data):

        if runningPython3:
            length = len(data).to_bytes(1, byteorder='big')
        else:
            length = chr(len(data))

        self.__send(uploader.PROG_MULTI)
        self.__send(length)
        self.__send(data)
        self.__send(uploader.EOC)
        self.__getSync()

    # verify multiple bytes in flash
    def __verify_multi(self, data):

        if runningPython3:
            length = len(data).to_bytes(1, byteorder='big')
        else:
            length = chr(len(data))

        self.__send(uploader.READ_MULTI)
        self.__send(length)
        self.__send(uploader.EOC)
        self.port.flush()
        programmed = self.__recv(len(data))
        if programmed != data:
            print("got    " + binascii.hexlify(programmed))
            print("expect " + binascii.hexlify(data))
            return False
        self.__getSync()
        return True

    # read multiple bytes from flash
    def __read_multi(self, length):

        if runningPython3:
            clength = length.to_bytes(1, byteorder='big')
        else:
            clength = chr(length)

        self.__send(uploader.READ_MULTI)
        self.__send(clength)
        self.__send(uploader.EOC)
        self.port.flush()
        ret = self.__recv(length)
        self.__getSync()
        return ret

    # send the reboot command
    def __reboot(self):
        self.__send(uploader.REBOOT +
                    uploader.EOC)
        self.port.flush()

        # v3+ can report failure if the first word flash fails
        if self.bl_rev >= 3:
            self.__getSync()

    # split a sequence into a list of size-constrained pieces
    def __split_len(self, seq, length):
        return [seq[i:i+length] for i in range(0, len(seq), length)]

    # upload code
    def __program(self, label, fw):
        print("\n", end='')
        code = fw.image
        groups = self.__split_len(code, uploader.PROG_MULTI_MAX)

        uploadProgress = 0
        for bytes in groups:
            self.__program_multi(bytes)

            # Print upload progress (throttled, so it does not delay upload progress)
            uploadProgress += 1
            if uploadProgress % 256 == 0:
                self.__drawProgressBar(label, uploadProgress, len(groups))
        self.__drawProgressBar(label, 100, 100)

    # download code
    def __download(self, label, fw):
        print("\n", end='')
        f = open(fw, 'wb')

        downloadProgress = 0
        readsize = uploader.READ_MULTI_MAX
        total = 0
        while True:
            n = min(self.fw_maxsize - total, readsize)
            bb = self.__read_multi(n)
            f.write(bb)

            total += len(bb)
            # Print download progress (throttled, so it does not delay download progress)
            downloadProgress += 1
            if downloadProgress % 256 == 0:
                self.__drawProgressBar(label, total, self.fw_maxsize)
            if len(bb) < readsize:
                break
        f.close()
        self.__drawProgressBar(label, total, self.fw_maxsize)
        print("\nReceived %u bytes to %s" % (total, fw))

    # verify code
    def __verify_v2(self, label, fw):
        # rev2 bootloaders: read back the whole image and compare byte-by-byte
        print("\n", end='')
        self.__send(uploader.CHIP_VERIFY +
                    uploader.EOC)
        self.__getSync()
        code = fw.image
        groups = self.__split_len(code, uploader.READ_MULTI_MAX)
        verifyProgress = 0
        for bytes in groups:
            verifyProgress += 1
            if verifyProgress % 256 == 0:
                self.__drawProgressBar(label, verifyProgress, len(groups))
            if (not self.__verify_multi(bytes)):
                raise RuntimeError("Verification failed")
        self.__drawProgressBar(label, 100, 100)

    def __verify_v3(self, label, fw):
        # rev3+ bootloaders: compare a single CRC over the whole flash area
        print("\n", end='')
        self.__drawProgressBar(label, 1, 100)
        expect_crc = fw.crc(self.fw_maxsize)
        self.__send(uploader.GET_CRC +
                    uploader.EOC)
        report_crc = self.__recv_int()
        self.__getSync()
        if report_crc != expect_crc:
            print("Expected 0x%x" % expect_crc)
            print("Got      0x%x" % report_crc)
            raise RuntimeError("Program CRC failed")
        self.__drawProgressBar(label, 100, 100)

    def __set_boot_delay(self, boot_delay):
        self.__send(uploader.SET_BOOT_DELAY +
                    struct.pack("b", boot_delay) +
                    uploader.EOC)
        self.__getSync()

    def __setbaud(self, baud):
        self.__send(uploader.SET_BAUD +
                    struct.pack("I", baud) +
                    uploader.EOC)
        self.__getSync()

    # get basic data about the board
    def identify(self):
        '''Query protocol revision, board type/revision and flash size;
        raises RuntimeError if the bootloader protocol is unsupported.'''
        # make sure we are in sync before starting
        self.__sync()

        # get the bootloader protocol ID first
        self.bl_rev = self.__getInfo(uploader.INFO_BL_REV)
        if (self.bl_rev < uploader.BL_REV_MIN) or (self.bl_rev > uploader.BL_REV_MAX):
            print("Unsupported bootloader protocol %d" % self.bl_rev)
            raise RuntimeError("Bootloader protocol mismatch")

        self.board_type = self.__getInfo(uploader.INFO_BOARD_ID)
        self.board_rev = self.__getInfo(uploader.INFO_BOARD_REV)
        self.fw_maxsize = self.__getInfo(uploader.INFO_FLASH_SIZE)

    def dump_board_info(self):
        '''Print everything the bootloader can tell us about the board
        (protocol rev, OTP/serial data, chip family/revision, flash size).
        Requires identify() to have been called first.'''
        # OTP added in v4:
        print("Bootloader Protocol: %u" % self.bl_rev)
        if self.bl_rev > 3:
            otp = b''
            for byte in range(0, 32*6, 4):
                x = self.__getOTP(byte)
                otp = otp + x
                # print(binascii.hexlify(x).decode('Latin-1') + ' ', end='')
            # see src/modules/systemlib/otp.h in px4 code:
            otp_id = otp[0:4]
            otp_idtype = otp[4:5]
            otp_vid = otp[8:4:-1]
            otp_pid = otp[12:8:-1]
            otp_coa = otp[32:160]
            # show user:
            try:
                print("OTP:")
                print("  type: " + otp_id.decode('Latin-1'))
                print("  idtype: " + binascii.b2a_qp(otp_idtype).decode('Latin-1'))
                print("  vid: " + binascii.hexlify(otp_vid).decode('Latin-1'))
                print("  pid: " + binascii.hexlify(otp_pid).decode('Latin-1'))
                print("  coa: " + binascii.b2a_base64(otp_coa).decode('Latin-1'), end='')
                print("  sn: ", end='')
                for byte in range(0, 12, 4):
                    x = self.__getSN(byte)
                    x = x[::-1]  # reverse the bytes
                    print(binascii.hexlify(x).decode('Latin-1'), end='')  # show user
                print('')
            except Exception:
                # ignore bad character encodings
                pass

        if self.bl_rev >= 5:
            des = self.__getCHIPDes()
            if (len(des) == 2):
                print("ChipDes:")
                print("  family: %s" % des[0])
                print("  revision: %s" % des[1])
        print("Chip:")
        if self.bl_rev > 4:
            chip = self.__getCHIP()
            mcu_id = chip & 0xfff
            revs = {}

            F4_IDS = {
                0x413: "STM32F40x_41x",
                0x419: "STM32F42x_43x",
                0x421: "STM32F42x_446xx",
            }
            F7_IDS = {
                0x449: "STM32F74x_75x",
                0x451: "STM32F76x_77x",
            }
            H7_IDS = {
                0x450: "STM32H74x_75x",
            }

            family = mcu_id & 0xfff

            # NOTE: chip_s is computed but only the branches below print;
            # unknown family/revision currently produces no chip line
            chip_s = "%x [unknown family/revision]" % (chip)

            if family in F4_IDS:
                mcu = F4_IDS[family]
                MCU_REV_STM32F4_REV_A = 0x1000
                MCU_REV_STM32F4_REV_Z = 0x1001
                MCU_REV_STM32F4_REV_Y = 0x1003
                MCU_REV_STM32F4_REV_1 = 0x1007
                MCU_REV_STM32F4_REV_3 = 0x2001
                revs = {
                    MCU_REV_STM32F4_REV_A: ("A", True),
                    MCU_REV_STM32F4_REV_Z: ("Z", True),
                    MCU_REV_STM32F4_REV_Y: ("Y", True),
                    MCU_REV_STM32F4_REV_1: ("1", True),
                    MCU_REV_STM32F4_REV_3: ("3", False),
                }

                rev = (chip & 0xFFFF0000) >> 16

                if rev in revs:
                    (label, flawed) = revs[rev]
                    if flawed and family == 0x419:
                        print("  %x %s rev%s (flawed; 1M limit, see STM32F42XX Errata sheet sec. 2.1.10)" % (chip, mcu, label,))
                    elif family == 0x419:
                        print("  %x %s rev%s (no 1M flaw)" % (chip, mcu, label,))
                    else:
                        print("  %x %s rev%s" % (chip, mcu, label,))
            elif family in F7_IDS:
                print("  %s %08x" % (F7_IDS[family], chip))
            elif family in H7_IDS:
                print("  %s %08x" % (H7_IDS[family], chip))
        else:
            print("  [unavailable; bootloader too old]")

        print("Info:")
        print("  flash size: %u" % self.fw_maxsize)
        name = self.board_name_for_board_id(self.board_type)
        if name is not None:
            print("  board_type: %u (%s)" % (self.board_type, name))
        else:
            print("  board_type: %u" % self.board_type)
        print("  board_rev: %u" % self.board_rev)

        print("Identification complete")

    def board_name_for_board_id(self, board_id):
        '''return name for board_id, None if it can't be found'''
        shared_ids = {
            9: "fmuv3",
            50: "fmuv5",
        }
        if board_id in shared_ids:
            return shared_ids[board_id]

        try:
            ret = []

            hwdef_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                     "..", "..", "libraries", "AP_HAL_ChibiOS", "hwdef")
            # uploader.py is swiped into other places, so if the dir
            # doesn't exist then fail silently
            if os.path.exists(hwdef_dir):
                dirs = [x if (x not in ["scripts", "common", "STM32CubeConf"] and os.path.isdir(os.path.join(hwdef_dir, x))) else None for x in os.listdir(hwdef_dir)]
                for adir in dirs:
                    if adir is None:
                        continue
                    filepath = os.path.join(hwdef_dir, adir, "hwdef.dat")
                    if not os.path.exists(filepath):
                        continue
                    fh = open(filepath)
                    if fh is None:
                        # print("Failed to open (%s)" % filepath)
                        continue
                    text = fh.readlines()
                    for line in text:
                        m = re.match("^\s*APJ_BOARD_ID\s+(\d+)\s*$", line)
                        if m is None:
                            continue
                        if int(m.group(1)) == board_id:
                            ret.append(adir)
            if len(ret) == 0:
                return None
            return " or ".join(ret)
        except Exception as e:
            print("Failed to get name: %s" % str(e))
            return None

    # upload the firmware
    def upload(self, fw, force=False, boot_delay=None):
        '''Flash *fw* (a firmware object) to the board: check board/firmware
        compatibility, erase, program, verify, then reboot.  Raises IOError
        on a board mismatch (unless force) and RuntimeError on protocol or
        verification failure.'''
        # Make sure we are doing the right thing
        if self.board_type != fw.property('board_id'):
            # ID mismatch: check compatibility
            incomp = True
            if self.board_type in compatible_IDs:
                comp_fw_id = compatible_IDs[self.board_type][0]
                board_name = compatible_IDs[self.board_type][1]
                if comp_fw_id == fw.property('board_id'):
                    msg = "Target %s (board_id: %d) is compatible with firmware for board_id=%u)" % (
                        board_name, self.board_type, fw.property('board_id'))
                    print("INFO: %s" % msg)
                    incomp = False
            if incomp:
                msg = "Firmware not suitable for this board (board_type=%u (%s) board_id=%u (%s))" % (
                    self.board_type,
                    self.board_name_for_board_id(self.board_type),
                    fw.property('board_id'),
                    self.board_name_for_board_id(fw.property('board_id')))
                print("WARNING: %s" % msg)
                if force:
                    print("FORCED WRITE, FLASHING ANYWAY!")
                else:
                    raise IOError(msg)

        self.dump_board_info()

        if self.fw_maxsize < fw.property('image_size'):
            raise RuntimeError("Firmware image is too large for this board")

        if self.baudrate_bootloader_flash != self.baudrate_bootloader:
            print("Setting baudrate to %u" % self.baudrate_bootloader_flash)
            self.__setbaud(self.baudrate_bootloader_flash)
            self.port.baudrate = self.baudrate_bootloader_flash
            self.__sync()

        self.__erase("Erase ")
        self.__program("Program", fw)

        if self.bl_rev == 2:
            self.__verify_v2("Verify ", fw)
        else:
            self.__verify_v3("Verify ", fw)

        if boot_delay is not None:
            self.__set_boot_delay(boot_delay)

        print("\nRebooting.\n")
        self.__reboot()
        self.port.close()

    def __next_baud_flightstack(self):
        # advance to the next candidate flight-stack baudrate; False when exhausted
        self.baudrate_flightstack_idx = self.baudrate_flightstack_idx + 1
        if self.baudrate_flightstack_idx >= len(self.baudrate_flightstack):
            return False

        try:
            self.port.baudrate = self.baudrate_flightstack[self.baudrate_flightstack_idx]
        except Exception:
            return False

        return True

    def send_reboot(self):
        '''Ask a running flight stack to reboot into the bootloader, trying
        MAVLink first and then NSH, at the next flight-stack baudrate.
        Returns False once all baudrates are exhausted.'''
        if (not self.__next_baud_flightstack()):
            return False

        print("Attempting reboot on %s with baudrate=%d..." % (self.port.port, self.port.baudrate), file=sys.stderr)
        print("If the board does not respond, unplug and re-plug the USB connector.", file=sys.stderr)

        try:
            # try MAVLINK command first
            self.port.flush()
            if self.MAVLINK_REBOOT_ID1 is not None:
                self.__send(self.MAVLINK_REBOOT_ID1)
            if self.MAVLINK_REBOOT_ID0 is not None:
                self.__send(self.MAVLINK_REBOOT_ID0)
            # then try reboot via NSH
            self.__send(uploader.NSH_INIT)
            self.__send(uploader.NSH_REBOOT_BL)
            self.__send(uploader.NSH_INIT)
            self.__send(uploader.NSH_REBOOT)
            self.port.flush()
            self.port.baudrate = self.baudrate_bootloader
        except Exception:
            # best-effort: restore the bootloader baudrate even on failure
            try:
                self.port.flush()
                self.port.baudrate = self.baudrate_bootloader
            except Exception:
                pass

        return True

    # download the firmware from the board to the file named by *fw*
    def download(self, fw):
        if self.baudrate_bootloader_flash != self.baudrate_bootloader:
            print("Setting baudrate to %u" % self.baudrate_bootloader_flash)
            self.__setbaud(self.baudrate_bootloader_flash)
            self.port.baudrate = self.baudrate_bootloader_flash
            self.__sync()

        self.__download("Download", fw)
        self.port.close()
def ports_to_try(args):
    """Return the serial port names to probe, expanded and platform-filtered.

    Uses args.port (comma-separated) when given, otherwise default_ports.
    """
    # Wildcard patterns come either from the user or from the defaults.
    patterns = default_ports if args.port is None else args.port.split(",")

    # use glob to support wildcard ports. This allows the use of
    # /dev/serial/by-id/usb-ArduPilot on Linux, which prevents the
    # upload from causing modem hangups etc
    if any(osname in _platform for osname in ("linux", "darwin", "cygwin")):
        import glob
        candidates = []
        for pattern in patterns:
            candidates.extend(glob.glob(pattern))
    else:
        candidates = patterns

    # filter ports based on platform:
    if "cygwin" in _platform:
        # Cygwin, don't open MAC OS and Win ports, we are more like
        # Linux. Cygwin needs to be before Windows test
        return candidates
    if "darwin" in _platform:
        # OS X, don't open Windows and Linux ports
        return [port for port in candidates if "COM" not in port and "ACM" not in port]
    if "win" in _platform:
        # Windows, don't open POSIX ports
        return [port for port in candidates if "/" not in port]
    return candidates
def modemmanager_check():
    """Print a prominent warning when ModemManager appears to be installed,
    since it grabs non-modem serial devices such as autopilot boards."""
    if not os.path.exists("/usr/sbin/ModemManager"):
        return
    print("""
==========================================================================================================
WARNING: You should uninstall ModemManager as it conflicts with any non-modem serial device (like Pixhawk)
==========================================================================================================
""")
def find_bootloader(up, port):
    """Keep opening *port* and trying to identify a bootloader on it.

    When identification fails, ask the flight stack to reboot into the
    bootloader and retry; return True on success, False once no more
    reboot baudrates are left to try.
    """
    while True:
        up.open()

        # port is open, try talking to it
        try:
            # identify the bootloader
            up.identify()
            print("Found board %x,%x bootloader rev %x on %s" % (up.board_type, up.board_rev, up.bl_rev, port))
            return True
        except Exception:
            pass

        reboot_sent = up.send_reboot()

        # wait for the reboot, without we might run into Serial I/O Error 5
        time.sleep(0.25)

        # always close the port
        up.close()

        # wait for the close, without we might run into Serial I/O Error 6
        time.sleep(0.3)

        if not reboot_sent:
            return False
def main():
    '''Command-line entry point: parse arguments, locate a board in
    bootloader mode on one of the candidate ports, then identify,
    download from, or flash it.'''

    # Parse commandline arguments
    parser = argparse.ArgumentParser(description="Firmware uploader for the PX autopilot system.")
    parser.add_argument('--port', action="store", help="Comma-separated list of serial port(s) to which the FMU may be attached",
                        default=None)
    parser.add_argument('--baud-bootloader', action="store", type=int, default=115200, help="Baud rate of the serial port (default is 115200) when communicating with bootloader, only required for true serial ports.")
    parser.add_argument('--baud-bootloader-flash', action="store", type=int, default=None, help="Attempt to negotiate this baudrate with bootloader for flashing.")
    parser.add_argument('--baud-flightstack', action="store", default="57600", help="Comma-separated list of baud rate of the serial port (default is 57600) when communicating with flight stack (Mavlink or NSH), only required for true serial ports.")
    parser.add_argument('--force', action='store_true', default=False, help='Override board type check and continue loading')
    parser.add_argument('--boot-delay', type=int, default=None, help='minimum boot delay to store in flash')
    parser.add_argument('--target-system', type=int, action="store", help="System ID to update")
    parser.add_argument('--target-component', type=int, action="store", help="Component ID to update")
    parser.add_argument('--source-system', type=int, action="store", help="Source system to send reboot mavlink packets from", default=255)
    parser.add_argument('--source-component', type=int, action="store", help="Source component to send reboot mavlink packets from", default=0)
    parser.add_argument('--download', action='store_true', default=False, help='download firmware from board')
    parser.add_argument('--identify', action="store_true", help="Do not flash firmware; simply dump information about board")
    parser.add_argument('firmware', nargs="?", action="store", default=None, help="Firmware file to be uploaded")
    args = parser.parse_args()

    # warn people about ModemManager which interferes badly with Pixhawk
    modemmanager_check()

    if args.firmware is None and not args.identify:
        # parser.error() already exits; the sys.exit is a belt-and-braces fallback
        parser.error("Firmware filename required for upload or download")
        sys.exit(1)

    # Load the firmware file
    if not args.download and not args.identify:
        fw = firmware(args.firmware)
        print("Loaded firmware for %x,%x, size: %d bytes, waiting for the bootloader..." % (fw.property('board_id'), fw.property('board_revision'), fw.property('image_size')))
    print("If the board does not respond within 1-2 seconds, unplug and re-plug the USB connector.")

    baud_flightstack = [int(x) for x in args.baud_flightstack.split(',')]

    # Spin waiting for a device to show up
    try:
        while True:
            for port in ports_to_try(args):

                # print("Trying %s" % port)

                # create an uploader attached to the port
                try:
                    up = uploader(port,
                                  args.baud_bootloader,
                                  baud_flightstack,
                                  args.baud_bootloader_flash,
                                  args.target_system,
                                  args.target_component,
                                  args.source_system,
                                  args.source_component)

                except Exception as e:
                    if not is_WSL:
                        # open failed, WSL must cycle through all ttyS* ports quickly but rate limit everything else
                        print("Exception creating uploader: %s" % str(e))
                        time.sleep(0.05)

                    # and loop to the next port
                    continue

                if not find_bootloader(up, port):
                    # Go to the next port
                    continue

                try:
                    # ok, we have a bootloader, try flashing it
                    if args.identify:
                        up.dump_board_info()
                    elif args.download:
                        up.download(args.firmware)
                    else:
                        up.upload(fw, force=args.force, boot_delay=args.boot_delay)

                except RuntimeError as ex:
                    # print the error
                    print("\nERROR: %s" % ex.args)

                except IOError:
                    up.close()
                    continue

                finally:
                    # always close the port
                    up.close()

                # we could loop here if we wanted to wait for more boards...
                sys.exit(0)

            # Delay retries to < 20 Hz to prevent spin-lock from hogging the CPU
            time.sleep(0.05)

    # CTRL+C aborts the upload/spin-lock by interrupt mechanics
    except KeyboardInterrupt:
        print("\n Upload aborted by user.")
        sys.exit(0)
# script entry point when run directly (the module is also imported elsewhere)
if __name__ == '__main__':
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| gpl-3.0 |
joonro/PyTables | tables/tests/test_tree.py | 6 | 37679 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
import tempfile
import warnings
import tables
from tables import Group, Leaf, Table, Array
from tables import StringCol, IntCol, Int16Col, FloatCol, Float32Col
from tables.tests import common
from tables.tests.common import unittest
from tables.tests.common import PyTablesTestCase as TestCase
# Test Record class
class Record(tables.IsDescription):
    """Row description used by the sample tables created in the tree tests."""

    var1 = StringCol(itemsize=4)  # 4-character String
    var2 = IntCol()               # integer
    var3 = Int16Col()             # short integer
    var4 = FloatCol()             # double (double-precision)
    var5 = Float32Col()           # float (single-precision)
class TreeTestCase(common.TempFileMixin, TestCase):
open_mode = "w"
title = "This is the table title"
expectedrows = 10
appendrows = 5
def setUp(self):
super(TreeTestCase, self).setUp()
# Create an instance of HDF5 Table
self.populateFile()
self.h5file.close()
def populateFile(self):
group = self.h5file.root
maxshort = 1 << 15
# maxint = 2147483647 # (2 ** 31 - 1)
for j in range(3):
# Create a table
table = self.h5file.create_table(group, 'table'+str(j), Record,
title=self.title,
filters=None,
expectedrows=self.expectedrows)
# Get the record object associated with the new table
d = table.row
# Fill the table
for i in xrange(self.expectedrows):
d['var1'] = '%04d' % (self.expectedrows - i)
d['var2'] = i
d['var3'] = i % maxshort
d['var4'] = float(i)
d['var5'] = float(i)
d.append() # This injects the Record values
# Flush the buffer for this table
table.flush()
# Create a couple of arrays in each group
var1List = [x['var1'] for x in table.iterrows()]
var4List = [x['var4'] for x in table.iterrows()]
self.h5file.create_array(group, 'var1', var1List, "1")
self.h5file.create_array(group, 'var4', var4List, "4")
# Create a new group (descendant of group)
group2 = self.h5file.create_group(group, 'group'+str(j))
# Iterate over this new group (group2)
group = group2
def test00_getNode(self):
"""Checking the File.get_node() with string node names"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_getNode..." % self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
nodelist = ['/', '/table0', '/group0/var1', '/group0/group1/var4']
nodenames = []
for node in nodelist:
object = self.h5file.get_node(node)
nodenames.append(object._v_pathname)
self.assertEqual(nodenames, nodelist)
if common.verbose:
print("get_node(pathname) test passed")
nodegroups = [
'/', '/group0', '/group0/group1', '/group0/group1/group2']
nodenames = ['var1', 'var4']
nodepaths = []
for group in nodegroups:
for name in nodenames:
try:
object = self.h5file.get_node(group, name)
except LookupError:
pass
else:
nodepaths.append(object._v_pathname)
self.assertEqual(nodepaths,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("get_node(groupname, name) test passed")
nodelist = ['/', '/group0', '/group0/group1', '/group0/group1/group2',
'/table0']
nodenames = []
groupobjects = []
# warnings.filterwarnings("error", category=UserWarning)
for node in nodelist:
try:
object = self.h5file.get_node(node, classname='Group')
except LookupError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next LookupError was catched!")
print(value)
else:
nodenames.append(object._v_pathname)
groupobjects.append(object)
self.assertEqual(nodenames,
['/', '/group0', '/group0/group1',
'/group0/group1/group2'])
if common.verbose:
print("get_node(groupname, classname='Group') test passed")
# Reset the warning
# warnings.filterwarnings("default", category=UserWarning)
nodenames = ['var1', 'var4']
nodearrays = []
for group in groupobjects:
for name in nodenames:
try:
object = self.h5file.get_node(group, name, 'Array')
except:
pass
else:
nodearrays.append(object._v_pathname)
self.assertEqual(nodearrays,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("get_node(groupobject, name, classname='Array') test passed")
def test01_getNodeClass(self):
"""Checking the File.get_node() with instances"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_getNodeClass..." %
self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
# This tree ways of get_node usage should return a table instance
table = self.h5file.get_node("/group0/table1")
self.assertTrue(isinstance(table, Table))
table = self.h5file.get_node("/group0", "table1")
self.assertTrue(isinstance(table, Table))
table = self.h5file.get_node(self.h5file.root.group0, "table1")
self.assertTrue(isinstance(table, Table))
# This should return an array instance
arr = self.h5file.get_node("/group0/var1")
self.assertTrue(isinstance(arr, Array))
self.assertTrue(isinstance(arr, Leaf))
# And this a Group
group = self.h5file.get_node("/group0", "group1", "Group")
self.assertTrue(isinstance(group, Group))
def test02_listNodes(self):
"""Checking the File.list_nodes() method"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02_listNodes..." % self.__class__.__name__)
# Made the warnings to raise an error
# warnings.filterwarnings("error", category=UserWarning)
self.h5file = tables.open_file(self.h5fname, "r")
self.assertRaises(TypeError,
self.h5file.list_nodes, '/', 'NoSuchClass')
nodelist = ['/', '/group0', '/group0/table1', '/group0/group1/group2',
'/var1']
nodenames = []
objects = []
for node in nodelist:
try:
objectlist = self.h5file.list_nodes(node)
except:
pass
else:
objects.extend(objectlist)
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0', '/table0', '/var1', '/var4',
'/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4'])
if common.verbose:
print("list_nodes(pathname) test passed")
nodenames = []
for node in objects:
try:
objectlist = self.h5file.list_nodes(node)
except:
pass
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/group2', '/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("list_nodes(groupobject) test passed")
nodenames = []
for node in objects:
try:
objectlist = self.h5file.list_nodes(node, 'Leaf')
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("list_nodes(groupobject, classname = 'Leaf') test passed")
nodenames = []
for node in objects:
try:
objectlist = self.h5file.list_nodes(node, 'Table')
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1', '/group0/group1/table2'])
if common.verbose:
print("list_nodes(groupobject, classname = 'Table') test passed")
# Reset the warning
# warnings.filterwarnings("default", category=UserWarning)
def test02b_iterNodes(self):
"""Checking the File.iter_nodes() method"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02b_iterNodes..." % self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
self.assertRaises(TypeError,
self.h5file.list_nodes, '/', 'NoSuchClass')
nodelist = ['/', '/group0', '/group0/table1', '/group0/group1/group2',
'/var1']
nodenames = []
objects = []
for node in nodelist:
try:
objectlist = [o for o in self.h5file.iter_nodes(node)]
except:
pass
else:
objects.extend(objectlist)
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0', '/table0', '/var1', '/var4',
'/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4'])
if common.verbose:
print("iter_nodes(pathname) test passed")
nodenames = []
for node in objects:
try:
objectlist = [o for o in self.h5file.iter_nodes(node)]
except:
pass
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/group2', '/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("iter_nodes(groupobject) test passed")
nodenames = []
for node in objects:
try:
objectlist = [o for o in self.h5file.iter_nodes(node, 'Leaf')]
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("iter_nodes(groupobject, classname = 'Leaf') test passed")
nodenames = []
for node in objects:
try:
objectlist = [o for o in self.h5file.iter_nodes(node, 'Table')]
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1', '/group0/group1/table2'])
if common.verbose:
print("iter_nodes(groupobject, classname = 'Table') test passed")
# Reset the warning
# warnings.filterwarnings("default", category=UserWarning)
def test03_TraverseTree(self):
"""Checking the File.walk_groups() method"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03_TraverseTree..." %
self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
groups = []
tables_ = []
arrays = []
for group in self.h5file.walk_groups():
groups.append(group._v_pathname)
for table in self.h5file.list_nodes(group, 'Table'):
tables_.append(table._v_pathname)
for arr in self.h5file.list_nodes(group, 'Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/", "/group0", "/group0/group1",
"/group0/group1/group2"])
self.assertEqual(
tables_,
["/table0", "/group0/table1", "/group0/group1/table2"])
self.assertEqual(arrays,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("walk_groups() test passed")
groups = []
tables_ = []
arrays = []
for group in self.h5file.walk_groups("/group0/group1"):
groups.append(group._v_pathname)
for table in self.h5file.list_nodes(group, 'Table'):
tables_.append(table._v_pathname)
for arr in self.h5file.list_nodes(group, 'Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/group0/group1", "/group0/group1/group2"])
self.assertEqual(tables_, ["/group0/group1/table2"])
self.assertEqual(arrays, [
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("walk_groups(pathname) test passed")
def test04_walkNodes(self):
"""Checking File.walk_nodes"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04_walkNodes..." % self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
self.assertRaises(TypeError,
self.h5file.walk_nodes('/', 'NoSuchClass').next)
groups = []
tables1 = []
tables2 = []
arrays = []
for group in self.h5file.walk_nodes(classname="Group"):
groups.append(group._v_pathname)
for table in group._f_iter_nodes(classname='Table'):
tables1.append(table._v_pathname)
# Test the recursivity
for table in self.h5file.root._f_walknodes('Table'):
tables2.append(table._v_pathname)
for arr in self.h5file.walk_nodes(classname='Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/", "/group0", "/group0/group1",
"/group0/group1/group2"])
self.assertEqual(tables1,
["/table0", "/group0/table1",
"/group0/group1/table2"])
self.assertEqual(tables2,
["/table0", "/group0/table1",
"/group0/group1/table2"])
self.assertEqual(arrays,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("File.__iter__() and Group.__iter__ test passed")
groups = []
tables_ = []
arrays = []
for group in self.h5file.walk_nodes("/group0/group1",
classname="Group"):
groups.append(group._v_pathname)
for table in group._f_walknodes('Table'):
tables_.append(table._v_pathname)
for arr in self.h5file.walk_nodes(group, 'Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/group0/group1", "/group0/group1/group2"])
self.assertEqual(tables_, ["/group0/group1/table2"])
self.assertEqual(arrays, [
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("walk_nodes(pathname, classname) test passed")
class DeepTreeTestCase(common.TempFileMixin, TestCase):
"""Checks for deep hierarchy levels in PyTables trees."""
def setUp(self):
super(DeepTreeTestCase, self).setUp()
# Here we put a more conservative limit to deal with more platforms
# With maxdepth = 64 this test would take less than 40 MB
# of main memory to run, which is quite reasonable nowadays.
# With maxdepth = 1024 this test will take around 300 MB.
if common.heavy:
self.maxdepth = 256 # Takes around 60 MB of memory!
else:
self.maxdepth = 64 # This should be safe for most machines
if common.verbose:
print("Maximum depth tested :", self.maxdepth)
# Open a new empty HDF5 file
group = self.h5file.root
if common.verbose:
print("Depth writing progress: ", end=' ')
# Iterate until maxdepth
for depth in range(self.maxdepth):
# Save it on the HDF5 file
if common.verbose:
print("%3d," % (depth), end=' ')
# Create a couple of arrays here
self.h5file.create_array(
group, 'array', [1, 1], "depth: %d" % depth)
self.h5file.create_array(
group, 'array2', [1, 1], "depth: %d" % depth)
# And also a group
self.h5file.create_group(group, 'group2_' + str(depth))
# Finally, iterate over a new group
group = self.h5file.create_group(group, 'group' + str(depth))
# Close the file
self.h5file.close()
def _check_tree(self, filename):
# Open the previous HDF5 file in read-only mode
with tables.open_file(filename, mode="r") as h5file:
group = h5file.root
if common.verbose:
print("\nDepth reading progress: ", end=' ')
# Get the metadata on the previosly saved arrays
for depth in range(self.maxdepth):
if common.verbose:
print("%3d," % (depth), end=' ')
# Check the contents
self.assertEqual(group.array[:], [1, 1])
self.assertTrue("array2" in group)
self.assertTrue("group2_"+str(depth) in group)
# Iterate over the next group
group = h5file.get_node(group, 'group' + str(depth))
if common.verbose:
print() # This flush the stdout buffer
def test00_deepTree(self):
"""Creation of a large depth object tree."""
self._check_tree(self.h5fname)
def test01a_copyDeepTree(self):
"""Copy of a large depth object tree."""
self.h5file = tables.open_file(self.h5fname, mode="r")
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w") as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
def test01b_copyDeepTree(self):
"""Copy of a large depth object tree with small node cache."""
self.h5file = tables.open_file(self.h5fname, mode="r",
node_cache_slots=10)
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w",
node_cache_slots=10) as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
def test01c_copyDeepTree(self):
"""Copy of a large depth object tree with no node cache."""
self.h5file = tables.open_file(self.h5fname, mode="r",
node_cache_slots=0)
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w",
node_cache_slots=0) as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
@unittest.skipUnless(common.heavy, 'only in heavy mode')
def test01d_copyDeepTree(self):
"""Copy of a large depth object tree with static node cache."""
self.h5file = tables.open_file(self.h5fname, mode="r",
node_cache_slots=-256)
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w",
node_cache_slots=-256) as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
class WideTreeTestCase(common.TempFileMixin, TestCase):
"""Checks for maximum number of children for a Group."""
def test00_Leafs(self):
"""Checking creation of large number of leafs (1024) per group.
Variable 'maxchildren' controls this check. PyTables support up
to 4096 children per group, but this would take too much memory
(up to 64 MB) for testing purposes (may be we can add a test for
big platforms). A 1024 children run takes up to 30 MB. A 512
children test takes around 25 MB.
"""
if common.heavy:
maxchildren = 4096
else:
maxchildren = 256
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_wideTree..." %
self.__class__.__name__)
print("Maximum number of children tested :", maxchildren)
a = [1, 1]
if common.verbose:
print("Children writing progress: ", end=' ')
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
self.h5file.create_array(self.h5file.root, 'array' + str(child),
a, "child: %d" % child)
if common.verbose:
print()
t1 = time.time()
a = [1, 1]
# Open the previous HDF5 file in read-only mode
self._reopen()
if common.verbose:
print("\nTime spent opening a file with %d arrays: %s s" %
(maxchildren, time.time()-t1))
print("\nChildren reading progress: ", end=' ')
# Get the metadata on the previosly saved arrays
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
# Create an array for later comparison
# Get the actual array
array_ = getattr(self.h5file.root, 'array' + str(child))
b = array_.read()
# Arrays a and b must be equal
self.assertEqual(a, b)
if common.verbose:
print() # This flush the stdout buffer
def test01_wideTree(self):
"""Checking creation of large number of groups (1024) per group.
Variable 'maxchildren' controls this check. PyTables support up
to 4096 children per group, but this would take too much memory
(up to 64 MB) for testing purposes (may be we can add a test for
big platforms). A 1024 children run takes up to 30 MB. A 512
children test takes around 25 MB.
"""
if common.heavy:
# for big platforms!
maxchildren = 4096
else:
# for standard platforms
maxchildren = 256
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_wideTree..." %
self.__class__.__name__)
print("Maximum number of children tested :", maxchildren)
if common.verbose:
print("Children writing progress: ", end=' ')
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
self.h5file.create_group(self.h5file.root, 'group' + str(child),
"child: %d" % child)
if common.verbose:
print()
t1 = time.time()
# Open the previous HDF5 file in read-only mode
self._reopen()
if common.verbose:
print("\nTime spent opening a file with %d groups: %s s" %
(maxchildren, time.time()-t1))
print("\nChildren reading progress: ", end=' ')
# Get the metadata on the previosly saved arrays
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
# Get the actual group
group = getattr(self.h5file.root, 'group' + str(child))
# Arrays a and b must be equal
self.assertEqual(group._v_title, "child: %d" % child)
if common.verbose:
print() # This flush the stdout buffer
class HiddenTreeTestCase(common.TempFileMixin, TestCase):
"""Check for hidden groups, leaves and hierarchies."""
def setUp(self):
super(HiddenTreeTestCase, self).setUp()
self.visible = [] # list of visible object paths
self.hidden = [] # list of hidden object paths
# Create some visible nodes: a, g, g/a1, g/a2, g/g, g/g/a.
h5f = self.h5file
h5f.create_array('/', 'a', [0])
g = h5f.create_group('/', 'g')
h5f.create_array(g, 'a1', [0])
h5f.create_array(g, 'a2', [0])
g_g = h5f.create_group(g, 'g')
h5f.create_array(g_g, 'a', [0])
self.visible.extend(['/a', '/g', '/g/a1', '/g/a2', '/g/g', '/g/g/a'])
# Create some hidden nodes: _p_a, _p_g, _p_g/a, _p_g/_p_a, g/_p_a.
h5f.create_array('/', '_p_a', [0])
hg = h5f.create_group('/', '_p_g')
h5f.create_array(hg, 'a', [0])
h5f.create_array(hg, '_p_a', [0])
h5f.create_array(g, '_p_a', [0])
self.hidden.extend(
['/_p_a', '/_p_g', '/_p_g/a', '/_p_g/_p_a', '/g/_p_a'])
# The test behind commented out because the .objects dictionary
# has been removed (as well as .leaves and .groups)
def _test00_objects(self):
"""Absence of hidden nodes in `File.objects`."""
objects = self.h5file.objects
warnings.filterwarnings('ignore', category=DeprecationWarning)
for vpath in self.visible:
self.assertTrue(
vpath in objects,
"Missing visible node ``%s`` from ``File.objects``." % vpath)
for hpath in self.hidden:
self.assertTrue(
hpath not in objects,
"Found hidden node ``%s`` in ``File.objects``." % hpath)
warnings.filterwarnings('default', category=DeprecationWarning)
# The test behind commented out because the .objects dictionary
# has been removed (as well as .leaves and .groups)
def _test00b_objects(self):
"""Object dictionaries conformance with ``walk_nodes()``."""
def dictCheck(dictName, classname):
file_ = self.h5file
objects = getattr(file_, dictName)
walkPaths = [node._v_pathname
for node in file_.walk_nodes('/', classname)]
dictPaths = [path for path in objects]
walkPaths.sort()
dictPaths.sort()
self.assertEqual(
walkPaths, dictPaths,
"nodes in ``%s`` do not match those from ``walk_nodes()``"
% dictName)
self.assertEqual(
len(walkPaths), len(objects),
"length of ``%s`` differs from that of ``walk_nodes()``"
% dictName)
warnings.filterwarnings('ignore', category=DeprecationWarning)
dictCheck('objects', None)
dictCheck('groups', 'Group')
dictCheck('leaves', 'Leaf')
warnings.filterwarnings('default', category=DeprecationWarning)
def test01_getNode(self):
"""Node availability via `File.get_node()`."""
h5f = self.h5file
for vpath in self.visible:
h5f.get_node(vpath)
for hpath in self.hidden:
h5f.get_node(hpath)
def test02_walkGroups(self):
"""Hidden group absence in `File.walk_groups()`."""
hidden = self.hidden
for group in self.h5file.walk_groups('/'):
pathname = group._v_pathname
self.assertTrue(pathname not in hidden,
"Walked across hidden group ``%s``." % pathname)
def test03_walkNodes(self):
"""Hidden node absence in `File.walk_nodes()`."""
hidden = self.hidden
for node in self.h5file.walk_nodes('/'):
pathname = node._v_pathname
self.assertTrue(pathname not in hidden,
"Walked across hidden node ``%s``." % pathname)
def test04_listNodesVisible(self):
"""Listing visible nodes under a visible group (list_nodes)."""
hidden = self.hidden
for node in self.h5file.list_nodes('/g'):
pathname = node._v_pathname
self.assertTrue(pathname not in hidden,
"Listed hidden node ``%s``." % pathname)
def test04b_listNodesVisible(self):
"""Listing visible nodes under a visible group (iter_nodes)."""
hidden = self.hidden
for node in self.h5file.iter_nodes('/g'):
pathname = node._v_pathname
self.assertTrue(pathname not in hidden,
"Listed hidden node ``%s``." % pathname)
def test05_listNodesHidden(self):
"""Listing visible nodes under a hidden group (list_nodes)."""
hidden = self.hidden
node_to_find = '/_p_g/a'
found_node = False
for node in self.h5file.list_nodes('/_p_g'):
pathname = node._v_pathname
if pathname == node_to_find:
found_node = True
self.assertTrue(pathname in hidden,
"Listed hidden node ``%s``." % pathname)
self.assertTrue(found_node,
"Hidden node ``%s`` was not listed." % node_to_find)
def test05b_iterNodesHidden(self):
"""Listing visible nodes under a hidden group (iter_nodes)."""
hidden = self.hidden
node_to_find = '/_p_g/a'
found_node = False
for node in self.h5file.iter_nodes('/_p_g'):
pathname = node._v_pathname
if pathname == node_to_find:
found_node = True
self.assertTrue(pathname in hidden,
"Listed hidden node ``%s``." % pathname)
self.assertTrue(found_node,
"Hidden node ``%s`` was not listed." % node_to_find)
# The test behind commented out because the .objects dictionary
# has been removed (as well as .leaves and .groups)
def _test06_reopen(self):
"""Reopening a file with hidden nodes."""
self.h5file.close()
self.h5file = tables.open_file(self.h5fname)
self.test00_objects()
def test07_move(self):
"""Moving a node between hidden and visible groups."""
is_visible_node = self.h5file.is_visible_node
self.assertFalse(is_visible_node('/_p_g/a'))
self.h5file.move_node('/_p_g/a', '/g', 'a')
self.assertTrue(is_visible_node('/g/a'))
self.h5file.move_node('/g/a', '/_p_g', 'a')
self.assertFalse(is_visible_node('/_p_g/a'))
def test08_remove(self):
"""Removing a visible group with hidden children."""
self.assertTrue('/g/_p_a' in self.h5file)
self.h5file.root.g._f_remove(recursive=True)
self.assertFalse('/g/_p_a' in self.h5file)
class CreateParentsTestCase(common.TempFileMixin, TestCase):
    """Test the ``createparents`` flag.

    These are mainly for the user interface. More thorough tests on the
    workings of the flag can be found in the ``test_do_undo.py`` module.
    """

    filters = tables.Filters(complevel=4)  # simply non-default

    def setUp(self):
        """Create a sample array plus a group carrying non-default filters."""
        super(CreateParentsTestCase, self).setUp()
        self.h5file.create_array('/', 'array', [1])
        self.h5file.create_group('/', 'group', filters=self.filters)

    def test00_parentType(self):
        """Using the right type of parent node argument."""
        h5file, root = self.h5file, self.h5file.root
        # A Group object (instead of a path string) must be rejected when
        # ``createparents`` is requested.
        self.assertRaises(TypeError, h5file.create_array,
                          root.group, 'arr', [1], createparents=True)
        self.assertRaises(TypeError, h5file.copy_node,
                          '/array', root.group, createparents=True)
        self.assertRaises(TypeError, h5file.move_node,
                          '/array', root.group, createparents=True)
        self.assertRaises(TypeError, h5file.copy_children,
                          '/group', root, createparents=True)

    def test01_inside(self):
        """Placing a node inside a nonexistent child of itself."""
        self.assertRaises(tables.NodeError, self.h5file.move_node,
                          '/group', '/group/foo/bar',
                          createparents=True)
        self.assertFalse('/group/foo' in self.h5file)
        self.assertRaises(tables.NodeError, self.h5file.copy_node,
                          '/group', '/group/foo/bar',
                          recursive=True, createparents=True)
        # Bug fix: the original tested membership in ``self.h5fname`` (a
        # filename string), which made the assertion vacuous.  The intent,
        # mirroring the check after ``move_node`` above, is that no parent
        # group was created in the file.
        self.assertFalse('/group/foo' in self.h5file)

    def test02_filters(self):
        """Propagating the filters of created parent groups."""
        self.h5file.create_group('/group/foo/bar', 'baz', createparents=True)
        self.assertTrue('/group/foo/bar/baz' in self.h5file)
        for group in self.h5file.walk_groups('/group'):
            self.assertEqual(self.filters, group._v_filters)
def suite():
    """Build and return the test suite for this module."""
    test_cases = (
        TreeTestCase,
        DeepTreeTestCase,
        WideTreeTestCase,
        HiddenTreeTestCase,
        CreateParentsTestCase,
    )
    # Bump the repetition count when hunting for memory leaks.
    repetitions = 1
    result = unittest.TestSuite()
    for _ in range(repetitions):
        for case in test_cases:
            result.addTest(unittest.makeSuite(case))
    return result
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    common.parse_argv(sys.argv)
    common.print_versions()
    unittest.main(defaultTest='suite')
| bsd-3-clause |
mdanielwork/intellij-community | python/helpers/py2only/docutils/parsers/rst/languages/nl.py | 128 | 3708 | # $Id: nl.py 7119 2011-09-02 13:00:23Z milde $
# Author: Martijn Pieters <mjpieters@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Dutch-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Maps Dutch directive names (as typed in reStructuredText source) to the
# canonical English names registered in directives/__init__.py.  Entries
# marked "(translation required)" have no Dutch translation yet.
directives = {
    # language-dependent: fixed
    'attentie': 'attention',
    'let-op': 'caution',
    'code (translation required)': 'code',
    'gevaar': 'danger',
    'fout': 'error',
    'hint': 'hint',
    'belangrijk': 'important',
    'opmerking': 'note',
    'tip': 'tip',
    'waarschuwing': 'warning',
    'aanmaning': 'admonition',
    'katern': 'sidebar',
    'onderwerp': 'topic',
    'lijn-blok': 'line-block',
    'letterlijk-ontleed': 'parsed-literal',
    'rubriek': 'rubric',
    'opschrift': 'epigraph',
    'hoogtepunten': 'highlights',
    'pull-quote': 'pull-quote',  # Dutch printers use the english term
    'samenstelling': 'compound',
    'verbinding': 'compound',
    u'container (translation required)': 'container',
    #'vragen': 'questions',
    'tabel': 'table',
    'csv-tabel': 'csv-table',
    'lijst-tabel': 'list-table',
    #'veelgestelde-vragen': 'questions',
    'meta': 'meta',
    'math (translation required)': 'math',
    #'imagemap': 'imagemap',
    'beeld': 'image',
    'figuur': 'figure',
    'opnemen': 'include',
    'onbewerkt': 'raw',
    'vervang': 'replace',
    'vervanging': 'replace',
    'unicode': 'unicode',
    'datum': 'date',
    'klasse': 'class',
    'rol': 'role',
    u'default-role (translation required)': 'default-role',
    'title (translation required)': 'title',
    'inhoud': 'contents',
    'sectnum': 'sectnum',
    'sectie-nummering': 'sectnum',
    'hoofdstuk-nummering': 'sectnum',
    u'header (translation required)': 'header',
    u'footer (translation required)': 'footer',
    #'voetnoten': 'footnotes',
    #'citaten': 'citations',
    'verwijzing-voetnoten': 'target-notes',
    'restructuredtext-test-instructie': 'restructuredtext-test-directive'}
"""Dutch name to registered (in directives/__init__.py) directive name
mapping."""
# Maps Dutch interpreted-text role names to canonical English role names.
roles = {
    # language-dependent: fixed
    'afkorting': 'abbreviation',
    # 'ab': 'abbreviation',
    'acroniem': 'acronym',
    'ac': 'acronym',
    u'code (translation required)': 'code',
    'index': 'index',
    'i': 'index',
    'inferieur': 'subscript',
    'inf': 'subscript',
    'superieur': 'superscript',
    'sup': 'superscript',
    'titel-referentie': 'title-reference',
    'titel': 'title-reference',
    't': 'title-reference',
    'pep-referentie': 'pep-reference',
    'pep': 'pep-reference',
    'rfc-referentie': 'rfc-reference',
    'rfc': 'rfc-reference',
    'nadruk': 'emphasis',
    'extra': 'strong',
    'extra-nadruk': 'strong',
    'vet': 'strong',
    'letterlijk': 'literal',
    'math (translation required)': 'math',
    'benoemde-referentie': 'named-reference',
    'anonieme-referentie': 'anonymous-reference',
    'voetnoot-referentie': 'footnote-reference',
    'citaat-referentie': 'citation-reference',
    # Correctly spelled Dutch form ("substitutie"); the misspelled key is
    # kept below for backward compatibility with existing documents.
    'substitutie-reference': 'substitution-reference',
    'substitie-reference': 'substitution-reference',
    'verwijzing': 'target',
    'uri-referentie': 'uri-reference',
    'uri': 'uri-reference',
    'url': 'uri-reference',
    'onbewerkt': 'raw',}
"""Mapping of Dutch role names to canonical role names for interpreted text.
"""
| apache-2.0 |
chouseknecht/ansible | lib/ansible/module_utils/vmware.py | 2 | 56971 | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, James E. King III (@jeking3) <jking@apache.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import atexit
import ansible.module_utils.common._collections_compat as collections_compat
import json
import os
import re
import ssl
import time
import traceback
from random import randint
from distutils.version import StrictVersion
REQUESTS_IMP_ERR = None
try:
# requests is required for exception handling of the ConnectionError
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
PYVMOMI_IMP_ERR = None
try:
from pyVim import connect
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder')
except ImportError:
PYVMOMI_IMP_ERR = traceback.format_exc()
HAS_PYVMOMI = False
HAS_PYVMOMIJSON = False
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.basic import env_fallback, missing_required_lib
from ansible.module_utils.urls import generic_urlparse
class TaskError(Exception):
    """Raised when a vSphere task fails or times out.

    Exists purely so callers can distinguish task failures from other
    exceptions; behaves exactly like Exception otherwise.
    """

    def __init__(self, *args, **kwargs):
        # Delegate everything to Exception; no extra state is kept here.
        super(TaskError, self).__init__(*args, **kwargs)
def wait_for_task(task, max_backoff=64, timeout=3600):
    """Wait for given task using exponential back-off algorithm.

    Polls ``task.info.state`` until the task succeeds, fails, or the
    overall timeout elapses.

    Args:
        task: VMware task object
        max_backoff: Maximum amount of sleep time in seconds between polls
        timeout: Timeout for the given task in seconds

    Returns: Tuple with True and result for successful task

    Raises: TaskError on failure or timeout
    """
    failure_counter = 0
    start_time = time.time()
    while True:
        # Overall deadline is checked once per poll iteration.
        if time.time() - start_time >= timeout:
            raise TaskError("Timeout")
        if task.info.state == vim.TaskInfo.State.success:
            return True, task.info.result
        if task.info.state == vim.TaskInfo.State.error:
            error_msg = task.info.error
            host_thumbprint = None
            try:
                # Prefer the human-readable message; some faults also carry a
                # host thumbprint which is passed through to the caller.
                error_msg = error_msg.msg
                if hasattr(task.info.error, 'thumbprint'):
                    host_thumbprint = task.info.error.thumbprint
            except AttributeError:
                pass
            finally:
                # raise_from keeps the original fault as the cause for debugging.
                raise_from(TaskError(error_msg, host_thumbprint), task.info.error)
        if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
            # Jittered exponential back-off: 2^n plus up to 1s of random
            # jitter, capped at max_backoff seconds.
            sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff)
            time.sleep(sleep_time)
            failure_counter += 1
def wait_for_vm_ip(content, vm, timeout=300):
    """Poll until the guest reports an IPv4 or IPv6 address, or *timeout* elapses.

    Polls every 15 seconds via gather_vm_facts(). Returns the fact dict of
    the VM once an address is visible, otherwise an empty dict.
    """
    poll_interval = 15
    remaining = timeout
    facts = dict()
    while remaining > 0:
        current = gather_vm_facts(content, vm)
        if current['ipv4'] or current['ipv6']:
            facts = current
            break
        time.sleep(poll_interval)
        remaining -= poll_interval
    return facts
def find_obj(content, vimtype, name, first=True, folder=None):
    """Search the inventory for objects of *vimtype*, optionally filtered by name.

    Returns the first match (or None) when *first* is true, otherwise the
    full list of matches. An empty/None *name* matches every object.
    """
    search_root = folder or content.rootFolder
    view = content.viewManager.CreateContainerView(search_root, recursive=True, type=vimtype)
    matches = [item for item in view.view
               if not name or to_text(item.name) == to_text(name)]
    view.Destroy()
    if not first:
        return matches
    return matches[0] if matches else None
def find_dvspg_by_name(dv_switch, portgroup_name):
    """Return the portgroup on *dv_switch* named *portgroup_name*, or None."""
    return next((pg for pg in dv_switch.portgroup if pg.name == portgroup_name), None)
def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
    """Return the first managed object of *obj_type* whose name equals *name*.

    *obj_type* may be a single vim type or a list of types. Returns None
    when nothing matches.
    """
    if not isinstance(obj_type, list):
        obj_type = [obj_type]
    for candidate in get_all_objs(content, obj_type, folder=folder, recurse=recurse):
        if candidate.name == name:
            return candidate
    return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
    """Find a ClusterComputeResource by name, scoped to *datacenter* if given."""
    search_root = datacenter.hostFolder if datacenter else content.rootFolder
    return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=search_root)
def find_datacenter_by_name(content, datacenter_name):
    """Return the vim.Datacenter named *datacenter_name*, or None."""
    return find_object_by_name(content, datacenter_name, [vim.Datacenter])
def get_parent_datacenter(obj):
    """Walk the parent tree to find the object's datacenter, or None."""
    node = obj
    while node is not None:
        if isinstance(node, vim.Datacenter):
            return node
        # Stop climbing once a node without a parent link is reached.
        node = getattr(node, 'parent', None)
    return None
def find_datastore_by_name(content, datastore_name):
    """Return the vim.Datastore named *datastore_name*, or None."""
    return find_object_by_name(content, datastore_name, [vim.Datastore])
def find_dvs_by_name(content, switch_name, folder=None):
    """Return the distributed virtual switch named *switch_name*, or None."""
    return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch], folder=folder)
def find_hostsystem_by_name(content, hostname):
    """Return the vim.HostSystem named *hostname*, or None."""
    return find_object_by_name(content, hostname, [vim.HostSystem])
def find_resource_pool_by_name(content, resource_pool_name):
    """Return the vim.ResourcePool named *resource_pool_name*, or None."""
    return find_object_by_name(content, resource_pool_name, [vim.ResourcePool])
def find_network_by_name(content, network_name):
    """Return the vim.Network named *network_name*, or None."""
    return find_object_by_name(content, network_name, [vim.Network])
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None,
                  cluster=None, folder=None, match_first=False):
    """Find a virtual machine by one of several identifier types.

    UUID is unique to a VM, every other id returns the first match.

    Args:
        content: vSphere content object (used for its searchIndex).
        vm_id: the identifier value (name, uuid, dns name, ip, ...).
        vm_id_type: one of 'dns_name', 'uuid', 'instance_uuid', 'ip',
            'vm_name', 'inventory_path'.
        datacenter: optional datacenter object to scope the search.
        cluster: optional cluster object; only used for 'vm_name' lookups.
        folder: inventory path string; only used for 'inventory_path'.
        match_first: only honored for 'inventory_path' — stop at the first
            name match instead of keeping the last one found.

    Returns: the matching vim.VirtualMachine, or None.
    """
    si = content.searchIndex
    vm = None
    if vm_id_type == 'dns_name':
        vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
    elif vm_id_type == 'uuid':
        # Search By BIOS UUID rather than instance UUID
        vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
    elif vm_id_type == 'instance_uuid':
        vm = si.FindByUuid(datacenter=datacenter, instanceUuid=True, uuid=vm_id, vmSearch=True)
    elif vm_id_type == 'ip':
        vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
    elif vm_id_type == 'vm_name':
        # NOTE: any caller-supplied `folder` is ignored here; the search root
        # is derived from cluster/datacenter instead.
        folder = None
        if cluster:
            folder = cluster
        elif datacenter:
            folder = datacenter.hostFolder
        vm = find_vm_by_name(content, vm_id, folder)
    elif vm_id_type == 'inventory_path':
        searchpath = folder
        # get all objects for this path
        f_obj = si.FindByInventoryPath(searchpath)
        if f_obj:
            if isinstance(f_obj, vim.Datacenter):
                f_obj = f_obj.vmFolder
            for c_obj in f_obj.childEntity:
                if not isinstance(c_obj, vim.VirtualMachine):
                    continue
                if c_obj.name == vm_id:
                    vm = c_obj
                    # Without match_first, the LAST matching child wins.
                    if match_first:
                        break
    return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
    """Return the vim.VirtualMachine named *vm_name*, or None."""
    return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse)
def find_host_portgroup_by_name(host, portgroup_name):
    """Return the host portgroup whose spec name equals *portgroup_name*, or None."""
    return next(
        (pg for pg in host.config.network.portgroup if pg.spec.name == portgroup_name),
        None,
    )
def compile_folder_path_for_object(vobj):
    """ make a /vm/foo/bar/baz like folder path for an object """
    segments = []
    if isinstance(vobj, vim.Folder):
        segments.append(vobj.name)
    node = vobj
    while hasattr(node, 'parent'):
        node = node.parent
        moid = getattr(node, '_moId', None)
        # Stop at the hidden inventory root (vCenter 'group-d1',
        # ESXi 'ha-folder-root'); it is not part of the visible path.
        if moid in ['group-d1', 'ha-folder-root']:
            break
        if isinstance(node, vim.Folder):
            segments.append(node.name)
    return '/' + '/'.join(reversed(segments))
def _get_vm_prop(vm, attributes):
"""Safely get a property or return None"""
result = vm
for attribute in attributes:
try:
result = getattr(result, attribute)
except (AttributeError, IndexError):
return None
return result
def gather_vm_facts(content, vm):
    """Gather facts from a vim.VirtualMachine object.

    Builds a flat dict of hardware, guest, network, snapshot and custom-value
    facts. Optional data (HA state, files, per-NIC details) is filled in only
    when the corresponding vSphere properties are available.
    """
    # Core facts that are always readable from config/summary.
    facts = {
        'module_hw': True,
        'hw_name': vm.config.name,
        'hw_power_status': vm.summary.runtime.powerState,
        'hw_guest_full_name': vm.summary.guest.guestFullName,
        'hw_guest_id': vm.summary.guest.guestId,
        'hw_product_uuid': vm.config.uuid,
        'hw_processor_count': vm.config.hardware.numCPU,
        'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
        'hw_memtotal_mb': vm.config.hardware.memoryMB,
        'hw_interfaces': [],
        'hw_datastores': [],
        'hw_files': [],
        'hw_esxi_host': None,
        'hw_guest_ha_state': None,
        'hw_is_template': vm.config.template,
        'hw_folder': None,
        'hw_version': vm.config.version,
        'instance_uuid': vm.config.instanceUuid,
        'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
        'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
        'guest_question': vm.summary.runtime.question,
        'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
        'ipv4': None,
        'ipv6': None,
        'annotation': vm.config.annotation,
        'customvalues': {},
        'snapshots': [],
        'current_snapshot': None,
        'vnc': {},
        'moid': vm._moId,
        'vimref': "vim.VirtualMachine:%s" % vm._moId,
    }
    # facts that may or may not exist
    if vm.summary.runtime.host:
        try:
            host = vm.summary.runtime.host
            facts['hw_esxi_host'] = host.summary.config.name
            facts['hw_cluster'] = host.parent.name if host.parent and isinstance(host.parent, vim.ClusterComputeResource) else None
        except vim.fault.NoPermission:
            # User does not have read permission for the host system,
            # proceed without this value. This value does not contribute or hamper
            # provisioning or power management operations.
            pass
    if vm.summary.runtime.dasVmProtection:
        facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected
    datastores = vm.datastore
    for ds in datastores:
        facts['hw_datastores'].append(ds.info.name)
    # File layout facts are best-effort; any missing layout attribute simply
    # leaves hw_files partially populated.
    try:
        files = vm.config.files
        layout = vm.layout
        if files:
            facts['hw_files'] = [files.vmPathName]
            for item in layout.snapshot:
                for snap in item.snapshotFile:
                    # Only snapshot memory/state files (.vmsn) are recorded here.
                    if 'vmsn' in snap:
                        facts['hw_files'].append(snap)
            for item in layout.configFile:
                facts['hw_files'].append(os.path.join(os.path.dirname(files.vmPathName), item))
            for item in vm.layout.logFile:
                facts['hw_files'].append(os.path.join(files.logDirectory, item))
            for item in vm.layout.disk:
                for disk in item.diskFile:
                    facts['hw_files'].append(disk)
    except Exception:
        pass
    facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)
    cfm = content.customFieldsManager
    # Resolve custom values
    for value_obj in vm.summary.customValue:
        kn = value_obj.key
        if cfm is not None and cfm.field:
            for f in cfm.field:
                if f.key == value_obj.key:
                    kn = f.name
                    # Exit the loop immediately, we found it
                    break
        facts['customvalues'][kn] = value_obj.value
    # Map MAC address -> list of guest-reported IPs for per-NIC facts below.
    net_dict = {}
    vmnet = _get_vm_prop(vm, ('guest', 'net'))
    if vmnet:
        for device in vmnet:
            net_dict[device.macAddress] = list(device.ipAddress)
    if vm.guest.ipAddress:
        if ':' in vm.guest.ipAddress:
            facts['ipv6'] = vm.guest.ipAddress
        else:
            facts['ipv4'] = vm.guest.ipAddress
    ethernet_idx = 0
    for entry in vm.config.hardware.device:
        # Only NIC devices carry a macAddress attribute.
        if not hasattr(entry, 'macAddress'):
            continue
        if entry.macAddress:
            mac_addr = entry.macAddress
            mac_addr_dash = mac_addr.replace(':', '-')
        else:
            mac_addr = mac_addr_dash = None
        # DVS-backed NICs expose port/portgroup keys through their backing.
        if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and
                hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')):
            port_group_key = entry.backing.port.portgroupKey
            port_key = entry.backing.port.portKey
        else:
            port_group_key = None
            port_key = None
        factname = 'hw_eth' + str(ethernet_idx)
        facts[factname] = {
            'addresstype': entry.addressType,
            'label': entry.deviceInfo.label,
            'macaddress': mac_addr,
            'ipaddresses': net_dict.get(entry.macAddress, None),
            'macaddress_dash': mac_addr_dash,
            'summary': entry.deviceInfo.summary,
            'portgroup_portkey': port_key,
            'portgroup_key': port_group_key,
        }
        facts['hw_interfaces'].append('eth' + str(ethernet_idx))
        ethernet_idx += 1
    snapshot_facts = list_snapshots(vm)
    if 'snapshots' in snapshot_facts:
        facts['snapshots'] = snapshot_facts['snapshots']
        facts['current_snapshot'] = snapshot_facts['current_snapshot']
    facts['vnc'] = get_vnc_extraconfig(vm)
    return facts
def deserialize_snapshot_obj(obj):
    """Convert one snapshot tree node into a plain, serializable dict."""
    return dict(
        id=obj.id,
        name=obj.name,
        description=obj.description,
        creation_time=obj.createTime,
        state=obj.state,
    )
def list_snapshots_recursively(snapshots):
    """Flatten a snapshot tree, depth-first, into a list of plain dicts."""
    flat = []
    for snapshot in snapshots:
        flat.append(deserialize_snapshot_obj(snapshot))
        flat.extend(list_snapshots_recursively(snapshot.childSnapshotList))
    return flat
def get_current_snap_obj(snapshots, snapob):
    """Depth-first search of the snapshot tree for nodes whose .snapshot equals *snapob*."""
    found = []
    for node in snapshots:
        if node.snapshot == snapob:
            found.append(node)
        found.extend(get_current_snap_obj(node.childSnapshotList, snapob))
    return found
def list_snapshots(vm):
    """Return snapshot facts for *vm*.

    Returns {'snapshots': [...], 'current_snapshot': {...}} when the VM has
    a snapshot tree, otherwise an empty dict.
    """
    if not _get_vm_prop(vm, ('snapshot',)) or vm.snapshot is None:
        return {}
    root_list = vm.snapshot.rootSnapshotList
    result = {'snapshots': list_snapshots_recursively(root_list)}
    current = get_current_snap_obj(root_list, vm.snapshot.currentSnapshot)
    result['current_snapshot'] = deserialize_snapshot_obj(current[0]) if current else dict()
    return result
def get_vnc_extraconfig(vm):
    """Collect the RemoteDisplay.vnc.* extraConfig options into a dict.

    Returned keys are 'enabled', 'ip', 'port', 'password' — only those
    actually present in the VM's extraConfig.
    """
    wanted = ('enabled', 'ip', 'port', 'password')
    settings = {}
    for option in vm.config.extraConfig:
        key_lower = option.key.lower()
        for name in wanted:
            if key_lower == "remotedisplay.vnc." + name:
                settings[name] = option.value
    return settings
def vmware_argument_spec():
    """Return the argument spec shared by all VMware modules.

    Every option can also be supplied through the matching VMWARE_*
    environment variable via env_fallback.
    """
    spec = {}
    spec['hostname'] = dict(type='str',
                            required=False,
                            fallback=(env_fallback, ['VMWARE_HOST']),
                            )
    spec['username'] = dict(type='str',
                            aliases=['user', 'admin'],
                            required=False,
                            fallback=(env_fallback, ['VMWARE_USER']))
    spec['password'] = dict(type='str',
                            aliases=['pass', 'pwd'],
                            required=False,
                            no_log=True,
                            fallback=(env_fallback, ['VMWARE_PASSWORD']))
    spec['port'] = dict(type='int',
                        default=443,
                        fallback=(env_fallback, ['VMWARE_PORT']))
    spec['validate_certs'] = dict(type='bool',
                                  required=False,
                                  default=True,
                                  fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']))
    spec['proxy_host'] = dict(type='str',
                              required=False,
                              default=None,
                              fallback=(env_fallback, ['VMWARE_PROXY_HOST']))
    spec['proxy_port'] = dict(type='int',
                              required=False,
                              default=None,
                              fallback=(env_fallback, ['VMWARE_PROXY_PORT']))
    return spec
def connect_to_api(module, disconnect_atexit=True, return_si=False):
    """Connect to the vCenter/ESXi API using the module's standard parameters.

    Args:
        module: AnsibleModule whose params carry hostname/username/password,
            port, validate_certs and optional proxy settings.
        disconnect_atexit: register an atexit Disconnect for the session.
            Disable only in special cases (e.g. host IP change).
        return_si: when True, return (service_instance, content) instead of
            just the retrieved content.

    Fails the module (fail_json) on missing credentials, SSL/connection
    problems, or login errors.
    """
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']
    port = module.params.get('port', 443)
    validate_certs = module.params['validate_certs']
    if not hostname:
        module.fail_json(msg="Hostname parameter is missing."
                             " Please specify this parameter in task or"
                             " export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'")
    if not username:
        module.fail_json(msg="Username parameter is missing."
                             " Please specify this parameter in task or"
                             " export environment variable like 'export VMWARE_USER=ESXI_USERNAME'")
    if not password:
        module.fail_json(msg="Password parameter is missing."
                             " Please specify this parameter in task or"
                             " export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'")
    # SSL verification mode: full verification needs ssl.SSLContext (py >= 2.7.9).
    if validate_certs and not hasattr(ssl, 'SSLContext'):
        module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
                             'python or use validate_certs=false.')
    elif validate_certs:
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_REQUIRED
        ssl_context.check_hostname = True
        ssl_context.load_default_certs()
    elif hasattr(ssl, 'SSLContext'):
        # Verification explicitly disabled by the user.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_NONE
        ssl_context.check_hostname = False
    else:  # Python < 2.7.9 or RHEL/Centos < 7.4
        ssl_context = None
    service_instance = None
    proxy_host = module.params.get('proxy_host')
    proxy_port = module.params.get('proxy_port')
    connect_args = dict(
        host=hostname,
        port=port,
    )
    if ssl_context:
        connect_args.update(sslContext=ssl_context)
    msg_suffix = ''
    try:
        if proxy_host:
            # Proxied connections must go through SmartStubAdapter +
            # VimSessionOrientedStub; SmartConnect has no proxy support here.
            msg_suffix = " [proxy: %s:%d]" % (proxy_host, proxy_port)
            connect_args.update(httpProxyHost=proxy_host, httpProxyPort=proxy_port)
            smart_stub = connect.SmartStubAdapter(**connect_args)
            session_stub = connect.VimSessionOrientedStub(smart_stub, connect.VimSessionOrientedStub.makeUserLoginMethod(username, password))
            service_instance = vim.ServiceInstance('ServiceInstance', session_stub)
        else:
            connect_args.update(user=username, pwd=password)
            service_instance = connect.SmartConnect(**connect_args)
    except vim.fault.InvalidLogin as invalid_login:
        msg = "Unable to log on to vCenter or ESXi API at %s:%s " % (hostname, port)
        module.fail_json(msg="%s as %s: %s" % (msg, username, invalid_login.msg) + msg_suffix)
    except vim.fault.NoPermission as no_permission:
        module.fail_json(msg="User %s does not have required permission"
                             " to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg))
    except (requests.ConnectionError, ssl.SSLError) as generic_req_exc:
        module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc))
    except vmodl.fault.InvalidRequest as invalid_request:
        # Request is malformed
        msg = "Failed to get a response from server %s:%s " % (hostname, port)
        module.fail_json(msg="%s as request is malformed: %s" % (msg, invalid_request.msg) + msg_suffix)
    except Exception as generic_exc:
        msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port) + msg_suffix
        module.fail_json(msg="%s : %s" % (msg, generic_exc))
    if service_instance is None:
        msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port)
        module.fail_json(msg=msg + msg_suffix)
    # Disabling atexit should be used in special cases only.
    # Such as IP change of the ESXi host which removes the connection anyway.
    # Also removal significantly speeds up the return of the module
    if disconnect_atexit:
        atexit.register(connect.Disconnect, service_instance)
    if return_si:
        return service_instance, service_instance.RetrieveContent()
    return service_instance.RetrieveContent()
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
    """Run a program inside the guest OS via VMware Tools and wait for it.

    Returns a result dict with 'failed', 'msg' and — on successful start —
    'pid', 'owner', 'startTime', 'endTime' and 'exitCode'. Requires VMware
    Tools to be installed and running in the guest.
    NOTE(review): program_env is accepted but not passed to the ProgramSpec.
    """
    result = {'failed': False}
    tools_status = vm.guest.toolsStatus
    if (tools_status == 'toolsNotInstalled' or
            tools_status == 'toolsNotRunning'):
        result['failed'] = True
        result['msg'] = "VMwareTools is not installed or is not running in the guest"
        return result
    # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
    creds = vim.vm.guest.NamePasswordAuthentication(
        username=username, password=password
    )
    try:
        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
        pm = content.guestOperationsManager.processManager
        # https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
        ps = vim.vm.guest.ProcessManager.ProgramSpec(
            # programPath=program,
            # arguments=args
            programPath=program_path,
            arguments=program_args,
            workingDirectory=program_cwd,
        )
        res = pm.StartProgramInGuest(vm, creds, ps)
        result['pid'] = res
        pdata = pm.ListProcessesInGuest(vm, creds, [res])
        # wait for pid to finish
        # Polls once per second until the process reports an endTime.
        while not pdata[0].endTime:
            time.sleep(1)
            pdata = pm.ListProcessesInGuest(vm, creds, [res])
        result['owner'] = pdata[0].owner
        result['startTime'] = pdata[0].startTime.isoformat()
        result['endTime'] = pdata[0].endTime.isoformat()
        result['exitCode'] = pdata[0].exitCode
        if result['exitCode'] != 0:
            result['failed'] = True
            result['msg'] = "program exited non-zero"
        else:
            result['msg'] = "program completed successfully"
    except Exception as e:
        # Any guest-operations fault is reported back as a failed result
        # rather than raised to the caller.
        result['msg'] = str(e)
        result['failed'] = True
    return result
def serialize_spec(clonespec):
    """Serialize a clonespec or a relocation spec into plain Python types.

    Recursively walks the public, non-callable attributes of the given
    pyVmomi spec object and converts nested specs, lists, mappings and
    primitives into a dict suitable for JSON output / debugging.
    """
    data = {}
    attrs = dir(clonespec)
    attrs = [x for x in attrs if not x.startswith('_')]
    for x in attrs:
        xo = getattr(clonespec, x)
        if callable(xo):
            continue
        xt = type(xo)
        if xo is None:
            data[x] = None
        elif isinstance(xo, vim.vm.ConfigSpec):
            data[x] = serialize_spec(xo)
        elif isinstance(xo, vim.vm.RelocateSpec):
            data[x] = serialize_spec(xo)
        elif isinstance(xo, vim.vm.device.VirtualDisk):
            data[x] = serialize_spec(xo)
        elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
            data[x] = to_text(xo)
        elif isinstance(xo, vim.Description):
            data[x] = {
                'dynamicProperty': serialize_spec(xo.dynamicProperty),
                'dynamicType': serialize_spec(xo.dynamicType),
                'label': serialize_spec(xo.label),
                'summary': serialize_spec(xo.summary),
            }
        elif hasattr(xo, 'name'):
            data[x] = to_text(xo) + ':' + to_text(xo.name)
        elif isinstance(xo, vim.vm.ProfileSpec):
            pass
        elif issubclass(xt, bool):
            # BUG FIX: bool is a subclass of int, so this branch must come
            # before the generic numeric branch below. Previously booleans
            # fell into the integer branch (serialized as 0/1) and this
            # branch was unreachable.
            data[x] = xo
        elif issubclass(xt, list):
            data[x] = []
            for xe in xo:
                data[x].append(serialize_spec(xe))
        elif issubclass(xt, string_types + integer_types + (float,)):
            if issubclass(xt, integer_types):
                data[x] = int(xo)
            else:
                data[x] = to_text(xo)
        elif issubclass(xt, dict):
            # Use the same key for creation and population (was
            # data[to_text(x)] = {} followed by data[x][k] = ...).
            data[x] = {}
            for k, v in xo.items():
                k = to_text(k)
                data[x][k] = serialize_spec(v)
        else:
            data[x] = str(xt)
    return data
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
    """Locate a host by name inside the given datacenter/cluster.

    Fails the module when the datacenter or cluster cannot be found.
    Returns a (host, cluster) tuple; host is None when not present in
    the cluster.
    """
    dc = find_datacenter_by_name(content, datacenter_name)
    if dc is None:
        module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
    cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
    if cluster is None:
        module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
    matching_host = next((h for h in cluster.host if h.name == host_name), None)
    return matching_host, cluster
def set_vm_power_state(content, vm, state, force, timeout=0):
    """
    Set the power status for a VM determined by the current and
    requested states. force is forceful

    Args:
        content: vSphere content object.
        vm: vim.VirtualMachine to operate on.
        state: requested state; '-'/'_' are stripped and case is ignored,
            e.g. 'powered-on' -> 'poweredon'. Supported targets:
            poweredon, poweredoff, restarted, suspended,
            shutdownguest, rebootguest.
        force: allow transitions from intermediate power states.
        timeout: when > 0 and shutting down the guest, wait up to this many
            seconds for the VM to actually power off.

    Returns: a result dict with 'changed', 'failed', optionally 'msg',
        and 'instance' (refreshed facts) when a change happened.
    """
    facts = gather_vm_facts(content, vm)
    # Normalize both sides so 'powered-on', 'powered_on' and 'poweredOn' match.
    expected_state = state.replace('_', '').replace('-', '').lower()
    current_state = facts['hw_power_status'].lower()
    result = dict(
        changed=False,
        failed=False,
    )
    # Need Force
    if not force and current_state not in ['poweredon', 'poweredoff']:
        result['failed'] = True
        result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
        return result
    # State is not already true
    if current_state != expected_state:
        task = None
        try:
            if expected_state == 'poweredoff':
                task = vm.PowerOff()
            elif expected_state == 'poweredon':
                task = vm.PowerOn()
            elif expected_state == 'restarted':
                if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
                    task = vm.Reset()
                else:
                    result['failed'] = True
                    result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
            elif expected_state == 'suspended':
                if current_state in ('poweredon', 'poweringon'):
                    task = vm.Suspend()
                else:
                    result['failed'] = True
                    result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
            elif expected_state in ['shutdownguest', 'rebootguest']:
                # Guest-level operations need VMware Tools and a running VM.
                if current_state == 'poweredon':
                    if vm.guest.toolsRunningStatus == 'guestToolsRunning':
                        if expected_state == 'shutdownguest':
                            task = vm.ShutdownGuest()
                            if timeout > 0:
                                result.update(wait_for_poweroff(vm, timeout))
                        else:
                            task = vm.RebootGuest()
                        # Set result['changed'] immediately because
                        # shutdown and reboot return None.
                        result['changed'] = True
                    else:
                        result['failed'] = True
                        result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
                else:
                    result['failed'] = True
                    result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
            else:
                result['failed'] = True
                result['msg'] = "Unsupported expected state provided: %s" % expected_state
        except Exception as e:
            result['failed'] = True
            result['msg'] = to_text(e)
        # Guest operations return None as the task, so this only runs for
        # PowerOn/PowerOff/Reset/Suspend.
        if task:
            wait_for_task(task)
            if task.info.state == 'error':
                result['failed'] = True
                result['msg'] = task.info.error.msg
            else:
                result['changed'] = True
    # need to get new metadata if changed
    result['instance'] = gather_vm_facts(content, vm)
    return result
def wait_for_poweroff(vm, timeout=300):
    """Poll every 15s until *vm* reports poweredOff or *timeout* elapses.

    Returns an empty dict on success, or a dict with 'failed'/'msg' when
    the VM did not power off in time.
    """
    poll_interval = 15
    remaining = timeout
    while remaining > 0:
        if vm.runtime.powerState.lower() == 'poweredoff':
            return dict()
        time.sleep(poll_interval)
        remaining -= poll_interval
    return {
        'failed': True,
        'msg': 'Timeout while waiting for VM power off.',
    }
def is_integer(value, type_of='int'):
    """True when *value* converts cleanly to the given vmodl integer type."""
    try:
        VmomiSupport.vmodlTypes[type_of](value)
    except (TypeError, ValueError):
        return False
    return True
def is_boolean(value):
    """True when *value* spells a recognised boolean literal (case-insensitive)."""
    return str(value).lower() in ('true', 'on', 'yes', 'false', 'off', 'no')
def is_truthy(value):
    """True when *value* spells an affirmative literal: true/on/yes (case-insensitive)."""
    return str(value).lower() in ('true', 'on', 'yes')
class PyVmomi(object):
    def __init__(self, module):
        """
        Constructor: validate library availability, connect to the API and
        cache the service instance, content and custom field manager.

        Args:
            module: AnsibleModule instance providing connection parameters.
        """
        if not HAS_REQUESTS:
            module.fail_json(msg=missing_required_lib('requests'),
                             exception=REQUESTS_IMP_ERR)
        if not HAS_PYVMOMI:
            module.fail_json(msg=missing_required_lib('PyVmomi'),
                             exception=PYVMOMI_IMP_ERR)
        self.module = module
        self.params = module.params
        # Cached by get_vm() for reuse by subclasses.
        self.current_vm_obj = None
        self.si, self.content = connect_to_api(self.module, return_si=True)
        self.custom_field_mgr = []
        if self.content.customFieldsManager:  # not an ESXi
            self.custom_field_mgr = self.content.customFieldsManager.field
def is_vcenter(self):
"""
Check if given hostname is vCenter or ESXi host
Returns: True if given connection is with vCenter server
False if given connection is with ESXi server
"""
api_type = None
try:
api_type = self.content.about.apiType
except (vmodl.RuntimeFault, vim.fault.VimFault) as exc:
self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg)
if api_type == 'VirtualCenter':
return True
elif api_type == 'HostAgent':
return False
    def get_managed_objects_properties(self, vim_type, properties=None):
        """
        Look up a Managed Object Reference in vCenter / ESXi Environment

        :param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
        :param properties: List of properties related to vim object e.g. Name
        :return: local content object (list of ObjectContent results from the
            PropertyCollector, one per matching managed object)
        """
        # Get Root Folder
        root_folder = self.content.rootFolder
        if properties is None:
            properties = ['name']
        # Create Container View with default root folder
        mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)
        # Create Traversal spec
        # Traverses from the container view to the objects it contains
        # (path='view'), so the collector visits every object in the view.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name="traversal_spec",
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
        # Create Property Spec
        property_spec = vmodl.query.PropertyCollector.PropertySpec(
            type=vim_type,  # Type of object to retrieved
            all=False,
            pathSet=properties
        )
        # Create Object Spec
        # skip=True: the view object itself is not collected, only the
        # objects reached via the traversal spec.
        object_spec = vmodl.query.PropertyCollector.ObjectSpec(
            obj=mor,
            skip=True,
            selectSet=[traversal_spec]
        )
        # Create Filter Spec
        filter_spec = vmodl.query.PropertyCollector.FilterSpec(
            objectSet=[object_spec],
            propSet=[property_spec],
            reportMissingObjectsInResults=False
        )
        return self.content.propertyCollector.RetrieveContents([filter_spec])
# Virtual Machine related functions
    def get_vm(self):
        """
        Find unique virtual machine either by UUID, MoID or Name.
        Returns: virtual machine object if found, else None.

        Lookup priority from self.params: 'uuid' (BIOS or instance UUID
        depending on 'use_instance_uuid'), then 'name' (disambiguated by
        'folder'/'datacenter' when several VMs share the name), then 'moid'.
        The found object is also cached on self.current_vm_obj.
        """
        vm_obj = None
        user_desired_path = None
        use_instance_uuid = self.params.get('use_instance_uuid') or False
        if 'uuid' in self.params and self.params['uuid']:
            if not use_instance_uuid:
                vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
            elif use_instance_uuid:
                vm_obj = find_vm_by_id(self.content,
                                       vm_id=self.params['uuid'],
                                       vm_id_type="instance_uuid")
        elif 'name' in self.params and self.params['name']:
            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
            vms = []
            for temp_vm_object in objects:
                if len(temp_vm_object.propSet) != 1:
                    continue
                for temp_vm_object_property in temp_vm_object.propSet:
                    if temp_vm_object_property.val == self.params['name']:
                        vms.append(temp_vm_object.obj)
                        break
            # get_managed_objects_properties may return multiple virtual machine,
            # following code tries to find user desired one depending upon the folder specified.
            if len(vms) > 1:
                # We have found multiple virtual machines, decide depending upon folder value
                if self.params['folder'] is None:
                    self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, "
                                              "Folder value is a required parameter to find uniqueness "
                                              "of the virtual machine" % self.params['name'],
                                          details="Please see documentation of the vmware_guest module "
                                                  "for folder parameter.")
                # Get folder path where virtual machine is located
                # User provided folder where user thinks virtual machine is present
                user_folder = self.params['folder']
                # User defined datacenter
                user_defined_dc = self.params['datacenter']
                # User defined datacenter's object
                datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter'])
                # Get Path for Datacenter
                dcpath = compile_folder_path_for_object(vobj=datacenter_obj)
                # Nested folder does not return trailing /
                if not dcpath.endswith('/'):
                    dcpath += '/'
                if user_folder in [None, '', '/']:
                    # User provided blank value or
                    # User provided only root value, we fail
                    self.module.fail_json(msg="vmware_guest found multiple virtual machines with same "
                                              "name [%s], please specify folder path other than blank "
                                              "or '/'" % self.params['name'])
                elif user_folder.startswith('/vm/'):
                    # User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance
                    user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
                else:
                    # User defined datacenter is not nested i.e. dcpath = '/' , or
                    # User defined datacenter is nested i.e. dcpath = '/F0/DC0' or
                    # User provided folder starts with / and datacenter i.e. folder = /ha-datacenter/ or
                    # User defined folder starts with datacenter without '/' i.e.
                    # folder = DC0/vm/india/finance or
                    # folder = DC0/vm
                    user_desired_path = user_folder
                for vm in vms:
                    # Check if user has provided same path as virtual machine
                    actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm)
                    if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)):
                        continue
                    if user_desired_path in actual_vm_folder_path:
                        vm_obj = vm
                        break
            elif vms:
                # Unique virtual machine found.
                vm_obj = vms[0]
        elif 'moid' in self.params and self.params['moid']:
            # Build a VirtualMachine stub directly from the managed object id.
            vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.params['moid'], self.si._stub)
        if vm_obj:
            self.current_vm_obj = vm_obj
        return vm_obj
def gather_facts(self, vm):
"""
Gather facts of virtual machine.
Args:
vm: Name of virtual machine.
Returns: Facts dictionary of the given virtual machine.
"""
return gather_vm_facts(self.content, vm)
@staticmethod
def get_vm_path(content, vm_name):
"""
Find the path of virtual machine.
Args:
content: VMware content object
vm_name: virtual machine managed object
Returns: Folder of virtual machine if exists, else None
"""
folder_name = None
folder = vm_name.parent
if folder:
folder_name = folder.name
fp = folder.parent
# climb back up the tree to find our path, stop before the root folder
while fp is not None and fp.name is not None and fp != content.rootFolder:
folder_name = fp.name + '/' + folder_name
try:
fp = fp.parent
except Exception:
break
folder_name = '/' + folder_name
return folder_name
    def get_vm_or_template(self, template_name=None):
        """
        Find the virtual machine or virtual machine template using name
        used for cloning purpose.

        Lookup order: inventory path (when the name contains '/'), then
        BIOS UUID, then a global name search. Fails the module when the
        name is ambiguous.

        Args:
            template_name: Name of virtual machine or virtual machine template

        Returns: virtual machine or virtual machine template object, else None
        """
        template_obj = None
        if not template_name:
            return template_obj
        if "/" in template_name:
            # Treat the value as "<folder path>/<vm name>".
            vm_obj_path = os.path.dirname(template_name)
            vm_obj_name = os.path.basename(template_name)
            template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path)
            if template_obj:
                return template_obj
        else:
            template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid")
            if template_obj:
                return template_obj
            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
            templates = []
            for temp_vm_object in objects:
                if len(temp_vm_object.propSet) != 1:
                    continue
                for temp_vm_object_property in temp_vm_object.propSet:
                    if temp_vm_object_property.val == template_name:
                        templates.append(temp_vm_object.obj)
                        break
            if len(templates) > 1:
                # We have found multiple virtual machine templates
                self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name)
            elif templates:
                template_obj = templates[0]
        return template_obj
# Cluster related functions
def find_cluster_by_name(self, cluster_name, datacenter_name=None):
"""
Find Cluster by name in given datacenter
Args:
cluster_name: Name of cluster name to find
datacenter_name: (optional) Name of datacenter
Returns: True if found
"""
return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name)
def get_all_hosts_by_cluster(self, cluster_name):
"""
Get all hosts from cluster by cluster name
Args:
cluster_name: Name of cluster
Returns: List of hosts
"""
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
return [host for host in cluster_obj.host]
else:
return []
# Hosts related functions
def find_hostsystem_by_name(self, host_name):
"""
Find Host by name
Args:
host_name: Name of ESXi host
Returns: True if found
"""
return find_hostsystem_by_name(self.content, hostname=host_name)
def get_all_host_objs(self, cluster_name=None, esxi_host_name=None):
"""
Get all host system managed object
Args:
cluster_name: Name of Cluster
esxi_host_name: Name of ESXi server
Returns: A list of all host system managed objects, else empty list
"""
host_obj_list = []
if not self.is_vcenter():
hosts = get_all_objs(self.content, [vim.HostSystem]).keys()
if hosts:
host_obj_list.append(list(hosts)[0])
else:
if cluster_name:
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
host_obj_list = [host for host in cluster_obj.host]
else:
self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
elif esxi_host_name:
if isinstance(esxi_host_name, str):
esxi_host_name = [esxi_host_name]
for host in esxi_host_name:
esxi_host_obj = self.find_hostsystem_by_name(host_name=host)
if esxi_host_obj:
host_obj_list.append(esxi_host_obj)
else:
self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host)
return host_obj_list
def host_version_at_least(self, version=None, vm_obj=None, host_name=None):
"""
Check that the ESXi Host is at least a specific version number
Args:
vm_obj: virtual machine object, required one of vm_obj, host_name
host_name (string): ESXi host name
version (tuple): a version tuple, for example (6, 7, 0)
Returns: bool
"""
if vm_obj:
host_system = vm_obj.summary.runtime.host
elif host_name:
host_system = self.find_hostsystem_by_name(host_name=host_name)
else:
self.module.fail_json(msg='VM object or ESXi host name must be set one.')
if host_system and version:
host_version = host_system.summary.config.product.version
return StrictVersion(host_version) >= StrictVersion('.'.join(map(str, version)))
else:
self.module.fail_json(msg='Unable to get the ESXi host from vm: %s, or hostname %s,'
'or the passed ESXi version: %s is None.' % (vm_obj, host_name, version))
# Network related functions
@staticmethod
def find_host_portgroup_by_name(host, portgroup_name):
"""
Find Portgroup on given host
Args:
host: Host config object
portgroup_name: Name of portgroup
Returns: True if found else False
"""
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return False
def get_all_port_groups_by_host(self, host_system):
"""
Get all Port Group by host
Args:
host_system: Name of Host System
Returns: List of Port Group Spec
"""
pgs_list = []
for pg in host_system.config.network.portgroup:
pgs_list.append(pg)
return pgs_list
    def find_network_by_name(self, network_name=None):
        """
        Get network specified by name
        Args:
            network_name: Name of network
        Returns: List of network managed objects whose 'name' property equals
            network_name; empty list when no name is given or nothing matches
        """
        networks = []
        if not network_name:
            return networks
        # Property-collector query: one result per vim.Network, carrying only
        # its 'name' property (cheaper than fetching full objects).
        objects = self.get_managed_objects_properties(vim_type=vim.Network, properties=['name'])
        for temp_vm_object in objects:
            # Defensive: exactly one property ('name') was requested.
            if len(temp_vm_object.propSet) != 1:
                continue
            for temp_vm_object_property in temp_vm_object.propSet:
                if temp_vm_object_property.val == network_name:
                    networks.append(temp_vm_object.obj)
                    break
        return networks
def network_exists_by_name(self, network_name=None):
"""
Check if network with a specified name exists or not
Args:
network_name: Name of network
Returns: True if network exists else False
"""
ret = False
if not network_name:
return ret
ret = True if self.find_network_by_name(network_name=network_name) else False
return ret
# Datacenter
    def find_datacenter_by_name(self, datacenter_name):
        """
        Get datacenter managed object by name

        Thin delegation to the module-level ``find_datacenter_by_name``
        helper, bound to this object's service content.
        Args:
            datacenter_name: Name of datacenter
        Returns: datacenter managed object if found else None
        """
        return find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
def is_datastore_valid(self, datastore_obj=None):
"""
Check if datastore selected is valid or not
Args:
datastore_obj: datastore managed object
Returns: True if datastore is valid, False if not
"""
if not datastore_obj \
or datastore_obj.summary.maintenanceMode != 'normal' \
or not datastore_obj.summary.accessible:
return False
return True
    def find_datastore_by_name(self, datastore_name):
        """
        Get datastore managed object by name

        Thin delegation to the module-level ``find_datastore_by_name``
        helper, bound to this object's service content.
        Args:
            datastore_name: Name of datastore
        Returns: datastore managed object if found else None
        """
        return find_datastore_by_name(self.content, datastore_name=datastore_name)
# Datastore cluster
def find_datastore_cluster_by_name(self, datastore_cluster_name):
"""
Get datastore cluster managed object by name
Args:
datastore_cluster_name: Name of datastore cluster
Returns: Datastore cluster managed object if found else None
"""
data_store_clusters = get_all_objs(self.content, [vim.StoragePod])
for dsc in data_store_clusters:
if dsc.name == datastore_cluster_name:
return dsc
return None
# Resource pool
def find_resource_pool_by_name(self, resource_pool_name, folder=None):
"""
Get resource pool managed object by name
Args:
resource_pool_name: Name of resource pool
Returns: Resource pool managed object if found else None
"""
if not folder:
folder = self.content.rootFolder
resource_pools = get_all_objs(self.content, [vim.ResourcePool], folder=folder)
for rp in resource_pools:
if rp.name == resource_pool_name:
return rp
return None
def find_resource_pool_by_cluster(self, resource_pool_name='Resources', cluster=None):
"""
Get resource pool managed object by cluster object
Args:
resource_pool_name: Name of resource pool
cluster: Managed object of cluster
Returns: Resource pool managed object if found else None
"""
desired_rp = None
if not cluster:
return desired_rp
if resource_pool_name != 'Resources':
# Resource pool name is different than default 'Resources'
resource_pools = cluster.resourcePool.resourcePool
if resource_pools:
for rp in resource_pools:
if rp.name == resource_pool_name:
desired_rp = rp
break
else:
desired_rp = cluster.resourcePool
return desired_rp
# VMDK stuff
def vmdk_disk_path_split(self, vmdk_path):
"""
Takes a string in the format
[datastore_name] path/to/vm_name.vmdk
Returns a tuple with multiple strings:
1. datastore_name: The name of the datastore (without brackets)
2. vmdk_fullpath: The "path/to/vm_name.vmdk" portion
3. vmdk_filename: The "vm_name.vmdk" portion of the string (os.path.basename equivalent)
4. vmdk_folder: The "path/to/" portion of the string (os.path.dirname equivalent)
"""
try:
datastore_name = re.match(r'^\[(.*?)\]', vmdk_path, re.DOTALL).groups()[0]
vmdk_fullpath = re.match(r'\[.*?\] (.*)$', vmdk_path).groups()[0]
vmdk_filename = os.path.basename(vmdk_fullpath)
vmdk_folder = os.path.dirname(vmdk_fullpath)
return datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder
except (IndexError, AttributeError) as e:
self.module.fail_json(msg="Bad path '%s' for filename disk vmdk image: %s" % (vmdk_path, to_native(e)))
    def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folder):
        """
        Return vSphere file object or fail_json
        Args:
            datastore_obj: Managed object of datastore
            vmdk_fullpath: Path of VMDK file e.g., path/to/vm/vmdk_filename.vmdk
            vmdk_filename: Name of vmdk e.g., VM0001_1.vmdk
            vmdk_folder: Base dir of VMDK e.g, path/to/vm
        Returns: the matching file-info object; calls fail_json (which exits)
            when the browser is unavailable, the search fails, or no file in
            the expected folder matches.
        """
        browser = datastore_obj.browser
        datastore_name = datastore_obj.name
        # Datastore prefix in vSphere's "[datastore]" path notation.
        datastore_name_sq = "[" + datastore_name + "]"
        if browser is None:
            self.module.fail_json(msg="Unable to access browser for datastore %s" % datastore_name)
        # Ask the datastore browser for full details of any file whose name
        # matches vmdk_filename, anywhere under the datastore root.
        detail_query = vim.host.DatastoreBrowser.FileInfo.Details(
            fileOwner=True,
            fileSize=True,
            fileType=True,
            modification=True
        )
        search_spec = vim.host.DatastoreBrowser.SearchSpec(
            details=detail_query,
            matchPattern=[vmdk_filename],
            searchCaseInsensitive=True,
        )
        search_res = browser.SearchSubFolders(
            datastorePath=datastore_name_sq,
            searchSpec=search_spec
        )
        changed = False
        vmdk_path = datastore_name_sq + " " + vmdk_fullpath
        try:
            # wait_for_task returns (success, result) for the search task.
            changed, result = wait_for_task(search_res)
        except TaskError as task_e:
            self.module.fail_json(msg=to_native(task_e))
        if not changed:
            self.module.fail_json(msg="No valid disk vmdk image found for path %s" % vmdk_path)
        # Accept the result folder path with or without a trailing slash.
        target_folder_paths = [
            datastore_name_sq + " " + vmdk_folder + '/',
            datastore_name_sq + " " + vmdk_folder,
        ]
        for file_result in search_res.info.result:
            for f in getattr(file_result, 'file'):
                # Match both the filename and the folder it was found in,
                # since the search above scanned all subfolders.
                if f.path == vmdk_filename and file_result.folderPath in target_folder_paths:
                    return f
        self.module.fail_json(msg="No vmdk file found for path specified [%s]" % vmdk_path)
#
# Conversion to JSON
#
    def _deepmerge(self, d, u):
        """
        Deep merges u into d.

        Credit:
          https://bit.ly/2EDOs1B (stackoverflow question 3232943)
        License:
          cc-by-sa 3.0 (https://creativecommons.org/licenses/by-sa/3.0/)
        Changes:
          using collections_compat for compatibility
        Args:
          - d (dict): dict to merge into
          - u (dict): dict to merge into d
        Returns:
          dict, with u merged into d (d is mutated in place and returned)
        """
        for k, v in iteritems(u):
            if isinstance(v, collections_compat.Mapping):
                # Nested mapping: recurse, creating the key in d if absent.
                d[k] = self._deepmerge(d.get(k, {}), v)
            else:
                # Leaf value: u's value wins over d's.
                d[k] = v
        return d
def _extract(self, data, remainder):
"""
This is used to break down dotted properties for extraction.
Args:
- data (dict): result of _jsonify on a property
- remainder: the remainder of the dotted property to select
Return:
dict
"""
result = dict()
if '.' not in remainder:
result[remainder] = data[remainder]
return result
key, remainder = remainder.split('.', 1)
result[key] = self._extract(data[key], remainder)
return result
    def _jsonify(self, obj):
        """
        Convert an object from pyVmomi into JSON.

        Round-trips through json.dumps/json.loads with pyVmomi's
        VmomiJSONEncoder so the result is plain dict/list/scalar data;
        sort_keys keeps output stable, strip_dynamic drops dynamic
        properties (encoder behavior — confirm against pyVmomi docs).
        Args:
            - obj (object): vim object
        Return:
            dict
        """
        return json.loads(json.dumps(obj, cls=VmomiSupport.VmomiJSONEncoder,
                                     sort_keys=True, strip_dynamic=True))
    def to_json(self, obj, properties=None):
        """
        Convert a vSphere (pyVmomi) Object into JSON.  This is a deep
        transformation.  The list of properties is optional - if not
        provided then all properties are deeply converted.  The resulting
        JSON is sorted to improve human readability.
        Requires upstream support from pyVmomi > 6.7.1
        (https://github.com/vmware/pyvmomi/pull/732)
        Args:
            - obj (object): vim object
            - properties (list, optional): list of properties following
              the property collector specification, for example:
              ["config.hardware.memoryMB", "name", "overallStatus"]
              default is a complete object dump, which can be large
        Return:
            dict
        """
        if not HAS_PYVMOMIJSON:
            self.module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1')
        result = dict()
        if properties:
            for prop in properties:
                try:
                    if '.' in prop:
                        # Dotted property: jsonify the top-level attribute,
                        # select the nested remainder, and deep-merge it into
                        # the accumulated result so sibling paths combine.
                        key, remainder = prop.split('.', 1)
                        tmp = dict()
                        tmp[key] = self._extract(self._jsonify(getattr(obj, key)), remainder)
                        self._deepmerge(result, tmp)
                    else:
                        result[prop] = self._jsonify(getattr(obj, prop))
                        # To match gather_vm_facts output
                        # NOTE: the original '_moid'/'_vimref' key is NOT
                        # removed, so both it and the alias end up in result.
                        prop_name = prop
                        if prop.lower() == '_moid':
                            prop_name = 'moid'
                        elif prop.lower() == '_vimref':
                            prop_name = 'vimref'
                        result[prop_name] = result[prop]
                except (AttributeError, KeyError):
                    self.module.fail_json(msg="Property '{0}' not found.".format(prop))
        else:
            result = self._jsonify(obj)
        return result
def get_folder_path(self, cur):
full_path = '/' + cur.name
while hasattr(cur, 'parent') and cur.parent:
if cur.parent == self.content.rootFolder:
break
cur = cur.parent
full_path = '/' + cur.name + full_path
return full_path
| gpl-3.0 |
GuneetAtwal/kernel_n9005 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional single argument: a numeric pid, or otherwise a comm name.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
	sys.exit(usage)

if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except:
		for_comm = sys.argv[1]

# autodict (from Core) auto-vivifies nested keys: syscalls[comm][pid][id].
syscalls = autodict()
def trace_begin():
	# Called by perf before event processing starts (Python 2 interpreter
	# embedded in perf).
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf after the last event; print the accumulated summary.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Handler for the raw_syscalls:sys_enter tracepoint; skip events that
	# do not match the optional comm/pid filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	try:
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		# First occurrence: the autodict leaf does not exist yet.
		syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
	# Emit per-comm/pid syscall counts, highest count first.  Python 2
	# print statements with trailing commas suppress the extra newline.
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events by comm/pid:\n\n",

	print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
	    "----------"),

	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			# Sort by (count, id) descending so the busiest syscalls
			# come first.
			for id, val in sorted(syscalls[comm][pid].iteritems(), \
				key = lambda(k, v): (v, k), reverse = True):
				print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
RedhawkSDR/integration-gnuhawk | gnuradio/gnuradio-core/src/python/gnuradio/gr/hier_block2.py | 7 | 4376 | #
# Copyright 2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio_core import hier_block2_swig
#
# This hack forces a 'has-a' relationship to look like an 'is-a' one.
#
# It allows Python classes to subclass this one, while passing through
# method calls to the C++ class shared pointer from SWIG.
#
# It also allows us to intercept method calls if needed
#
class hier_block2(object):
    """
    Python wrapper around the C++ hierarchical block implementation.
    Provides convenience functions and allows proper Python subclassing.
    """
    def __init__(self, name, input_signature, output_signature):
        """
        Create a hierarchical block with a given name and I/O signatures.
        """
        # The wrapped SWIG shared pointer; unknown attribute access is
        # forwarded to it via __getattr__ below ('has-a' made to look
        # like 'is-a', per the module-level comment).
        self._hb = hier_block2_swig(name, input_signature, output_signature)
    def __getattr__(self, name):
        """
        Pass-through member requests to the C++ object.
        """
        # __getattr__ only fires when normal lookup fails, so a subclass
        # that never ran gr.hier_block2.__init__ has no _hb and would
        # recurse here forever without this guard.
        if not hasattr(self, "_hb"):
            raise RuntimeError("hier_block2: invalid state--did you forget to call gr.hier_block2.__init__ in a derived class?")
        return getattr(self._hb, name)
    def connect(self, *points):
        """
        Connect two or more block endpoints.  An endpoint is either a (block, port)
        tuple or a block instance.  In the latter case, the port number is assumed
        to be zero.
        To connect the hierarchical block external inputs or outputs to internal block
        inputs or outputs, use 'self' in the connect call.
        If multiple arguments are provided, connect will attempt to wire them in series,
        interpreting the endpoints as inputs or outputs as appropriate.
        """
        if len (points) < 1:
            # Python 2 raise syntax (this tree targets Python 2).
            raise ValueError, ("connect requires at least one endpoint; %d provided." % (len (points),))
        else:
            if len(points) == 1:
                # Single endpoint: register the block in the flowgraph
                # without wiring any ports.
                self._hb.primitive_connect(points[0].to_basic_block())
            else:
                # Wire consecutive endpoints pairwise, in series.
                for i in range (1, len (points)):
                    self._connect(points[i-1], points[i])
    def _connect(self, src, dst):
        # Normalize both endpoints to (block, port) and wire them in C++.
        (src_block, src_port) = self._coerce_endpoint(src)
        (dst_block, dst_port) = self._coerce_endpoint(dst)
        self._hb.primitive_connect(src_block.to_basic_block(), src_port,
                                   dst_block.to_basic_block(), dst_port)
    def _coerce_endpoint(self, endp):
        # Accept either a bare block (implies port 0) or a (block, port) pair.
        if hasattr(endp, 'to_basic_block'):
            return (endp, 0)
        else:
            if hasattr(endp, "__getitem__") and len(endp) == 2:
                return endp # Assume user put (block, port)
            else:
                raise ValueError("unable to coerce endpoint")
    def disconnect(self, *points):
        """
        Disconnect two endpoints in the flowgraph.
        To disconnect the hierarchical block external inputs or outputs to internal block
        inputs or outputs, use 'self' in the connect call.
        If more than two arguments are provided, they are disconnected successively.
        """
        if len (points) < 1:
            raise ValueError, ("disconnect requires at least one endpoint; %d provided." % (len (points),))
        else:
            if len (points) == 1:
                self._hb.primitive_disconnect(points[0].to_basic_block())
            else:
                # Unwire consecutive endpoints pairwise, mirroring connect().
                for i in range (1, len (points)):
                    self._disconnect(points[i-1], points[i])
    def _disconnect(self, src, dst):
        # Mirror image of _connect.
        (src_block, src_port) = self._coerce_endpoint(src)
        (dst_block, dst_port) = self._coerce_endpoint(dst)
        self._hb.primitive_disconnect(src_block.to_basic_block(), src_port,
                                      dst_block.to_basic_block(), dst_port)
boyuegame/kbengine | kbe/src/lib/python/Lib/test/test_keyword.py | 98 | 5840 | import keyword
import unittest
from test import support
import filecmp
import os
import sys
import subprocess
import shutil
import textwrap
# File locations used by the keyword-regeneration round-trip tests below.
KEYWORD_FILE = support.findfile('keyword.py')
# graminit.c only exists in a source build tree; tests that need it skip
# themselves otherwise.
GRAMMAR_FILE = os.path.join(os.path.split(__file__)[0],
                            '..', '..', 'Python', 'graminit.c')
TEST_PY_FILE = 'keyword_test.py'
GRAMMAR_TEST_FILE = 'graminit_test.c'
PY_FILE_WITHOUT_KEYWORDS = 'minimal_keyword.py'
NONEXISTENT_FILE = 'not_here.txt'
class Test_iskeyword(unittest.TestCase):
    """Sanity checks for keyword.iskeyword."""

    def test_true_is_a_keyword(self):
        # 'True' is a reserved word.
        result = keyword.iskeyword('True')
        self.assertTrue(result)

    def test_uppercase_true_is_not_a_keyword(self):
        # Keyword matching is case-sensitive.
        result = keyword.iskeyword('TRUE')
        self.assertFalse(result)

    def test_none_value_is_not_a_keyword(self):
        # Non-string values are simply not members of the keyword set.
        result = keyword.iskeyword(None)
        self.assertFalse(result)

    # This is probably an accident of the current implementation, but should be
    # preserved for backward compatibility.
    def test_changing_the_kwlist_does_not_affect_iskeyword(self):
        oldlist = keyword.kwlist
        self.addCleanup(setattr, keyword, 'kwlist', oldlist)
        keyword.kwlist = ['its', 'all', 'eggs', 'beans', 'and', 'a', 'slice']
        self.assertFalse(keyword.iskeyword('eggs'))
class TestKeywordGeneration(unittest.TestCase):
    """Round-trip tests for keyword.py's self-regeneration from graminit.c."""

    def _copy_file_without_generated_keywords(self, source_file, dest_file):
        # Copy source_file to dest_file, dropping the generated lines between
        # the "#--start keywords--" and "#--end keywords--" markers.
        with open(source_file, 'rb') as fp:
            lines = fp.readlines()
        # Newline byte sequence of the file, taken from its first line.
        nl = lines[0][len(lines[0].strip()):]
        with open(dest_file, 'wb') as fp:
            fp.writelines(lines[:lines.index(b"#--start keywords--" + nl) + 1])
            fp.writelines(lines[lines.index(b"#--end keywords--" + nl):])

    def _generate_keywords(self, grammar_file, target_keyword_py_file):
        # Run keyword.py as a script to regenerate the keyword list inside
        # target_keyword_py_file from grammar_file; returns (rc, stderr).
        proc = subprocess.Popen([sys.executable,
                                 KEYWORD_FILE,
                                 grammar_file,
                                 target_keyword_py_file], stderr=subprocess.PIPE)
        stderr = proc.communicate()[1]
        return proc.returncode, stderr

    @unittest.skipIf(not os.path.exists(GRAMMAR_FILE),
                     'test only works from source build directory')
    def test_real_grammar_and_keyword_file(self):
        self._copy_file_without_generated_keywords(KEYWORD_FILE, TEST_PY_FILE)
        self.addCleanup(support.unlink, TEST_PY_FILE)

        # Regenerating the stripped copy from the real grammar must restore
        # a byte-identical keyword.py.
        self.assertFalse(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE))
        self.assertEqual((0, b''), self._generate_keywords(GRAMMAR_FILE,
                                                           TEST_PY_FILE))
        self.assertTrue(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE))

    def test_grammar(self):
        self._copy_file_without_generated_keywords(KEYWORD_FILE, TEST_PY_FILE)
        self.addCleanup(support.unlink, TEST_PY_FILE)
        with open(GRAMMAR_TEST_FILE, 'w') as fp:
            # Some of these are probably implementation accidents.
            fp.writelines(textwrap.dedent("""\
                {2, 1},
                {11, "encoding_decl", 0, 2, states_79,
                 "\000\000\040\000\000\000\000\000\000\000\000\000"
                 "\000\000\000\000\000\000\000\000\000"},
                {1, "jello"},
                {326, 0},
                {1, "turnip"},
                \t{1, "This one is tab indented"
                {278, 0},
                {1, "crazy but legal"
                "also legal" {1, "
                {1, "continue"},
                {1, "lemon"},
                {1, "tomato"},
                {1, "wigii"},
                {1, 'no good'}
                {283, 0},
                {1, "too many spaces"}"""))
        self.addCleanup(support.unlink, GRAMMAR_TEST_FILE)
        self._generate_keywords(GRAMMAR_TEST_FILE, TEST_PY_FILE)
        # Only {1, "..."} double-quoted entries count; results are sorted.
        expected = [
            "        'This one is tab indented',",
            "        'also legal',",
            "        'continue',",
            "        'crazy but legal',",
            "        'jello',",
            "        'lemon',",
            "        'tomato',",
            "        'turnip',",
            "        'wigii',",
        ]
        with open(TEST_PY_FILE) as fp:
            lines = fp.read().splitlines()
        start = lines.index("#--start keywords--") + 1
        end = lines.index("#--end keywords--")
        actual = lines[start:end]
        self.assertEqual(actual, expected)

    def test_empty_grammar_results_in_no_keywords(self):
        self._copy_file_without_generated_keywords(KEYWORD_FILE,
                                                   PY_FILE_WITHOUT_KEYWORDS)
        self.addCleanup(support.unlink, PY_FILE_WITHOUT_KEYWORDS)
        shutil.copyfile(KEYWORD_FILE, TEST_PY_FILE)
        self.addCleanup(support.unlink, TEST_PY_FILE)
        # os.devnull stands in for an empty grammar file.
        self.assertEqual((0, b''), self._generate_keywords(os.devnull,
                                                           TEST_PY_FILE))
        self.assertTrue(filecmp.cmp(TEST_PY_FILE, PY_FILE_WITHOUT_KEYWORDS))

    def test_keywords_py_without_markers_produces_error(self):
        rc, stderr = self._generate_keywords(os.devnull, os.devnull)
        self.assertNotEqual(rc, 0)
        self.assertRegex(stderr, b'does not contain format markers')

    def test_missing_grammar_file_produces_error(self):
        rc, stderr = self._generate_keywords(NONEXISTENT_FILE, KEYWORD_FILE)
        self.assertNotEqual(rc, 0)
        self.assertRegex(stderr, b'(?ms)' + NONEXISTENT_FILE.encode())

    def test_missing_keywords_py_file_produces_error(self):
        rc, stderr = self._generate_keywords(os.devnull, NONEXISTENT_FILE)
        self.assertNotEqual(rc, 0)
        self.assertRegex(stderr, b'(?ms)' + NONEXISTENT_FILE.encode())
if __name__ == "__main__":
    # Allow running this test file directly.
    unittest.main()
| lgpl-3.0 |
mheap/ansible | lib/ansible/modules/system/ufw.py | 23 | 11228 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
# Copyright: (c) 2013, James Martin <jmartin@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ufw
short_description: Manage firewall with UFW
description:
- Manage firewall with UFW.
version_added: 1.6
author:
- Aleksey Ovcharenko (@ovcharenko)
- Jarno Keskikangas (@pyykkis)
- Ahti Kitsik (@ahtik)
notes:
- See C(man ufw) for more examples.
requirements:
- C(ufw) package
options:
state:
description:
- C(enabled) reloads firewall and enables firewall on boot.
- C(disabled) unloads firewall and disables firewall on boot.
- C(reloaded) reloads firewall.
- C(reset) disables and resets firewall to installation defaults.
choices: [ disabled, enabled, reloaded, reset ]
policy:
description:
- Change the default policy for incoming or outgoing traffic.
aliases: [ default ]
choices: [ allow, deny, reject ]
direction:
description:
- Select direction for a rule or default policy command.
choices: [ in, incoming, out, outgoing, routed ]
logging:
description:
- Toggles logging. Logged packets use the LOG_KERN syslog facility.
choices: [ on, off, low, medium, high, full ]
insert:
description:
- Insert the corresponding rule as rule number NUM
rule:
description:
- Add firewall rule
choices: ['allow', 'deny', 'limit', 'reject']
log:
description:
- Log new connections matched to this rule
type: bool
from_ip:
description:
- Source IP address.
aliases: [ from, src ]
default: any
from_port:
description:
- Source port.
to_ip:
description:
- Destination IP address.
aliases: [ dest, to]
default: any
to_port:
description:
- Destination port.
aliases: [ port ]
proto:
description:
- TCP/IP protocol.
choices: [ any, tcp, udp, ipv6, esp, ah ]
name:
description:
- Use profile located in C(/etc/ufw/applications.d).
aliases: [ app ]
delete:
description:
- Delete rule.
type: bool
interface:
description:
- Specify interface for rule.
aliases: [ if ]
route:
description:
- Apply the rule to routed/forwarded packets.
type: bool
comment:
description:
- Add a comment to the rule. Requires UFW version >=0.35.
version_added: "2.4"
'''
EXAMPLES = '''
- name: Allow everything and enable UFW
ufw:
state: enabled
policy: allow
- name: Set logging
ufw:
logging: on
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
- ufw:
rule: reject
port: auth
log: yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
- ufw:
rule: limit
port: ssh
proto: tcp
# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
# a rule=allow task can leave those ports exposed. Either use delete=yes
# or a separate state=reset task)
- ufw:
rule: allow
name: OpenSSH
- name: Delete OpenSSH rule
ufw:
rule: allow
name: OpenSSH
delete: yes
- name: Deny all access to port 53
ufw:
rule: deny
port: 53
- name: Allow port range 60000-61000
ufw:
rule: allow
port: 60000:61000
- name: Allow all access to tcp port 80
ufw:
rule: allow
port: 80
proto: tcp
- name: Allow all access from RFC1918 networks to this host
ufw:
rule: allow
src: '{{ item }}'
with_items:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
ufw:
rule: deny
proto: udp
src: 1.2.3.4
port: 514
comment: Block syslog
- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
ufw:
rule: allow
interface: eth0
direction: in
proto: udp
src: 1.2.3.5
from_port: 5469
dest: 1.2.3.4
to_port: 5469
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
ufw:
rule: deny
proto: tcp
src: 2001:db8::/32
port: 25
# Can be used to further restrict a global FORWARD policy set to allow
- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
ufw:
rule: deny
route: yes
src: 1.2.3.0/24
dest: 4.5.6.0/24
'''
import re
from operator import itemgetter
from ansible.module_utils.basic import AnsibleModule
def main():
    """Ansible ufw module entry point: translate parameters into one or more
    ufw command invocations and report whether state/rules changed."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
            direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete=dict(type='bool', default=False),
            route=dict(type='bool', default=False),
            insert=dict(type='str'),
            rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
            interface=dict(type='str', aliases=['if']),
            log=dict(type='bool', default=False),
            from_ip=dict(type='str', default='any', aliases=['from', 'src']),
            from_port=dict(type='str'),
            to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
            to_port=dict(type='str', aliases=['port']),
            proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
            app=dict(type='str', aliases=['name']),
            comment=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['app', 'proto', 'logging']
        ],
    )

    cmds = []

    def execute(cmd):
        # cmd is a list of [condition, text] (or single-element [text]) pairs:
        # keep the last element of every pair whose first element is truthy
        # and join them into a single command line.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg=err or out)

    def ufw_version():
        """
        Returns the major, minor and revision of the ufw installed on the
        system, parsed from `ufw --version`.
        """
        rc, out, err = module.run_command("%s --version" % ufw_bin)
        if rc != 0:
            module.fail_json(
                msg="Failed to get ufw version.", rc=rc, out=out, err=err
            )

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    # Ensure at least one of the command arguments are given
    command_keys = ['state', 'default', 'rule', 'logging']
    commands = dict((key, params[key]) for key in command_keys if params[key])

    if len(commands) < 1:
        # BUGFIX: previously interpolated `commands`, which is always an
        # empty dict on this path (the message read "... {} given"); name
        # the accepted arguments instead.
        module.fail_json(msg="Not any of the command arguments %s given" % ', '.join(command_keys))

    if (params['interface'] is not None and params['direction'] is None):
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

    # Save the pre state and rules in order to recognize changes
    (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")

    # Execute commands
    for (command, value) in commands.items():
        # In check mode every ufw invocation carries --dry-run.
        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}
            # -f suppresses ufw's interactive confirmation prompts.
            execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            execute(cmd + [[command], [value]])

        elif command == 'default':
            execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([module.boolean(params['delete']), 'delete'])
            cmd.append([params['insert'], "insert %s" % params['insert']])
            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('app', "app '%s'")]:
                # Renamed from `value` to avoid shadowing the outer loop
                # variable that still holds the rule action.
                param_value = params[key]
                cmd.append([param_value, template % (param_value)])

            ufw_major, ufw_minor, _ = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            execute(cmd)

    # Get the new state
    (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
    changed = (pre_state != post_state) or (pre_rules != post_rules)

    return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
if __name__ == '__main__':
    # Module entry point when executed by Ansible.
    main()
| gpl-3.0 |
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/cmp/cmppolicy_csvserver_binding.py | 3 | 6033 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cmppolicy_csvserver_binding(base_resource) :
""" Binding class showing the csvserver that can be bound to cmppolicy.
"""
    def __init__(self) :
        # Binding attributes populated from NITRO responses (auto-generated
        # resource class; fields mirror the API schema).
        self._boundto = ""
        self._priority = 0
        self._activepolicy = 0
        self._gotopriorityexpression = ""
        self._labeltype = ""
        self._labelname = ""
        self._name = ""
        # Resource count returned by counting GET requests.
        self.___count = 0
    @property
    def name(self) :
        ur"""Name of the HTTP compression policy for which to display details.<br/>Minimum length = 1.
        """
        # Generated accessor; the try/except is boilerplate and simply
        # re-raises anything unexpected.
        try :
            return self._name
        except Exception as e:
            raise e
    @name.setter
    def name(self, name) :
        ur"""Name of the HTTP compression policy for which to display details.<br/>Minimum length = 1
        """
        # Generated mutator; no client-side validation is performed.
        try :
            self._name = name
        except Exception as e:
            raise e
    @property
    def boundto(self) :
        ur"""The name of the entity to which the policy is bound.
        """
        # Generated accessor.
        try :
            return self._boundto
        except Exception as e:
            raise e
    @boundto.setter
    def boundto(self, boundto) :
        ur"""The name of the entity to which the policy is bound.
        """
        # Generated mutator.
        try :
            self._boundto = boundto
        except Exception as e:
            raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cmppolicy_csvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cmppolicy_csvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch cmppolicy_csvserver_binding resources.
"""
try :
obj = cmppolicy_csvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of cmppolicy_csvserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = cmppolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count cmppolicy_csvserver_binding resources configued on NetScaler.
"""
try :
obj = cmppolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of cmppolicy_csvserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = cmppolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class cmppolicy_csvserver_binding_response(base_response):
    """Response wrapper the NITRO payload formatter deserializes into."""

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the resource list so the formatter can fill it in place.
        # (The original assigned this attribute twice; the initial [] was a
        # dead store immediately overwritten below.)
        self.cmppolicy_csvserver_binding = [cmppolicy_csvserver_binding() for _ in range(length)]
| apache-2.0 |
wkhtmltopdf/qtwebkit | Tools/Scripts/webkitpy/common/system/profiler.py | 182 | 9421 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import itertools
_log = logging.getLogger(__name__)
class ProfilerFactory(object):
    """Selects and instantiates the appropriate Profiler for a platform."""

    @classmethod
    def create_profiler(cls, host, executable_path, output_dir, profiler_name=None, identifier=None):
        """Return a profiler instance for the host's platform, or None.

        profiler_name (matched against Profiler.name, i.e. the value of the
        --profiler=NAME option) selects a specific profiler; when omitted the
        platform default is used.
        """
        profilers = cls.profilers_for_platform(host.platform)
        if not profilers:
            return None
        profiler_name = profiler_name or cls.default_profiler_name(host.platform)
        # Linear scan instead of py2-only itertools.ifilter (works on 2 and 3).
        profiler_class = None
        for candidate in profilers:
            if candidate.name == profiler_name:
                profiler_class = candidate
                break
        if not profiler_class:
            return None
        # Bug fix: instantiate the class that matched profiler_name. The old
        # code returned profilers[0](...), silently ignoring an explicit
        # --profiler=NAME selection of any non-default profiler.
        return profiler_class(host, executable_path, output_dir, identifier)

    @classmethod
    def default_profiler_name(cls, platform):
        """Name of the first (preferred) profiler for platform, or None."""
        profilers = cls.profilers_for_platform(platform)
        return profilers[0].name if profilers else None

    @classmethod
    def profilers_for_platform(cls, platform):
        """Ordered list of Profiler subclasses usable on platform."""
        # GooglePProf requires TCMalloc/google-perftools, but is available everywhere.
        profilers_by_os_name = {
            'mac': [IProfiler, Sample, GooglePProf],
            'linux': [Perf, GooglePProf],
            # Note: freebsd, win32 have no profilers defined yet, thus --profile will be ignored
            # by default, but a profiler can be selected with --profiler=PROFILER explicitly.
        }
        return profilers_by_os_name.get(platform.os_name, [])
class Profiler(object):
    """Base class for all profilers.

    Subclasses override attach_to_pid/profile_after_exit (and optionally
    adjusted_environment) to drive a concrete profiling tool.
    """

    # Used by ProfilerFactory to lookup a profiler from the --profiler=NAME option.
    name = None

    def __init__(self, host, executable_path, output_dir, identifier=None):
        self._host = host
        self._executable_path = executable_path
        self._output_dir = output_dir
        # Bug fix: honor the caller-supplied identifier instead of always
        # hard-coding "test" (see the FIXME in SingleFileOutputProfiler about
        # reports being kept as test.*). "test" remains the default.
        self._identifier = identifier or "test"
        self._host.filesystem.maybe_make_directory(self._output_dir)

    def adjusted_environment(self, env):
        """Hook: return env, possibly modified, for the profiled process."""
        return env

    def attach_to_pid(self, pid):
        """Hook: start profiling the already-running process `pid`."""
        pass

    def profile_after_exit(self):
        """Hook: finalize/report the profile once the target has exited."""
        pass
class SingleFileOutputProfiler(Profiler):
    """Profiler whose entire report ends up in a single output file.

    Subclasses pass the desired file extension (output_suffix); an unused
    path under output_dir is reserved at construction time (presumably of
    the form <identifier>-N.<suffix> — determined by
    host.workspace.find_unused_filename, defined elsewhere).
    """

    def __init__(self, host, executable_path, output_dir, output_suffix, identifier=None):
        super(SingleFileOutputProfiler, self).__init__(host, executable_path, output_dir, identifier)
        # FIXME: Currently all reports are kept as test.*, until we fix that, search up to 1000 names before giving up.
        self._output_path = self._host.workspace.find_unused_filename(self._output_dir, self._identifier, output_suffix, search_limit=1000)
        assert(self._output_path)
class GooglePProf(SingleFileOutputProfiler):
    """Profiler backed by google-perftools' CPU profiler (pprof).

    Requires the profiled binary to be linked against TCMalloc/
    google-perftools; sampling is enabled via the CPUPROFILE environment
    variable rather than by attaching to the process.
    """

    name = 'pprof'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(GooglePProf, self).__init__(host, executable_path, output_dir, "pprof", identifier)

    def adjusted_environment(self, env):
        # perftools writes its samples to the path named by CPUPROFILE.
        env['CPUPROFILE'] = self._output_path
        return env

    def _first_ten_lines_of_profile(self, pprof_output):
        # Capture up to 10 lines following the "Total:" header of pprof's
        # text report.
        match = re.search("^Total:[^\n]*\n((?:[^\n]*\n){0,10})", pprof_output, re.MULTILINE)
        return match.group(1) if match else None

    def _pprof_path(self):
        # FIXME: We should have code to find the right google-pprof executable, some Googlers have
        # google-pprof installed as "pprof" on their machines for them.
        return '/usr/bin/google-pprof'

    def profile_after_exit(self):
        # google-pprof doesn't check its arguments, so we have to.
        if not (self._host.filesystem.exists(self._output_path)):
            print "Failed to gather profile, %s does not exist." % self._output_path
            return

        pprof_args = [self._pprof_path(), '--text', self._executable_path, self._output_path]
        profile_text = self._host.executive.run_command(pprof_args)
        print "First 10 lines of pprof --text:"
        print self._first_ten_lines_of_profile(profile_text)
        print "http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html documents output."
        print
        print "To interact with the the full profile, including produce graphs:"
        print ' '.join([self._pprof_path(), self._executable_path, self._output_path])
class Perf(SingleFileOutputProfiler):
    """Profiler driving Linux `perf record` / `perf report`."""

    name = 'perf'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(Perf, self).__init__(host, executable_path, output_dir, "data", identifier)
        self._perf_process = None
        self._pid_being_profiled = None

    def _perf_path(self):
        # FIXME: We may need to support finding the perf binary in other locations.
        return 'perf'

    def attach_to_pid(self, pid):
        # Only one attach per Perf instance is supported.
        assert(not self._perf_process and not self._pid_being_profiled)
        self._pid_being_profiled = pid
        cmd = [self._perf_path(), "record", "--call-graph", "--pid", pid, "--output", self._output_path]
        self._perf_process = self._host.executive.popen(cmd)

    def _first_ten_lines_of_profile(self, perf_output):
        # Skip the '#' comment header, then grab up to 10 indented sample lines.
        match = re.search("^#[^\n]*\n((?: [^\n]*\n){1,10})", perf_output, re.MULTILINE)
        return match.group(1) if match else None

    def profile_after_exit(self):
        # Perf doesn't automatically watch the attached pid for death notifications,
        # so we have to do it for it, and then tell it its time to stop sampling. :(
        self._host.executive.wait_limited(self._pid_being_profiled, limit_in_seconds=10)
        perf_exitcode = self._perf_process.poll()
        if perf_exitcode is None:  # This should always be the case, unless perf error'd out early.
            self._host.executive.interrupt(self._perf_process.pid)
            perf_exitcode = self._perf_process.wait()

        if perf_exitcode not in (0, -2):  # The exit code should always be -2, as we're always interrupting perf.
            print "'perf record' failed (exit code: %i), can't process results:" % perf_exitcode
            return

        perf_args = [self._perf_path(), 'report', '--call-graph', 'none', '--input', self._output_path]
        print "First 10 lines of 'perf report --call-graph=none':"
        print " ".join(perf_args)
        perf_output = self._host.executive.run_command(perf_args)
        print self._first_ten_lines_of_profile(perf_output)
        print "To view the full profile, run:"
        print ' '.join([self._perf_path(), 'report', '-i', self._output_path])
        print  # An extra line between tests looks nicer.
class Sample(SingleFileOutputProfiler):
    """Profiler driving the OS X `sample` command-line tool."""

    name = 'sample'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(Sample, self).__init__(host, executable_path, output_dir, "txt", identifier)
        self._profiler_process = None

    def attach_to_pid(self, pid):
        # -mayDie lets sample still produce its report if the target exits
        # while being sampled.
        sample_command = ["sample", pid, "-mayDie", "-file", self._output_path]
        self._profiler_process = self._host.executive.popen(sample_command)

    def profile_after_exit(self):
        # Block until sample has finished writing its report.
        self._profiler_process.wait()
class IProfiler(SingleFileOutputProfiler):
    """Profiler driving Apple's `iprofiler` command-line tool."""

    name = 'iprofiler'

    def __init__(self, host, executable_path, output_dir, identifier=None):
        super(IProfiler, self).__init__(host, executable_path, output_dir, "dtps", identifier)
        self._profiler_process = None

    def attach_to_pid(self, pid):
        # FIXME: iprofiler requires us to pass the directory separately
        # from the basename of the file, with no control over the extension.
        fs = self._host.filesystem
        cmd = ["iprofiler", "-timeprofiler", "-a", pid,
               "-d", fs.dirname(self._output_path), "-o", fs.splitext(fs.basename(self._output_path))[0]]
        # FIXME: Consider capturing instead of letting instruments spam to stderr directly.
        self._profiler_process = self._host.executive.popen(cmd)

    def profile_after_exit(self):
        # It seems like a nicer user experience to wait on the profiler to exit to prevent
        # it from spewing to stderr at odd times.
        self._profiler_process.wait()
| gpl-2.0 |
alfred82santa/tarrabme2 | src/orgs/models.py | 1 | 1856 | from django.db import models
from common.models import CommonModel, AbstractContact, AbstractAddress
from django.contrib.auth.models import Group
from imagekit.models import ProcessedImageField, ImageSpecField
from imagekit.processors import ResizeToFill
class Organization(CommonModel):
    """An organization with branding assets (logo + derived thumbnail)."""

    name = models.CharField(max_length=100, unique=True)
    commercial_name = models.CharField(max_length=150, unique=True)
    # Short unique code for the organization — assumed to namespace its
    # resources; TODO confirm against callers.
    prefix = models.CharField(max_length=6, unique=True)
    active = models.BooleanField('active', default=True)
    # Uploaded logo is normalized to 400x400 on save; the 50x50 thumbnail is
    # generated from it by imagekit.
    logo = ProcessedImageField(
        upload_to="logos",
        processors=[ResizeToFill(400, 400)],
    )
    logo_thumbnail = ImageSpecField(source='logo',
                                    processors=[ResizeToFill(50, 50)],)

    def logo_thumbnail_img(self):
        # Returns an HTML <img> tag; allow_tags/short_description below mark
        # it for use in the Django admin's list display.
        return '<img src="%s"/>' % self.logo_thumbnail.url
    logo_thumbnail_img.allow_tags = True
    logo_thumbnail_img.short_description = ''

    class Meta:
        pass

    def __unicode__(self):
        return self.name
class Contact(AbstractContact):
    """A contact attached to an organization (reverse: org.contacts_list)."""

    organization = models.ForeignKey(Organization, blank=False,
                                     null=False, related_name="contacts_list"
                                     )
class BillingAccount(AbstractAddress):
    """Billing details (fiscal id + payment method) for an organization."""

    fiscal_number = models.CharField(max_length=126, unique=True)
    # NOTE(review): unique=True on payment_method/payment_data prevents two
    # accounts from sharing e.g. the same method name — confirm this is
    # intentional. Also related_name="contacts" collides conceptually with
    # Contact's "contacts_list"; verify naming.
    payment_method = models.CharField(max_length=126, unique=True)
    payment_data = models.CharField(max_length=126, unique=True)
    organization = models.ForeignKey(Organization, blank=False,
                                     null=False, related_name="contacts"
                                     )
class OrganizationRole(Group):
    """A django.contrib.auth Group scoped to a single organization."""

    organization = models.ForeignKey(Organization, blank=False,
                                     null=False, related_name="roles"
                                     )
| gpl-3.0 |
technicalpickles/zulip | zerver/migrations/0002_django_1_8.py | 125 | 2229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.auth.models
class Migration(migrations.Migration):
    """Auto-generated schema pass over the initial zerver migration.

    Mechanical field re-declarations produced by a Django upgrade:
    EmailFields widened to max_length=254, auto_now DateTimeFields and
    ManyToManyFields re-stated, and UserProfile's default manager pinned.
    Do not hand-edit the operations list.
    """

    dependencies = [
        ('zerver', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='userprofile',
            managers=[
                (b'objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.AlterField(
            model_name='appledevicetoken',
            name='last_updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='mituser',
            name='email',
            field=models.EmailField(unique=True, max_length=254),
        ),
        migrations.AlterField(
            model_name='preregistrationuser',
            name='email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='preregistrationuser',
            name='streams',
            field=models.ManyToManyField(to='zerver.Stream'),
        ),
        migrations.AlterField(
            model_name='pushdevicetoken',
            name='last_updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='referral',
            name='email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='email',
            field=models.EmailField(unique=True, max_length=254, db_index=True),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='groups',
            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='last_login',
            field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
        ),
    ]
| apache-2.0 |
hujiajie/chromium-crosswalk | tools/perf/benchmarks/session_restore.py | 7 | 2377 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from measurements import session_restore
import page_sets
from telemetry import benchmark
class _SessionRestoreTypical25(perf_benchmark.PerfBenchmark):
    """Base Benchmark class for session restore benchmarks.

    A cold start means none of the Chromium files are in the disk cache.
    A warm start assumes the OS has already cached much of Chromium's content.
    For warm tests, you should repeat the page set to ensure it's cached.

    Use Typical25PageSet to match what the SmallProfileCreator uses.
    TODO(slamm): Make SmallProfileCreator and this use the same page_set ref.
    """
    page_set = page_sets.Typical25PageSetWithProfile
    tag = None  # override with 'warm' or 'cold'

    @classmethod
    def Name(cls):
        return 'session_restore'

    @classmethod
    def ValueCanBeAddedPredicate(cls, _, is_first_result):
        # Warm runs discard the first (cache-priming) result; cold runs keep all.
        return cls.tag == 'cold' or not is_first_result

    def CreateStorySet(self, _):
        """Return a story set that only has the first story.

        The session restore measurement skips the navigation step and
        only tests session restore by having the browser start-up.
        The first story is used to get WPR set up and hold results.
        """
        story_set = self.page_set()
        # Iterate a slice copy so removal doesn't disturb the iteration.
        for story in story_set.stories[1:]:
            story_set.RemoveStory(story)
        return story_set

    def CreatePageTest(self, options):
        is_cold = (self.tag == 'cold')
        return session_restore.SessionRestore(cold=is_cold)
@benchmark.Disabled('android',
                    'mac')  # crbug.com/563594
class SessionRestoreColdTypical25(_SessionRestoreTypical25):
    """Test by clearing system cache and profile before repeats."""
    tag = 'cold'
    # Cold runs are expensive (cache clears); keep the repeat count low.
    options = {'pageset_repeat': 5}

    @classmethod
    def Name(cls):
        return 'session_restore.cold.typical_25'
@benchmark.Disabled('android',
                    'mac',  # crbug.com/563594
                    'linux', 'xp')  # crbug.com/539056
class SessionRestoreWarmTypical25(_SessionRestoreTypical25):
    """Test without clearing system cache or profile before repeats.

    The first result is discarded (see ValueCanBeAddedPredicate): it only
    primes the caches for the warm measurements that follow.
    """
    tag = 'warm'
    options = {'pageset_repeat': 20}

    @classmethod
    def Name(cls):
        return 'session_restore.warm.typical_25'
| bsd-3-clause |
kevinlee12/oppia | core/domain/storage_model_audit_jobs_test.py | 2 | 4211 | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Oppia storage model audit jobs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import inspect
from core.domain import prod_validation_jobs_one_off
from core.platform import models
from core.tests import test_utils
# This list includes statistics models. The statistics models are included here
# because the audit jobs for statistics models are defined in
# core/domain/stats_jobs_one_off.py. These jobs should be updated and moved to
# core/domain/prod_validation_jobs_one_off.py and the statistics model
# class names can then be removed from this list.
# The corresponding issue is #7100.
# (Model class names listed here are skipped by the audit-job coverage test
# below.)
MODEL_CLASS_NAMES_TO_EXCLUDE = {
    'StateCounterModel',
    'AnswerSubmittedEventLogEntryModel',
    'ExplorationActualStartEventLogEntryModel',
    'SolutionHitEventLogEntryModel',
    'StartExplorationEventLogEntryModel',
    'MaybeLeaveExplorationEventLogEntryModel',
    'CompleteExplorationEventLogEntryModel',
    'RateExplorationEventLogEntryModel',
    'StateHitEventLogEntryModel',
    'StateCompleteEventLogEntryModel',
    'LeaveForRefresherExplorationEventLogEntryModel',
    'ExplorationStatsModel',
    'ExplorationIssuesModel',
    'LearnerAnswerDetailsModel',
    'ExplorationAnnotationsModel',
    'StateAnswersModel',
    'StateAnswersCalcOutputModel',
}
class StorageModelAuditJobsTest(test_utils.GenericTestBase):
    """Tests for Oppia storage model audit jobs."""

    def test_all_models_have_audit_jobs(self):
        """Asserts every NDB model subclass has a '<Name>AuditOneOffJob'."""
        all_model_module_names = []

        # As models.NAMES is an enum, it cannot be iterated. So we use the
        # __dict__ property which can be iterated.
        for name in models.NAMES.__dict__:
            if '__' not in name:
                all_model_module_names.append(name)

        names_of_ndb_model_subclasses = []
        for module_name in all_model_module_names:
            # We skip base models since there are no specific audit jobs
            # for base models. The audit jobs for subclasses of base models
            # cover the test cases for base models, so extra audit jobs
            # for base models are not required.
            if module_name == 'base_model':
                continue
            (module, ) = models.Registry.import_models([module_name])
            for member_name, member_obj in inspect.getmembers(module):
                if inspect.isclass(member_obj):
                    clazz = getattr(module, member_name)
                    if clazz.__name__ in MODEL_CLASS_NAMES_TO_EXCLUDE:
                        continue
                    # Only classes with 'Model' somewhere in their MRO are
                    # storage models that need an audit job.
                    all_base_classes = [
                        base_class.__name__ for base_class in inspect.getmro(
                            clazz)]
                    if 'Model' in all_base_classes:
                        names_of_ndb_model_subclasses.append(clazz.__name__)

        names_of_all_audit_job_classes = (
            prod_validation_jobs_one_off.ProdValidationAuditOneOffJobMetaClass
            .get_model_audit_job_names())

        # Audit jobs follow the '<ModelClassName>AuditOneOffJob' naming
        # convention.
        model_class_names_with_missing_audit_jobs = [
            model_class_name
            for model_class_name in names_of_ndb_model_subclasses if (
                model_class_name + 'AuditOneOffJob' not in (
                    names_of_all_audit_job_classes))]

        self.assertFalse(
            model_class_names_with_missing_audit_jobs,
            msg=(
                'Following model classes do not have an audit job: %s' % (
                    (', ').join(model_class_names_with_missing_audit_jobs))))
| apache-2.0 |
seanfisk/powerline | powerline/lint/markedjson/parser.py | 37 | 8124 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.lint.markedjson.error import MarkedError
from powerline.lint.markedjson import tokens
from powerline.lint.markedjson import events
class ParserError(MarkedError):
    """Raised when the token stream cannot be parsed as marked JSON."""
    pass
class Parser:
    """Event-stream parser for the (marked) JSON subset of YAML.

    Consumes tokens via self.get_token/peek_token/check_token and reports
    recoverable problems via self.echoerr — all provided by a scanner/reader
    mix-in elsewhere, not defined on this class. The parser is a state
    machine: self.state holds the next state method, and states push
    continuation states onto self.states.
    """

    def __init__(self):
        self.current_event = None
        self.yaml_version = None
        self.states = []
        self.marks = []
        self.state = self.parse_stream_start

    def dispose(self):
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None

    def check_event(self, *choices):
        # Check the type of the next event: True if it matches any of
        # *choices* (or if any event is pending when no choices are given).
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        if self.current_event is not None:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.current_event, choice):
                    return True
        return False

    def peek_event(self):
        # Get the next event without consuming it.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        return self.current_event

    def get_event(self):
        # Get the next event and proceed further.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        value = self.current_event
        self.current_event = None
        return value

    # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
    # implicit_document ::= block_node DOCUMENT-END*
    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*

    def parse_stream_start(self):
        # Parse the stream start.
        token = self.get_token()
        event = events.StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)

        # Prepare the next state.
        self.state = self.parse_implicit_document_start

        return event

    def parse_implicit_document_start(self):
        # Parse an implicit document (JSON has no explicit document markers).
        if not self.check_token(tokens.StreamEndToken):
            token = self.peek_token()
            start_mark = end_mark = token.start_mark
            event = events.DocumentStartEvent(start_mark, end_mark, explicit=False)

            # Prepare the next state.
            self.states.append(self.parse_document_end)
            self.state = self.parse_node

            return event
        else:
            return self.parse_document_start()

    def parse_document_start(self):
        # Parse an explicit document.
        if not self.check_token(tokens.StreamEndToken):
            # Trailing tokens after the document: report (recoverable) and
            # end the stream anyway.
            token = self.peek_token()
            self.echoerr(
                None, None,
                ('expected \'<stream end>\', but found %r' % token.id), token.start_mark
            )
            return events.StreamEndEvent(token.start_mark, token.end_mark)
        else:
            # Parse the end of the stream.
            token = self.get_token()
            event = events.StreamEndEvent(token.start_mark, token.end_mark)
            assert not self.states
            assert not self.marks
            self.state = None
        return event

    def parse_document_end(self):
        # Parse the document end.
        token = self.peek_token()
        start_mark = end_mark = token.start_mark
        explicit = False
        event = events.DocumentEndEvent(start_mark, end_mark, explicit=explicit)

        # Prepare the next state.
        self.state = self.parse_document_start

        return event

    def parse_document_content(self):
        return self.parse_node()

    def parse_node(self, indentless_sequence=False):
        # Dispatch on the next token: scalar, flow sequence ([) or flow
        # mapping ({). NOTE(review): the None-init + `if start_mark is None`
        # below always takes the branch — looks like leftover from the PyYAML
        # parser this was derived from.
        start_mark = end_mark = None
        if start_mark is None:
            start_mark = end_mark = self.peek_token().start_mark
        event = None
        implicit = True
        if self.check_token(tokens.ScalarToken):
            token = self.get_token()
            end_mark = token.end_mark
            # The implicit pair encodes whether the scalar was plain or quoted.
            if token.plain:
                implicit = (True, False)
            else:
                implicit = (False, True)
            event = events.ScalarEvent(implicit, token.value, start_mark, end_mark, style=token.style)
            self.state = self.states.pop()
        elif self.check_token(tokens.FlowSequenceStartToken):
            end_mark = self.peek_token().end_mark
            event = events.SequenceStartEvent(implicit, start_mark, end_mark, flow_style=True)
            self.state = self.parse_flow_sequence_first_entry
        elif self.check_token(tokens.FlowMappingStartToken):
            end_mark = self.peek_token().end_mark
            event = events.MappingStartEvent(implicit, start_mark, end_mark, flow_style=True)
            self.state = self.parse_flow_mapping_first_key
        else:
            token = self.peek_token()
            raise ParserError(
                'while parsing a flow node', start_mark,
                'expected the node content, but found %r' % token.id,
                token.start_mark
            )
        return event

    def parse_flow_sequence_first_entry(self):
        # Consume '[' and remember its mark for error reporting.
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_sequence_entry(first=True)

    def parse_flow_sequence_entry(self, first=False):
        if not self.check_token(tokens.FlowSequenceEndToken):
            if not first:
                # Every entry after the first must be preceded by a comma.
                if self.check_token(tokens.FlowEntryToken):
                    self.get_token()
                    if self.check_token(tokens.FlowSequenceEndToken):
                        # Trailing comma before ']': recoverable.
                        token = self.peek_token()
                        self.echoerr(
                            'While parsing a flow sequence', self.marks[-1],
                            ('expected sequence value, but got %r' % token.id), token.start_mark
                        )
                else:
                    token = self.peek_token()
                    raise ParserError(
                        'while parsing a flow sequence', self.marks[-1],
                        ('expected \',\' or \']\', but got %r' % token.id), token.start_mark
                    )

            if not self.check_token(tokens.FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_node()
        token = self.get_token()
        event = events.SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_flow_sequence_entry_mapping_end(self):
        self.state = self.parse_flow_sequence_entry
        token = self.peek_token()
        return events.MappingEndEvent(token.start_mark, token.start_mark)

    def parse_flow_mapping_first_key(self):
        # Consume '{' and remember its mark for error reporting.
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_mapping_key(first=True)

    def parse_flow_mapping_key(self, first=False):
        if not self.check_token(tokens.FlowMappingEndToken):
            if not first:
                # Every key after the first must be preceded by a comma.
                if self.check_token(tokens.FlowEntryToken):
                    self.get_token()
                    if self.check_token(tokens.FlowMappingEndToken):
                        # Trailing comma before '}': recoverable.
                        token = self.peek_token()
                        self.echoerr(
                            'While parsing a flow mapping', self.marks[-1],
                            ('expected mapping key, but got %r' % token.id), token.start_mark
                        )
                else:
                    token = self.peek_token()
                    raise ParserError(
                        'while parsing a flow mapping', self.marks[-1],
                        ('expected \',\' or \'}\', but got %r' % token.id), token.start_mark
                    )
            if self.check_token(tokens.KeyToken):
                token = self.get_token()
                if not self.check_token(tokens.ValueToken, tokens.FlowEntryToken, tokens.FlowMappingEndToken):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_node()
                else:
                    token = self.peek_token()
                    raise ParserError(
                        'while parsing a flow mapping', self.marks[-1],
                        ('expected value, but got %r' % token.id), token.start_mark
                    )
            elif not self.check_token(tokens.FlowMappingEndToken):
                # Not a key and not '}': figure out which error to report —
                # a non-string key (next is ':') or a missing colon.
                token = self.peek_token()
                expect_key = self.check_token(tokens.ValueToken, tokens.FlowEntryToken)
                if not expect_key:
                    self.get_token()
                    expect_key = self.check_token(tokens.ValueToken)

                if expect_key:
                    raise ParserError(
                        'while parsing a flow mapping', self.marks[-1],
                        ('expected string key, but got %r' % token.id), token.start_mark
                    )
                else:
                    token = self.peek_token()
                    raise ParserError(
                        'while parsing a flow mapping', self.marks[-1],
                        ('expected \':\', but got %r' % token.id), token.start_mark
                    )
        token = self.get_token()
        event = events.MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_flow_mapping_value(self):
        if self.check_token(tokens.ValueToken):
            token = self.get_token()
            if not self.check_token(tokens.FlowEntryToken, tokens.FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_node()

        token = self.peek_token()
        raise ParserError(
            'while parsing a flow mapping', self.marks[-1],
            ('expected mapping value, but got %r' % token.id), token.start_mark
        )
| mit |
mdavid/teams | markdown/extensions/tables.py | 65 | 3229 | """
Tables Extension for Python-Markdown
====================================
Added parsing of tables to Python-Markdown.
A simple example:
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
Copyright 2009 - [Waylan Limberg](http://achinghead.com)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
from ..util import etree
class TableProcessor(BlockProcessor):
    """ Process Tables. """

    def test(self, parent, block):
        # A table needs a header row, a separator row (made of -, :, | chars),
        # and at least one body row; the first two rows must contain pipes.
        rows = block.split('\n')
        return (len(rows) > 2 and '|' in rows[0] and
                '|' in rows[1] and '-' in rows[1] and
                rows[1].strip()[0] in ['|', ':', '-'])

    def run(self, parent, blocks):
        """ Parse a table block and build table. """
        block = blocks.pop(0).split('\n')
        header = block[0].strip()
        seperator = block[1].strip()
        rows = block[2:]
        # Get format type (bordered by pipes or not)
        border = False
        if header.startswith('|'):
            border = True
        # Get alignment of columns: leading/trailing ':' in the separator
        # cells select left/right/center.
        align = []
        for c in self._split_row(seperator, border):
            if c.startswith(':') and c.endswith(':'):
                align.append('center')
            elif c.startswith(':'):
                align.append('left')
            elif c.endswith(':'):
                align.append('right')
            else:
                align.append(None)
        # Build table
        table = etree.SubElement(parent, 'table')
        thead = etree.SubElement(table, 'thead')
        self._build_row(header, thead, align, border)
        tbody = etree.SubElement(table, 'tbody')
        for row in rows:
            self._build_row(row.strip(), tbody, align, border)

    def _build_row(self, row, parent, align, border):
        """ Given a row of text, build table cells. """
        tr = etree.SubElement(parent, 'tr')
        tag = 'td'
        if parent.tag == 'thead':
            tag = 'th'
        cells = self._split_row(row, border)
        # We use align here rather than cells to ensure every row
        # contains the same number of columns.
        for i, a in enumerate(align):
            c = etree.SubElement(tr, tag)
            try:
                # Short rows get empty cells padded on the right.
                c.text = cells[i].strip()
            except IndexError:
                c.text = ""
            if a:
                c.set('align', a)

    def _split_row(self, row, border):
        """ split a row of text into list of cells. """
        if border:
            # Strip the outer pipes before splitting on the inner ones.
            if row.startswith('|'):
                row = row[1:]
            if row.endswith('|'):
                row = row[:-1]
        return row.split('|')
class TableExtension(Extension):
    """ Add tables to Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Add an instance of TableProcessor to BlockParser. """
        table_processor = TableProcessor(md.parser)
        # Register ahead of the hashheader block processor so table detection
        # runs first.
        md.parser.blockprocessors.add('table', table_processor, '<hashheader')
def makeExtension(configs=None):
    """ Return an instance of the table extension.

    ``configs`` now defaults to None instead of a shared mutable ``{}``
    (mutable-default-argument anti-pattern); a fresh empty dict is
    substituted per call, so behavior for all callers is unchanged.
    """
    return TableExtension(configs=configs if configs is not None else {})
| agpl-3.0 |
LLNL/spack | lib/spack/spack/test/pattern.py | 5 | 2078 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import spack.util.pattern as pattern
@pytest.fixture()
def interface():
    """Returns the interface class for the composite."""
    class Base:
        # Class-level counter mutated by implementations; effectively fresh
        # per test because each fixture invocation defines a new class object.
        counter = 0

        def add(self):
            raise NotImplementedError('add not implemented')

        def subtract(self):
            raise NotImplementedError('subtract not implemented')

    return Base
@pytest.fixture()
def implementation(interface):
    """Returns an implementation of the interface"""

    class Implementation(interface):
        def __init__(self, value):
            self.value = value

        def add(self):
            # Accumulate onto the interface's class-level counter.
            interface.counter = interface.counter + self.value

        def subtract(self):
            interface.counter = interface.counter - self.value

    return Implementation
@pytest.fixture(params=[
    'interface',
    'method_list'
])
def composite(interface, implementation, request):
    """Returns a composite that contains an instance of `implementation(1)`
    and one of `implementation(2)`.
    """
    # Exercise both ways of declaring which methods the composite forwards:
    # from an abstract interface, or from an explicit method list.
    if request.param == 'interface':
        decorate = pattern.composite(interface=interface)
    else:
        decorate = pattern.composite(method_list=['add', 'subtract'])

    @decorate
    class Composite:
        pass

    instance = Composite()
    instance.append(implementation(1))
    instance.append(implementation(2))
    return instance
def test_composite_interface_calls(interface, composite):
    """A composite call fans out to every contained implementation."""
    # Children hold values 1 and 2, so one add() bumps the counter by 3.
    composite.add()
    assert interface.counter == 3
    # Remove the last child (value 2); subtract() now only applies 1.
    composite.pop()
    composite.subtract()
    assert interface.counter == 2
def test_composite_wrong_container(interface):
    """A non-container `container` argument is rejected at decoration time."""
    with pytest.raises(TypeError):
        @pattern.composite(interface=interface, container=2)
        class CompositeFromInterface:
            pass
def test_composite_no_methods():
    """Omitting both `interface` and `method_list` is an error."""
    with pytest.raises(TypeError):
        @pattern.composite()
        class CompositeFromInterface:
            pass
| lgpl-2.1 |
niknow/scipy | scipy/fftpack/setup.py | 102 | 1514 | #!/usr/bin/env python
# Created by Pearu Peterson, August 2002
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for scipy.fftpack."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('fftpack', parent_package, top_path)
    config.add_data_dir('tests')

    # Fortran backends that the extension modules link against.
    dfftpack_src = [join('src/dfftpack', '*.f')]
    config.add_library('dfftpack', sources=dfftpack_src)
    fftpack_src = [join('src/fftpack', '*.f')]
    config.add_library('fftpack', sources=fftpack_src)

    ext_sources = ['fftpack.pyf', 'src/zfft.c', 'src/drfft.c', 'src/zrfft.c',
                   'src/zfftnd.c', 'src/dct.c.src', 'src/dst.c.src']
    config.add_extension('_fftpack',
                         sources=ext_sources,
                         libraries=['dfftpack', 'fftpack'],
                         include_dirs=['src'],
                         depends=(dfftpack_src + fftpack_src))

    config.add_extension('convolve',
                         sources=['convolve.pyf', 'src/convolve.c'],
                         libraries=['dfftpack'],
                         depends=dfftpack_src)
    return config
if __name__ == '__main__':
    # Stand-alone build entry point; numpy.distutils' setup() and the local
    # fftpack_version module only resolve inside the scipy source tree.
    from numpy.distutils.core import setup
    from fftpack_version import fftpack_version
    setup(version=fftpack_version,
          description='fftpack - Discrete Fourier Transform package',
          author='Pearu Peterson',
          author_email='pearu@cens.ioc.ee',
          maintainer_email='scipy-dev@scipy.org',
          license='SciPy License (BSD Style)',
          **configuration(top_path='').todict())
| bsd-3-clause |
bperez77/ensemble_colorization | server/myproject/settings.py | 2 | 3190 | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment (or a secrets store) before any real deployment.
SECRET_KEY = '-q@x+fbn4vl-+qs!*a=+(u%j1w76z_(7re-1*b+yb&a+rj=-&+'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list: while DEBUG is True Django only accepts localhost-style hosts.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-local application.
    'myproject.myapp'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'myproject.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            # Explicit template directory in addition to per-app lookup.
            os.path.join(BASE_DIR, 'myproject', 'myapp', 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'myproject.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Uploaded files are written under MEDIA_ROOT and served at MEDIA_URL.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| mit |
JioEducation/edx-platform | common/test/acceptance/pages/lms/create_mode.py | 148 | 2538 | """Mode creation page (used to add modes to courses during testing)."""
import re
import urllib
from bok_choy.page_object import PageObject
from . import BASE_URL
class ModeCreationPage(PageObject):
    """The mode creation page.

    When allowed by the Django settings file, visiting this page allows modes to be
    created for an existing course.
    """

    def __init__(self, browser, course_id, mode_slug=None, mode_display_name=None,
                 min_price=None, suggested_prices=None, currency=None):
        """The mode creation page is an endpoint for HTTP GET requests.

        By default, it will create an 'honor' mode for the given course with display name
        'Honor Code', a minimum price of 0, no suggested prices, and using USD as the currency.

        Arguments:
            browser (Browser): The browser instance.
            course_id (unicode): The ID of the course for which modes are to be created.

        Keyword Arguments:
            mode_slug (str): The mode to add, either 'honor', 'verified', or 'professional'
            mode_display_name (str): Describes the new course mode
            min_price (int): The minimum price a user must pay to enroll in the new course mode
            suggested_prices (str): Comma-separated prices to suggest to the user.
            currency (str): The currency in which to list prices.
        """
        super(ModeCreationPage, self).__init__(browser)
        self._course_id = course_id

        # Collect only the explicitly-provided options; omitted ones fall back
        # to the server's defaults. The table replaces five copy-pasted
        # `if x is not None:` blocks.
        self._parameters = {}
        for name, value in (('mode_slug', mode_slug),
                            ('mode_display_name', mode_display_name),
                            ('min_price', min_price),
                            ('suggested_prices', suggested_prices),
                            ('currency', currency)):
            if value is not None:
                self._parameters[name] = value

    @property
    def url(self):
        """Construct the mode creation URL."""
        url = '{base}/course_modes/create_mode/{course_id}/'.format(
            base=BASE_URL,
            course_id=self._course_id
        )
        query_string = urllib.urlencode(self._parameters)
        if query_string:
            url += '?' + query_string
        return url

    def is_browser_on_page(self):
        """Return True when the page body confirms the mode was created."""
        message = self.q(css='BODY').text[0]
        # `re.search` yields a match object or None; the previous
        # `True if match else False` was a roundabout bool conversion.
        return re.search(r'Mode ([^$]+) created for ([^$]+).$', message) is not None
| agpl-3.0 |
sxjscience/mxnet | python/mxnet/numpy_extension/utils.py | 3 | 7986 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util functions for the numpy module."""
import ctypes
from ..util import is_np_array, is_np_shape
from ..base import _LIB, check_call, string_types, c_str_array
from ..base import c_handle_array, c_str, mx_uint, NDArrayHandle, py_str
from ..dlpack import ndarray_to_dlpack_for_read, ndarray_to_dlpack_for_write
from ..dlpack import ndarray_from_dlpack, ndarray_from_numpy
from ..numpy import ndarray, array
__all__ = ['save', 'load', 'to_dlpack_for_read', 'to_dlpack_for_write',
'from_dlpack', 'from_numpy']
def save(file, arr):
    """Saves a list of `ndarray`s or a dict of `str`->`ndarray` to file.

    Examples of filenames:

    - ``/path/to/file``
    - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
    - ``hdfs://path/to/file`` (if compiled with HDFS supports)

    Parameters
    ----------
    file : str
        Filename to which the data is saved.
    arr : `ndarray` or list of `ndarray`s or dict of `str` to `ndarray`
        The data to be saved.

    Notes
    -----
    This function can only be called within numpy semantics, i.e.,
    `npx.is_np_shape()` and `npx.is_np_array()` must both return true.
    """
    if not (is_np_shape() and is_np_array()):
        raise ValueError('Cannot save `mxnet.numpy.ndarray` in legacy mode. Please activate'
                         ' numpy semantics by calling `npx.set_np()` in the global scope'
                         ' before calling this function.')
    # A single ndarray is treated as a one-element, unnamed list.
    if isinstance(arr, ndarray):
        arr = [arr]
    if isinstance(arr, dict):
        if not all(isinstance(k, string_types) for k in arr.keys()) or \
                not all(isinstance(v, ndarray) for v in arr.values()):
            raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
        keys = c_str_array(arr.keys())
        handles = c_handle_array(arr.values())
    elif isinstance(arr, list):
        if not all(isinstance(v, ndarray) for v in arr):
            raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
        # No keys: arrays are stored positionally and load() returns a list.
        keys = None
        handles = c_handle_array(arr)
    else:
        raise ValueError("data needs to either be a ndarray, dict of (str, ndarray) pairs "
                         "or a list of ndarrays.")
    check_call(_LIB.MXNDArraySave(c_str(file),
                                  mx_uint(len(handles)),
                                  handles,
                                  keys))
def load(file):
    """Loads an array from file.

    See more details in ``save``.

    Parameters
    ----------
    file : str
        The filename.

    Returns
    -------
    result : list of ndarrays or dict of str -> ndarray
        Data stored in the file.

    Notes
    -----
    This function can only be called within numpy semantics, i.e.,
    `npx.is_np_shape()` and `npx.is_np_array()` must both return true.
    """
    if not (is_np_shape() and is_np_array()):
        raise ValueError('Cannot load `mxnet.numpy.ndarray` in legacy mode. Please activate'
                         ' numpy semantics by calling `npx.set_np()` in the global scope'
                         ' before calling this function.')
    if not isinstance(file, string_types):
        raise TypeError('file required to be a string')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(file),
                                  ctypes.byref(out_size),
                                  ctypes.byref(handles),
                                  ctypes.byref(out_name_size),
                                  ctypes.byref(names)))
    if out_name_size.value == 0:
        # Arrays were saved without names: return them positionally.
        return [ndarray(NDArrayHandle(handles[i])) for i in range(out_size.value)]
    assert out_name_size.value == out_size.value
    return {py_str(names[i]): ndarray(NDArrayHandle(handles[i]))
            for i in range(out_size.value)}
from_dlpack = ndarray_from_dlpack(ndarray)
from_dlpack_doc = """Returns a np.ndarray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
np.ndarray
an ndarray backed by a dlpack tensor
Examples
--------
>>> x = mx.np.ones((2,3))
>>> y = mx.npx.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.npx.from_dlpack(y)
>>> type(z)
<class 'mxnet.numpy.ndarray'>
>>> z
array([[1., 1., 1.],
[1., 1., 1.]])
>>> w = mx.npx.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.npx.from_dlpack(w)
>>> u += 1
>>> x
array([[2., 2., 2.],
[2., 2., 2.]])
"""
from_dlpack.__doc__ = from_dlpack_doc
from_numpy = ndarray_from_numpy(ndarray, array)
from_numpy_doc = """Returns an MXNet's np.ndarray backed by numpy's ndarray.
When `zero_copy` is set to be true,
this API consumes numpy's ndarray and produces MXNet's np.ndarray
without having to copy the content. In this case, we disallow
users to modify the given numpy ndarray, and it is suggested
not to read the numpy ndarray as well for internal correctness.
Parameters
----------
ndarray: np.ndarray
input data
zero_copy: bool
Whether we use DLPack's zero-copy conversion to convert to MXNet's
np.ndarray.
This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOUS] == True.
Returns
-------
np.ndarray
a np.ndarray backed by a dlpack tensor
"""
from_numpy.__doc__ = from_numpy_doc
to_dlpack_for_read = ndarray_to_dlpack_for_read()
to_dlpack_for_read_doc = """Returns a reference view of np.ndarray that represents
as DLManagedTensor until all previous write operations on the current array are finished.
Parameters
----------
data: np.ndarray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of ndarray that represents as DLManagedTensor.
Examples
--------
>>> x = mx.np.ones((2,3))
>>> y = mx.npx.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.npx.from_dlpack(y)
>>> z
array([[1., 1., 1.],
[1., 1., 1.]])
"""
to_dlpack_for_read.__doc__ = to_dlpack_for_read_doc
to_dlpack_for_write = ndarray_to_dlpack_for_write()
to_dlpack_for_write_doc = """Returns a reference view of ndarray that represents
as DLManagedTensor until all previous read/write operations on the current array are finished.
Parameters
----------
data: np.ndarray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of np.ndarray that represents as DLManagedTensor.
Examples
--------
>>> x = mx.np.ones((2,3))
>>> w = mx.npx.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.npx.from_dlpack(w)
>>> u += 1
>>> x
array([[2., 2., 2.],
[2., 2., 2.]])
"""
to_dlpack_for_write.__doc__ = to_dlpack_for_write_doc
| apache-2.0 |
wuhengzhi/chromium-crosswalk | build/print_python_deps.py | 8 | 3498 | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints all non-system dependencies for the given module.
The primary use-case for this script is to genererate the list of python modules
required for .isolate files.
"""
import argparse
import imp
import os
import pipes
import sys
# Don't use any helper modules, or else they will end up in the results.
# Absolute path of the directory one level above this file's directory
# (i.e. the chromium src/ root, since this file lives in build/).
_SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def _ComputePythonDependencies():
  """Gets the paths of imported non-system python modules.

  A path is assumed to be a "system" import if it is outside of chromium's
  src/. The paths will be relative to the current directory.
  """
  src_paths = set()
  for module in sys.modules.values():
    if not module or not hasattr(module, '__file__'):
      continue
    path = module.__file__
    # Skip this script itself.
    if path == __file__:
      continue
    path = os.path.abspath(path)
    if not path.startswith(_SRC_ROOT):
      continue
    if path.endswith('.pyc'):
      # Record the .py source next to a compiled module.
      path = path[:-1]
    src_paths.add(path)
  return src_paths
def _NormalizeCommandLine(options):
  """Returns a string that when run from SRC_ROOT replicates the command."""
  def _rel(p):
    # All recorded paths are made relative to the src/ root.
    return os.path.relpath(p, _SRC_ROOT)

  args = ['build/print_python_deps.py']
  root = _rel(options.root)
  if root != '.':
    args += ['--root', root]
  if options.output:
    args += ['--output', _rel(options.output)]
  for whitelist in sorted(options.whitelists):
    args += ['--whitelist', _rel(whitelist)]
  args.append(_rel(options.module))
  return ' '.join(pipes.quote(arg) for arg in args)
def _FindPythonInDirectory(directory):
"""Returns an iterable of all non-test python files in the given directory."""
files = []
for root, _dirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.endswith('.py') and not filename.endswith('_test.py'):
yield os.path.join(root, filename)
def main():
  """Command-line entry point: print/write the python deps of a module."""
  parser = argparse.ArgumentParser(
      description='Prints all non-system dependencies for the given module.')
  parser.add_argument('module',
                      help='The python module to analyze.')
  parser.add_argument('--root', default='.',
                      help='Directory to make paths relative to.')
  parser.add_argument('--output',
                      help='Write output to a file rather than stdout.')
  # NOTE(review): with action='append', argparse appends onto this shared
  # default list object; harmless for a single parse_args() call.
  parser.add_argument('--whitelist', default=[], action='append',
                      dest='whitelists',
                      help='Recursively include all non-test python files '
                      'within this directory. May be specified multiple times.')
  options = parser.parse_args()
  # Importing the target module populates sys.modules with everything it
  # transitively imports; that is what _ComputePythonDependencies reads.
  sys.path.append(os.path.dirname(options.module))
  imp.load_source('NAME', options.module)
  paths_set = _ComputePythonDependencies()
  for path in options.whitelists:
    paths_set.update(os.path.abspath(p) for p in _FindPythonInDirectory(path))
  # Emit paths relative to --root, preceded by the normalized command line
  # so the output file documents how to regenerate itself.
  paths = [os.path.relpath(p, options.root) for p in paths_set]
  normalized_cmdline = _NormalizeCommandLine(options)
  out = open(options.output, 'w') if options.output else sys.stdout
  with out:
    out.write('# Generated by running:\n')
    out.write('# %s\n' % normalized_cmdline)
    for path in sorted(paths):
      out.write(path + '\n')


if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
yigong/AY250 | hw2/virt_env/virt1/lib/python2.7/distutils/__init__.py | 1211 | 3983 | import os
import sys
import warnings
import imp
import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory
dirname = os.path.dirname

# Locate the real (system) distutils package next to the stdlib's `opcode`
# module, then graft it onto this stub package's search path.
distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
    # Bug fix: the message contained a '%s' placeholder but never supplied
    # an argument, so the path was emitted literally as "%s".
    warnings.warn(
        "The virtualenv distutils package at %s appears to be in the same location as the system distutils?"
        % distutils_path)
else:
    __path__.insert(0, distutils_path)
    real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY))
    # Copy the relevant attributes
    try:
        __revision__ = real_distutils.__revision__
    except AttributeError:
        pass
    __version__ = real_distutils.__version__

from distutils import dist, sysconfig
# Python 3 compatibility: `basestring` only exists on Python 2, so alias
# it to `str` when missing.
try:
    basestring
except NameError:
    basestring = str
## patch build_ext (distutils doesn't know how to get the libs directory
## path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == 'win32':
    from distutils.command.build_ext import build_ext as old_build_ext

    class build_ext(old_build_ext):
        # Prepend the real Python installation's "Libs" directory so
        # extensions link against the base install's import libraries.
        def finalize_options(self):
            if self.library_dirs is None:
                self.library_dirs = []
            elif isinstance(self.library_dirs, basestring):
                # distutils also accepts a pathsep-delimited string here.
                self.library_dirs = self.library_dirs.split(os.pathsep)
            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
            old_build_ext.finalize_options(self)

    # Replace the stock command class with the patched one.
    from distutils.command import build_ext as build_ext_module
    build_ext_module.build_ext = build_ext
## distutils.dist patches:
# Capture the original before replacing it on the class below.
old_find_config_files = dist.Distribution.find_config_files

def find_config_files(self):
    """Replacement for Distribution.find_config_files.

    Drops any per-user pydistutils.cfg found by the stock implementation
    and substitutes the one inside the virtualenv's sys.prefix, if present.
    """
    found = old_find_config_files(self)
    # NOTE(review): computed but unused since the insert below was
    # commented out; kept for reference.
    system_distutils = os.path.join(distutils_path, 'distutils.cfg')
    #if os.path.exists(system_distutils):
    #    found.insert(0, system_distutils)
    # What to call the per-user config file
    if os.name == 'posix':
        user_filename = ".pydistutils.cfg"
    else:
        user_filename = "pydistutils.cfg"
    # Look for it inside the virtualenv prefix rather than $HOME.
    user_filename = os.path.join(sys.prefix, user_filename)
    if os.path.isfile(user_filename):
        for item in list(found):
            if item.endswith('pydistutils.cfg'):
                found.remove(item)
        found.append(user_filename)
    return found

dist.Distribution.find_config_files = find_config_files
## distutils.sysconfig patches:
old_get_python_inc = sysconfig.get_python_inc

def sysconfig_get_python_inc(plat_specific=0, prefix=None):
    # Default to the real interpreter's prefix so C headers are found in
    # the base installation rather than the virtualenv.
    if prefix is None:
        prefix = sys.real_prefix
    return old_get_python_inc(plat_specific, prefix)

sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc

old_get_python_lib = sysconfig.get_python_lib

def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
    # Only standard-library lookups are redirected to the real prefix;
    # site-packages stays inside the virtualenv.
    if standard_lib and prefix is None:
        prefix = sys.real_prefix
    return old_get_python_lib(plat_specific, standard_lib, prefix)

sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib

old_get_config_vars = sysconfig.get_config_vars

def sysconfig_get_config_vars(*args):
    real_vars = old_get_config_vars(*args)
    if sys.platform == 'win32':
        # Windows builds need LIBDIR pointing at the real install's libs.
        lib_dir = os.path.join(sys.real_prefix, "libs")
        if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
            real_vars['LIBDIR'] = lib_dir # asked for all
        elif isinstance(real_vars, list) and 'LIBDIR' in args:
            real_vars = real_vars + [lib_dir] # asked for list
    return real_vars

sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
| mit |
ericbaze/continuum_code_2012 | pydata/moin/pythonenv/local/lib/python2.7/encodings/mac_farsi.py | 593 | 15426 | """ Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-farsi codec backed by the charmap tables in this module."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding keeps no cross-call state."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding keeps no cross-call state."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
# Stream wrapper: inherits encode() from Codec; no extra state needed.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
# Stream wrapper: inherits decode() from Codec; no extra state needed.
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used by Python's codec registry."""
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-farsi',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE, left-right
u'!' # 0x21 -> EXCLAMATION MARK, left-right
u'"' # 0x22 -> QUOTATION MARK, left-right
u'#' # 0x23 -> NUMBER SIGN, left-right
u'$' # 0x24 -> DOLLAR SIGN, left-right
u'%' # 0x25 -> PERCENT SIGN, left-right
u'&' # 0x26 -> AMPERSAND, left-right
u"'" # 0x27 -> APOSTROPHE, left-right
u'(' # 0x28 -> LEFT PARENTHESIS, left-right
u')' # 0x29 -> RIGHT PARENTHESIS, left-right
u'*' # 0x2A -> ASTERISK, left-right
u'+' # 0x2B -> PLUS SIGN, left-right
u',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
u'-' # 0x2D -> HYPHEN-MINUS, left-right
u'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
u'/' # 0x2F -> SOLIDUS, left-right
u'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
u'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
u'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
u':' # 0x3A -> COLON, left-right
u';' # 0x3B -> SEMICOLON, left-right
u'<' # 0x3C -> LESS-THAN SIGN, left-right
u'=' # 0x3D -> EQUALS SIGN, left-right
u'>' # 0x3E -> GREATER-THAN SIGN, left-right
u'?' # 0x3F -> QUESTION MARK, left-right
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
u'\\' # 0x5C -> REVERSE SOLIDUS, left-right
u']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
u'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
u'_' # 0x5F -> LOW LINE, left-right
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET, left-right
u'|' # 0x7C -> VERTICAL LINE, left-right
u'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
u'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0x9B -> DIVISION SIGN, right-left
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u' ' # 0xA0 -> SPACE, right-left
u'!' # 0xA1 -> EXCLAMATION MARK, right-left
u'"' # 0xA2 -> QUOTATION MARK, right-left
u'#' # 0xA3 -> NUMBER SIGN, right-left
u'$' # 0xA4 -> DOLLAR SIGN, right-left
u'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
u'&' # 0xA6 -> AMPERSAND, right-left
u"'" # 0xA7 -> APOSTROPHE, right-left
u'(' # 0xA8 -> LEFT PARENTHESIS, right-left
u')' # 0xA9 -> RIGHT PARENTHESIS, right-left
u'*' # 0xAA -> ASTERISK, right-left
u'+' # 0xAB -> PLUS SIGN, right-left
u'\u060c' # 0xAC -> ARABIC COMMA
u'-' # 0xAD -> HYPHEN-MINUS, right-left
u'.' # 0xAE -> FULL STOP, right-left
u'/' # 0xAF -> SOLIDUS, right-left
u'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
u'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
u'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
u'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
u'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
u'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
u'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
u'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
u'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
u'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
u':' # 0xBA -> COLON, right-left
u'\u061b' # 0xBB -> ARABIC SEMICOLON
u'<' # 0xBC -> LESS-THAN SIGN, right-left
u'=' # 0xBD -> EQUALS SIGN, right-left
u'>' # 0xBE -> GREATER-THAN SIGN, right-left
u'\u061f' # 0xBF -> ARABIC QUESTION MARK
u'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
u'\u0628' # 0xC8 -> ARABIC LETTER BEH
u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xCA -> ARABIC LETTER TEH
u'\u062b' # 0xCB -> ARABIC LETTER THEH
u'\u062c' # 0xCC -> ARABIC LETTER JEEM
u'\u062d' # 0xCD -> ARABIC LETTER HAH
u'\u062e' # 0xCE -> ARABIC LETTER KHAH
u'\u062f' # 0xCF -> ARABIC LETTER DAL
u'\u0630' # 0xD0 -> ARABIC LETTER THAL
u'\u0631' # 0xD1 -> ARABIC LETTER REH
u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
u'\u0635' # 0xD5 -> ARABIC LETTER SAD
u'\u0636' # 0xD6 -> ARABIC LETTER DAD
u'\u0637' # 0xD7 -> ARABIC LETTER TAH
u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
u'\u0639' # 0xD9 -> ARABIC LETTER AIN
u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
u'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
u'\\' # 0xDC -> REVERSE SOLIDUS, right-left
u']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
u'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
u'_' # 0xDF -> LOW LINE, right-left
u'\u0640' # 0xE0 -> ARABIC TATWEEL
u'\u0641' # 0xE1 -> ARABIC LETTER FEH
u'\u0642' # 0xE2 -> ARABIC LETTER QAF
u'\u0643' # 0xE3 -> ARABIC LETTER KAF
u'\u0644' # 0xE4 -> ARABIC LETTER LAM
u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
u'\u0646' # 0xE6 -> ARABIC LETTER NOON
u'\u0647' # 0xE7 -> ARABIC LETTER HEH
u'\u0648' # 0xE8 -> ARABIC LETTER WAW
u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEA -> ARABIC LETTER YEH
u'\u064b' # 0xEB -> ARABIC FATHATAN
u'\u064c' # 0xEC -> ARABIC DAMMATAN
u'\u064d' # 0xED -> ARABIC KASRATAN
u'\u064e' # 0xEE -> ARABIC FATHA
u'\u064f' # 0xEF -> ARABIC DAMMA
u'\u0650' # 0xF0 -> ARABIC KASRA
u'\u0651' # 0xF1 -> ARABIC SHADDA
u'\u0652' # 0xF2 -> ARABIC SUKUN
u'\u067e' # 0xF3 -> ARABIC LETTER PEH
u'\u0679' # 0xF4 -> ARABIC LETTER TTEH
u'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
u'\u06d5' # 0xF6 -> ARABIC LETTER AE
u'\u06a4' # 0xF7 -> ARABIC LETTER VEH
u'\u06af' # 0xF8 -> ARABIC LETTER GAF
u'\u0688' # 0xF9 -> ARABIC LETTER DDAL
u'\u0691' # 0xFA -> ARABIC LETTER RREH
u'{' # 0xFB -> LEFT CURLY BRACKET, right-left
u'|' # 0xFC -> VERTICAL LINE, right-left
u'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
u'\u0698' # 0xFE -> ARABIC LETTER JEH
u'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
# Built mechanically as the inverse of decoding_table above.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
ppasq/geonode | geonode/themes/migrations/0001_initial.py | 3 | 3871 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-15 00:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for the geonode_themes app: one theme-customization
    # model, one Partner model, and the M2M link between them.
    # NOTE(review): the b'...' defaults/upload_to values are Python 2
    # byte-string artifacts from the autogenerator -- do not edit a shipped
    # migration; changes belong in a new migration.
    initial = True
    dependencies = [
    ]
    operations = [
        # Theme record: branding images, jumbotron texts, colors and the
        # contact-details block rendered in the site footer.
        migrations.CreateModel(
            name='GeoNodeThemeCustomization',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('identifier', models.CharField(editable=False, max_length=255)),
                ('name', models.CharField(max_length=100)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('is_enabled', models.BooleanField(default=False)),
                ('logo', models.ImageField(blank=True, null=True, upload_to=b'img/%Y/%m')),
                ('jumbotron_bg', models.ImageField(blank=True, null=True, upload_to=b'img/%Y/%m')),
                ('jumbotron_welcome_hide', models.BooleanField(default=False)),
                ('jumbotron_welcome_title', models.CharField(blank=True, max_length=255, null=True)),
                ('jumbotron_welcome_content', models.TextField(blank=True, null=True)),
                ('jumbotron_site_description', models.TextField(blank=True, null=True)),
                ('body_color', models.CharField(default=b'#333333', max_length=10)),
                ('navbar_color', models.CharField(default=b'#333333', max_length=10)),
                ('jumbotron_color', models.CharField(default=b'#2c689c', max_length=10)),
                ('copyright_color', models.CharField(default=b'#2c689c', max_length=10)),
                ('contactus', models.BooleanField(default=False)),
                ('copyright', models.TextField(blank=True, null=True)),
                ('contact_name', models.TextField(blank=True, null=True)),
                ('contact_position', models.TextField(blank=True, null=True)),
                ('contact_administrative_area', models.TextField(blank=True, null=True)),
                ('contact_street', models.TextField(blank=True, null=True)),
                ('contact_postal_code', models.TextField(blank=True, null=True)),
                ('contact_city', models.TextField(blank=True, null=True)),
                ('contact_country', models.TextField(blank=True, null=True)),
                ('contact_delivery_point', models.TextField(blank=True, null=True)),
                ('contact_voice', models.TextField(blank=True, null=True)),
                ('contact_facsimile', models.TextField(blank=True, null=True)),
                ('contact_email', models.TextField(blank=True, null=True)),
                ('partners_title', models.CharField(blank=True, default=b'Our Partners', max_length=100, null=True)),
            ],
            options={
                'ordering': ('date',),
                'verbose_name_plural': 'Themes',
            },
        ),
        # Partner logos/links shown by the theme.
        migrations.CreateModel(
            name='Partner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('logo', models.ImageField(blank=True, null=True, upload_to=b'img/%Y/%m')),
                ('name', models.CharField(max_length=100)),
                ('title', models.CharField(max_length=255)),
                ('href', models.CharField(max_length=255)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name_plural': 'Partners',
            },
        ),
        # Added after both models exist because it references Partner.
        migrations.AddField(
            model_name='geonodethemecustomization',
            name='partners',
            field=models.ManyToManyField(blank=True, related_name='partners', to='geonode_themes.Partner'),
        ),
    ]
| gpl-3.0 |
pombredanne/algos-urv | django/contrib/auth/backends.py | 9 | 4446 | from django.db import connection
from django.contrib.auth.models import User, Permission
class ModelBackend(object):
    """
    Authenticates against django.contrib.auth.models.User.
    """
    supports_object_permissions = False
    supports_anonymous_user = True

    # TODO: Model, login attribute name and password attribute name should be
    # configurable.
    def authenticate(self, username=None, password=None):
        """Return the User matching *username*/*password*, or None."""
        try:
            candidate = User.objects.get(username=username)
        except User.DoesNotExist:
            return None
        if candidate.check_password(password):
            return candidate
        return None

    def get_group_permissions(self, user_obj):
        """
        Returns a set of permission strings that this user has through his/her
        groups.  Cached on the user object after the first call.
        """
        if not hasattr(user_obj, '_group_perm_cache'):
            perms = Permission.objects.filter(group__user=user_obj
                ).values_list('content_type__app_label', 'codename'
                ).order_by()
            user_obj._group_perm_cache = set("%s.%s" % pair for pair in perms)
        return user_obj._group_perm_cache

    def get_all_permissions(self, user_obj):
        """Union of the user's own and group permissions (cached per user)."""
        if user_obj.is_anonymous():
            return set()
        if not hasattr(user_obj, '_perm_cache'):
            granted = set(u"%s.%s" % (p.content_type.app_label, p.codename)
                          for p in user_obj.user_permissions.select_related())
            granted.update(self.get_group_permissions(user_obj))
            user_obj._perm_cache = granted
        return user_obj._perm_cache

    def has_perm(self, user_obj, perm):
        """True if *perm* ('app_label.codename') is granted to the user."""
        return perm in self.get_all_permissions(user_obj)

    def has_module_perms(self, user_obj, app_label):
        """
        Returns True if user_obj has any permissions in the given app_label.
        """
        # Permission strings are always 'app_label.codename'.
        return any(perm.split('.', 1)[0] == app_label
                   for perm in self.get_all_permissions(user_obj))

    def get_user(self, user_id):
        """Fetch a User by primary key; None if it does not exist."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
class RemoteUserBackend(ModelBackend):
    """
    Backend for use together with ``RemoteUserMiddleware``, for deployments
    where authentication happens outside of Django (e.g. in the web server).

    By default ``authenticate`` creates ``User`` objects for usernames not
    yet in the database; subclasses may set ``create_unknown_user = False``
    to disable that.
    """
    # Create a User object if not already in the database?
    create_unknown_user = True

    def authenticate(self, remote_user):
        """
        The username passed as ``remote_user`` is considered trusted.  Returns
        the ``User`` with that username, creating it when
        ``create_unknown_user`` is True; otherwise returns None when no such
        user exists.  A falsy *remote_user* always yields None.
        """
        if not remote_user:
            return None
        username = self.clean_username(remote_user)
        if not self.create_unknown_user:
            try:
                return User.objects.get(username=username)
            except User.DoesNotExist:
                return None
        # get_or_create has built-in safeguards for multiple threads, which
        # is why it is preferred over a try/except around User.objects.get.
        user, created = User.objects.get_or_create(username=username)
        return self.configure_user(user) if created else user

    def clean_username(self, username):
        """
        Hook to clean the raw remote username before lookup/creation.
        Identity by default.
        """
        return username

    def configure_user(self, user):
        """
        Hook to finish configuring a newly-created user.
        Returns the user unmodified by default.
        """
        return user
| bsd-3-clause |
edonyM/toolkitem | fileprocess/mergefile/packages/__init__.py | 1 | 1096 | # -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-05-12 16:43
#
# Filename: __init__.py
#
# Description: All Rights Are Reserved
#
"""
from __future__ import absolute_import
from . import filesline
| mit |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/mpl_toolkits/axes_grid1/anchored_artists.py | 10 | 12803 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import docstring
from matplotlib.offsetbox import (AnchoredOffsetbox, AnchoredText,
AnnotationBbox, AuxTransformBox, DrawingArea,
TextArea, VPacker)
from matplotlib.patches import Rectangle, Ellipse
class AnchoredDrawingArea(AnchoredOffsetbox):
    @docstring.dedent
    def __init__(self, width, height, xdescent, ydescent,
                 loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
                 **kwargs):
        """
        An anchored container with a fixed pixel size.

        Artists added to :attr:`drawing_area` have their coordinates
        interpreted as pixels; any transforms set on them are overridden.

        Parameters
        ----------
        width, height : int or float
            Size of the container, in pixels.
        xdescent, ydescent : int or float
            Descent of the container in the x- and y- direction, in pixels.
        loc : int
            Location code (1='upper right', 2='upper left', 3='lower left',
            4='lower right', 5='right', 6='center left', 7='center right',
            8='lower center', 9='upper center', 10='center').
        pad : int or float, optional
            Padding around the children, in fraction of the font size.
        borderpad : int or float, optional
            Border padding, in fraction of the font size.
        prop : `matplotlib.font_manager.FontProperties`, optional
            Font property used as a reference for paddings.
        frameon : bool, optional
            If True, draw a box around this artist.
        **kwargs :
            Forwarded to :class:`matplotlib.offsetbox.AnchoredOffsetbox`.

        Attributes
        ----------
        drawing_area : `matplotlib.offsetbox.DrawingArea`
            The container artists should be added to.

        Examples
        --------
        >>> ada = AnchoredDrawingArea(20, 20, 0, 0, loc=1, frameon=False)
        >>> ada.drawing_area.add_artist(Circle((10, 10), 10, fc="b"))
        >>> ax.add_artist(ada)
        """
        area = DrawingArea(width, height, xdescent, ydescent)
        self.da = area
        self.drawing_area = area
        # NOTE(review): *prop* is accepted but not forwarded (children are
        # positioned in raw pixels) -- identical to the original behaviour.
        super(AnchoredDrawingArea, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=area, prop=None,
            frameon=frameon, **kwargs)
class AnchoredAuxTransformBox(AnchoredOffsetbox):
    @docstring.dedent
    def __init__(self, transform, loc,
                 pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
        """
        An anchored container whose children are drawn in *transform*
        coordinates; its dimensions grow to contain whatever is added.

        Parameters
        ----------
        transform : `matplotlib.transforms.Transform`
            Coordinate system for the children, e.g. ``ax.transData``.
        loc : int
            Location code (1='upper right', 2='upper left', 3='lower left',
            4='lower right', 5='right', 6='center left', 7='center right',
            8='lower center', 9='upper center', 10='center').
        pad : int or float, optional
            Padding around the children, in fraction of the font size.
        borderpad : int or float, optional
            Border padding, in fraction of the font size.
        prop : `matplotlib.font_manager.FontProperties`, optional
            Font property used as a reference for paddings.
        frameon : bool, optional
            If True, draw a box around this artist.
        **kwargs :
            Forwarded to :class:`matplotlib.offsetbox.AnchoredOffsetbox`.

        Attributes
        ----------
        drawing_area : `matplotlib.offsetbox.AuxTransformBox`
            The container artists should be added to.

        Examples
        --------
        >>> box = AnchoredAuxTransformBox(ax.transData, loc=2)
        >>> box.drawing_area.add_artist(
        ...     Ellipse((0, 0), width=0.1, height=0.4, angle=30))
        >>> ax.add_artist(box)
        """
        box = AuxTransformBox(transform)
        self.drawing_area = box
        super(AnchoredAuxTransformBox, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=box,
            prop=prop, frameon=frameon, **kwargs)
class AnchoredEllipse(AnchoredOffsetbox):
    @docstring.dedent
    def __init__(self, transform, width, height, angle, loc,
                 pad=0.1, borderpad=0.1, prop=None, frameon=True, **kwargs):
        """
        Draw an anchored ellipse of a given size.

        Parameters
        ----------
        transform : `matplotlib.transforms.Transform`
            Coordinate system for *width*/*height*, e.g. ``ax.transData``.
        width, height : int or float
            Size of the ellipse, in *transform* coordinates.
        angle : int or float
            Rotation of the ellipse, in degrees, anti-clockwise.
        loc : int
            Location code (1='upper right', 2='upper left', 3='lower left',
            4='lower right', 5='right', 6='center left', 7='center right',
            8='lower center', 9='upper center', 10='center').
        pad : int or float, optional
            Padding around the ellipse, in fraction of the font size.
        borderpad : int or float, optional
            Border padding, in fraction of the font size.
        frameon : bool, optional
            If True, draw a box around the ellipse.
        prop : `matplotlib.font_manager.FontProperties`, optional
            Font property used as a reference for paddings.
        **kwargs :
            Forwarded to :class:`matplotlib.offsetbox.AnchoredOffsetbox`.

        Attributes
        ----------
        ellipse : `matplotlib.patches.Ellipse`
            Ellipse patch drawn.
        """
        self.ellipse = Ellipse((0, 0), width, height, angle)
        box = AuxTransformBox(transform)
        box.add_artist(self.ellipse)
        self._box = box
        super(AnchoredEllipse, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=box,
            prop=prop, frameon=frameon, **kwargs)
class AnchoredSizeBar(AnchoredOffsetbox):
    @docstring.dedent
    def __init__(self, transform, size, label, loc,
                 pad=0.1, borderpad=0.1, sep=2,
                 frameon=True, size_vertical=0, color='black',
                 label_top=False, fontproperties=None,
                 **kwargs):
        """
        Draw a horizontal scale bar with a center-aligned label.

        Parameters
        ----------
        transform : `matplotlib.transforms.Transform`
            Coordinate system for *size*, e.g. ``ax.transData``.
        size : int or float
            Horizontal length of the size bar, in *transform* coordinates.
        label : str
            Label to display.
        loc : int
            Location code (1='upper right', 2='upper left', 3='lower left',
            4='lower right', 5='right', 6='center left', 7='center right',
            8='lower center', 9='upper center', 10='center').
        pad : int or float, optional
            Padding around the label and size bar, in fraction of the font
            size.
        borderpad : int or float, optional
            Border padding, in fraction of the font size.
        sep : int or float, optional
            Separation between the label and the size bar, in points.
        frameon : bool, optional
            If True, draw a box around the bar and label.
        size_vertical : int or float, optional
            Vertical thickness of the bar, in *transform* coordinates.
        color : str, optional
            Color for the size bar and label.
        label_top : bool, optional
            If True, place the label above the size bar.
        fontproperties : `matplotlib.font_manager.FontProperties`, optional
            Font properties for the label text.
        **kwargs :
            Forwarded to :class:`matplotlib.offsetbox.AnchoredOffsetbox`.

        Attributes
        ----------
        size_bar : `matplotlib.offsetbox.AuxTransformBox`
            Container for the size bar.
        txt_label : `matplotlib.offsetbox.TextArea`
            Container for the label of the size bar.

        Notes
        -----
        If *prop* is passed as a keyword argument but *fontproperties* is
        not, *prop* is assumed to be the intended *fontproperties*.  Using
        both *prop* and *fontproperties* is not supported.

        Examples
        --------
        >>> bar = AnchoredSizeBar(ax.transData, 3, '3 data units', 4)
        >>> ax.add_artist(bar)
        """
        bar = AuxTransformBox(transform)
        bar.add_artist(Rectangle((0, 0), size, size_vertical,
                                 fill=False, facecolor=color,
                                 edgecolor=color))
        self.size_bar = bar
        # Accept legacy *prop* as an alias for *fontproperties*.
        if fontproperties is None and 'prop' in kwargs:
            fontproperties = kwargs.pop('prop')
        textprops = {'color': color}
        if fontproperties is not None:
            textprops['fontproperties'] = fontproperties
        self.txt_label = TextArea(label,
                                  minimumdescent=False,
                                  textprops=textprops)
        children = ([self.txt_label, bar] if label_top
                    else [bar, self.txt_label])
        self._box = VPacker(children=children,
                            align="center",
                            pad=0, sep=sep)
        super(AnchoredSizeBar, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=self._box,
            prop=fontproperties, frameon=frameon, **kwargs)
| bsd-3-clause |
hendradarwin/VTK | Examples/Rendering/Python/assembly.py | 42 | 2694 | #!/usr/bin/env python
# This example demonstrates the use of vtkAssembly. In an assembly,
# the motion of one actor affects the position of other actors.
import vtk
# Build four parts: a top-level cylinder plus a sphere, a cube and a cone.
# Each part is a mapper/actor pair carrying material properties and
# placement for one piece of the assembly.
def _actor_for(source):
    """Return a vtkActor whose mapper is wired to *source*'s output port."""
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    return actor

sphereActor = _actor_for(vtk.vtkSphereSource())
sphereActor.SetOrigin(2, 1, 3)
sphereActor.RotateY(6)
sphereActor.SetPosition(2.25, 0, 0)
sphereActor.GetProperty().SetColor(1, 0, 1)

cubeActor = _actor_for(vtk.vtkCubeSource())
cubeActor.SetPosition(0.0, .25, 0)
cubeActor.GetProperty().SetColor(0, 0, 1)

coneActor = _actor_for(vtk.vtkConeSource())
coneActor.SetPosition(0, 0, .25)
coneActor.GetProperty().SetColor(0, 1, 0)

# Top part of the assembly; polygon offset avoids z-fighting where
# coincident geometry overlaps.
cylinderActor = _actor_for(vtk.vtkCylinderSource())
cylinderActor.GetMapper().SetResolveCoincidentTopologyToPolygonOffset()
cylinderActor.GetProperty().SetColor(1, 0, 0)

# Group the four parts: moving the assembly moves them all together.
assembly = vtk.vtkAssembly()
for part in (cylinderActor, sphereActor, cubeActor, coneActor):
    assembly.AddPart(part)
assembly.SetOrigin(5, 10, 15)
assembly.AddPosition(5, 0, 0)
assembly.RotateX(15)

# Standard rendering pipeline: renderer, window, interactor.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# The cone actor is added both via the assembly and directly, as in the
# original example, so it appears twice in the scene.
ren.AddActor(assembly)
ren.AddActor(coneActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(200, 200)

# Camera tuned to frame the assembly.
camera = vtk.vtkCamera()
camera.SetClippingRange(21.9464, 30.0179)
camera.SetFocalPoint(3.49221, 2.28844, -0.970866)
camera.SetPosition(3.49221, 2.28844, 24.5216)
camera.SetViewAngle(30)
camera.SetViewUp(0, 1, 0)
ren.SetActiveCamera(camera)

iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause |
anneline/Bika-LIMS | bika/lims/utils/__init__.py | 1 | 13899 | from time import time
from AccessControl import ModuleSecurityInfo, allow_module
from bika.lims import logger
from bika.lims.browser import BrowserView
from DateTime import DateTime
from email import Encoders
from email.MIMEBase import MIMEBase
from plone.memoize import ram
from plone.registry.interfaces import IRegistry
from Products.Archetypes.public import DisplayList
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from zope.component import queryUtility
from zope.i18n import translate
from zope.i18n.locales import locales
import App
import Globals
import os
import re
import urllib2
# Allow restricted (through-the-web) code to use email.Utils.formataddr and
# the csv module.
ModuleSecurityInfo('email.Utils').declarePublic('formataddr')
allow_module('csv')
def to_utf8(text):
    """Return *text* as UTF-8 encoded bytes; ``None`` becomes ``''``."""
    normalized = '' if text is None else text
    return safe_unicode(normalized).encode('utf-8')
def to_unicode(text):
    """Return *text* as a unicode string; ``None`` becomes ``''``."""
    normalized = '' if text is None else text
    return safe_unicode(normalized)
def t(i18n_msg):
    """Safely translate a zope i18n msgid (as returned by a
    bikaMessageFactory ``_``) and return it UTF-8 encoded.
    """
    translated = translate(i18n_msg)
    return to_utf8(translated)
# Wrapper for PortalTransport's sendmail - unclear why its sendmail
# method is marked private upstream, hence this public wrapper.
ModuleSecurityInfo('Products.bika.utils').declarePublic('sendmail')
# Protected( Publish, 'sendmail')
def sendmail(portal, from_addr, to_addrs, msg):
    """Send *msg* from *from_addr* to *to_addrs* via the portal's
    ``portal_mailspool`` tool."""
    portal.portal_mailspool.sendmail(from_addr, to_addrs, msg)
class js_log(BrowserView):
    def __call__(self, message):
        """Record a browser-supplied string in the info log."""
        self.logger.info(message)
class js_err(BrowserView):
    def __call__(self, message):
        """Record a browser-supplied string in the error log."""
        self.logger.error(message)
# Expose printfile to restricted (through-the-web) code.
ModuleSecurityInfo('Products.bika.utils').declarePublic('printfile')
def printfile(portal, from_addr, to_addrs, msg):
    """Stub for sending a document to a printer.

    The historical implementation set a file path and shelled out to
    ``lpr``, e.g.::

        temp_path = 'C:/Zope2/Products/Bika/version.txt'
        os.system('lpr "%s"' %temp_path)

    Currently a no-op kept for API compatibility.
    """
    pass
def _cache_key_getUsers(method, context, roles=[], allow_empty=True):
key = time() // (60 * 60), roles, allow_empty
return key
@ram.cache(_cache_key_getUsers)
def getUsers(context, roles, allow_empty=True):
    """Return a DisplayList of ``(userid, fullname)`` pairs for members in
    the specified list of *roles*, sorted by fullname.

    :param context: any content object (used to look up portal_membership)
    :param roles: role names to search members for
    :param allow_empty: when True, prepend an empty ``['', '']`` choice
    """
    mtool = getToolByName(context, 'portal_membership')
    pairs = [['', '']] if allow_empty else []
    for user in mtool.searchForMembers(roles=roles):
        uid = user.getId()
        # Fall back to the user id when no fullname property is set.
        fullname = user.getProperty('fullname') or uid
        pairs.append((uid, fullname))
    # Key-based sort instead of the removed-in-Python-3 cmp-style sort;
    # ordering (ascending by fullname, stable) is unchanged.
    pairs.sort(key=lambda pair: pair[1])
    return DisplayList(pairs)
def isActive(obj):
    """Return False when *obj* is inactive or cancelled, True otherwise.

    Checks a direct state attribute first (cheap), then falls back to the
    portal_workflow tool, defaulting to 'active' when the workflow variable
    is missing.
    """
    wf = getToolByName(obj, 'portal_workflow')
    if (hasattr(obj, 'inactive_state') and obj.inactive_state == 'inactive') or \
       wf.getInfoFor(obj, 'inactive_state', 'active') == 'inactive':
        return False
    # BUGFIX: the original compared obj.inactive_state here (copy-paste
    # error), so a cancelled object with an 'active' inactive_state was
    # reported as active.
    if (hasattr(obj, 'cancellation_state') and obj.cancellation_state == 'cancelled') or \
       wf.getInfoFor(obj, 'cancellation_state', 'active') == 'cancelled':
        return False
    return True
def formatDateQuery(context, date_id):
    """Build a catalog date-range query dict from the request parameters
    ``<date_id>_fromdate`` / ``<date_id>_todate``.

    Returns ``{}`` when neither bound is present.
    """
    request = context.REQUEST
    start = request.get('%s_fromdate' % date_id, None)
    if start:
        start = start + ' 00:00'
    end = request.get('%s_todate' % date_id, None)
    if end:
        end = end + ' 23:59'
    if start and end:
        return {'query': [start, end], 'range': 'min:max'}
    if start or end:
        return {'query': start or end,
                'range': 'min' if start else 'max'}
    return {}
def formatDateParms(context, date_id):
    """Render the ``<date_id>_fromdate`` / ``<date_id>_todate`` request
    parameters as a printable 'from X to Y' phrase.

    Returns ``{}`` (the original sentinel) when neither date is present.
    """
    start = context.REQUEST.get('%s_fromdate' % date_id, None)
    end = context.REQUEST.get('%s_todate' % date_id, None)
    if start and end:
        return 'from %s to %s' % (start, end)
    if start:
        return 'from %s' % (start)
    if end:
        return 'to %s' % (end)
    return {}
def formatDuration(context, totminutes):
    """Format a duration in minutes in a compact manner, e.g. ``3h24m``.

    Zero components are omitted ('' for 0 minutes total).  *context* is
    unused but kept for interface compatibility.
    """
    # divmod with integer semantics; the original computed
    # (totminutes - mins) / 60, which under Python 3 true division yields
    # a float and renders as '1.0h'.
    hours, mins = divmod(totminutes, 60)
    hours_str = '%sh' % hours if hours else ''
    mins_str = '%sm' % mins if mins else ''
    return '%s%s' % (hours_str, mins_str)
def formatDecimalMark(value, decimalmark='.'):
    """Swap the decimal/thousand marks of an input string.

    Assumes *value* uses '.' as decimal mark and ',' as thousand mark;
    when *decimalmark* is ',' the two are exchanged, otherwise *value*
    is returned unchanged.
    """
    if decimalmark != ',':
        return value
    placeholder = '[comma]'
    swapped = value.replace('.', placeholder).replace(',', '.')
    return swapped.replace(placeholder, ',')
# encode_header function copied from roundup's rfc2822 package.
# Headers consisting solely of these printable ASCII characters may be sent
# verbatim; anything else is quoted-printable encoded (RFC 2047 style).
hqre = re.compile(r'^[A-z0-9!"#$%%&\'()*+,-./:;<=>?@\[\]^_`{|}~ ]+$')
ModuleSecurityInfo('Products.bika.utils').declarePublic('encode_header')
def encode_header(header, charset='utf-8'):
    """Encode *header* as an RFC 2047 quoted-printable encoded-word,
    but only when it contains characters outside the safe ASCII set
    (see ``hqre``); empty or all-safe headers pass through unchanged.
    """
    if not header:
        return header
    if hqre.match(header):
        return header
    pieces = []
    for ch in header:
        if ch == ' ':
            # Space may be represented as _ instead of =20 for readability.
            pieces.append('_')
        elif hqre.match(ch):
            # Safe character: include verbatim.
            pieces.append(ch)
        else:
            # Everything else becomes a hex escape like =E2.
            pieces.append("=%02X" % ord(ch))
    return '=?%s?q?%s?=' % (charset, ''.join(pieces))
def zero_fill(matchobj):
    """``re.sub`` callback: left-pad the matched digit run with zeros to
    8 characters so titles containing numbers sort naturally."""
    digits = matchobj.group()
    return digits.zfill(8)
# Digit runs normalised by sortable_title via zero_fill above.
num_sort_regex = re.compile('\d+')
ModuleSecurityInfo('Products.bika.utils').declarePublic('sortable_title')
def sortable_title(portal, title):
    """Convert title to sortable title

    Lowercases and strips the title, zero-pads embedded numbers so they
    sort numerically, and truncates to 30 characters.  The truncation is
    attempted against the site charset first, then latin-1, then utf-8.
    Returns '' for a falsy title.
    """
    if not title:
        return ''
    def_charset = portal.plone_utils.getSiteEncoding()
    sortabletitle = title.lower().strip()
    # Replace numbers with zero filled numbers
    sortabletitle = num_sort_regex.sub(zero_fill, sortabletitle)
    # Truncate to prevent bloat
    for charset in [def_charset, 'latin-1', 'utf-8']:
        try:
            # Decode with the candidate charset, truncate at the unicode
            # level, then re-encode in the site charset.
            sortabletitle = safe_unicode(sortabletitle, charset)[:30]
            sortabletitle = sortabletitle.encode(def_charset or 'utf-8')
            break
        except UnicodeError:
            # Wrong charset guess -- try the next candidate.
            pass
        except TypeError:
            # If we get a TypeError if we already have a unicode string
            sortabletitle = sortabletitle[:30]
            break
    return sortabletitle
def logged_in_client(context, member=None):
    """Return the Client object the (authenticated) *member* owns, if the
    member belongs to the 'Clients' group; otherwise None.

    When several clients list the member as Owner, the last match wins
    (same as the original implementation).
    """
    if not member:
        membership_tool = getToolByName(context, 'portal_membership')
        member = membership_tool.getAuthenticatedMember()
    groups_tool = context.portal_groups
    member_groups = [groups_tool.getGroupById(group.id).getGroupName()
                     for group in groups_tool.getGroupsByUserId(member.id)]
    client = None
    if 'Clients' in member_groups:
        for candidate in context.clients.objectValues("Client"):
            if member.id in candidate.users_with_local_role('Owner'):
                client = candidate
    return client
def changeWorkflowState(content, wf_id, state_id, acquire_permissions=False,
                        portal_workflow=None, **kw):
    """Change the workflow state of an object
    @param content: Content obj which state will be changed
    @param state_id: name of the state to put on content
    @param acquire_permissions: True->All permissions unchecked and on roles
                                and acquired
                                False->Applies new state security map
    @param portal_workflow: Provide workflow tool (optimisation) if known
    @param kw: change the values of same name of the state mapping
    @return: None
    """
    if portal_workflow is None:
        portal_workflow = getToolByName(content, 'portal_workflow')
    # Might raise IndexError if no workflow is associated to this type
    found_wf = 0
    for wf_def in portal_workflow.getWorkflowsFor(content):
        if wf_id == wf_def.getId():
            found_wf = 1
            break
    if not found_wf:
        # NOTE(review): only logs and carries on; wf_def below is then the
        # last workflow iterated (or unbound when there are none) -- confirm
        # this is intended before relying on it.
        logger.error("%s: Cannot find workflow id %s" % (content, wf_id))
    # Base status mapping written into the workflow history.
    wf_state = {
        'action': None,
        'actor': None,
        'comments': "Setting state to %s" % state_id,
        'review_state': state_id,
        'time': DateTime(),
    }
    # Updating wf_state from keyword args
    for k in kw.keys():
        # Remove unknown items
        if k not in wf_state:
            del kw[k]
    # review_state must stay the requested state_id; never overridable.
    if 'review_state' in kw:
        del kw['review_state']
    wf_state.update(kw)
    portal_workflow.setStatusOf(wf_id, content, wf_state)
    if acquire_permissions:
        # Acquire all permissions
        for permission in content.possible_permissions():
            content.manage_permission(permission, acquire=1)
    else:
        # Setting new state permissions
        wf_def.updateRoleMappingsFor(content)
    # Map changes to the catalogs
    content.reindexObject(idxs=['allowedRolesAndUsers', 'review_state'])
    return
def tmpID():
    """Return a random 32-character hexadecimal identifier."""
    import binascii
    random_bytes = os.urandom(16)
    return binascii.hexlify(random_bytes)
def isnumber(s):
    """True when *s* can be parsed by ``float()``, False on ValueError.

    Non-string/non-numeric types still raise TypeError, as in the original.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def createPdf(htmlreport, outfile=None, css=None):
    """Render *htmlreport* (an HTML string) to PDF with weasyprint and
    return the PDF bytes.

    :param outfile: target path; a temp file under INSTANCE_HOME/var when
        omitted
    :param css: optional stylesheet -- a local path, or an http(s) URL which
        is downloaded first
    In debug mode the source HTML is also dumped to INSTANCE_HOME/var.
    """
    debug_mode = App.config.getConfiguration().debug_mode
    # XXX css must be a local file - urllib fails under robotframework tests.
    css_def = ''
    if css:
        if css.startswith("http://") or css.startswith("https://"):
            # Download remote stylesheet into the instance var directory.
            u = urllib2.urlopen(css)
            _cssfile = Globals.INSTANCE_HOME + '/var/' + tmpID() + '.css'
            # with-blocks close the handles even on error (the original
            # leaked the read handles).
            with open(_cssfile, 'w') as localFile:
                localFile.write(u.read())
        else:
            _cssfile = css
        with open(_cssfile, 'r') as cssfile:
            css_def = cssfile.read()
    if not outfile:
        outfile = Globals.INSTANCE_HOME + "/var/" + tmpID() + ".pdf"
    from weasyprint import HTML, CSS
    if css:
        HTML(string=htmlreport, encoding='utf-8').write_pdf(
            outfile, stylesheets=[CSS(string=css_def)])
    else:
        HTML(string=htmlreport, encoding='utf-8').write_pdf(outfile)
    if debug_mode:
        htmlfilepath = Globals.INSTANCE_HOME + "/var/" + tmpID() + ".html"
        with open(htmlfilepath, 'w') as htmlfile:
            htmlfile.write(htmlreport)
    with open(outfile, 'r') as result:
        return result.read()
def attachPdf(mimemultipart, pdfreport, filename=None):
    """Attach *pdfreport* (PDF bytes) to a MIME multipart message as a
    base64-encoded application/pdf part; *filename* defaults to a random
    temporary id."""
    attachment = MIMEBase('application', "application/pdf")
    attachment.add_header(
        'Content-Disposition',
        'attachment; filename="%s.pdf"' % (filename or tmpID()))
    attachment.set_payload(pdfreport)
    Encoders.encode_base64(attachment)
    mimemultipart.attach(attachment)
def get_invoice_item_description(obj):
    """Build the invoice line description for an AnalysisRequest
    ('<sampletype> <samplepoint>') or a SupplyOrder (comma-joined product
    titles).  Other portal types raise, as in the original.
    """
    if obj.portal_type == 'AnalysisRequest':
        sample = obj.getSample()
        point = sample.getSamplePoint()
        point_title = point.Title() if point else ''
        stype = sample.getSampleType()
        stype_title = stype.Title() if stype else ''
        description = stype_title + ' ' + point_title
    elif obj.portal_type == 'SupplyOrder':
        titles = [item.getProduct().Title()
                  for item in obj.folderlistingFolderContents()]
        description = ', '.join(titles)
    return description
def currency_format(context, locale):
    """Return a formatter closure rendering amounts with the site currency
    symbol for the given *locale* id, e.g. ``'R 12.50'``."""
    locale_obj = locales.getLocale(locale)
    currency = context.bika_setup.getCurrency()
    symbol = locale_obj.numbers.currencies[currency].symbol
    def format(val):
        return '%s %0.2f' % (symbol, val)
    return format
def getHiddenAttributesForClass(classname):
    """Return the attribute names configured as hidden for *classname* in
    the 'bika.lims.hiddenattributes' registry record, or [] when the class
    is not listed or the registry is unavailable.

    Each registry entry is a sequence whose first item is the class name
    and whose remaining items are the hidden attribute names.
    """
    try:
        registry = queryUtility(IRegistry)
        hiddenattributes = registry.get('bika.lims.hiddenattributes', ())
        if hiddenattributes is not None:
            for alist in hiddenattributes:
                if alist[0] == classname:
                    return alist[1:]
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); also fixes the 'Probem' typo in the message.
        logger.warning(
            'Problem accessing optionally hidden attributes in registry')
    return []
def isAttributeHidden(classname, fieldname):
    """True when *fieldname* is configured as hidden for *classname* in the
    'bika.lims.hiddenattributes' registry record; False otherwise or when
    the registry is unavailable.
    """
    try:
        registry = queryUtility(IRegistry)
        hiddenattributes = registry.get('bika.lims.hiddenattributes', ())
        if hiddenattributes is not None:
            for alist in hiddenattributes:
                if alist[0] == classname:
                    return fieldname in alist[1:]
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); also fixes the 'Probem' typo in the message.
        logger.warning(
            'Problem accessing optionally hidden attributes in registry')
    return False
def dicts_to_dict(dictionaries, key_subfieldname):
    """Index a list of dictionaries by the value of *key_subfieldname*.

    key_subfieldname must exist in each entry and have a value, which is
    used as the key of the resulting dictionary.  When a key appears more
    than once, the later entry overwrites the earlier one.
    """
    return dict((entry[key_subfieldname], entry) for entry in dictionaries)
peterbe/peekaboo | vendor-local/lib/python/sorl/thumbnail/engines/pil_engine.py | 9 | 2767 | from cStringIO import StringIO
from sorl.thumbnail.engines.base import EngineBase
try:
from PIL import Image, ImageFile, ImageDraw
except ImportError:
import Image, ImageFile, ImageDraw
class Engine(EngineBase):
    # Thumbnail engine implemented on top of PIL.
    def get_image(self, source):
        # Read the whole upstream file into memory and let PIL sniff the
        # format from the buffer.
        buf = StringIO(source.read())
        return Image.open(buf)
    def get_image_size(self, image):
        # (width, height) in pixels.
        return image.size
    def is_valid_image(self, raw_data):
        # verify() checks file integrity without decoding pixel data; any
        # exception means the bytes are not a readable image.
        buf = StringIO(raw_data)
        try:
            trial_image = Image.open(buf)
            trial_image.verify()
        except Exception:
            return False
        return True
    def _orientation(self, image):
        # Normalise the image according to the EXIF Orientation tag
        # (0x0112) so the pixels appear upright.  _getexif is only present
        # on formats with EXIF support (e.g. JPEG), hence the AttributeError
        # guard.
        try:
            exif = image._getexif()
        except AttributeError:
            exif = None
        if exif:
            orientation = exif.get(0x0112)
            # Values 2-8 encode the standard EXIF mirror/rotate cases;
            # 1 (or missing) means already upright and falls through.
            if orientation == 2:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 3:
                image = image.rotate(180)
            elif orientation == 4:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            elif orientation == 5:
                image = image.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 6:
                image = image.rotate(-90)
            elif orientation == 7:
                image = image.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 8:
                image = image.rotate(90)
        return image
    def _colorspace(self, image, colorspace):
        # Convert to the requested colorspace; unknown names pass through.
        if colorspace == 'RGB':
            if image.mode == 'RGBA':
                return image # RGBA is just RGB + Alpha
            # Palette images with transparency keep it via RGBA.
            if image.mode == 'P' and 'transparency' in image.info:
                return image.convert('RGBA')
            return image.convert('RGB')
        if colorspace == 'GRAY':
            # 'L' is PIL's 8-bit grayscale mode.
            return image.convert('L')
        return image
    def _scale(self, image, width, height):
        # Antialiased resize to exactly (width, height).
        return image.resize((width, height), resample=Image.ANTIALIAS)
    def _crop(self, image, width, height, x_offset, y_offset):
        # PIL crop box is (left, upper, right, lower).
        return image.crop((x_offset, y_offset,
                           width + x_offset, height + y_offset))
    def _get_raw_data(self, image, format_, quality, progressive=False):
        # Serialise *image* to bytes in the given format/quality.
        # Progressive JPEGs need a large encode buffer, hence MAXBLOCK.
        ImageFile.MAXBLOCK = 1024 * 1024
        buf = StringIO()
        params = {
            'format': format_,
            'quality': quality,
            'optimize': 1,
        }
        if format_ == 'JPEG' and progressive:
            params['progressive'] = True
        try:
            image.save(buf, **params)
        except IOError:
            # Some images fail to save with optimize=1; retry without it.
            params.pop('optimize')
            image.save(buf, **params)
        raw_data = buf.getvalue()
        buf.close()
        return raw_data
| mpl-2.0 |
jmighion/ansible | test/units/modules/network/eos/test_eos_user.py | 20 | 4062 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_user
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosUserModule(TestEosModule):
    """Unit tests for the eos_user Ansible module.

    Uses TestEosModule.execute_module() with the device config I/O
    mocked out, so the commands the module would send can be checked
    against the eos_user_config.cfg fixture.
    """

    module = eos_user

    def setUp(self):
        # Patch the module's config helpers so no real device is contacted.
        self.mock_get_config = patch('ansible.modules.network.eos.eos_user.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.eos.eos_user.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # The fixture defines an existing 'ansible' user; tests below rely
        # on that to exercise update / no-op / purge paths.
        self.get_config.return_value = load_fixture('eos_user_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')

    def test_eos_user_create(self):
        set_module_args(dict(name='test', nopassword=True))
        commands = ['username test nopassword']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_delete(self):
        set_module_args(dict(name='ansible', state='absent'))
        commands = ['no username ansible']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_password(self):
        set_module_args(dict(name='ansible', configured_password='test'))
        commands = ['username ansible secret test']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_privilege(self):
        set_module_args(dict(name='ansible', privilege=15, configured_password='test'))
        result = self.execute_module(changed=True)
        self.assertIn('username ansible privilege 15', result['commands'])

    def test_eos_user_privilege_invalid(self):
        # 25 is outside the accepted privilege range, so the module must fail.
        set_module_args(dict(name='ansible', privilege=25, configured_password='test'))
        self.execute_module(failed=True)

    def test_eos_user_purge(self):
        # purge removes users present on the device but absent from the task.
        set_module_args(dict(purge=True))
        commands = ['no username ansible']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_role(self):
        set_module_args(dict(name='ansible', role='test', configured_password='test'))
        result = self.execute_module(changed=True)
        self.assertIn('username ansible role test', result['commands'])

    def test_eos_user_sshkey(self):
        set_module_args(dict(name='ansible', sshkey='test'))
        commands = ['username ansible sshkey test']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_update_password_changed(self):
        # 'test' is a new user, so on_create still sets the password.
        set_module_args(dict(name='test', configured_password='test', update_password='on_create'))
        commands = ['username test secret test']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_update_password_on_create_ok(self):
        # 'ansible' already exists, so on_create leaves the password alone.
        set_module_args(dict(name='ansible', configured_password='test', update_password='on_create'))
        self.execute_module()

    def test_eos_user_update_password_always(self):
        # 'always' re-sets the password even for an existing user.
        set_module_args(dict(name='ansible', configured_password='test', update_password='always'))
        commands = ['username ansible secret test']
        self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
diegoguimaraes/django | django/db/backends/mysql/client.py | 32 | 1506 | import subprocess
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
    """Launches the ``mysql`` command-line client for a connection."""

    executable_name = 'mysql'

    @classmethod
    def settings_to_cmd_args(cls, settings_dict):
        """Translate a Django DATABASES entry into mysql CLI arguments.

        Values in OPTIONS take precedence over the top-level settings
        (NAME, USER, PASSWORD, HOST, PORT).
        """
        options = settings_dict['OPTIONS']
        db = options.get('db', settings_dict['NAME'])
        user = options.get('user', settings_dict['USER'])
        passwd = options.get('passwd', settings_dict['PASSWORD'])
        host = options.get('host', settings_dict['HOST'])
        port = options.get('port', settings_dict['PORT'])
        cert = options.get('ssl', {}).get('ca')
        defaults_file = options.get('read_default_file')

        args = [cls.executable_name]
        # Seems to be no good way to set sql_mode with CLI.
        if defaults_file:
            args.append("--defaults-file=%s" % defaults_file)
        if user:
            args.append("--user=%s" % user)
        if passwd:
            args.append("--password=%s" % passwd)
        if host:
            if '/' in host:
                # A path-like host means a local UNIX socket.
                args.append("--socket=%s" % host)
            else:
                args.append("--host=%s" % host)
        if port:
            args.append("--port=%s" % port)
        if cert:
            args.append("--ssl-ca=%s" % cert)
        if db:
            args.append(db)
        return args

    def runshell(self):
        """Open an interactive mysql shell for this connection."""
        cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        subprocess.call(cmd)
| bsd-3-clause |
mgerhardy/fips | colorama/win32.py | 32 | 4918 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
# Win32 standard-stream ids used throughout this module.
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Not on Windows (ctypes.WinDLL is missing) or ctypes unavailable:
    # export a no-op SetConsoleTextAttribute so importers still work.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import (
        byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
    )

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", wintypes._COORD),
            ("dwCursorPosition", wintypes._COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", wintypes._COORD),
        ]

        def __str__(self):
            # Flat numeric dump of all fields; handy for debugging.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )

    # Declare argtypes/restype for each kernel32 entry point we use so
    # ctypes marshals arguments correctly.
    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE

    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL

    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL

    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        wintypes._COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL

    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        wintypes._COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL

    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        wintypes._COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL

    # Resolve the console handles once at import time.
    handles = {
        STDOUT: _GetStdHandle(STDOUT),
        STDERR: _GetStdHandle(STDERR),
    }

    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        """Return the CONSOLE_SCREEN_BUFFER_INFO for the given stream.

        NOTE(review): the BOOL result of the API call is ignored, so a
        failed call returns a zero-initialized struct.
        """
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return csbi

    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the text attributes (colors) of the given stream."""
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)

    def SetConsoleCursorPosition(stream_id, position):
        """Move the cursor to ``position``, a 1-based ANSI-style
        (row, column) pair; out-of-range positions are ignored."""
        position = wintypes._COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1)
        # Adjust for viewport's scroll position
        sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
        adjusted_position.Y += sr.Top
        adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)

    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write ``char`` ``length`` times starting at coordinate
        ``start``; return the number of cells actually written."""
        handle = handles[stream_id]
        char = c_char(char)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value

    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
| mit |
js0701/chromium-crosswalk | tools/grit/grit/tclib_unittest.py | 25 | 8079 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.tclib'''
import sys
import os.path
if __name__ == '__main__':
    # When run directly, put the grit package root on sys.path so the
    # 'grit' imports below resolve from a source checkout.
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import types
import unittest
from grit import tclib
from grit import exception
import grit.extern.tclib
class TclibUnittest(unittest.TestCase):
    """Tests for grit.tclib Message / Translation / Placeholder handling."""

    def testInit(self):
        msg = tclib.Message(text=u'Hello Earthlings',
                            description='Greetings\n\t message')
        self.failUnlessEqual(msg.GetPresentableContent(), 'Hello Earthlings')
        self.failUnless(isinstance(msg.GetPresentableContent(), types.StringTypes))
        # Whitespace runs in the description collapse to a single space.
        self.failUnlessEqual(msg.GetDescription(), 'Greetings message')

    def testGetAttr(self):
        msg = tclib.Message()
        msg.AppendText(u'Hello')  # Tests __getattr__
        self.failUnless(msg.GetPresentableContent() == 'Hello')
        self.failUnless(isinstance(msg.GetPresentableContent(), types.StringTypes))

    def testAll(self):
        # Both Message and Translation should present placeholders the same way.
        text = u'Howdie USERNAME'
        phs = [tclib.Placeholder(u'USERNAME', u'%s', 'Joi')]
        msg = tclib.Message(text=text, placeholders=phs)
        self.failUnless(msg.GetPresentableContent() == 'Howdie USERNAME')

        trans = tclib.Translation(text=text, placeholders=phs)
        self.failUnless(trans.GetPresentableContent() == 'Howdie USERNAME')
        self.failUnless(isinstance(trans.GetPresentableContent(), types.StringTypes))

    def testUnicodeReturn(self):
        # Non-ASCII text must round-trip unchanged.
        text = u'\u00fe'
        msg = tclib.Message(text=text)
        self.failUnless(msg.GetPresentableContent() == text)
        from_list = msg.GetContent()[0]
        self.failUnless(from_list == text)

    def testRegressionTranslationInherited(self):
        '''Regression tests a bug that was caused by grit.tclib.Translation
        inheriting from the translation console's Translation object
        instead of only owning an instance of it.
        '''
        msg = tclib.Message(text=u"BLA1\r\nFrom: BLA2 \u00fe BLA3",
                            placeholders=[
                                tclib.Placeholder('BLA1', '%s', '%s'),
                                tclib.Placeholder('BLA2', '%s', '%s'),
                                tclib.Placeholder('BLA3', '%s', '%s')])
        transl = tclib.Translation(text=msg.GetPresentableContent(),
                                   placeholders=msg.GetPlaceholders())
        content = transl.GetContent()
        self.failUnless(isinstance(content[3], types.UnicodeType))

    def testFingerprint(self):
        # Pins the message-id hash of a large fixed text; any change to the
        # GenerateMessageId algorithm would alter the expected value below.
        # This has Windows line endings. That is on purpose.
        id = grit.extern.tclib.GenerateMessageId(
            'Google Desktop for Enterprise\r\n'
            'All Rights Reserved\r\n'
            '\r\n'
            '---------\r\n'
            'Contents\r\n'
            '---------\r\n'
            'This distribution contains the following files:\r\n'
            '\r\n'
            'GoogleDesktopSetup.msi - Installation and setup program\r\n'
            'GoogleDesktop.adm - Group Policy administrative template file\r\n'
            'AdminGuide.pdf - Google Desktop for Enterprise administrative guide\r\n'
            '\r\n'
            '\r\n'
            '--------------\r\n'
            'Documentation\r\n'
            '--------------\r\n'
            'Full documentation and installation instructions are in the \r\n'
            'administrative guide, and also online at \r\n'
            'http://desktop.google.com/enterprise/adminguide.html.\r\n'
            '\r\n'
            '\r\n'
            '------------------------\r\n'
            'IBM Lotus Notes Plug-In\r\n'
            '------------------------\r\n'
            'The Lotus Notes plug-in is included in the release of Google \r\n'
            'Desktop for Enterprise. The IBM Lotus Notes Plug-in for Google \r\n'
            'Desktop indexes mail, calendar, task, contact and journal \r\n'
            'documents from Notes. Discussion documents including those from \r\n'
            'the discussion and team room templates can also be indexed by \r\n'
            'selecting an option from the preferences. Once indexed, this data\r\n'
            'will be returned in Google Desktop searches. The corresponding\r\n'
            'document can be opened in Lotus Notes from the Google Desktop \r\n'
            'results page.\r\n'
            '\r\n'
            'Install: The plug-in will install automatically during the Google \r\n'
            'Desktop setup process if Lotus Notes is already installed. Lotus \r\n'
            'Notes must not be running in order for the install to occur. \r\n'
            '\r\n'
            'Preferences: Preferences and selection of databases to index are\r\n'
            'set in the \'Google Desktop for Notes\' dialog reached through the \r\n'
            '\'Actions\' menu.\r\n'
            '\r\n'
            'Reindexing: Selecting \'Reindex all databases\' will index all the \r\n'
            'documents in each database again.\r\n'
            '\r\n'
            '\r\n'
            'Notes Plug-in Known Issues\r\n'
            '---------------------------\r\n'
            '\r\n'
            'If the \'Google Desktop for Notes\' item is not available from the \r\n'
            'Lotus Notes Actions menu, then installation was not successful. \r\n'
            'Installation consists of writing one file, notesgdsplugin.dll, to \r\n'
            'the Notes application directory and a setting to the notes.ini \r\n'
            'configuration file. The most likely cause of an unsuccessful \r\n'
            'installation is that the installer was not able to locate the \r\n'
            'notes.ini file. Installation will complete if the user closes Notes\r\n'
            'and manually adds the following setting to this file on a new line:\r\n'
            'AddinMenus=notegdsplugin.dll\r\n'
            '\r\n'
            'If the notesgdsplugin.dll file is not in the application directory\r\n'
            '(e.g., C:\Program Files\Lotus\Notes) after Google Desktop \r\n'
            'installation, it is likely that Notes was not installed correctly. \r\n'
            '\r\n'
            'Only local databases can be indexed. If they can be determined, \r\n'
            'the user\'s local mail file and address book will be included in the\r\n'
            'list automatically. Mail archives and other databases must be \r\n'
            'added with the \'Add\' button.\r\n'
            '\r\n'
            'Some users may experience performance issues during the initial \r\n'
            'indexing of a database. The \'Perform the initial index of a \r\n'
            'database only when I\'m idle\' option will limit the indexing process\r\n'
            'to times when the user is not using the machine. If this does not \r\n'
            'alleviate the problem or the user would like to continually index \r\n'
            'but just do so more slowly or quickly, the GoogleWaitTime notes.ini\r\n'
            'value can be set. Increasing the GoogleWaitTime value will slow \r\n'
            'down the indexing process, and lowering the value will speed it up.\r\n'
            'A value of zero causes the fastest possible indexing. Removing the\r\n'
            'ini parameter altogether returns it to the default (20).\r\n'
            '\r\n'
            'Crashes have been known to occur with certain types of history \r\n'
            'bookmarks. If the Notes client seems to crash randomly, try \r\n'
            'disabling the \'Index note history\' option. If it crashes before,\r\n'
            'you can get to the preferences, add the following line to your \r\n'
            'notes.ini file:\r\n'
            'GDSNoIndexHistory=1\r\n')
        self.failUnless(id == '3138901326664699350')

    def testPlaceholderNameChecking(self):
        try:
            ph = tclib.Placeholder('BINGO BONGO', 'bla', 'bla')
            raise Exception("We shouldn't get here")
        except exception.InvalidPlaceholderName:
            pass  # Expect exception to be thrown because presentation contained space

    def testTagsWithCommonSubstring(self):
        # Placeholder names are prefixes of one another; the longest match
        # must win when substituting real content.
        word = 'ABCDEFGHIJ'
        text = ' '.join([word[:i] for i in range(1, 11)])
        phs = [tclib.Placeholder(word[:i], str(i), str(i)) for i in range(1, 11)]
        try:
            msg = tclib.Message(text=text, placeholders=phs)
            self.failUnless(msg.GetRealContent() == '1 2 3 4 5 6 7 8 9 10')
        except:
            self.fail('tclib.Message() should handle placeholders that are '
                      'substrings of each other')
if __name__ == '__main__':
    # Allow running this test file directly.
    unittest.main()
| bsd-3-clause |
alfkjartan/nvgimu | nvg/testing/inspection.py | 3 | 1156 | """
Utilities for code introspection.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
import inspect
def getImplementations(module, superclass):
    """
    Get algorithm implementations from a module.

    @return: All concrete subclasses of the superclass found in the module.
    """
    def _isConcreteClass(obj):
        # Keep classes only, and skip abstract base classes.
        return inspect.isclass(obj) and not inspect.isabstract(obj)

    for _name, candidate in inspect.getmembers(module, _isConcreteClass):
        if issubclass(candidate, superclass):
            yield candidate
| gpl-3.0 |
rudij7/android_kernel_oneplus_one | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """build_ext that redirects its output into the directories supplied
    by the perf Makefile (module-level build_lib / build_tmp below)."""

    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Override the distutils-computed paths with the Makefile's.
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """install_lib that installs from the Makefile-chosen build dir."""

    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Install from where our build_ext class put the extension.
        self.build_dir = build_lib
# Compiler flags for the extension; extended with whatever the caller
# exports in CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Output directories exported by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# util/python-ext-sources lists one C source per line; '#' lines are
# comments.  (Uses the Python 2 'file' builtin.)
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
SummerLW/Perf-Insight-Report | third_party/gsutil/third_party/boto/boto/route53/record.py | 136 | 14689 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
    """
    A list of resource records.

    :ivar hosted_zone_id: The ID of the hosted zone.
    :ivar comment: A comment that will be stored with the change.
    :ivar changes: A list of changes.
    """

    ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeBatch>
<Comment>%(comment)s</Comment>
<Changes>%(changes)s</Changes>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>"""

    ChangeXML = """<Change>
<Action>%(action)s</Action>
%(record)s
</Change>"""

    def __init__(self, connection=None, hosted_zone_id=None, comment=None):
        self.connection = connection
        self.hosted_zone_id = hosted_zone_id
        self.comment = comment
        self.changes = []
        # Pagination cursors, populated by endElement() when Route53
        # truncates a listing; consumed by __iter__().
        self.next_record_name = None
        self.next_record_type = None
        self.next_record_identifier = None
        super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)])

    def __repr__(self):
        # NOTE(review): the repr string lacks a closing '>'.
        if self.changes:
            record_list = ','.join([c.__repr__() for c in self.changes])
        else:
            record_list = ','.join([record.__repr__() for record in self])
        return '<ResourceRecordSets:%s [%s]' % (self.hosted_zone_id,
                                                record_list)

    def add_change(self, action, name, type, ttl=600,
                   alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
                   weight=None, region=None, alias_evaluate_target_health=None,
                   health_check=None, failover=None):
        """
        Add a change request to the set.

        :type action: str
        :param action: The action to perform ('CREATE'|'DELETE'|'UPSERT')

        :type name: str
        :param name: The name of the domain you want to perform the action on.

        :type type: str
        :param type: The DNS record type.  Valid values are:

            * A
            * AAAA
            * CNAME
            * MX
            * NS
            * PTR
            * SOA
            * SPF
            * SRV
            * TXT

        :type ttl: int
        :param ttl: The resource record cache time to live (TTL), in seconds.

        :type alias_hosted_zone_id: str
        :param alias_hosted_zone_id: *Alias resource record sets only* The
            value of the hosted zone ID, CanonicalHostedZoneNameId, for
            the LoadBalancer.

        :type alias_dns_name: str
        :param alias_dns_name: *Alias resource record sets only*
            Information about the domain to which you are redirecting traffic.

        :type identifier: str
        :param identifier: *Weighted and latency-based resource record sets
            only* An identifier that differentiates among multiple resource
            record sets that have the same combination of DNS name and type.

        :type weight: int
        :param weight: *Weighted resource record sets only* Among resource
            record sets that have the same combination of DNS name and type,
            a value that determines what portion of traffic for the current
            resource record set is routed to the associated location

        :type region: str
        :param region: *Latency-based resource record sets only* Among resource
            record sets that have the same combination of DNS name and type,
            a value that determines which region this should be associated with
            for the latency-based routing

        :type alias_evaluate_target_health: bool
        :param alias_evaluate_target_health: *Required for alias resource record
            sets* Indicates whether this Resource Record Set should respect the
            health status of any health checks associated with the ALIAS target
            record which it is linked to.

        :type health_check: str
        :param health_check: Health check to associate with this record

        :type failover: str
        :param failover: *Failover resource record sets only* Whether this is the
            primary or secondary resource record set.
        """
        change = Record(name, type, ttl,
                        alias_hosted_zone_id=alias_hosted_zone_id,
                        alias_dns_name=alias_dns_name, identifier=identifier,
                        weight=weight, region=region,
                        alias_evaluate_target_health=alias_evaluate_target_health,
                        health_check=health_check, failover=failover)
        self.changes.append([action, change])
        return change

    def add_change_record(self, action, change):
        """Add an existing record to a change set with the specified action"""
        self.changes.append([action, change])
        return

    def to_xml(self):
        """Convert this ResourceRecordSet into XML
        to be saved via the ChangeResourceRecordSetsRequest"""
        changesXML = ""
        for change in self.changes:
            changeParams = {"action": change[0], "record": change[1].to_xml()}
            changesXML += self.ChangeXML % changeParams
        params = {"comment": self.comment, "changes": changesXML}
        return self.ChangeResourceRecordSetsBody % params

    def commit(self):
        """Commit this change"""
        if not self.connection:
            # Lazily create a connection if the caller did not supply one.
            import boto
            self.connection = boto.connect_route53()
        return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())

    def endElement(self, name, value, connection):
        """Overwritten to also add the NextRecordName,
        NextRecordType and NextRecordIdentifier to the base object"""
        if name == 'NextRecordName':
            self.next_record_name = value
        elif name == 'NextRecordType':
            self.next_record_type = value
        elif name == 'NextRecordIdentifier':
            self.next_record_identifier = value
        else:
            return super(ResourceRecordSets, self).endElement(name, value, connection)

    def __iter__(self):
        """Override the next function to support paging"""
        results = super(ResourceRecordSets, self).__iter__()
        truncated = self.is_truncated
        while results:
            for obj in results:
                yield obj
            if self.is_truncated:
                # Fetch the next page using the cursors captured by
                # endElement() and keep yielding transparently.
                self.is_truncated = False
                results = self.connection.get_all_rrsets(self.hosted_zone_id, name=self.next_record_name,
                                                         type=self.next_record_type,
                                                         identifier=self.next_record_identifier)
            else:
                results = None
        # Restore the original truncation flag for callers that re-iterate.
        self.is_truncated = truncated
class Record(object):
"""An individual ResourceRecordSet"""
HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
%(health_check)s
</ResourceRecordSet>"""
WRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Weight>%(weight)s</Weight>
"""
RRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
"""
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
%(records)s
</ResourceRecords>"""
ResourceRecordBody = """<ResourceRecord>
<Value>%s</Value>
</ResourceRecord>"""
AliasBody = """<AliasTarget>
<HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
<DNSName>%(dns_name)s</DNSName>
%(eval_target_health)s
</AliasTarget>"""
EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
self.name = name
self.type = type
self.ttl = ttl
if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.identifier = identifier
self.weight = weight
self.region = region
self.alias_evaluate_target_health = alias_evaluate_target_health
self.health_check = health_check
self.failover = failover
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
def set_alias(self, alias_hosted_zone_id, alias_dns_name,
alias_evaluate_target_health=False):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.alias_evaluate_target_health = alias_evaluate_target_health
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
if self.alias_evaluate_target_health is not None:
eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
else:
eval_target_health = ""
body = self.AliasBody % {"hosted_zone_id": self.alias_hosted_zone_id,
"dns_name": self.alias_dns_name,
"eval_target_health": eval_target_health}
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier is not None and self.weight is not None:
weight = self.WRRBody % {"identifier": self.identifier,
"weight": self.weight}
elif self.identifier is not None and self.region is not None:
weight = self.RRRBody % {"identifier": self.identifier,
"region": self.region}
elif self.identifier is not None and self.failover is not None:
weight = self.FailoverBody % {"identifier": self.identifier,
"failover": self.failover}
health_check = ""
if self.health_check is not None:
health_check = self.HealthCheckBody % (self.health_check)
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
"health_check": health_check
}
return self.XMLBody % params
def to_print(self):
rr = ""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Show alias
rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
if self.alias_evaluate_target_health is not None:
rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health
else:
# Show resource record(s)
rr = ",".join(self.resource_records)
if self.identifier is not None and self.weight is not None:
rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
elif self.identifier is not None and self.region is not None:
rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
elif self.identifier is not None and self.failover is not None:
rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover)
return rr
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Type':
self.type = value
elif name == 'TTL':
self.ttl = value
elif name == 'Value':
self.resource_records.append(value)
elif name == 'HostedZoneId':
self.alias_hosted_zone_id = value
elif name == 'DNSName':
self.alias_dns_name = value
elif name == 'SetIdentifier':
self.identifier = value
elif name == 'EvaluateTargetHealth':
self.alias_evaluate_target_health = value.lower() == 'true'
elif name == 'Weight':
self.weight = value
elif name == 'Region':
self.region = value
elif name == 'Failover':
self.failover = value
elif name == 'HealthCheckId':
self.health_check = value
def startElement(self, name, attrs, connection):
    # SAX parse callback for opening tags.  All data for a record set is
    # captured from element text in endElement, so opening tags are ignored.
    return None
| bsd-3-clause |
boa19861105/android_444_KitKat_kernel_htc_B2_UHL | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
# Build script for the 'perf' Python extension (Linux perf tool bindings).
# NOTE(review): Python 2 only -- uses the removed ``file()`` builtin, and is
# driven by environment variables exported from perf's Makefile.
from distutils.core import setup, Extension
from os import getenv

from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib


class build_ext(_build_ext):
    # Redirect build output into the out-of-tree kernel build directory
    # (supplied via PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp


class install_lib(_install_lib):
    # Install straight from the out-of-tree build directory.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib


cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# The extension's C source list is kept in util/python-ext-sources;
# blank lines and '#' comment lines are skipped.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources=ext_sources,
                 include_dirs=['util/include'],
                 extra_compile_args=cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
ColinIanKing/autotest | frontend/setup_test_environment.py | 3 | 1806 | import tempfile, shutil, os
from django.core import management
from django.conf import settings
try:
import autotest.common as common
except ImportError:
import common
# we need to set DATABASE_ENGINE now, at import time, before the Django database
# system gets initialized.
# django.conf.settings.LazySettings is buggy and requires us to get something
# from it before we set stuff on it.
getattr(settings, 'DATABASES')
settings.DATABASES['default']['ENGINE'] = (
'autotest.frontend.db.backends.afe_sqlite')
settings.DATABASES['default']['NAME'] = ':memory:'
from django.db import connection
from autotest.frontend.afe import readonly_connection
def run_syncdb(verbosity=0):
    """Create the schema of the in-memory test database via Django's syncdb."""
    management.call_command('syncdb', verbosity=verbosity, interactive=False)
def destroy_test_database():
    """Really close the in-memory SQLite test DB, discarding its contents."""
    connection.close()
    # Django brilliantly ignores close() requests on in-memory DBs to keep us
    # naive users from accidentally destroying data. So reach in and close
    # the real connection ourselves.
    # Note this depends on Django internals and will likely need to be changed
    # when we move to Django 1.x.
    real_connection = connection.connection
    if real_connection is not None:
        real_connection.close()
    connection.connection = None
def set_up():
    """Prepare the test environment: build the schema and force read-only
    DB access through the regular (writable) test connection."""
    run_syncdb()
    readonly_connection.ReadOnlyConnection.set_globally_disabled(True)
def tear_down():
    """Undo set_up(): re-enable read-only connections and drop the test DB."""
    readonly_connection.ReadOnlyConnection.set_globally_disabled(False)
    destroy_test_database()
def print_queries():
    """
    Print all SQL queries executed so far. Useful for debugging failing tests -
    you can call it from tearDown(), and then execute the single test case of
    interest from the command line.
    """
    # NOTE(review): Python 2 print statement; requires settings.DEBUG for
    # connection.queries to be populated -- confirm in the test settings.
    for query in connection.queries:
        print query['sql'] + ';\n'
| gpl-2.0 |
Serag8/Bachelor | google_appengine/lib/setuptools-0.6c11/setuptools/tests/__init__.py | 32 | 12345 | """Tests for the 'setuptools' package"""
from unittest import TestSuite, TestCase, makeSuite, defaultTestLoader
import distutils.core, distutils.cmd
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils.errors import DistutilsSetupError
import setuptools, setuptools.dist
from setuptools import Feature
from distutils.core import Extension
extract_constant, get_module_constant = None, None
from setuptools.depends import *
from distutils.version import StrictVersion, LooseVersion
from distutils.util import convert_path
import sys, os.path
def additional_tests():
    """Build the extra doctest-based suite for the setuptools test package."""
    import doctest
    import unittest

    doc_suites = [
        doctest.DocFileSuite(
            'api_tests.txt',
            optionflags=doctest.ELLIPSIS,
            package='pkg_resources',
        ),
    ]
    if sys.platform == 'win32':
        # Script wrappers are only generated (and therefore testable) on Windows.
        doc_suites.append(doctest.DocFileSuite('win_script_wrapper.txt'))
    return unittest.TestSuite(doc_suites)
def makeSetup(**args):
    """Return distribution from 'setup(**args)', without executing commands.

    Sets distutils' private ``_setup_stop_after`` flag so that
    ``setuptools.setup`` parses the command line but stops before running
    any commands, then restores the flag on the way out.
    """
    distutils.core._setup_stop_after = "commandline"

    # Don't let system command line leak into tests!
    args.setdefault('script_args', ['install'])

    try:
        return setuptools.setup(**args)
    finally:
        # BUG FIX: the original wrote ``distutils.core_setup_stop_after``,
        # which merely created a stray attribute on *this* module instead of
        # resetting ``distutils.core._setup_stop_after`` -- leaving distutils
        # stuck in "stop after commandline" mode for the rest of the process.
        distutils.core._setup_stop_after = None
class DependsTests(TestCase):
    """Tests for setuptools.depends: bytecode constant extraction
    (extract_constant / get_module_constant) and the Require descriptor.

    NOTE(review): legacy Python 2 code -- ``func_code``, ``failUnless`` and
    ``failIf`` do not exist on Python 3.
    """

    def testExtractConst(self):
        if not extract_constant: return  # skip on non-bytecode platforms

        def f1():
            global x, y, z
            x = "test"
            y = z

        # unrecognized name
        self.assertEqual(extract_constant(f1.func_code, 'q', -1), None)

        # constant assigned
        self.assertEqual(extract_constant(f1.func_code, 'x', -1), "test")

        # expression assigned
        self.assertEqual(extract_constant(f1.func_code, 'y', -1), -1)

        # recognized name, not assigned
        self.assertEqual(extract_constant(f1.func_code, 'z', -1), None)

    def testFindModule(self):
        # find_module must reject bogus dotted names and missing submodules.
        self.assertRaises(ImportError, find_module, 'no-such.-thing')
        self.assertRaises(ImportError, find_module, 'setuptools.non-existent')
        f, p, i = find_module('setuptools.tests'); f.close()

    def testModuleExtract(self):
        if not get_module_constant: return  # skip on non-bytecode platforms
        from distutils import __version__
        self.assertEqual(
            get_module_constant('distutils', '__version__'), __version__
        )
        self.assertEqual(
            get_module_constant('sys', 'version'), sys.version
        )
        self.assertEqual(
            get_module_constant('setuptools.tests', '__doc__'), __doc__
        )

    def testRequire(self):
        if not extract_constant: return  # skip on non-bytecode platforms

        req = Require('Distutils', '1.0.3', 'distutils')

        self.assertEqual(req.name, 'Distutils')
        self.assertEqual(req.module, 'distutils')
        self.assertEqual(req.requested_version, '1.0.3')
        self.assertEqual(req.attribute, '__version__')
        self.assertEqual(req.full_name(), 'Distutils-1.0.3')

        from distutils import __version__
        self.assertEqual(req.get_version(), __version__)
        self.failUnless(req.version_ok('1.0.9'))
        self.failIf(req.version_ok('0.9.1'))
        self.failIf(req.version_ok('unknown'))

        self.failUnless(req.is_present())
        self.failUnless(req.is_current())

        # LooseVersion comparison: '03000' is present but never "current".
        req = Require('Distutils 3000', '03000', 'distutils', format=LooseVersion)
        self.failUnless(req.is_present())
        self.failIf(req.is_current())
        self.failIf(req.version_ok('unknown'))

        req = Require('Do-what-I-mean', '1.0', 'd-w-i-m')
        self.failIf(req.is_present())
        self.failIf(req.is_current())

        # Version-less requirement: only module presence matters.
        req = Require('Tests', None, 'tests', homepage="http://example.com")
        self.assertEqual(req.format, None)
        self.assertEqual(req.attribute, None)
        self.assertEqual(req.requested_version, None)
        self.assertEqual(req.full_name(), 'Tests')
        self.assertEqual(req.homepage, 'http://example.com')

        paths = [os.path.dirname(p) for p in __path__]
        self.failUnless(req.is_present(paths))
        self.failUnless(req.is_current(paths))
class DistroTests(TestCase):
    """Tests for setuptools.dist.Distribution's include/exclude machinery:
    removing or re-adding packages, modules and extensions from a
    distribution built via makeSetup()."""

    def setUp(self):
        self.e1 = Extension('bar.ext', ['bar.c'])
        self.e2 = Extension('c.y', ['y.c'])

        self.dist = makeSetup(
            packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
            py_modules=['b.d', 'x'],
            ext_modules=(self.e1, self.e2),
            package_dir={},
        )

    def testDistroType(self):
        self.failUnless(isinstance(self.dist, setuptools.dist.Distribution))

    def testExcludePackage(self):
        # Excluding a package removes it and its subpackages, plus any
        # modules/extensions that live inside it.
        self.dist.exclude_package('a')
        self.assertEqual(self.dist.packages, ['b', 'c'])

        self.dist.exclude_package('b')
        self.assertEqual(self.dist.packages, ['c'])
        self.assertEqual(self.dist.py_modules, ['x'])
        self.assertEqual(self.dist.ext_modules, [self.e1, self.e2])

        self.dist.exclude_package('c')
        self.assertEqual(self.dist.packages, [])
        self.assertEqual(self.dist.py_modules, ['x'])
        self.assertEqual(self.dist.ext_modules, [self.e1])

        # test removals from unspecified options
        makeSetup().exclude_package('x')

    def testIncludeExclude(self):
        # remove an extension
        self.dist.exclude(ext_modules=[self.e1])
        self.assertEqual(self.dist.ext_modules, [self.e2])

        # add it back in
        self.dist.include(ext_modules=[self.e1])
        self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])

        # should not add duplicate
        self.dist.include(ext_modules=[self.e1])
        self.assertEqual(self.dist.ext_modules, [self.e2, self.e1])

    def testExcludePackages(self):
        self.dist.exclude(packages=['c', 'b', 'a'])
        self.assertEqual(self.dist.packages, [])
        self.assertEqual(self.dist.py_modules, ['x'])
        self.assertEqual(self.dist.ext_modules, [self.e1])

    def testEmpty(self):
        # include/exclude on a distribution with no options must not blow up.
        dist = makeSetup()
        dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
        dist = makeSetup()
        dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2])

    def testContents(self):
        self.failUnless(self.dist.has_contents_for('a'))
        self.dist.exclude_package('a')
        self.failIf(self.dist.has_contents_for('a'))

        self.failUnless(self.dist.has_contents_for('b'))
        self.dist.exclude_package('b')
        self.failIf(self.dist.has_contents_for('b'))

        self.failUnless(self.dist.has_contents_for('c'))
        self.dist.exclude_package('c')
        self.failIf(self.dist.has_contents_for('c'))

    def testInvalidIncludeExclude(self):
        # include()/exclude() validate both option names and value types.
        self.assertRaises(DistutilsSetupError,
            self.dist.include, nonexistent_option='x'
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, nonexistent_option='x'
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.include, packages={'x': 'y'}
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, packages={'x': 'y'}
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.include, ext_modules={'x': 'y'}
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, ext_modules={'x': 'y'}
        )

        self.assertRaises(DistutilsSetupError,
            self.dist.include, package_dir=['q']
        )
        self.assertRaises(DistutilsSetupError,
            self.dist.exclude, package_dir=['q']
        )
class FeatureTests(TestCase):
    """Tests for setuptools.Feature: optional distribution features that can
    be switched on/off from the command line (--with-X / --without-X) and
    that add or remove packages, modules, scripts and libraries."""

    def setUp(self):
        self.req = Require('Distutils', '1.0.3', 'distutils')
        self.dist = makeSetup(
            features={
                'foo': Feature("foo", standard=True, require_features=['baz', self.req]),
                'bar': Feature("bar", standard=True, packages=['pkg.bar'],
                               py_modules=['bar_et'], remove=['bar.ext'],
                               ),
                'baz': Feature(
                    "baz", optional=False, packages=['pkg.baz'],
                    scripts=['scripts/baz_it'],
                    libraries=[('libfoo', 'foo/foofoo.c')]
                ),
                'dwim': Feature("DWIM", available=False, remove='bazish'),
            },
            # 'bar' is explicitly disabled on the simulated command line.
            script_args=['--without-bar', 'install'],
            packages=['pkg.bar', 'pkg.foo'],
            py_modules=['bar_et', 'bazish'],
            ext_modules=[Extension('bar.ext', ['bar.c'])]
        )

    def testDefaults(self):
        # Unavailable features are excluded by default even if "standard".
        self.failIf(
            Feature(
                "test", standard=True, remove='x', available=False
            ).include_by_default()
        )
        self.failUnless(
            Feature("test", standard=True, remove='x').include_by_default()
        )
        # Feature must have either kwargs, removes, or require_features
        self.assertRaises(DistutilsSetupError, Feature, "test")

    def testAvailability(self):
        self.assertRaises(
            DistutilsPlatformError,
            self.dist.features['dwim'].include_in, self.dist
        )

    def testFeatureOptions(self):
        dist = self.dist
        self.failUnless(
            ('with-dwim', None, 'include DWIM') in dist.feature_options
        )
        self.failUnless(
            ('without-dwim', None, 'exclude DWIM (default)') in dist.feature_options
        )
        self.failUnless(
            ('with-bar', None, 'include bar (default)') in dist.feature_options
        )
        self.failUnless(
            ('without-bar', None, 'exclude bar') in dist.feature_options
        )
        self.assertEqual(dist.feature_negopt['without-foo'], 'with-foo')
        self.assertEqual(dist.feature_negopt['without-bar'], 'with-bar')
        self.assertEqual(dist.feature_negopt['without-dwim'], 'with-dwim')
        # Non-optional features ('baz') get no --without- switch at all.
        self.failIf('without-baz' in dist.feature_negopt)

    def testUseFeatures(self):
        dist = self.dist
        self.assertEqual(dist.with_foo, 1)
        self.assertEqual(dist.with_bar, 0)
        self.assertEqual(dist.with_baz, 1)
        self.failIf('bar_et' in dist.py_modules)
        self.failIf('pkg.bar' in dist.packages)
        self.failUnless('pkg.baz' in dist.packages)
        self.failUnless('scripts/baz_it' in dist.scripts)
        self.failUnless(('libfoo', 'foo/foofoo.c') in dist.libraries)
        self.assertEqual(dist.ext_modules, [])
        self.assertEqual(dist.require_features, [self.req])

        # If we ask for bar, it should fail because we explicitly disabled
        # it on the command line
        self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar')

    def testFeatureWithInvalidRemove(self):
        # Removing something the distribution doesn't have is a fatal error.
        self.assertRaises(
            SystemExit, makeSetup, features={'x': Feature('x', remove='y')}
        )
class TestCommandTests(TestCase):
    """Tests for the setuptools 'test' command: option parsing for
    --test-suite / --test-module and their interaction with setup() keywords."""

    def testTestIsCommand(self):
        test_cmd = makeSetup().get_command_obj('test')
        self.failUnless(isinstance(test_cmd, distutils.cmd.Command))

    def testLongOptSuiteWNoDefault(self):
        ts1 = makeSetup(script_args=['test', '--test-suite=foo.tests.suite'])
        ts1 = ts1.get_command_obj('test')
        ts1.ensure_finalized()
        self.assertEqual(ts1.test_suite, 'foo.tests.suite')

    def testDefaultSuite(self):
        ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test')
        ts2.ensure_finalized()
        self.assertEqual(ts2.test_suite, 'bar.tests.suite')

    def testDefaultWModuleOnCmdLine(self):
        # A -m on the command line overrides the setup() test_suite keyword,
        # deriving the suite name as '<module>.test_suite'.
        ts3 = makeSetup(
            test_suite='bar.tests',
            script_args=['test', '-m', 'foo.tests']
        ).get_command_obj('test')
        ts3.ensure_finalized()
        self.assertEqual(ts3.test_module, 'foo.tests')
        self.assertEqual(ts3.test_suite, 'foo.tests.test_suite')

    def testConflictingOptions(self):
        # -m and -s together are ambiguous and must be rejected.
        ts4 = makeSetup(
            script_args=['test', '-m', 'bar.tests', '-s', 'foo.tests.suite']
        ).get_command_obj('test')
        self.assertRaises(DistutilsOptionError, ts4.ensure_finalized)

    def testNoSuite(self):
        ts5 = makeSetup().get_command_obj('test')
        ts5.ensure_finalized()
        self.assertEqual(ts5.test_suite, None)
| mit |
mtnman38/Aggregate | Executables/Aggregate 0.8.8 for Macintosh.app/Contents/Resources/lib/python2.7/requests/packages/urllib3/util.py | 41 | 20493 | # urllib3/util.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from binascii import hexlify, unhexlify
from collections import namedtuple
from hashlib import md5, sha1
from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT
import time
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError, SSLError, TimeoutStateError
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout
def current_time():
    """
    Retrieve the current time, this function is mocked out in unit testing.
    """
    # Thin indirection over time.time() so tests can monkey-patch the clock.
    return time.time()
class Timeout(object):
    """
    Utility object for storing timeout values.

    Example usage:

    .. code-block:: python

        timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
        pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
        pool.request(...) # Etc, etc

    :param connect:
        The maximum amount of time to wait for a connection attempt to a server
        to succeed. Omitting the parameter will default the connect timeout to
        the system default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.

    :type connect: integer, float, or None

    :param read:
        The maximum amount of time to wait between consecutive
        read operations for a response from the server. Omitting
        the parameter will default the read timeout to the system
        default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.

    :type read: integer, float, or None

    :param total:
        The maximum amount of time to wait for an HTTP request to connect and
        return. This combines the connect and read timeouts into one. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.

        Defaults to None.

    :type total: integer, float, or None

    .. note::

        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response. Specifically, Python's DNS resolver does not obey the
        timeout specified on the socket. Other factors that can affect total
        request time include high CPU load, high swap, the program running at a
        low priority level, or other behaviors. The observed running time for
        urllib3 to return a response may be greater than the value passed to
        `total`.

        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server, not
        the total amount of time for the request to return a complete response.
        As an example, you may want a request to return within 7 seconds or
        fail, so you set the ``total`` timeout to 7 seconds. If the server
        sends one byte to you every 5 seconds, the request will **not** trigger
        time out. This case is admittedly rare.
    """

    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, connect=_Default, read=_Default, total=None):
        # Each value is validated eagerly so bad input fails at construction.
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        self._start_connect = None

    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)

    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is used
            for clear error messages
        :return: the value
        :raises ValueError: if the type is not an integer or a float, or if it
            is a numeric value less than zero
        """
        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError:  # Python 3
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        return value

    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.

        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value passed
        to this function.

        :param timeout: The legacy timeout value
        :type timeout: integer, float, sentinel default object, or None
        :return: a Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)

    def clone(self):
        """ Create a copy of the timeout object

        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)

    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect

    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.

        :return: the elapsed time
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect

    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: the connect timeout
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect

        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total

        # Both connect and total are set: the stricter one wins.
        return min(self._connect, self.total)

    @property
    def read_timeout(self):
        """ Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.

        :return: the value to use for the read timeout
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (self.total is not None and
            self.total is not self.DEFAULT_TIMEOUT and
            self._read is not None and
            self._read is not self.DEFAULT_TIMEOUT):
            # in case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Remaining total budget, capped by the per-read timeout,
            # clamped at 0 so we never hand the socket a negative timeout.
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # BUG FIX: the original read ``slots = ()``, which just creates a class
    # attribute named "slots".  ``__slots__ = ()`` is required to actually
    # suppress the per-instance __dict__ on this namedtuple subclass.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Allow any subset of fields to be supplied; the rest default to None.
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example: ::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        found = s.find(delim)
        # Keep the earliest occurrence among all delimiters.
        if found >= 0 and (best_idx is None or found < best_idx):
            best_idx = found
            best_delim = delim

    if best_idx is None:
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example: ::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.
    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path (the terminator is part of the path itself)
        path = delim + path_

    # Auth
    if '@' in url:
        auth, url = url.split('@', 1)

    # IPv6: a bracketed literal like [::1]; keep the brackets on the host.
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if not port.isdigit():
            raise LocationParseError("Failed to parse: %s" % url)

        port = int(port)
    elif not host and url:
        host = url

    if not path:
        # Nothing after the authority: fragment/query cannot be present.
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    # Kept for backwards compatibility: returns (scheme, host, port), with
    # the scheme defaulting to 'http' when the URL does not carry one.
    p = parse_url(url)
    return p.scheme or 'http', p.hostname, p.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    Example: ::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # Normalize the three accepted forms (str / list / truthy) to a str.
        if isinstance(accept_encoding, str):
            encoding_value = accept_encoding
        elif isinstance(accept_encoding, list):
            encoding_value = ','.join(accept_encoding)
        else:
            encoding_value = 'gzip,deflate'
        headers['accept-encoding'] = encoding_value

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        headers['authorization'] = 'Basic ' + \
            b64encode(six.b(basic_auth)).decode('utf-8')

    return headers
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if not sock:  # Platform-specific: AppEngine
        return False

    if not poll:
        if not select:  # Platform-specific: AppEngine
            return False

        try:
            # Readable with no request in flight means EOF or buffered data:
            # either way the connection must not be reused.
            return bool(select([sock], [], [], 0.0)[0])
        except SocketError:
            return True

    # This version is better on platforms that support it.
    p = poll()
    p.register(sock, POLLIN)
    for (fno, ev) in p.poll(0.0):
        if fno == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True

    # BUG FIX: the original fell off the end here and returned an implicit
    # None; make the "connection still alive" result an explicit False.
    return False
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbrevation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE

    if not isinstance(candidate, str):
        # Assumed to already be a numeric ssl constant; pass through as-is.
        return candidate

    # Try the exact name first, then with the 'CERT_' prefix prepended.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
def resolve_ssl_version(candidate):
    """
    like resolve_cert_reqs
    """
    if candidate is None:
        return PROTOCOL_SSLv23

    if not isinstance(candidate, str):
        # Assumed to already be a numeric protocol constant.
        return candidate

    # Try the exact name first, then with the 'PROTOCOL_' prefix prepended.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    """
    # The digest length (in bytes) identifies which hash function produced
    # the fingerprint: 16 -> MD5, 20 -> SHA-1.
    hashfunc_map = {16: md5, 20: sha1}

    normalized = fingerprint.replace(':', '').lower()

    digest_length, odd = divmod(len(normalized), 2)
    if odd or digest_length not in hashfunc_map:
        raise SSLError('Fingerprint is of invalid length.')

    # We need encode() here for py32; works on py2 and p33.
    expected = unhexlify(normalized.encode())

    actual = hashfunc_map[digest_length](cert).digest()

    if actual != expected:
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(hexlify(expected), hexlify(actual)))
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    try:
        # Containers such as httplib's HTTPResponse wrap another file-like
        # object and drop it (set .fp to None) once exhausted.
        return obj.fp is None
    except AttributeError:
        # Plain file-like object: rely on its own closed flag.
        return obj.closed
if SSLContext is not None:  # Python 3.2+
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        """
        All arguments except `server_hostname` have the same meaning as for
        :func:`ssl.wrap_socket`

        :param server_hostname:
            Hostname of the expected certificate
        """
        context = SSLContext(ssl_version)
        context.verify_mode = cert_reqs
        if ca_certs:
            try:
                context.load_verify_locations(ca_certs)
            # Py32 raises IOError
            # Py33 raises FileNotFoundError
            except Exception as e:  # Reraise as SSLError
                raise SSLError(e)
        if certfile:
            # FIXME: This block needs a test.
            context.load_cert_chain(certfile, keyfile)
        if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
            return context.wrap_socket(sock, server_hostname=server_hostname)
        return context.wrap_socket(sock)

else:  # Python 3.1 and earlier
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        # Legacy path: ssl.wrap_socket cannot do SNI, so ``server_hostname``
        # is accepted for interface compatibility but ignored here.
        return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
                           ca_certs=ca_certs, cert_reqs=cert_reqs,
                           ssl_version=ssl_version)
| gpl-2.0 |
zmarvel/slowboy | slowboy/util.py | 1 | 1753 |
import abc
from collections import namedtuple
Op = namedtuple('Op', ['function', 'cycles', 'description'])
class ClockListener(metaclass=abc.ABCMeta):
    """Interface for components that must be stepped in lockstep with the
    CPU clock (e.g. video or timer hardware in the emulator)."""

    @abc.abstractmethod
    def notify(self, clock: int, cycles: int):
        """Notify the listener that the clock has advanced.

        :param clock: The new value of the CPU clock.
        :param cycles: The number of cycles that have passed since the last
                       notification."""
        pass
def uint8toBCD(uint8):
    """Convert an 8-bit unsigned integer to binary-coded decimal.

    The tens digit ends up in the high nibble, the ones digit in the low
    nibble (e.g. 42 -> 0x42)."""
    tens, ones = divmod(uint8, 10)
    return (tens << 4) | ones
def sub_s8(x, y):
    """Subtract two 8-bit integers stored in two's complement."""
    # x - y is x plus the two's complement of y (flip bits, add one),
    # truncated to 8 bits.
    return (x + ((y ^ 0xff) + 1)) & 0xff
def sub_s16(x, y):
    """Subtract two 16-bit integers stored in two's complement."""
    # x - y is x plus the two's complement of y (flip bits, add one),
    # truncated to 16 bits.
    return (x + ((y ^ 0xffff) + 1)) & 0xffff
def add_s8(x, y):
    """Add two 8-bit integers stored in two's complement."""
    # Reducing modulo 0x100 is equivalent to masking with 0xff.
    return (x + y) % 0x100
def add_s16(x, y):
    """Add two 16-bit integers stored in two's complement."""
    # Reducing modulo 0x10000 is equivalent to masking with 0xffff.
    return (x + y) % 0x10000
def twoscompl8(x):
    """Returns the reciprocal of 8-bit x in two's complement."""
    # Arithmetic negation followed by an 8-bit mask is equivalent to the
    # classic flip-the-bits-and-add-one formulation.
    return -x & 0xff
def twoscompl16(x):
    """Returns the reciprocal of 16-bit x in two's complement."""
    # Arithmetic negation followed by a 16-bit mask is equivalent to the
    # classic flip-the-bits-and-add-one formulation.
    return -x & 0xffff
def hexdump(bytes, line_len, start=0):
    """Yield lines of a classic hex dump of *bytes*.

    Each yielded line is '<offset>: <hex> <hex> ...' with ``line_len`` bytes
    per line; ``start`` is the offset printed for the first byte.

    :param bytes: byte sequence to dump (note: parameter shadows the builtin,
        kept for interface compatibility).
    :param line_len: number of bytes per output line.
    :param start: base offset for the printed addresses (default 0).
    """
    line = []
    j = 0
    for b in bytes:
        s = '{:02x}'.format(b)
        if j % line_len == 0 and j > 0:
            yield '{:04x}: {}'.format(start + j - line_len, ' '.join(line))
            line = []
        j += 1
        line.append(s)
    # BUG FIX: the original unconditionally labelled the last line with
    # ``start + j - line_len``, which is wrong whenever the final line is
    # partial (len(bytes) % line_len != 0) and yielded a bogus line for
    # empty input.  Use the actual number of bytes in the pending line,
    # and emit nothing when there is no pending data.
    if line:
        yield '{:04x}: {}'.format(start + j - len(line), ' '.join(line))
def print_lines(it):
    """Print each string yielded by *it* (e.g. the hexdump() generator) on
    its own line."""
    for line in it:
        print(line)
| mit |
eXistenZNL/SickRage | lib/subliminal/providers/addic7ed.py | 18 | 8768 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import babelfish
import bs4
import requests
from . import Provider
from .. import __version__
from ..cache import region, SHOW_EXPIRATION_TIME
from ..exceptions import ConfigurationError, AuthenticationError, DownloadLimitExceeded, ProviderError
from ..subtitle import Subtitle, fix_line_endings, compute_guess_properties_matches
from ..video import Episode
logger = logging.getLogger(__name__)

# Register Addic7ed's language-code converter with babelfish (lazy entry-point
# string; the converter module is only imported when first used).
babelfish.language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')
class Addic7edSubtitle(Subtitle):
    """A subtitle found on addic7ed.com, described by episode metadata and the
    site's 'version' (release) string."""
    provider_name = 'addic7ed'

    def __init__(self, language, series, season, episode, title, year, version, hearing_impaired, download_link,
                 page_link):
        super(Addic7edSubtitle, self).__init__(language, hearing_impaired, page_link)
        self.series = series
        self.season = season
        self.episode = episode
        self.title = title
        self.year = year
        # Release/version string as shown on the site (used for group/format guessing).
        self.version = version
        self.download_link = download_link

    def compute_matches(self, video):
        """Return the set of property names on which this subtitle matches `video`."""
        matches = set()
        # series
        if video.series and self.series == video.series:
            matches.add('series')
        # season
        if video.season and self.season == video.season:
            matches.add('season')
        # episode
        if video.episode and self.episode == video.episode:
            matches.add('episode')
        # title (case-insensitive)
        if video.title and self.title.lower() == video.title.lower():
            matches.add('title')
        # year
        if self.year == video.year:
            matches.add('year')
        # release_group (substring match against the site's version string)
        if video.release_group and self.version and video.release_group.lower() in self.version.lower():
            matches.add('release_group')
        """
        # resolution
        if video.resolution and self.version and video.resolution in self.version.lower():
            matches.add('resolution')
        # format
        if video.format and self.version and video.format in self.version.lower:
            matches.add('format')
        """
        # we don't have the complete filename, so we need to guess the matches separately
        # guess resolution (screenSize in guessit)
        matches |= compute_guess_properties_matches(video, self.version, 'screenSize')
        # guess format
        matches |= compute_guess_properties_matches(video, self.version, 'format')
        # guess video codec
        matches |= compute_guess_properties_matches(video, self.version, 'videoCodec')
        return matches
class Addic7edProvider(Provider):
    """Subtitle provider that scrapes addic7ed.com (TV episodes only).

    Supports optional authenticated sessions; anonymous use is allowed but
    subject to stricter download limits on the site.
    """
    languages = {babelfish.Language('por', 'BR')} | {babelfish.Language(l)
                                                     for l in ['ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas',
                                                               'fin', 'fra', 'glg', 'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa',
                                                               'nld', 'nor', 'pol', 'por', 'ron', 'rus', 'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha',
                                                               'tur', 'ukr', 'vie', 'zho']}
    video_types = (Episode,)
    server = 'http://www.addic7ed.com'

    def __init__(self, username=None, password=None):
        # Credentials are optional, but must be given as a pair.
        if username is not None and password is None or username is None and password is not None:
            raise ConfigurationError('Username and password must be specified')
        self.username = username
        self.password = password
        self.logged_in = False

    def initialize(self):
        """Open the HTTP session and log in when credentials were provided."""
        self.session = requests.Session()
        self.session.headers = {'User-Agent': 'Subliminal/%s' % __version__.split('-')[0]}
        # login
        if self.username is not None and self.password is not None:
            logger.debug('Logging in')
            data = {'username': self.username, 'password': self.password, 'Submit': 'Log in'}
            r = self.session.post(self.server + '/dologin.php', data, timeout=10, allow_redirects=False)
            # A successful login redirects (302); anything else means bad credentials.
            if r.status_code == 302:
                logger.info('Logged in')
                self.logged_in = True
            else:
                raise AuthenticationError(self.username)

    def terminate(self):
        """Log out (when logged in) and close the HTTP session."""
        # logout
        if self.logged_in:
            r = self.session.get(self.server + '/logout.php', timeout=10)
            logger.info('Logged out')
            if r.status_code != 200:
                raise ProviderError('Request failed with status code %d' % r.status_code)
        self.session.close()

    def get(self, url, params=None):
        """Make a GET request on `url` with the given parameters

        :param string url: part of the URL to reach with the leading slash
        :param params: params of the request
        :return: the response
        :rtype: :class:`bs4.BeautifulSoup`
        """
        r = self.session.get(self.server + url, params=params, timeout=10)
        if r.status_code != 200:
            raise ProviderError('Request failed with status code %d' % r.status_code)
        return bs4.BeautifulSoup(r.content, ['permissive'])

    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def get_show_ids(self):
        """Load the shows page with default series to show ids mapping

        :return: series to show ids
        :rtype: dict
        """
        soup = self.get('/shows.php')
        show_ids = {}
        # Show links look like '/show/<id>'; strip the 6-char prefix to get the id.
        for html_show in soup.select('td.version > h3 > a[href^="/show/"]'):
            show_ids[html_show.string.lower()] = int(html_show['href'][6:])
        return show_ids

    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def find_show_id(self, series, year=None):
        """Find the show id from the `series` with optional `year`

        Use this only if the show id cannot be found with :meth:`get_show_ids`

        :param string series: series of the episode in lowercase
        :param year: year of the series, if any
        :type year: int or None
        :return: the show id, if any
        :rtype: int or None
        """
        series_year = series
        if year is not None:
            series_year += ' (%d)' % year
        params = {'search': series_year, 'Submit': 'Search'}
        logger.debug('Searching series %r', params)
        suggested_shows = self.get('/search.php', params).select('span.titulo > a[href^="/show/"]')
        if not suggested_shows:
            logger.info('Series %r not found', series_year)
            return None
        # Take the first suggestion; href is '/show/<id>'.
        return int(suggested_shows[0]['href'][6:])

    def query(self, series, season, year=None):
        """Scrape the season page and return all completed subtitles for it.

        Resolution order: cached shows map with year, search with year,
        cached map without year, search without year.
        """
        show_ids = self.get_show_ids()
        show_id = None
        if year is not None:  # search with the year
            series_year = '%s (%d)' % (series.lower(), year)
            if series_year in show_ids:
                show_id = show_ids[series_year]
            else:
                show_id = self.find_show_id(series.lower(), year)
        if show_id is None:  # search without the year
            year = None
            if series.lower() in show_ids:
                show_id = show_ids[series.lower()]
            else:
                show_id = self.find_show_id(series.lower())
        if show_id is None:
            return []
        params = {'show_id': show_id, 'season': season}
        logger.debug('Searching subtitles %r', params)
        link = '/show/{show_id}&season={season}'.format(**params)
        soup = self.get(link)
        subtitles = []
        for row in soup('tr', class_='epeven completed'):
            cells = row('td')
            # Skip rows that are not fully translated yet.
            if cells[5].string != 'Completed':
                continue
            # Skip rows with no language cell.
            if not cells[3].string:
                continue
            subtitles.append(Addic7edSubtitle(babelfish.Language.fromaddic7ed(cells[3].string), series, season,
                                              int(cells[1].string), cells[2].string, year, cells[4].string,
                                              bool(cells[6].string), cells[9].a['href'],
                                              self.server + cells[2].a['href']))
        return subtitles

    def list_subtitles(self, video, languages):
        # Query once per season, then filter by requested languages and episode.
        return [s for s in self.query(video.series, video.season, video.year)
                if s.language in languages and s.episode == video.episode]

    def download_subtitle(self, subtitle):
        r = self.session.get(self.server + subtitle.download_link, timeout=10, headers={'Referer': subtitle.page_link})
        if r.status_code != 200:
            raise ProviderError('Request failed with status code %d' % r.status_code)
        # The site serves an HTML page instead of the subtitle file once the
        # daily download quota is exhausted.
        if r.headers['Content-Type'] == 'text/html':
            raise DownloadLimitExceeded
        subtitle.content = fix_line_endings(r.content)
| gpl-3.0 |
amunk/metagoofil | hachoir_core/log.py | 86 | 4235 | import os, sys, time
import hachoir_core.config as config
from hachoir_core.i18n import _
class Log:
    """Library-wide logger (Python 2): optional stderr echo, in-memory
    buffering per level, optional UTF-8 log file, and a user callback."""
    LOG_INFO = 0
    LOG_WARN = 1
    LOG_ERROR = 2

    # Human-readable prefix for each level.
    level_name = {
        LOG_WARN: "[warn]",
        LOG_ERROR: "[err!]",
        LOG_INFO: "[info]"
    }

    def __init__(self):
        self.__buffer = {}
        self.__file = None
        self.use_print = True
        self.use_buffer = False
        self.on_new_message = None # Prototype: def func(level, prefix, text, context)

    def shutdown(self):
        # Write a final marker line if a log file is open.
        if self.__file:
            self._writeIntoFile(_("Stop Hachoir"))

    def setFilename(self, filename, append=True):
        """
        Use a file to store all messages. The
        UTF-8 encoding will be used. Write an informative
        message if the file can't be created.

        @param filename: C{L{string}}
        """
        # Look if file already exists or not
        filename = os.path.expanduser(filename)
        filename = os.path.realpath(filename)
        # NOTE(review): the `append` parameter is immediately overwritten
        # here, so callers cannot force truncation — confirm intended.
        append = os.access(filename, os.F_OK)

        # Create log file (or open it in append mode, if it already exists)
        try:
            import codecs
            if append:
                self.__file = codecs.open(filename, "a", "utf-8")
            else:
                self.__file = codecs.open(filename, "w", "utf-8")
            self._writeIntoFile(_("Starting Hachoir"))
        except IOError, err:
            # errno 2 == ENOENT: parent directory missing; degrade gracefully.
            if err.errno == 2:
                self.__file = None
                self.info(_("[Log] setFilename(%s) fails: no such file") % filename)
            else:
                raise

    def _writeIntoFile(self, message):
        # Timestamped line, flushed immediately so logs survive crashes.
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
        self.__file.write(u"%s - %s\n" % (timestamp, message))
        self.__file.flush()

    def newMessage(self, level, text, ctxt=None):
        """
        Write a new message : append it in the buffer,
        display it to the screen (if needed), and write
        it in the log file (if needed).

        @param level: Message level.
        @type level: C{int}
        @param text: Message content.
        @type text: C{str}
        @param ctxt: The caller instance.
        """
        # Filter by verbosity/quiet configuration.
        if level < self.LOG_ERROR and config.quiet or \
           level <= self.LOG_INFO and not config.verbose:
            return
        if config.debug:
            from hachoir_core.error import getBacktrace
            backtrace = getBacktrace(None)
            if backtrace:
                text += "\n\n" + backtrace
        _text = text
        # If the caller supports the Logger protocol, prefix its identity.
        if hasattr(ctxt, "_logger"):
            _ctxt = ctxt._logger()
            if _ctxt is not None:
                text = "[%s] %s" % (_ctxt, text)

        # Add message to log buffer
        if self.use_buffer:
            if not self.__buffer.has_key(level):
                self.__buffer[level] = [text]
            else:
                self.__buffer[level].append(text)

        # Add prefix
        prefix = self.level_name.get(level, "[info]")

        # Display on stdout (if used)
        if self.use_print:
            sys.stdout.flush()
            sys.stderr.write("%s %s\n" % (prefix, text))
            sys.stderr.flush()

        # Write into outfile (if used)
        if self.__file:
            self._writeIntoFile("%s %s" % (prefix, text))

        # Use callback (if used)
        if self.on_new_message:
            self.on_new_message (level, prefix, _text, ctxt)

    def info(self, text):
        """
        New informative message.
        @type text: C{str}
        """
        self.newMessage(Log.LOG_INFO, text)

    def warning(self, text):
        """
        New warning message.
        @type text: C{str}
        """
        self.newMessage(Log.LOG_WARN, text)

    def error(self, text):
        """
        New error message.
        @type text: C{str}
        """
        self.newMessage(Log.LOG_ERROR, text)
# Module-level singleton logger shared by the whole library.
log = Log()
class Logger(object):
    """Mixin that routes messages through the module logger, tagging each
    message with the emitting class's name via `_logger`."""

    def _logger(self):
        # Identity string used as the message prefix, e.g. "<Parser>".
        return "<%s>" % self.__class__.__name__

    def info(self, text):
        """Emit an informative message attributed to this instance."""
        log.newMessage(Log.LOG_INFO, text, self)

    def warning(self, text):
        """Emit a warning message attributed to this instance."""
        log.newMessage(Log.LOG_WARN, text, self)

    def error(self, text):
        """Emit an error message attributed to this instance."""
        log.newMessage(Log.LOG_ERROR, text, self)
| gpl-2.0 |
ColdSauce/IsSittingOnButt | server/env/lib/python2.7/site-packages/pip/locations.py | 184 | 6644 | """Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import getpass
import os
import os.path
import site
import sys
from distutils import sysconfig
from distutils.command.install import install, SCHEME_KEYS # noqa
from pip.compat import WINDOWS
from pip.utils import appdirs
# CA Bundle Locations
CA_BUNDLE_PATHS = [
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenBSD
"/etc/ssl/cert.pem",
# FreeBSD/DragonFly
"/usr/local/share/certs/ca-root-nss.crt",
# Homebrew on OSX
"/usr/local/etc/openssl/cert.pem",
]
# Attempt to locate a CA Bundle that we can pass into requests, we have a list
# of possible ones from various systems. If we cannot find one then we'll set
# this to None so that we default to whatever requests is setup to handle.
#
# Note to Downstream: If you wish to disable this autodetection and simply use
# whatever requests does (likely you've already patched
# requests.certs.where()) then simply edit this line so
# that it reads ``CA_BUNDLE_PATH = None``.
CA_BUNDLE_PATH = next((x for x in CA_BUNDLE_PATHS if os.path.exists(x)), None)
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
    """
    Write the pip delete marker file into this directory.
    """
    marker_path = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
    with open(marker_path, 'w') as marker_fp:
        marker_fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
    """
    Return True if we're running inside a virtualenv, False otherwise.
    """
    # Classic virtualenv sets sys.real_prefix; PEP 405 venvs make
    # sys.base_prefix differ from sys.prefix.
    in_virtualenv = hasattr(sys, 'real_prefix')
    in_pep405_venv = sys.prefix != getattr(sys, "base_prefix", sys.prefix)
    return in_virtualenv or in_pep405_venv
def virtualenv_no_global():
    """
    Return True if in a venv and no system site packages.
    """
    # Mirrors virtualenv.py: a marker file placed next to site.py signals
    # that the environment was built without global site-packages access.
    marker = os.path.join(
        os.path.dirname(os.path.abspath(site.__file__)),
        'no-global-site-packages.txt',
    )
    if running_under_virtualenv() and os.path.isfile(marker):
        return True
def __get_username():
    """Return the effective username of the current process."""
    if not WINDOWS:
        # POSIX: resolve the effective uid through the password database.
        import pwd
        return pwd.getpwuid(os.geteuid()).pw_name
    return getpass.getuser()
# Directory where pip unpacks editable ('-e') source checkouts.
if running_under_virtualenv():
    src_prefix = os.path.join(sys.prefix, 'src')
else:
    # FIXME: keep src in cwd for now (it is not a temporary folder)
    try:
        src_prefix = os.path.join(os.getcwd(), 'src')
    except OSError:
        # In case the current working directory has been renamed or deleted
        sys.exit(
            "The folder you are executing pip from can no longer be found."
        )

# under Mac OS X + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)

# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_site = site.USER_SITE
user_dir = os.path.expanduser('~')
# Per-platform script directories and configuration file locations.
if WINDOWS:
    bin_py = os.path.join(sys.prefix, 'Scripts')
    bin_user = os.path.join(user_site, 'Scripts')
    # buildout uses 'bin' on Windows too?
    if not os.path.exists(bin_py):
        bin_py = os.path.join(sys.prefix, 'bin')
        bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.ini'
    legacy_storage_dir = os.path.join(user_dir, 'pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
else:
    bin_py = os.path.join(sys.prefix, 'bin')
    bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.conf'
    legacy_storage_dir = os.path.join(user_dir, '.pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )

# Forcing to use /usr/local/bin for standard Mac OS X framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
    bin_py = '/usr/local/bin'

# System-wide (all-users) configuration file candidates.
site_config_files = [
    os.path.join(path, config_basename)
    for path in appdirs.site_config_dirs('pip')
]
def distutils_scheme(dist_name, user=False, home=None, root=None,
                     isolated=False):
    """
    Return a distutils install scheme

    :param dist_name: distribution name used to resolve per-dist config.
    :param user: install into the per-user site (``--user``).
    :param home: alternate installation base (``--home``).
    :param root: filesystem root to prepend to all paths (``--root``).
    :param isolated: ignore the user's ``~/.pydistutils.cfg``.
    :return: dict mapping each SCHEME_KEYS entry to its install path.
    """
    from distutils.dist import Distribution

    scheme = {}

    if isolated:
        extra_dist_args = {"script_args": ["--no-user-cfg"]}
    else:
        extra_dist_args = {}
    dist_args = {'name': dist_name}
    dist_args.update(extra_dist_args)

    d = Distribution(dist_args)
    # Honor distutils configuration files (setup.cfg etc.).
    d.parse_config_files()
    i = d.get_command_obj('install', create=True)
    # NOTE: setting user or home has the side-effect of creating the home dir
    # or user base for installations during finalize_options()
    # ideally, we'd prefer a scheme class that has no side-effects.
    i.user = user or i.user
    if user:
        i.prefix = ""
    i.home = home or i.home
    i.root = root or i.root
    i.finalize_options()
    for key in SCHEME_KEYS:
        scheme[key] = getattr(i, 'install_' + key)

    # install_lib specified in setup.cfg should install *everything*
    # into there (i.e. it takes precedence over both purelib and
    # platlib).  Note, i.install_lib is *always* set after
    # finalize_options(); we only want to override here if the user
    # has explicitly requested it hence going back to the config
    if 'install_lib' in d.get_option_dict('install'):
        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))

    if running_under_virtualenv():
        # Headers go under the venv's include tree, namespaced by dist name.
        scheme['headers'] = os.path.join(
            sys.prefix,
            'include',
            'site',
            'python' + sys.version[:3],
            dist_name,
        )

        if root is not None:
            # Re-root the headers path (strip the leading os.sep first).
            scheme["headers"] = os.path.join(
                root,
                os.path.abspath(scheme["headers"])[1:],
            )

    return scheme
| apache-2.0 |
ThomasSweijen/TPF | examples/test/test_Ip2_FrictMat_CpmMat_FrictPhys.py | 10 | 1264 | from yade import *
from yade import plot,qt
import sys
# Material parameters (SI units: Pa for moduli/strengths, kg/m^3 for density).
young=25e9
poisson=.2
sigmaT=3e6
frictionAngle=atan(1)
density=4800 ## 4800 # twice the density, since porosity is about .5 (.62)
epsCrackOnset=1e-4
relDuctility=300
intRadius=1.5
# NOTE(review): the materials below pass literals (density=4800, sigmaT=3e6,
# relDuctility=30, epsCrackOnset=1e-4) instead of the variables defined above;
# in particular relDuctility=30 differs from the relDuctility=300 variable —
# confirm which value is intended.
concMat = O.materials.append(CpmMat(young=young,poisson=poisson,density=4800,sigmaT=3e6,relDuctility=30,epsCrackOnset=1e-4,neverDamage=False))
frictMat = O.materials.append(FrictMat(young=young,poisson=poisson,density=4800))

# Two concrete spheres, one frictional sphere, and a frictional facet wall.
b1 = sphere((0,0,0),1,material=concMat)
b1.state.vel = Vector3(1,0,0)
b2 = sphere((0,5,0),1,material=concMat)
b2.state.vel = Vector3(2,-2,0)
b3 = sphere((0,-4,0),1,material=frictMat)
b3.state.vel = Vector3(1,3,0)
b4 = facet(((2,-5,-5),(2,-5,10),(2,10,-5)),material=frictMat)

O.bodies.append((b1,b2,b3,b4))

O.dt = 5e-6

# Engine loop exercising all three Ip2 functor combinations, including the
# mixed FrictMat/CpmMat pairing under test.
O.engines=[
	ForceResetter(),
	InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
	InteractionLoop(
		[
		Ig2_Sphere_Sphere_ScGeom(),
		Ig2_Facet_Sphere_ScGeom()
		],
		[
		Ip2_CpmMat_CpmMat_CpmPhys(),
		Ip2_FrictMat_CpmMat_FrictPhys(),
		Ip2_FrictMat_FrictMat_FrictPhys(),
		],
		[
		Law2_ScGeom_CpmPhys_Cpm(),
		Law2_ScGeom_FrictPhys_CundallStrack()
		]
	),
	NewtonIntegrator(label='newton'),
]

O.step()

# Open the 3D view when Qt is available; otherwise run headless.
try:
	from yade import qt
	qt.View()
except:
	O.run()
| gpl-2.0 |
golismero/golismero-devel | tools/sqlmap/plugins/dbms/access/__init__.py | 8 | 1049 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import ACCESS_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.access.enumeration import Enumeration
from plugins.dbms.access.filesystem import Filesystem
from plugins.dbms.access.fingerprint import Fingerprint
from plugins.dbms.access.syntax import Syntax
from plugins.dbms.access.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class AccessMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
    """
    This class defines Microsoft Access methods
    """

    def __init__(self):
        # Access system databases are excluded from enumeration results.
        self.excludeDbsList = ACCESS_SYSTEM_DBS

        # Initialize every capability mixin explicitly (they are plain
        # classes, not a cooperative super() chain).
        Syntax.__init__(self)
        Fingerprint.__init__(self)
        Enumeration.__init__(self)
        Filesystem.__init__(self)
        Miscellaneous.__init__(self)
        Takeover.__init__(self)
# Register the Access-specific string escaper with the global unescaper map.
unescaper[DBMS.ACCESS] = Syntax.escape
| gpl-2.0 |
koreiklein/fantasia | ui/render/gl/distances.py | 1 | 1104 | # Copyright (C) 2013 Korei Klein <korei.klein1@gmail.com>
# Constants for gl rendering of basic are collected here.
from ui.render.gl import colors
# Numerical tolerance used by the gl renderer.
epsilon = 0.0001

divider_spacing = 15.0

# Geometry of the "Not" decoration.
notThickness = 22.0
notShiftThickness = notThickness + 21.0
# Amount by which to shift the value contained inside a Not.
notShiftOffset = [notShiftThickness + 5, notShiftThickness, 0.0]

# Spacing around quantifiers and their bound variables.
quantifier_variables_spacing = 100.0
variable_binding_spacing = 20.0
quantifier_before_divider_spacing = 10.0
quantifier_after_divider_spacing = 55.0

infixSpacing = 88.0
applySpacing = 16.0
productVariableHorizontalSpacing = 0.0
productVariableBorder = 10.0

symbolBackgroundBorderWidth = 30.0
variableBackgroundBorderWidth = 30.0

holdsSpacing = 60.0
iffSpacing = 35.0

exponential_border_width = 40.0

# Divider sizing.
min_unit_divider_length = 100.0
min_intersect_divider_length = 250.0
unit_width = 20.0
quantifier_divider_width = 20.0
conjunctiveDividerWidth = 20.0
def capLengthOfDividerByLength(length):
  """Return one seventh of `length`, capped at 35.0."""
  scaled = length / 7.0
  if scaled > 35.0:
    return 35.0
  return scaled
# Spacing around injections and the dot symbol.
inject_spacing = 8.0
before_dot_spacing = 8.0
after_dot_spacing = 8.0
dotWidth = 15.0
andrewcmyers/tensorflow | tensorflow/contrib/training/python/training/hparam.py | 11 | 16739 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import re
import six
from tensorflow.contrib.training.python.training import hparam_pb2
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
def parse_values(values, type_map):
  """Parses hyperparameter values from a string into a python map..

  `values` is a string of comma-separated `name=value` pairs. Later
  occurrences of the same name override earlier ones.

  The syntax of `value` depends on the declared type of the parameter:

  * Scalar integer: a Python-parsable integer, e.g. 1, 100, -12.
  * Scalar float: a Python-parsable float, e.g. 1.0, -.54e89.
  * Boolean: true/false (any case) or an integer.
  * Scalar string: non-empty text without commas, spaces or brackets.
  * List: comma-separated scalars of the declared type inside square
    brackets, e.g. [1,2,3], [1.0,1e-12], [high,low].

  Args:
    values: String. Comma separated list of `name=value` pairs where
      'value' must follow the syntax described above.
    type_map: A dictionary mapping hyperparameter names to types. Every
      parameter name in `values` must be a key here; a value conforms if it
      has the mapped type or is a list of elements of that type.

  Returns:
    A python map containing the name, value pairs.

  Raises:
    ValueError: If `values` cannot be parsed.
  """
  parsed = {}
  pair_re = re.compile(
      r'(?P<name>[a-zA-Z][\w]*)\s*=\s*'
      r'((?P<val>[^,\[]*)|\[(?P<vals>[^\]]*)\])($|,)')
  cursor = 0
  while cursor < len(values):
    match = pair_re.match(values, cursor)
    if not match:
      raise ValueError('Malformed hyperparameter value: %s' % values[cursor:])
    # Advance past this pair (and its trailing comma, if any).
    cursor = match.end()

    groups = match.groupdict()
    name = groups['name']
    if name not in type_map:
      raise ValueError('Unknown hyperparameter type for %s' % name)
    param_type = type_map[name]

    def report_failure(bad_value, _name=name, _type=param_type):
      # Bind name/type as defaults so the message is correct per-iteration.
      raise ValueError(
          'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s'
          % (_name, _type.__name__, bad_value, values))

    if param_type == bool:
      def convert(value, _fail=report_failure):
        # Accept true/false literals, otherwise fall back to int semantics.
        if value in ['true', 'True']:
          return True
        if value in ['false', 'False']:
          return False
        try:
          return bool(int(value))
        except (ValueError, TypeError):
          _fail(value)
      converter = convert
    else:
      converter = param_type

    if groups['val'] is not None:
      # Scalar value.
      try:
        parsed[name] = converter(groups['val'])
      except (ValueError, TypeError):
        report_failure(groups['val'])
    elif groups['vals'] is not None:
      # Bracketed list: split on commas/spaces, dropping empty pieces.
      items = [piece for piece in re.split('[ ,]', groups['vals']) if piece]
      try:
        parsed[name] = [converter(item) for item in items]
      except (ValueError, TypeError):
        report_failure(groups['vals'])
    else:
      report_failure('')
  return parsed
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
# The hyperparameter are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
Hyperparameters have type, which is inferred from the type of their value
passed at construction type. The currently supported types are: integer,
float, string, and list of integer, float, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameters values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
  def __init__(self, hparam_def=None, model_structure=None, **kwargs):
    """Create an instance of `HParams` from keyword arguments.

    The keyword arguments specify name-values pairs for the hyperparameters.
    The parameter types are inferred from the type of the values passed.

    The parameter names are added as attributes of `HParams` object, so they
    can be accessed directly with the dot notation `hparams._name_`.

    Example:

    ```python
    # Define 3 hyperparameters: 'learning_rate' is a float parameter,
    # 'num_hidden_units' an integer parameter, and 'activation' a string
    # parameter.
    hparams = tf.HParams(
        learning_rate=0.1, num_hidden_units=100, activation='relu')

    hparams.activation ==> 'relu'
    ```

    Note that a few names are reserved and cannot be used as hyperparameter
    names.  If you use one of the reserved name the constructor raises a
    `ValueError`.

    Args:
      hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
        protocol buffer. If provided, this object is initialized by
        deserializing hparam_def.  Otherwise **kwargs is used.
      model_structure: An instance of ModelStructure, defining the feature
        crosses to be used in the Trial.
      **kwargs: Key-value pairs where the key is the hyperparameter name and
        the value is the value for the parameter.

    Raises:
      ValueError: If both `hparam_def` and initialization values are provided,
        or if one of the arguments is invalid.

    """
    # Register the hyperparameters and their type in _hparam_types.
    # This simplifies the implementation of parse().
    # _hparam_types maps the parameter name to a tuple (type, bool).
    # The type value is the type of the parameter for scalar hyperparameters,
    # or the type of the list elements for multidimensional hyperparameters.
    # The bool value is True if the value is a list, False otherwise.
    self._hparam_types = {}
    self._model_structure = model_structure
    # Exactly one initialization source is allowed: proto or kwargs.
    if hparam_def:
      self._init_from_proto(hparam_def)
      if kwargs:
        raise ValueError('hparam_def and initialization values are '
                         'mutually exclusive')
    else:
      for name, value in six.iteritems(kwargs):
        self.add_hparam(name, value)
  def _init_from_proto(self, hparam_def):
    """Creates a new HParams from `HParamDef` protocol buffer.

    Args:
      hparam_def: `HParamDef` protocol buffer.
    """
    assert isinstance(hparam_def, hparam_pb2.HParamDef)
    for name, value in hparam_def.hparam.items():
      # `kind` is the name of the populated oneof field, e.g. 'int64_value'
      # or 'bytes_list'; the suffix distinguishes scalars from lists.
      kind = value.WhichOneof('kind')
      if kind.endswith('_value'):
        # Single value.
        if kind.startswith('int64'):
          # Setting attribute value to be 'int' to ensure the type is compatible
          # with both Python2 and Python3.
          self.add_hparam(name, int(getattr(value, kind)))
        elif kind.startswith('bytes'):
          # Setting attribute value to be 'str' to ensure the type is compatible
          # with both Python2 and Python3. UTF-8 encoding is assumed.
          self.add_hparam(name, compat.as_str(getattr(value, kind)))
        else:
          self.add_hparam(name, getattr(value, kind))
      else:
        # List of values.
        if kind.startswith('int64'):
          # Setting attribute value to be 'int' to ensure the type is compatible
          # with both Python2 and Python3.
          self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
        elif kind.startswith('bytes'):
          # Setting attribute value to be 'str' to ensure the type is compatible
          # with both Python2 and Python3. UTF-8 encoding is assumed.
          self.add_hparam(name, [compat.as_str(v)
                                 for v in getattr(value, kind).value])
        else:
          self.add_hparam(name, [v for v in getattr(value, kind).value])
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# hyperparameter name.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError('Multi-valued hyperparameters cannot be empty: %s'
% name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def parse(self, values):
"""Override hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where
'value' must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed.
"""
type_map = dict()
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self._set_from_map(values_map)
def _set_from_map(self, values_map):
"""Override hyperparameter values, parsing new values from a dictionary.
Args:
values_map: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values_map` cannot be parsed.
"""
for name, value in values_map.items():
_, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, value)
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, value)
return self
  def set_model_structure(self, model_structure):
    # Attach a ModelStructure describing the feature crosses for this Trial.
    self._model_structure = model_structure

  def get_model_structure(self):
    # Returns the ModelStructure set via `set_model_structure` or the
    # constructor; may be None.
    return self._model_structure
  def to_json(self):
    """Serializes the hyperparameters into JSON.

    Only registered hyperparameters (those in `values()`) are serialized.

    Returns:
      A JSON string.
    """
    return json.dumps(self.values())
def parse_json(self, values_json):
"""Override hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values_json` cannot be parsed.
"""
values_map = json.loads(values_json)
return self._set_from_map(values_map)
def values(self):
    """Return the hyperparameter values as a Python dictionary.

    Returns:
      A dictionary keyed by hyperparameter name, holding the current
      value of each registered hyperparameter.
    """
    return dict((name, getattr(self, name)) for name in self._hparam_types)
def __str__(self):
    # Sort the (name, value) pairs so the rendering is deterministic.
    pairs = sorted(self.values().items())
    return str(pairs)
@staticmethod
def _get_kind_name(param_type, is_list):
    """Return the HParamDef field name for a (type, is_list) pair.

    Args:
      param_type: Data type of the hparam.
      is_list: Whether this is a list.

    Returns:
      A string representation of the field name.

    Raises:
      ValueError: If parameter type is not recognized.
    """
    # NOTE: bool must be tested before the integer types because Python
    # considers bool a subclass of int.
    if issubclass(param_type, bool):
        base = 'bool'
    elif issubclass(param_type, six.integer_types):
        # 'int' and 'long' both map to 'int64' so the wire type is the
        # same under Python 2 and Python 3.
        base = 'int64'
    elif issubclass(param_type, (six.string_types, six.binary_type)):
        # 'str' and 'bytes' both map to 'bytes' for Python 2/3 parity.
        base = 'bytes'
    elif issubclass(param_type, float):
        base = 'float'
    else:
        raise ValueError('Unsupported parameter type: %s' % str(param_type))
    return '%s_%s' % (base, 'list' if is_list else 'value')
def to_proto(self, export_scope=None):  # pylint: disable=unused-argument
    """Converts a `HParams` object to a `HParamDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `HParamDef` protocol buffer.
    """
    hparam_proto = hparam_pb2.HParamDef()
    for name in self._hparam_types:
        # Parse the values.
        param_type, is_list = self._hparam_types.get(name, (None, None))
        # Proto oneof field name, e.g. 'int64_value' or 'bytes_list'.
        kind = HParams._get_kind_name(param_type, is_list)
        if is_list:
            if kind.startswith('bytes'):
                # Text values must be encoded before storage in a bytes field.
                v_list = [compat.as_bytes(v) for v in getattr(self, name)]
            else:
                v_list = [v for v in getattr(self, name)]
            getattr(hparam_proto.hparam[name], kind).value.extend(v_list)
        else:
            v = getattr(self, name)
            if kind.startswith('bytes'):
                v = compat.as_bytes(getattr(self, name))
            setattr(hparam_proto.hparam[name], kind, v)
    return hparam_proto
@staticmethod
def from_proto(hparam_def, import_scope=None):  # pylint: disable=unused-argument
    # Recreate an HParams instance from an HParamDef protocol buffer;
    # import_scope is unused but required by register_proto_function's
    # from_proto signature.
    return HParams(hparam_def=hparam_def)
# Register (de)serializers under the 'hparams' collection key so HParams
# objects can round-trip through MetaGraphDef collections.
ops.register_proto_function('hparams',
                            proto_type=hparam_pb2.HParamDef,
                            to_proto=HParams.to_proto,
                            from_proto=HParams.from_proto)
| apache-2.0 |
nach00/Test4 | setup.py | 2 | 1293 | import sys
from distutils.core import setup
#PY3 = sys.version_info.major >= 3
# Index access works on every interpreter; the `major` attribute (above)
# would require Python >= 2.7.
PY3 = sys.version_info[0] >= 3
# Single source of truth for the package version; the file defines
# __version__ when executed.
VERSION_FILE = "pifacecad/version.py"
def get_version():
    """Read ``__version__`` out of VERSION_FILE without importing the package."""
    if not PY3:
        # Python 2: execfile injects __version__ into the local namespace.
        execfile(VERSION_FILE)
        return __version__
    # Python 3: compile and exec the file into a scratch namespace.
    namespace = {}
    with open(VERSION_FILE) as f:
        source = f.read()
    exec(compile(source, VERSION_FILE, 'exec'), None, namespace)
    return namespace['__version__']
# Standard distutils metadata; get_version() keeps the version defined in a
# single place (pifacecad/version.py).
setup(
    name='pifacecad',
    version=get_version(),
    description='The PiFace Control And Display module.',
    author='Thomas Preston',
    author_email='thomas.preston@openlx.org.uk',
    license='GPLv3+',
    url='http://piface.github.io/pifacecad/',
    packages=['pifacecad', 'pifacecad.tools'],
    # PyPI long description: README followed by the changelog.
    long_description=open('README.md').read() + open('CHANGELOG').read(),
    classifiers=[
        "License :: OSI Approved :: GNU Affero General Public License v3 or "
        "later (AGPLv3+)",
        "Programming Language :: Python :: 3",
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords='piface cad control display raspberrypi openlx',
    requires=['pifacecommon', 'lirc'],
)
| gpl-3.0 |
UniMOOC/gcb-new-module | modules/usage_reporting/consent_banner.py | 5 | 2634 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Banner to obtain consent for usage reporting."""
__author__ = [
'John Orr (jorr@google.com)',
]
import jinja2
import os
import appengine_config
from controllers import utils
from models import roles
from models import transforms
from modules.admin import admin
from modules.dashboard import dashboard
from modules.usage_reporting import config
from modules.usage_reporting import messaging
# Absolute path to this module's Jinja templates (consent_banner.html).
TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'usage_reporting', 'templates')
def _make_consent_banner(handler):
    """Render the usage-reporting consent banner, or None when not needed."""
    # No banner once consent has already been recorded, or when usage
    # reporting is disabled altogether.
    if config.is_consent_set() or messaging.is_disabled():
        return None
    template = handler.get_template('consent_banner.html', [TEMPLATES_DIR])
    rendered = template.render({
        'xsrf_token': handler.create_xsrf_token(
            ConsentBannerRestHandler.XSRF_TOKEN),
        'is_super_admin': roles.Roles.is_super_admin(),
    })
    return jinja2.Markup(rendered)
class ConsentBannerRestHandler(utils.BaseRESTHandler):
    """Handle REST requests to set report consent from banner."""

    URL = '/rest/modules/usage_reporting/consent'
    XSRF_TOKEN = 'usage_reporting_consent_banner'

    def post(self):
        """Record the consent choice posted by the banner."""
        request = transforms.loads(self.request.get('request'))
        # Reject forged requests before doing anything else.
        if not self.assert_xsrf_token_or_fail(request, self.XSRF_TOKEN, {}):
            return
        # Only site administrators may change the reporting setting.
        if not roles.Roles.is_super_admin():
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        is_allowed = transforms.loads(request.get('payload'))['is_allowed']
        config.set_report_allowed(is_allowed)
        # Report the consent decision itself back to the metrics pipeline.
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, is_allowed,
            source=messaging.Message.BANNER_SOURCE)
        transforms.send_json_response(self, 200, 'OK')
def notify_module_enabled():
    """Install the consent banner into the dashboard and admin page headers."""
    for hooks in (dashboard.DashboardHandler.PAGE_HEADER_HOOKS,
                  admin.GlobalAdminHandler.PAGE_HEADER_HOOKS):
        hooks.append(_make_consent_banner)
| apache-2.0 |
Yen-Chung-En/2015cdb_W12 | static/Brython3.1.1-20150328-091302/Lib/xml/dom/minicompat.py | 781 | 3228 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
# Python 3 has a single string type; kept as a tuple for isinstance() use.
StringTypes = (str,)
class NodeList(list):
    """Mutable DOM NodeList backed directly by a Python list."""

    __slots__ = ()

    def item(self, index):
        """Return the node at *index*, or None when out of range.

        Per DOM semantics, negative or too-large indices yield None
        instead of raising or wrapping around.
        """
        if 0 <= index < len(self):
            return self[index]
        return None

    def _get_length(self):
        return len(self)

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")

    def __getstate__(self):
        # Pickle as a plain list of children.
        return list(self)

    def __setstate__(self, state):
        # BUG FIX: when the list is empty, __getstate__ returns a falsy
        # value, so pickle/deepcopy may hand back None here; iterating it
        # raised TypeError. Treat None as "no children" (matches the
        # upstream CPython fix).
        if state is None:
            state = []
        self[:] = state
class EmptyNodeList(tuple):
    """Immutable, permanently-empty DOM NodeList."""

    __slots__ = ()

    def __add__(self, other):
        # Concatenation produces a real, mutable NodeList.
        result = NodeList()
        result.extend(other)
        return result

    def __radd__(self, other):
        result = NodeList()
        result.extend(other)
        return result

    def item(self, index):
        # There is never anything to return.
        return None

    def _get_length(self):
        return 0

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
    """Attach a read-only property *name* to *klass*.

    The getter must already exist as ``_get_<name>`` on the class; any
    attempt to assign through the property raises
    ``xml.dom.NoModificationAllowedErr``. A pre-existing ``_set_<name>``
    indicates a conflicting manual setter and is rejected.
    """
    getter = getattr(klass, ("_get_" + name))

    def setter(self, value, name=name):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute " + repr(name))

    assert not hasattr(klass, "_set_" + name), \
        "expected not to find _set_" + name
    setattr(klass, name, property(getter, setter, doc=doc))
| agpl-3.0 |
laborautonomo/poedit | deps/boost/tools/build/v2/test/project_id.py | 44 | 12454 | #!/usr/bin/python
# Copyright (C) 2012. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests Boost Build's project-id handling.
import BoostBuild
import sys
def test_assigning_project_ids():
    """Check how the `project` and `use-project` rules affect a module's id."""
    t = BoostBuild.Tester(pass_toolset=False)
    # The jamroot below asserts inline, via assert-project-id, after each
    # operation that could change the project id.
    t.write("jamroot.jam", """\
import assert ;
import modules ;
import notfile ;
import project ;
rule assert-project-id ( id ? : module-name ? )
{
module-name ?= [ CALLER_MODULE ] ;
assert.result $(id) : project.attribute $(module-name) id ;
}
# Project rule modifies the main project id.
assert-project-id ; # Initial project id is empty
project foo ; assert-project-id /foo ;
project ; assert-project-id /foo ;
project foo ; assert-project-id /foo ;
project bar ; assert-project-id /bar ;
project /foo ; assert-project-id /foo ;
project "" ; assert-project-id /foo ;
# Calling the use-project rule does not modify the project's main id.
use-project id1 : a ;
# We need to load the 'a' Jamfile module manually as the use-project rule will
# only schedule the load to be done after the current module load finishes.
a-module = [ project.load a ] ;
assert-project-id : $(a-module) ;
use-project id2 : a ;
assert-project-id : $(a-module) ;
modules.call-in $(a-module) : project baz ;
assert-project-id /baz : $(a-module) ;
use-project id3 : a ;
assert-project-id /baz : $(a-module) ;
# Make sure the project id still holds after all the scheduled use-project loads
# complete. We do this by scheduling the assert for the Jam action scheduling
# phase.
notfile x : @assert-a-rule ;
rule assert-a-rule ( target : : properties * )
{
assert-project-id /baz : $(a-module) ;
}
""")
    t.write("a/jamfile.jam", """\
# Initial project id for this module is empty.
assert-project-id ;
""")
    t.run_build_system()
    t.cleanup()
def test_using_project_ids_in_target_references():
    """Project ids declared via `project`/`use-project` must resolve in target
    references, even when the declaring rule only runs later (b3/id3)."""
    t = BoostBuild.Tester()
    __write_appender(t, "appender.jam")
    t.write("jamroot.jam", """\
import type ;
type.register AAA : _a ;
type.register BBB : _b ;
import appender ;
appender.register aaa-to-bbb : AAA : BBB ;
use-project id1 : a ;
use-project /id2 : a ;
bbb b1 : /id1//target ;
bbb b2 : /id2//target ;
bbb b3 : /id3//target ;
bbb b4 : a//target ;
bbb b5 : /project-a1//target ;
bbb b6 : /project-a2//target ;
bbb b7 : /project-a3//target ;
use-project id3 : a ;
""")
    t.write("a/source._a", "")
    t.write("a/jamfile.jam", """\
project project-a1 ;
project /project-a2 ;
import alias ;
alias target : source._a ;
project /project-a3 ;
""")
    t.run_build_system()
    # All seven references (b1..b7) must have produced a build result.
    t.expect_addition("bin/$toolset/b%d._b" % x for x in range(1, 8))
    t.expect_nothing_more()
    t.cleanup()
def test_repeated_ids_for_different_projects():
    """Registering one id for two distinct projects must fail with a clear
    diagnostic, whichever order the declarations are processed in."""
    t = BoostBuild.Tester()
    t.write("a/jamfile.jam", "")
    # Case 1: `project` first, then `use-project` for another location.
    t.write("jamroot.jam", "project foo ; use-project foo : a ;")
    t.run_build_system(status=1)
    t.expect_output_lines("""\
error: Attempt to redeclare already registered project id '/foo'.
error: Original project:
error: Name: Jamfile<*>
error: Module: Jamfile<*>
error: Main id: /foo
error: File: jamroot.jam
error: Location: .
error: New project:
error: Module: Jamfile<*>
error: File: a*jamfile.jam
error: Location: a""")
    # Case 2: same conflict, declarations in the opposite order.
    t.write("jamroot.jam", "use-project foo : a ; project foo ;")
    t.run_build_system(status=1)
    t.expect_output_lines("""\
error: Attempt to redeclare already registered project id '/foo'.
error: Original project:
error: Name: Jamfile<*>
error: Module: Jamfile<*>
error: Main id: /foo
error: File: jamroot.jam
error: Location: .
error: New project:
error: Module: Jamfile<*>
error: File: a*jamfile.jam
error: Location: a""")
    # Case 3: the second declaration made explicitly inside project 'a'.
    t.write("jamroot.jam", """\
import modules ;
import project ;
modules.call-in [ project.load a ] : project foo ;
project foo ;
""")
    t.run_build_system(status=1)
    t.expect_output_lines("""\
error: at jamroot.jam:4
error: Attempt to redeclare already registered project id '/foo'.
error: Original project:
error: Name: Jamfile<*>
error: Module: Jamfile<*>
error: Main id: /foo
error: File: a*jamfile.jam
error: Location: a
error: New project:
error: Module: Jamfile<*>
error: File: jamroot.jam
error: Location: .""")
    t.cleanup()
def test_repeated_ids_for_same_project():
    """Re-registering the same id for the *same* project — via any equivalent
    spelling of its path — must be accepted silently."""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", "project foo ; project foo ;")
    t.run_build_system()
    t.write("jamroot.jam", "project foo ; use-project foo : . ;")
    t.run_build_system()
    t.write("jamroot.jam", "project foo ; use-project foo : ./. ;")
    t.run_build_system()
    t.write("jamroot.jam", """\
project foo ;
use-project foo : . ;
use-project foo : ./aaa/.. ;
use-project foo : ./. ;
""")
    t.run_build_system()
    # On Windows we have a case-insensitive file system and we can use
    # backslashes as path separators.
    # FIXME: Make a similar test pass on Cygwin.
    if sys.platform in ['win32']:
        t.write("a/fOo bAr/b/jamfile.jam", "")
        t.write("jamroot.jam", r"""
use-project bar : "a/foo bar/b" ;
use-project bar : "a/foO Bar/b" ;
use-project bar : "a/foo BAR/b/" ;
use-project bar : "a\\.\\FOO bar\\b\\" ;
""")
        t.run_build_system()
        t.rm("a")
    # Redundant '.', '..' components and repeated slashes must normalize
    # down to the same project location.
    t.write("bar/jamfile.jam", "")
    t.write("jamroot.jam", """\
use-project bar : bar ;
use-project bar : bar/ ;
use-project bar : bar// ;
use-project bar : bar/// ;
use-project bar : bar//// ;
use-project bar : bar/. ;
use-project bar : bar/./ ;
use-project bar : bar/////./ ;
use-project bar : bar/../bar/xxx/.. ;
use-project bar : bar/..///bar/xxx///////.. ;
use-project bar : bar/./../bar/xxx/.. ;
use-project bar : bar/.////../bar/xxx/.. ;
use-project bar : bar/././../bar/xxx/.. ;
use-project bar : bar/././//////////../bar/xxx/.. ;
use-project bar : bar/.///.////../bar/xxx/.. ;
use-project bar : bar/./././xxx/.. ;
use-project bar : bar/xxx////.. ;
use-project bar : bar/xxx/.. ;
use-project bar : bar///////xxx/.. ;
""")
    t.run_build_system()
    t.rm("bar")
    # On Windows we have a case-insensitive file system and we can use
    # backslashes as path separators.
    # FIXME: Make a similar test pass on Cygwin.
    if sys.platform in ['win32']:
        t.write("baR/jamfile.jam", "")
        t.write("jamroot.jam", r"""
use-project bar : bar ;
use-project bar : BAR ;
use-project bar : bAr ;
use-project bar : bAr/ ;
use-project bar : bAr\\ ;
use-project bar : bAr\\\\ ;
use-project bar : bAr\\\\///// ;
use-project bar : bAr/. ;
use-project bar : bAr/./././ ;
use-project bar : bAr\\.\\.\\.\\ ;
use-project bar : bAr\\./\\/.\\.\\ ;
use-project bar : bAr/.\\././ ;
use-project bar : Bar ;
use-project bar : BaR ;
use-project bar : BaR/./../bAr/xxx/.. ;
use-project bar : BaR/./..\\bAr\\xxx/.. ;
use-project bar : BaR/xxx/.. ;
use-project bar : BaR///\\\\\\//xxx/.. ;
use-project bar : Bar\\xxx/.. ;
use-project bar : BAR/xXx/.. ;
use-project bar : BAR/xXx\\\\/\\/\\//\\.. ;
""")
        t.run_build_system()
        t.rm("baR")
    t.cleanup()
def test_unresolved_project_references():
    """Check the diagnostics for target references naming unknown projects."""
    t = BoostBuild.Tester()
    __write_appender(t, "appender.jam")
    t.write("a/source._a", "")
    t.write("a/jamfile.jam", "import alias ; alias target : source._a ;")
    t.write("jamroot.jam", """\
import type ;
type.register AAA : _a ;
type.register BBB : _b ;
import appender ;
appender.register aaa-to-bbb : AAA : BBB ;
use-project foo : a ;
bbb b1 : a//target ;
bbb b2 : /foo//target ;
bbb b-invalid : invalid//target ;
bbb b-root-invalid : /invalid//target ;
bbb b-missing-root : foo//target ;
bbb b-invalid-target : /foo//invalid ;
""")
    # The two well-formed references must build fine.
    t.run_build_system(["b1", "b2"])
    t.expect_addition("bin/$toolset/debug/b%d._b" % x for x in range(1, 3))
    t.expect_nothing_more()
    # Each malformed reference must fail with a specific explanation.
    t.run_build_system(["b-invalid"], status=1)
    t.expect_output_lines("""\
error: Unable to find file or target named
error: 'invalid//target'
error: referred to from project at
error: '.'
error: could not resolve project reference 'invalid'""")
    t.run_build_system(["b-root-invalid"], status=1)
    t.expect_output_lines("""\
error: Unable to find file or target named
error: '/invalid//target'
error: referred to from project at
error: '.'
error: could not resolve project reference '/invalid'""")
    t.run_build_system(["b-missing-root"], status=1)
    t.expect_output_lines("""\
error: Unable to find file or target named
error: 'foo//target'
error: referred to from project at
error: '.'
error: could not resolve project reference 'foo' - possibly missing a """
        "leading slash ('/') character.")
    t.run_build_system(["b-invalid-target"], status=1)
    t.expect_output_lines("""\
error: Unable to find file or target named
error: '/foo//invalid'
error: referred to from project at
error: '.'""")
    t.expect_output_lines("*could not resolve project reference*", False)
    t.cleanup()
def __write_appender(t, name):
    # Write the helper Boost Jam module used by the tests above: it registers
    # generators that "build" targets by echoing their inputs into the output
    # file, so builds can run without a real toolset.
    t.write(name,
r"""# Copyright 2012 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Support for registering test generators that construct their targets by
# simply appending their given input data, e.g. list of sources & targets.
import "class" : new ;
import generators ;
import modules ;
import sequence ;
rule register ( id composing ? : source-types + : target-types + )
{
local caller-module = [ CALLER_MODULE ] ;
id = $(caller-module).$(id) ;
local g = [ new generator $(id) $(composing) : $(source-types) :
$(target-types) ] ;
$(g).set-rule-name $(__name__).appender ;
generators.register $(g) ;
return $(id) ;
}
if [ modules.peek : NT ]
{
X = ")" ;
ECHO_CMD = (echo. ;
}
else
{
X = \" ;
ECHO_CMD = "echo $(X)" ;
}
local appender-runs ;
# We set up separate actions for building each target in order to avoid having
# to iterate over them in action (i.e. shell) code. We have to be extra careful
# though to achieve the exact same effect as if doing all the work in just one
# action. Otherwise Boost Jam might, under some circumstances, run only some of
# our actions. To achieve this we register a series of actions for all the
# targets (since they all have the same target list - either all or none of them
# get run independent of which target actually needs to get built), each
# building only a single target. Since all our actions use the same targets, we
# can not use 'on-target' parameters to pass data to a specific action so we
# pass them using the second 'sources' parameter which our actions then know how
# to interpret correctly. This works well since Boost Jam does not automatically
# add dependency relations between specified action targets & sources and so the
# second argument, even though most often used to pass in a list of sources, can
# actually be used for passing in any type of information.
rule appender ( targets + : sources + : properties * )
{
appender-runs = [ CALC $(appender-runs:E=0) + 1 ] ;
local target-index = 0 ;
local target-count = [ sequence.length $(targets) ] ;
local original-targets ;
for t in $(targets)
{
target-index = [ CALC $(target-index) + 1 ] ;
local appender-run = $(appender-runs) ;
if $(targets[2])-defined
{
appender-run += [$(target-index)/$(target-count)] ;
}
append $(targets) : $(appender-run:J=" ") $(t) $(sources) ;
}
}
actions append
{
$(ECHO_CMD)-------------------------------------------------$(X)
$(ECHO_CMD)Appender run: $(>[1])$(X)
$(ECHO_CMD)Appender run: $(>[1])$(X)>> "$(>[2])"
$(ECHO_CMD)Target group: $(<:J=' ')$(X)
$(ECHO_CMD)Target group: $(<:J=' ')$(X)>> "$(>[2])"
$(ECHO_CMD) Target: '$(>[2])'$(X)
$(ECHO_CMD) Target: '$(>[2])'$(X)>> "$(>[2])"
$(ECHO_CMD) Sources: '$(>[3-]:J=' ')'$(X)
$(ECHO_CMD) Sources: '$(>[3-]:J=' ')'$(X)>> "$(>[2])"
$(ECHO_CMD)=================================================$(X)
$(ECHO_CMD)-------------------------------------------------$(X)>> "$(>[2])"
}
""")
# Run all the checks; each raises (via BoostBuild.Tester) on failure.
test_assigning_project_ids()
test_using_project_ids_in_target_references()
test_repeated_ids_for_same_project()
test_repeated_ids_for_different_projects()
test_unresolved_project_references()
| mit |
cherusk/ansible | lib/ansible/modules/system/facter.py | 69 | 1855 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Standard Ansible documentation blocks; parsed by ansible-doc, not at runtime.
DOCUMENTATION = '''
---
module: facter
short_description: Runs the discovery program I(facter) on the remote system
description:
- Runs the I(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning
JSON data that can be useful for inventory purposes.
version_added: "0.2"
options: {}
notes: []
requirements: [ "facter", "ruby-json" ]
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''

EXAMPLES = '''
# Example command-line invocation
ansible www.example.net -m facter
'''
def main():
    """Run facter with JSON output and return its facts as module results."""
    module = AnsibleModule(argument_spec=dict())
    # facter may live outside the default PATH, e.g. on Puppet Enterprise.
    facter_path = module.get_bin_path(
        'facter', opt_dirs=['/opt/puppetlabs/bin'])
    rc, out, err = module.run_command(
        [facter_path, "--puppet", "--json"], check_rc=True)
    # Every top-level fact becomes a return value of the module.
    module.exit_json(**json.loads(out))
# import module snippets
# Wildcard import is the legacy Ansible convention: it provides
# AnsibleModule and the json helpers used above.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
dwightgunning/django | django/contrib/admin/templatetags/log.py | 499 | 2080 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
    """Template node that stores admin LogEntry rows in a context variable."""

    def __init__(self, limit, varname, user):
        self.limit = limit
        self.varname = varname
        self.user = user

    def __repr__(self):
        return "<GetAdminLog Node>"

    def render(self, context):
        if self.user is None:
            entries = LogEntry.objects.all()
        else:
            user_id = self.user
            if not user_id.isdigit():
                # Not a literal ID: treat it as the name of a context
                # variable holding a user object.
                user_id = context[self.user].pk
            entries = LogEntry.objects.filter(user__pk=user_id)
        limited = entries.select_related('content_type', 'user')[:int(self.limit)]
        context[self.varname] = limited
        return ''
@register.tag
def get_admin_log(parser, token):
    """Populates a template variable with the admin log for the given criteria.

    Usage::

        {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}

    Examples::

        {% get_admin_log 10 as admin_log for_user 23 %}
        {% get_admin_log 10 as admin_log for_user user %}
        {% get_admin_log 10 as admin_log %}

    ``context_var_containing_user_obj`` may be a hard-coded integer (user ID)
    or the name of a template context variable containing the user object
    whose ID you want.
    """
    tokens = token.contents.split()
    # Validate the tag syntax piece by piece, failing fast with a precise
    # error message for each malformed element.
    if len(tokens) < 4:
        raise template.TemplateSyntaxError(
            "'get_admin_log' statements require two arguments")
    if not tokens[1].isdigit():
        raise template.TemplateSyntaxError(
            "First argument to 'get_admin_log' must be an integer")
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError(
            "Second argument to 'get_admin_log' must be 'as'")
    if len(tokens) > 4 and tokens[4] != 'for_user':
        raise template.TemplateSyntaxError(
            "Fourth argument to 'get_admin_log' must be 'for_user'")
    user = tokens[5] if len(tokens) > 5 else None
    return AdminLogNode(limit=tokens[1], varname=tokens[3], user=user)
| bsd-3-clause |
jmartinm/invenio | modules/bibdocfile/lib/bibdocfile_regression_tests.py | 10 | 30584 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibDocFile Regression Test Suite."""
__revision__ = "$Id$"
import shutil
import os
from invenio.testutils import InvenioTestCase
from invenio.testutils import make_test_suite, run_test_suite
from invenio.bibdocfile import BibRecDocs, BibRelation, MoreInfo, \
check_bibdoc_authorization, bibdocfile_url_p, guess_format_from_url, CFG_HAS_MAGIC, \
Md5Folder, calculate_md5, calculate_md5_external
from invenio.dbquery import run_sql
from invenio.access_control_config import CFG_WEBACCESS_WARNING_MSGS
from invenio.config import \
CFG_SITE_URL, \
CFG_PREFIX, \
CFG_BIBDOCFILE_FILEDIR, \
CFG_SITE_RECORD, \
CFG_WEBDIR, \
CFG_TMPDIR, \
CFG_PATH_MD5SUM
import invenio.template
from datetime import datetime
import time
class BibDocFsInfoTest(InvenioTestCase):
    """Regression tests about the table bibdocfsinfo"""
    def setUp(self):
        # Attach a fresh JPEG (under a unique docname) to record #2.
        self.my_bibrecdoc = BibRecDocs(2)
        self.unique_name = self.my_bibrecdoc.propose_unique_docname('file')
        self.my_bibdoc = self.my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', docname=self.unique_name)
        self.my_bibdoc_id = self.my_bibdoc.id
    def tearDown(self):
        # Hard-remove the document (and its bibdocfsinfo rows) created above.
        self.my_bibdoc.expunge()
    def test_hard_delete(self):
        """bibdocfile - test correct update of bibdocfsinfo when hard-deleting"""
        # After setUp: version 1 (.jpg) is the latest version.
        self.assertEqual(run_sql("SELECT MAX(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.my_bibdoc_id, ))[0][0], 1)
        self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=1 AND format='.jpg'", (self.my_bibdoc_id, ))[0][0], True)
        # Adding a new version must shift the last_version flag to it.
        self.my_bibdoc.add_file_new_version(CFG_PREFIX + '/lib/webtest/invenio/test.gif')
        self.assertEqual(run_sql("SELECT MAX(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.my_bibdoc_id, ))[0][0], 2)
        self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=2 AND format='.gif'", (self.my_bibdoc_id, ))[0][0], True)
        self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=1 AND format='.jpg'", (self.my_bibdoc_id, ))[0][0], False)
        # Hard-deleting version 2 must restore version 1 as the latest.
        self.my_bibdoc.delete_file('.gif', 2)
        self.assertEqual(run_sql("SELECT MAX(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.my_bibdoc_id, ))[0][0], 1)
        self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=1 AND format='.jpg'", (self.my_bibdoc_id, ))[0][0], True)
class BibDocFileGuessFormat(InvenioTestCase):
    """Regression tests for guess_format_from_url"""

    def test_guess_format_from_url_local_no_ext(self):
        """bibdocfile - guess_format_from_url(), local URL, no extension"""
        self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'test')), '.bin')

    # The expected result for extension-less files depends on whether the
    # 'magic' library is available, hence the conditional test definitions.
    if CFG_HAS_MAGIC:
        def test_guess_format_from_url_local_no_ext_with_magic(self):
            """bibdocfile - guess_format_from_url(), local URL, no extension, with magic"""
            self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'testgif')), '.gif')
    else:
        def test_guess_format_from_url_local_no_ext_with_magic(self):
            """bibdocfile - guess_format_from_url(), local URL, no extension, no magic"""
            self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'testgif')), '.bin')

    def test_guess_format_from_url_local_unknown_ext(self):
        """bibdocfile - guess_format_from_url(), local URL, unknown extension"""
        self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'test.foo')), '.foo')

    def test_guess_format_from_url_local_known_ext(self):
        """bibdocfile - guess_format_from_url(), local URL, unknown extension"""
        self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'test.gif')), '.gif')

    def test_guess_format_from_url_remote_no_ext(self):
        """bibdocfile - guess_format_from_url(), remote URL, no extension"""
        self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/test'), '.bin')

    if CFG_HAS_MAGIC:
        def test_guess_format_from_url_remote_no_ext_with_magic(self):
            """bibdocfile - guess_format_from_url(), remote URL, no extension, with magic"""
            self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/testgif'), '.gif')
    else:
        def test_guess_format_from_url_remote_no_ext_with_magic(self):
            """bibdocfile - guess_format_from_url(), remote URL, no extension, no magic"""
            self.failUnless(guess_format_from_url(CFG_SITE_URL + '/img/testgif') in ('.bin', '.gif'))

    if CFG_HAS_MAGIC:
        def test_guess_format_from_url_remote_unknown_ext(self):
            """bibdocfile - guess_format_from_url(), remote URL, unknown extension, with magic"""
            self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/test.foo'), '.gif')
    else:
        def test_guess_format_from_url_remote_unknown_ext(self):
            """bibdocfile - guess_format_from_url(), remote URL, unknown extension, no magic"""
            self.failUnless(guess_format_from_url(CFG_SITE_URL + '/img/test.foo') in ('.bin', '.gif'))

    def test_guess_format_from_url_remote_known_ext(self):
        """bibdocfile - guess_format_from_url(), remote URL, known extension"""
        self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/test.gif'), '.gif')

    def test_guess_format_from_url_local_gpl_license(self):
        # Write an extension-less text file; with magic available it should
        # be detected as text, otherwise fall back to '.bin'.
        local_path = os.path.join(CFG_TMPDIR, 'LICENSE')
        print >> open(local_path, 'w'), """
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
[...]
"""
        try:
            if CFG_HAS_MAGIC:
                self.assertEqual(guess_format_from_url(local_path), '.txt')
            else:
                self.assertEqual(guess_format_from_url(local_path), '.bin')
        finally:
            os.remove(local_path)
class BibRecDocsTest(InvenioTestCase):
    """regression tests about BibRecDocs"""
    def test_BibRecDocs(self):
        """bibdocfile - BibRecDocs functions"""
        my_bibrecdoc = BibRecDocs(2)
        #add bibdoc
        my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', 'Main', 'img_test', False, 'test add new file', 'test', '.jpg')
        my_bibrecdoc.add_bibdoc(doctype='Main', docname='file', never_fail=False)
        # Record #2 ships with one document; the two additions make three.
        self.assertEqual(len(my_bibrecdoc.list_bibdocs()), 3)
        my_added_bibdoc = my_bibrecdoc.get_bibdoc('file')
        #add bibdocfile in empty bibdoc
        my_added_bibdoc.add_file_new_version(CFG_PREFIX + '/lib/webtest/invenio/test.gif', \
                description= 'added in empty bibdoc', comment=None, docformat=None, flags=['PERFORM_HIDE_PREVIOUS'])
        #propose unique docname
        self.assertEqual(my_bibrecdoc.propose_unique_docname('file'), 'file_2')
        #has docname
        self.assertEqual(my_bibrecdoc.has_docname_p('file'), True)
        #merge 2 bibdocs
        my_bibrecdoc.merge_bibdocs('img_test', 'file')
        self.assertEqual(len(my_bibrecdoc.get_bibdoc("img_test").list_all_files()), 2)
        #check file exists
        self.assertEqual(my_bibrecdoc.check_file_exists(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', '.jpg'), True)
        #get bibdoc names
        # we can not rely on the order !
        names = set([my_bibrecdoc.get_bibdoc_names('Main')[0], my_bibrecdoc.get_bibdoc_names('Main')[1]])
        self.assertTrue('0104007_02' in names)
        self.assertTrue('img_test' in names)
        #get total size
        self.assertEqual(my_bibrecdoc.get_total_size(), 1647591)
        #get total size latest version
        self.assertEqual(my_bibrecdoc.get_total_size_latest_version(), 1647591)
        #display
        #value = my_bibrecdoc.display(docname='img_test', version='', doctype='', ln='en', verbose=0, display_hidden=True)
        #self.assert_("<small><b>Main</b>" in value)
        #get xml 8564
        value = my_bibrecdoc.get_xml_8564()
        self.assert_('/'+ CFG_SITE_RECORD +'/2/files/img_test.jpg</subfield>' in value)
        #check duplicate docnames
        self.assertEqual(my_bibrecdoc.check_duplicate_docnames(), True)
    def tearDown(self):
        # Drop every document this test may have created on record #2.
        my_bibrecdoc = BibRecDocs(2)
        #delete
        my_bibrecdoc.delete_bibdoc('img_test')
        my_bibrecdoc.delete_bibdoc('file')
        my_bibrecdoc.delete_bibdoc('test')
class BibDocsTest(InvenioTestCase):
    """regression tests about BibDocs"""

    def test_BibDocs(self):
        """bibdocfile - BibDocs functions"""
        # add file
        my_bibrecdoc = BibRecDocs(2)
        timestamp1 = datetime(*(time.strptime("2011-10-09 08:07:06", "%Y-%m-%d %H:%M:%S")[:6]))
        my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', 'Main', 'img_test', False, 'test add new file', 'test', '.jpg', modification_date=timestamp1)
        my_new_bibdoc = my_bibrecdoc.get_bibdoc("img_test")
        value = my_bibrecdoc.list_bibdocs()
        self.assertEqual(len(value), 2)
        # get total file (bibdoc)
        self.assertEqual(my_new_bibdoc.get_total_size(), 91750)
        # get recid
        self.assertEqual(my_new_bibdoc.bibrec_links[0]["recid"], 2)
        # change name
        my_new_bibdoc.change_name(2, 'new_name')
        # get docname (re-fetch BibRecDocs so the rename is visible)
        my_bibrecdoc = BibRecDocs(2)
        self.assertEqual(my_bibrecdoc.get_docname(my_new_bibdoc.id), 'new_name')
        # get type
        self.assertEqual(my_new_bibdoc.get_type(), 'Main')
        # get id
        self.assert_(my_new_bibdoc.get_id() > 80)
        # set status
        my_new_bibdoc.set_status('new status')
        # get status
        self.assertEqual(my_new_bibdoc.get_status(), 'new status')
        # get base directory
        self.assert_(my_new_bibdoc.get_base_dir().startswith(CFG_BIBDOCFILE_FILEDIR))
        # get file number
        self.assertEqual(my_new_bibdoc.get_file_number(), 1)
        # add file new version
        timestamp2 = datetime(*(time.strptime("2010-09-08 07:06:05", "%Y-%m-%d %H:%M:%S")[:6]))
        my_new_bibdoc.add_file_new_version(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', description= 'the new version', comment=None, docformat=None, flags=["PERFORM_HIDE_PREVIOUS"], modification_date=timestamp2)
        self.assertEqual(my_new_bibdoc.list_versions(), [1, 2])
        # revert
        timestamp3 = datetime.now()
        time.sleep(2) # so we can see a difference between now() and the time of the revert
        my_new_bibdoc.revert(1)
        self.assertEqual(my_new_bibdoc.list_versions(), [1, 2, 3])
        self.assertEqual(my_new_bibdoc.get_description('.jpg', version=3), 'test add new file')
        # get total size latest version
        self.assertEqual(my_new_bibdoc.get_total_size_latest_version(), 91750)
        # get latest version
        self.assertEqual(my_new_bibdoc.get_latest_version(), 3)
        # list latest files
        self.assertEqual(len(my_new_bibdoc.list_latest_files()), 1)
        self.assertEqual(my_new_bibdoc.list_latest_files()[0].get_version(), 3)
        # list version files
        self.assertEqual(len(my_new_bibdoc.list_version_files(1, list_hidden=True)), 1)
        # display # No Display facility inside of an object !
        # value = my_new_bibdoc.display(version='', ln='en', display_hidden=True)
        # self.assert_('>test add new file<' in value)
        # format already exist
        self.assertEqual(my_new_bibdoc.format_already_exists_p('.jpg'), True)
        # get file
        self.assertEqual(my_new_bibdoc.get_file('.jpg', version='1').get_version(), 1)
        # set description
        my_new_bibdoc.set_description('new description', '.jpg', version=1)
        # get description
        self.assertEqual(my_new_bibdoc.get_description('.jpg', version=1), 'new description')
        # set comment
        # NOTE(review): the next two statements call set_description /
        # get_description, not set_comment / get_comment — possibly a
        # copy-paste slip in the original test; confirm before changing.
        my_new_bibdoc.set_description('new comment', '.jpg', version=1)
        # get comment
        self.assertEqual(my_new_bibdoc.get_description('.jpg', version=1), 'new comment')
        # get history
        assert len(my_new_bibdoc.get_history()) > 0
        # check modification date
        self.assertEqual(my_new_bibdoc.get_file('.jpg', version=1).md, timestamp1)
        self.assertEqual(my_new_bibdoc.get_file('.jpg', version=2).md, timestamp2)
        assert my_new_bibdoc.get_file('.jpg', version=3).md > timestamp3
        # delete file
        my_new_bibdoc.delete_file('.jpg', 2)
        # list all files
        self.assertEqual(len(my_new_bibdoc.list_all_files()), 2)
        # delete file
        my_new_bibdoc.delete_file('.jpg', 3)
        # add new format
        timestamp4 = datetime(*(time.strptime("2012-11-10 09:08:07", "%Y-%m-%d %H:%M:%S")[:6]))
        my_new_bibdoc.add_file_new_format(CFG_PREFIX + '/lib/webtest/invenio/test.gif', version=None, description=None, comment=None, docformat=None, modification_date=timestamp4)
        self.assertEqual(len(my_new_bibdoc.list_all_files()), 2)
        # check modification time
        self.assertEqual(my_new_bibdoc.get_file('.jpg', version=1).md, timestamp1)
        self.assertEqual(my_new_bibdoc.get_file('.gif', version=1).md, timestamp4)
        # change the format name
        my_new_bibdoc.change_docformat('.gif', '.gif;icon-640')
        self.assertEqual(my_new_bibdoc.format_already_exists_p('.gif'), False)
        self.assertEqual(my_new_bibdoc.format_already_exists_p('.gif;icon-640'), True)
        # delete file
        my_new_bibdoc.delete_file('.jpg', 1)
        # delete file
        my_new_bibdoc.delete_file('.gif;icon-640', 1)
        # empty bibdoc
        self.assertEqual(my_new_bibdoc.empty_p(), True)
        # hidden?
        self.assertEqual(my_new_bibdoc.hidden_p('.jpg', version=1), False)
        # hide
        my_new_bibdoc.set_flag('HIDDEN', '.jpg', version=1)
        # hidden?
        self.assertEqual(my_new_bibdoc.hidden_p('.jpg', version=1), True)
        # add and get icon
        my_new_bibdoc.add_icon( CFG_PREFIX + '/lib/webtest/invenio/icon-test.gif', modification_date=timestamp4)
        my_bibrecdoc = BibRecDocs(2)
        value = my_bibrecdoc.get_bibdoc("new_name")
        self.assertEqual(value.get_icon().docid, my_new_bibdoc.get_icon().docid)
        self.assertEqual(value.get_icon().version, my_new_bibdoc.get_icon().version)
        self.assertEqual(value.get_icon().format, my_new_bibdoc.get_icon().format)
        # check modification time
        self.assertEqual(my_new_bibdoc.get_icon().md, timestamp4)
        # delete icon
        my_new_bibdoc.delete_icon()
        # get icon
        self.assertEqual(my_new_bibdoc.get_icon(), None)
        # delete
        my_new_bibdoc.delete()
        self.assertEqual(my_new_bibdoc.deleted_p(), True)
        # undelete
        my_new_bibdoc.undelete(previous_status='', recid=2)
        # expunging
        my_new_bibdoc.expunge()
        my_bibrecdoc.build_bibdoc_list()
        self.failIf('new_name' in my_bibrecdoc.get_bibdoc_names())
        self.failUnless(my_bibrecdoc.get_bibdoc_names())

    def tearDown(self):
        """Remove any bibdocs the test may have left behind."""
        my_bibrecdoc = BibRecDocs(2)
        # delete
        my_bibrecdoc.delete_bibdoc('img_test')
        my_bibrecdoc.delete_bibdoc('new_name')
class BibRelationTest(InvenioTestCase):
    """ regression tests for BibRelation"""

    def test_RelationCreation_Version(self):
        """
        Testing relations between particular versions of a document
        We create two relations differing only on the BibDoc version
        number and verify that they are indeed different (store different data)
        """
        rel1 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
                                  bibdoc1_ver = 1, bibdoc2_ver = 1,
                                  rel_type = "some_rel")
        rel2 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
                                  bibdoc1_ver = 1, bibdoc2_ver = 2,
                                  rel_type = "some_rel")
        rel1["key1"] = "value1"
        rel1["key2"] = "value2"
        rel2["key1"] = "value3"
        # now testing the retrieval of data
        new_rel1 = BibRelation(bibdoc1_id = 10, bibdoc2_id = 12,
                               rel_type = "some_rel", bibdoc1_ver = 1,
                               bibdoc2_ver = 1)
        new_rel2 = BibRelation(bibdoc1_id = 10, bibdoc2_id = 12,
                               rel_type = "some_rel", bibdoc1_ver = 1,
                               bibdoc2_ver = 2)
        self.assertEqual(new_rel1["key1"], "value1")
        self.assertEqual(new_rel1["key2"], "value2")
        self.assertEqual(new_rel2["key1"], "value3")
        # now testing the deletion of relations
        new_rel1.delete()
        new_rel2.delete()
        newer_rel1 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
                                        bibdoc1_ver = 1, bibdoc2_ver = 1,
                                        rel_type = "some_rel")
        newer_rel2 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
                                        bibdoc1_ver = 1, bibdoc2_ver = 2,
                                        rel_type = "some_rel")
        # freshly re-created relations must not see the deleted key/value data
        self.assertEqual("key1" in newer_rel1, False)
        self.assertEqual("key1" in newer_rel2, False)
        newer_rel1.delete()
        newer_rel2.delete()
class BibDocFilesTest(InvenioTestCase):
    """regression tests about BibDocFiles"""

    def test_BibDocFiles(self):
        """bibdocfile - BibDocFile functions """
        # add bibdoc
        my_bibrecdoc = BibRecDocs(2)
        timestamp = datetime(*(time.strptime("2010-09-08 07:06:05", "%Y-%m-%d %H:%M:%S")[:6]))
        my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', 'Main', 'img_test', False, 'test add new file', 'test', '.jpg', modification_date=timestamp)
        my_new_bibdoc = my_bibrecdoc.get_bibdoc("img_test")
        my_new_bibdocfile = my_new_bibdoc.list_all_files()[0]
        # get url
        self.assertEqual(my_new_bibdocfile.get_url(), CFG_SITE_URL + '/%s/2/files/img_test.jpg' % CFG_SITE_RECORD)
        # get type
        self.assertEqual(my_new_bibdocfile.get_type(), 'Main')
        # get path
        # we should not test for particular path ! this is in the gestion of the underlying implementation,
        # not the interface which should ne tested
        # self.assert_(my_new_bibdocfile.get_path().startswith(CFG_BIBDOCFILE_FILEDIR))
        # self.assert_(my_new_bibdocfile.get_path().endswith('/img_test.jpg;1'))
        # get bibdocid
        self.assertEqual(my_new_bibdocfile.get_bibdocid(), my_new_bibdoc.get_id())
        # get name
        self.assertEqual(my_new_bibdocfile.get_name() , 'img_test')
        # get full name
        self.assertEqual(my_new_bibdocfile.get_full_name() , 'img_test.jpg')
        # get full path
        #self.assert_(my_new_bibdocfile.get_full_path().startswith(CFG_BIBDOCFILE_FILEDIR))
        #self.assert_(my_new_bibdocfile.get_full_path().endswith('/img_test.jpg;1'))
        # get format
        self.assertEqual(my_new_bibdocfile.get_format(), '.jpg')
        # get version
        self.assertEqual(my_new_bibdocfile.get_version(), 1)
        # get description
        self.assertEqual(my_new_bibdocfile.get_description(), my_new_bibdoc.get_description('.jpg', version=1))
        # get comment
        self.assertEqual(my_new_bibdocfile.get_comment(), my_new_bibdoc.get_comment('.jpg', version=1))
        # get recid
        self.assertEqual(my_new_bibdocfile.get_recid(), 2)
        # get status
        self.assertEqual(my_new_bibdocfile.get_status(), '')
        # get size
        self.assertEqual(my_new_bibdocfile.get_size(), 91750)
        # get checksum
        self.assertEqual(my_new_bibdocfile.get_checksum(), '28ec893f9da735ad65de544f71d4ad76')
        # check
        self.assertEqual(my_new_bibdocfile.check(), True)
        # display
        tmpl = invenio.template.load("bibdocfile")
        value = tmpl.tmpl_display_bibdocfile(my_new_bibdocfile, ln='en')
        assert 'files/img_test.jpg?version=1">' in value
        # hidden?
        self.assertEqual(my_new_bibdocfile.hidden_p(), False)
        # check modification date
        self.assertEqual(my_new_bibdocfile.md, timestamp)
        # delete
        my_new_bibdoc.delete()
        self.assertEqual(my_new_bibdoc.deleted_p(), True)
class CheckBibDocAuthorizationTest(InvenioTestCase):
    """Regression tests for check_bibdoc_authorization function."""

    def test_check_bibdoc_authorization(self):
        """bibdocfile - check_bibdoc_authorization function"""
        from invenio.webuser import collect_user_info, get_uid_from_email
        jekyll = collect_user_info(get_uid_from_email('jekyll@cds.cern.ch'))
        # whitespace and letter case in the 'role:' spec should not matter
        self.assertEqual(check_bibdoc_authorization(jekyll, 'role:thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        self.assertEqual(check_bibdoc_authorization(jekyll, 'role: thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        # NOTE(review): the next line duplicates the previous assertion —
        # possibly intended to exercise a caching code path; confirm.
        self.assertEqual(check_bibdoc_authorization(jekyll, 'role: thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        self.assertEqual(check_bibdoc_authorization(jekyll, 'Role: thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        self.assertEqual(check_bibdoc_authorization(jekyll, 'email: jekyll@cds.cern.ch'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        self.assertEqual(check_bibdoc_authorization(jekyll, 'email: jekyll@cds.cern.ch'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        juliet = collect_user_info(get_uid_from_email('juliet.capulet@cds.cern.ch'))
        self.assertEqual(check_bibdoc_authorization(juliet, 'restricted_picture'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        self.assertEqual(check_bibdoc_authorization(juliet, 'status: restricted_picture'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
        # juliet must NOT be authorized for the restricted video
        self.assertNotEqual(check_bibdoc_authorization(juliet, 'restricted_video')[0], 0)
        self.assertNotEqual(check_bibdoc_authorization(juliet, 'status: restricted_video')[0], 0)
class BibDocFileURLTest(InvenioTestCase):
    """Regression tests for bibdocfile_url_p function."""

    def test_bibdocfile_url_p(self):
        """bibdocfile - check bibdocfile_url_p() functionality"""
        self.failUnless(bibdocfile_url_p(CFG_SITE_URL + '/%s/98/files/9709037.pdf' % CFG_SITE_RECORD))
        # record IDs with leading zeros must be recognized too
        self.failUnless(bibdocfile_url_p(CFG_SITE_URL + '/%s/098/files/9709037.pdf' % CFG_SITE_RECORD))
class MoreInfoTest(InvenioTestCase):
    """regression tests about MoreInfo"""

    def test_initialData(self):
        """Testing if passing the initial data really enriches the existing structure"""
        more_info = MoreInfo(docid = 134)
        more_info.set_data("ns1", "k1", "vsrjklfh23478956@#%@#@#%")
        more_info2 = MoreInfo(docid = 134, initial_data = {"ns1" : { "k2" : "weucb2324@#%@#$%@"}})
        # both instances see both keys, regardless of which one wrote them
        self.assertEqual(more_info.get_data("ns1", "k2"), "weucb2324@#%@#$%@")
        self.assertEqual(more_info.get_data("ns1", "k1"), "vsrjklfh23478956@#%@#@#%")
        self.assertEqual(more_info2.get_data("ns1", "k2"), "weucb2324@#%@#$%@")
        self.assertEqual(more_info2.get_data("ns1", "k1"), "vsrjklfh23478956@#%@#@#%")
        # a third, freshly created instance must see the persisted data too
        more_info3 = MoreInfo(docid = 134)
        self.assertEqual(more_info3.get_data("ns1", "k2"), "weucb2324@#%@#$%@")
        self.assertEqual(more_info3.get_data("ns1", "k1"), "vsrjklfh23478956@#%@#@#%")
        more_info.del_key("ns1", "k1")
        more_info.del_key("ns1", "k2")

    def test_createSeparateRead(self):
        """MoreInfo - testing if information saved using one instance is accessible via
        a new one"""
        more_info = MoreInfo(docid = 13)
        more_info.set_data("some_namespace", "some_key", "vsrjklfh23478956@#%@#@#%")
        more_info2 = MoreInfo(docid = 13)
        self.assertEqual(more_info.get_data("some_namespace", "some_key"), "vsrjklfh23478956@#%@#@#%")
        self.assertEqual(more_info2.get_data("some_namespace", "some_key"), "vsrjklfh23478956@#%@#@#%")
        more_info2.del_key("some_namespace", "some_key")

    def test_DictionaryBehaviour(self):
        """moreinfo - tests assignments of data, both using the general interface and using
        namespaces"""
        more_info = MoreInfo()
        more_info.set_data("namespace1", "key1", "val1")
        more_info.set_data("namespace1", "key2", "val2")
        more_info.set_data("namespace2", "key1", "val3")
        self.assertEqual(more_info.get_data("namespace1", "key1"), "val1")
        self.assertEqual(more_info.get_data("namespace1", "key2"), "val2")
        self.assertEqual(more_info.get_data("namespace2", "key1"), "val3")

    def test_inMemoryMoreInfo(self):
        """test that MoreInfo is really stored only in memory (no database accesses)"""
        m1 = MoreInfo(docid = 101, version = 12, cache_only = True)
        m2 = MoreInfo(docid = 101, version = 12, cache_reads = False) # The most direct DB access
        m1.set_data("n1", "k1", "v1")
        # m1 wrote to its cache only, so the DB-reading m2 must not see it
        self.assertEqual(m2.get_data("n1","k1"), None)
        self.assertEqual(m1.get_data("n1","k1"), "v1")

    def test_readCacheMoreInfo(self):
        """we verify that if value is not present in the cache, read will happen from the database"""
        m1 = MoreInfo(docid = 102, version = 12)
        m2 = MoreInfo(docid = 102, version = 12) # The most direct DB access
        self.assertEqual(m2.get_data("n11","k11"), None)
        self.assertEqual(m1.get_data("n11","k11"), None)
        m1.set_data("n11", "k11", "some value")
        self.assertEqual(m1.get_data("n11","k11"), "some value")
        self.assertEqual(m2.get_data("n11","k11"), "some value") # read from a different instance
        m1.delete()
        m2.delete()
class BibDocFileMd5FolderTests(InvenioTestCase):
    """Regression test class for the Md5Folder class"""

    def setUp(self):
        self.path = os.path.join(CFG_TMPDIR, 'md5_tests')
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def tearDown(self):
        shutil.rmtree(self.path)

    def _write(self, name, content):
        """Write a fixture file under self.path and close the handle.

        The previous open(...).write(...) pattern leaked the file object;
        until GC ran the data was not guaranteed to be flushed, which could
        make the subsequent Md5Folder checksum scan flaky.
        """
        with open(os.path.join(self.path, name), "w") as stream:
            stream.write(content)

    def test_empty_md5folder(self):
        """bibdocfile - empty Md5Folder"""
        self.assertEqual(Md5Folder(self.path).md5s, {})

    def test_one_file_md5folder(self):
        """bibdocfile - one file in Md5Folder"""
        self._write('test.txt', "test")
        md5s = Md5Folder(self.path)
        self.assertEqual(md5s.md5s, {'test.txt': '098f6bcd4621d373cade4e832627b4f6'})

    def test_adding_one_more_file_md5folder(self):
        """bibdocfile - one more file in Md5Folder"""
        self._write('test.txt', "test")
        md5s = Md5Folder(self.path)
        self.assertEqual(md5s.md5s, {'test.txt': '098f6bcd4621d373cade4e832627b4f6'})
        self._write('test2.txt', "second test")
        md5s.update()
        self.assertEqual(md5s.md5s, {'test.txt': '098f6bcd4621d373cade4e832627b4f6', 'test2.txt': 'f5a6496b3ed4f2d6e5d602c7be8e6b42'})

    def test_detect_corruption(self):
        """bibdocfile - detect corruption in Md5Folder"""
        self._write('test.txt', "test")
        md5s = Md5Folder(self.path)
        # overwrite the file behind Md5Folder's back: check() must fail
        self._write('test.txt', "second test")
        self.failIf(md5s.check('test.txt'))
        # a forced rescan must pick up the new content
        md5s.update(only_new=False)
        self.failUnless(md5s.check('test.txt'))
        self.assertEqual(md5s.get_checksum('test.txt'), 'f5a6496b3ed4f2d6e5d602c7be8e6b42')

    if CFG_PATH_MD5SUM:
        def test_md5_algorithms(self):
            """bibdocfile - compare md5 algorithms"""
            filepath = os.path.join(self.path, 'test.txt')
            with open(filepath, "w") as stream:
                stream.write("test")
            self.assertEqual(calculate_md5(filepath, force_internal=True), calculate_md5_external(filepath))
# BibDocFsInfoTest and BibDocFileGuessFormat are defined earlier in this
# module.
TEST_SUITE = make_test_suite(BibDocFileMd5FolderTests,
                             BibRecDocsTest,
                             BibDocsTest,
                             BibDocFilesTest,
                             MoreInfoTest,
                             BibRelationTest,
                             BibDocFileURLTest,
                             CheckBibDocAuthorizationTest,
                             BibDocFsInfoTest,
                             BibDocFileGuessFormat)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 |
shakfu/start-vm | default/bin/normalize.py | 1 | 1259 | #!/usr/bin/env python
import hashlib
import os
import sys
from datetime import datetime
HASH = hashlib.md5(str(datetime.now())).hexdigest()
def normalize(path, file_func=None, dir_func=None):
    """Walk *path* bottom-up and report normalized file names.

    The default normalizer lowercases a name and converts spaces to '-'.
    For every file, ``file_func(full_path)`` is called and its result
    printed. Directories are visited but not acted upon (the directory
    branch is disabled below).
    """
    def _default(name):
        return name.lower().replace(' ', '-')

    file_func = file_func or _default
    dir_func = dir_func or _default

    for root, dirs, files in os.walk(path, topdown=False):
        for entry in files:
            full = os.path.join(root, entry)
            print(file_func(full))
        for entry in dirs:
            full = os.path.join(root, entry)
            # NOTE(review): directory handling is intentionally disabled in
            # the original; dir_func is accepted but never applied here.
            #print(dir_func(full))
def norm_func(path, tag=None):
    """Rename *path* to its normalized form and return the new path.

    The name is lowercased and spaces become '-'. The rename goes through a
    temporary name (``<normalized><tag>``) so the change takes effect even
    on case-insensitive filesystems, where renaming 'Foo' -> 'foo' directly
    can be a no-op.

    Args:
        path: file or directory to rename.
        tag: unique temporary suffix; defaults to the module-level HASH.

    Returns:
        The new (normalized) path.
    """
    if tag is None:
        tag = HASH
    entry = os.path.basename(path)
    parent = os.path.dirname(path)
    entry_norm = entry.lower().replace(' ', '-')
    tmp = os.path.join(parent, entry_norm) + tag
    os.rename(path, tmp)
    # BUG FIX: the original used tmp.strip(tag), but str.strip() treats its
    # argument as a *set of characters*, so trailing name characters that
    # also occur in the hex tag (0-9, a-f) were stripped as well, corrupting
    # names like 'data.tar'. Slice the suffix off exactly instead.
    new = tmp[:-len(tag)]
    os.rename(tmp, new)
    return new
def norm_path(path=None):
    """Normalize all names under *path* (or sys.argv[1] when not given)."""
    target = path or sys.argv[1]
    normalize(target, norm_func)
# Alternate invocation kept for reference: rename directories instead of files.
#normalize(path, None, norm_func)

if __name__ == '__main__':
    norm_path()
| mit |
luci/luci-py | appengine/components/components/auth/change_log_test.py | 2 | 45674 | #!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import sys
import unittest
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components import utils
from components.auth import change_log
from components.auth import model
from components.auth.proto import realms_pb2
from components.auth.proto import security_config_pb2
from test_support import test_case
class MakeInitialSnapshotTest(test_case.TestCase):
  """Tests for ensure_initial_snapshot function."""

  def test_works(self):
    # Initial entities. Don't call 'record_revision' to imitate "old"
    # application without history related code.
    @ndb.transactional
    def make_auth_db():
      model.AuthGlobalConfig(key=model.root_key()).put()
      model.AuthIPWhitelistAssignments(
          key=model.ip_whitelist_assignments_key()).put()
      model.AuthGroup(key=model.group_key('A group')).put()
      model.AuthIPWhitelist(key=model.ip_whitelist_key('A whitelist')).put()
      model.replicate_auth_db()
    make_auth_db()

    # Bump auth_db once more to avoid hitting trivial case of "processing first
    # revision ever".
    auth_db_rev = ndb.transaction(model.replicate_auth_db)
    self.assertEqual(2, auth_db_rev)

    # Now do the work.
    change_log.ensure_initial_snapshot(auth_db_rev)

    # Generated new AuthDB rev with updated entities.
    self.assertEqual(3, model.get_auth_db_revision())

    # Check all *History entities exist now.
    p = model.historical_revision_key(3)
    self.assertIsNotNone(
        ndb.Key('AuthGlobalConfigHistory', 'root', parent=p).get())
    self.assertIsNotNone(
        ndb.Key(
            'AuthIPWhitelistAssignmentsHistory', 'default', parent=p).get())
    self.assertIsNotNone(ndb.Key('AuthGroupHistory', 'A group', parent=p).get())
    self.assertIsNotNone(
        ndb.Key('AuthIPWhitelistHistory', 'A whitelist', parent=p).get())

    # Call again, should be noop (marker is set).
    change_log.ensure_initial_snapshot(3)
    self.assertEqual(3, model.get_auth_db_revision())
# PEP 8 (E731): prefer def over assigning a lambda to a name — the
# resulting functions get proper __name__ for tracebacks.
def ident(x):
  """Shorthand for a 'user:<x>' Identity."""
  return model.Identity.from_bytes('user:' + x)


def glob(x):
  """Shorthand for a 'user:<x>' IdentityGlob."""
  return model.IdentityGlob.from_bytes('user:' + x)
def make_group(name, comment, **kwargs):
  """Creates an AuthGroup, records its revision and stores it."""
  grp = model.AuthGroup(key=model.group_key(name), **kwargs)
  grp.record_revision(
      modified_by=ident('me@example.com'),
      modified_ts=utils.utcnow(),
      comment=comment)
  grp.put()
def make_ip_whitelist(name, comment, **kwargs):
  """Creates an AuthIPWhitelist, records its revision and stores it."""
  whitelist = model.AuthIPWhitelist(key=model.ip_whitelist_key(name), **kwargs)
  whitelist.record_revision(
      modified_by=ident('me@example.com'),
      modified_ts=utils.utcnow(),
      comment=comment)
  whitelist.put()
def security_config(regexps):
  """Returns a serialized SecurityConfig with given internal service regexps."""
  return security_config_pb2.SecurityConfig(
      internal_service_regexp=regexps).SerializeToString()
class GenerateChangesTest(test_case.TestCase):
"""Tests for generate_changes function."""
def setUp(self):
    super(GenerateChangesTest, self).setUp()
    # Don't enqueue real task-queue tasks; process_change is invoked
    # synchronously by auth_db_transaction below.
    self.mock(change_log, 'enqueue_process_change_task', lambda _: None)
    # Freeze the clock so 'when' fields in expected changes are deterministic.
    self.mock_now(datetime.datetime(2015, 1, 2, 3, 4, 5))
def auth_db_transaction(self, callback):
    """Imitates AuthDB change and subsequent 'process-change' task.

    Runs `callback` inside an AuthDB transaction, replicates the AuthDB,
    then processes the resulting revision synchronously.

    Returns parent entity of entity subgroup with all generated changes.
    """
    @ndb.transactional
    def run():
      callback()
      return model.replicate_auth_db()
    auth_db_rev = run()
    change_log.process_change(auth_db_rev)
    return change_log.change_log_revision_key(auth_db_rev)
def grab_all(self, ancestor):
    """Returns dicts with all entities under given ancestor.

    Keys are 'Kind1:id1/Kind2:id2/...' paths relative to `ancestor`;
    values are entity property dicts with falsy properties dropped.
    """
    entities = {}
    def cb(key):
      # Skip AuthDBLogRev itself, it's not interesting.
      if key == ancestor:
        return
      # Build the key path relative to the ancestor, leaf first.
      as_str = []
      k = key
      while k and k != ancestor:
        as_str.append('%s:%s' % (k.kind(), k.id()))
        k = k.parent()
      # Keep only properties with truthy values to keep expectations short.
      entities['/'.join(as_str)] = {
          prop: val for prop, val in key.get().to_dict().items() if val
      }
    ndb.Query(ancestor=ancestor).map(cb, keys_only=True)
    return entities
def test_works(self):
    """Covers change generation for every entity kind in one AuthDB revision."""
    # Touch all kinds of entities at once. More thorough tests for per-entity
    # changes are below.
    def touch_all():
      make_group(
          name='A group',
          members=[ident('a@example.com'), ident('b@example.com')],
          description='Blah',
          comment='New group')
      make_ip_whitelist(
          name='An IP whitelist',
          subnets=['127.0.0.1/32'],
          description='Bluh',
          comment='New IP whitelist')
      a = model.AuthIPWhitelistAssignments(
          key=model.ip_whitelist_assignments_key(),
          assignments=[
            model.AuthIPWhitelistAssignments.Assignment(
              identity=ident('a@example.com'),
              ip_whitelist='An IP whitelist')
          ])
      a.record_revision(
          modified_by=ident('me@example.com'),
          modified_ts=utils.utcnow(),
          comment='New assignment')
      a.put()
      c = model.AuthGlobalConfig(
          key=model.root_key(),
          oauth_client_id='client_id',
          oauth_client_secret='client_secret',
          oauth_additional_client_ids=['1', '2'])
      c.record_revision(
          modified_by=ident('me@example.com'),
          modified_ts=utils.utcnow(),
          comment='Config change')
      c.put()
      r = model.AuthRealmsGlobals(
          key=model.realms_globals_key(),
          permissions=[realms_pb2.Permission(name='luci.dev.p1')])
      r.record_revision(
          modified_by=ident('me@example.com'),
          modified_ts=utils.utcnow(),
          comment='New permission')
      r.put()
      p = model.AuthProjectRealms(
          key=model.project_realms_key('proj1'),
          realms=realms_pb2.Realms(api_version=1234),
          config_rev='config_rev',
          perms_rev='prems_rev')
      p.record_revision(
          modified_by=ident('me@example.com'),
          modified_ts=utils.utcnow(),
          comment='New project')
      p.put()
    changes = self.grab_all(self.auth_db_transaction(touch_all))
    # One expected entry per generated AuthDBChange; the '!NNNN' suffix is
    # the numeric change type.
    self.assertEqual({
      'AuthDBChange:AuthGlobalConfig$root!7000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_CONF_OAUTH_CLIENT_CHANGED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'oauth_client_id': u'client_id',
        'oauth_client_secret': u'client_secret',
        'target': u'AuthGlobalConfig$root',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7100': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'oauth_additional_client_ids': [u'1', u'2'],
        'target': u'AuthGlobalConfig$root',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthGroup$A group!1000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_GROUP_CREATED,
        'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
        'comment': u'New group',
        'description': u'Blah',
        'owners': u'administrators',
        'target': u'AuthGroup$A group',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthGroup$A group!1200': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
        'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
        'comment': u'New group',
        'members': [
          model.Identity(kind='user', name='a@example.com'),
          model.Identity(kind='user', name='b@example.com'),
        ],
        'target': u'AuthGroup$A group',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelist$An IP whitelist!3000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_CREATED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'New IP whitelist',
        'description': u'Bluh',
        'target': u'AuthIPWhitelist$An IP whitelist',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelist$An IP whitelist!3200': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'New IP whitelist',
        'subnets': [u'127.0.0.1/32'],
        'target': u'AuthIPWhitelist$An IP whitelist',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelistAssignments'
          '$default$user:a@example.com!5000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
        'comment': u'New assignment',
        'identity': model.Identity(kind='user', name='a@example.com'),
        'ip_whitelist': u'An IP whitelist',
        'target': u'AuthIPWhitelistAssignments$default$user:a@example.com',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com')
      },
      'AuthDBChange:AuthProjectRealms$proj1!10000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CREATED,
        'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
        'comment': u'New project',
        'config_rev_new': u'config_rev',
        'perms_rev_new': u'prems_rev',
        'target': u'AuthProjectRealms$proj1',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com')
      },
      'AuthDBChange:AuthRealmsGlobals$globals!9000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_REALMS_GLOBALS_CHANGED,
        'class_': [u'AuthDBChange', u'AuthRealmsGlobalsChange'],
        'comment': u'New permission',
        'permissions_added': [u'luci.dev.p1'],
        'target': u'AuthRealmsGlobals$globals',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='me@example.com')
      },
    }, changes)
def test_groups_diff(self):
def create():
make_group(
name='A group',
members=[ident('a@example.com'), ident('b@example.com')],
globs=[glob('*@example.com'), glob('*@other.com')],
nested=['A', 'B'],
description='Blah',
comment='New group')
changes = self.grab_all(self.auth_db_transaction(create))
self.assertEqual({
'AuthDBChange:AuthGroup$A group!1000': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_CREATED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'description': u'Blah',
'owners': u'administrators',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1200': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'members': [
model.Identity(kind='user', name='a@example.com'),
model.Identity(kind='user', name='b@example.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1400': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'globs': [
model.IdentityGlob(kind='user', pattern='*@example.com'),
model.IdentityGlob(kind='user', pattern='*@other.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1600': {
'app_version': u'v1a',
'auth_db_rev': 1,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'New group',
'nested': [u'A', u'B'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def modify():
g = model.group_key('A group').get()
g.members = [ident('a@example.com'), ident('c@example.com')]
g.globs = [glob('*@example.com'), glob('*@blah.com')]
g.nested = ['A', 'C']
g.description = 'Another blah'
g.owners = 'another-owners'
g.record_revision(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Changed')
g.put()
changes = self.grab_all(self.auth_db_transaction(modify))
self.assertEqual({
'AuthDBChange:AuthGroup$A group!1100': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_DESCRIPTION_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'description': u'Another blah',
'old_description': u'Blah',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1150': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_OWNERS_CHANGED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'old_owners': u'administrators',
'owners': u'another-owners',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1200': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'members': [model.Identity(kind='user', name='c@example.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1300': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'members': [model.Identity(kind='user', name='b@example.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1400': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'globs': [model.IdentityGlob(kind='user', pattern='*@blah.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1500': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'globs': [model.IdentityGlob(kind='user', pattern='*@other.com')],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1600': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_ADDED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'nested': [u'C'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1700': {
'app_version': u'v1a',
'auth_db_rev': 2,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Changed',
'nested': [u'B'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def delete():
g = model.group_key('A group').get()
g.record_deletion(
modified_by=ident('me@example.com'),
modified_ts=utils.utcnow(),
comment='Deleted')
g.key.delete()
changes = self.grab_all(self.auth_db_transaction(delete))
self.assertEqual({
'AuthDBChange:AuthGroup$A group!1300': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'members': [
model.Identity(kind='user', name='a@example.com'),
model.Identity(kind='user', name='c@example.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1500': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'globs': [
model.IdentityGlob(kind='user', pattern='*@example.com'),
model.IdentityGlob(kind='user', pattern='*@blah.com'),
],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1700': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_REMOVED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'nested': [u'A', u'C'],
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
'AuthDBChange:AuthGroup$A group!1800': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_DELETED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'old_description': u'Another blah',
'old_owners': u'another-owners',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='me@example.com'),
},
}, changes)
def test_ip_whitelists_diff(self):
  """Checks AuthDBChange entities generated for IP whitelist mutations.

  Creates, modifies and then deletes a single AuthIPWhitelist, each step in
  its own auth DB transaction, and compares the captured change log entities
  (keyed as '<kind>:<target>!<suffix>'; the numeric suffix appears to encode
  the change type — confirm in change_log.make_change_key) against expected
  literal dicts.
  """
  # Rev 1: creating the whitelist emits IPWL_CREATED plus IPWL_SUBNETS_ADDED.
  def create():
    make_ip_whitelist(
        name='A list',
        subnets=['127.0.0.1/32', '127.0.0.2/32'],
        description='Blah',
        comment='New list')
  changes = self.grab_all(self.auth_db_transaction(create))
  self.assertEqual({
      'AuthDBChange:AuthIPWhitelist$A list!3000': {
          'app_version': u'v1a',
          'auth_db_rev': 1,
          'change_type': change_log.AuthDBChange.CHANGE_IPWL_CREATED,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
          'comment': u'New list',
          'description': u'Blah',
          'target': u'AuthIPWhitelist$A list',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3200': {
          'app_version': u'v1a',
          'auth_db_rev': 1,
          'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
          'comment': u'New list',
          'subnets': [u'127.0.0.1/32', u'127.0.0.2/32'],
          'target': u'AuthIPWhitelist$A list',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)

  # Rev 2: swapping one subnet and editing the description yields
  # DESCRIPTION_CHANGED plus SUBNETS_ADDED/SUBNETS_REMOVED deltas.
  def modify():
    l = model.ip_whitelist_key('A list').get()
    l.subnets = ['127.0.0.1/32', '127.0.0.3/32']
    l.description = 'Another blah'
    l.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Changed')
    l.put()
  changes = self.grab_all(self.auth_db_transaction(modify))
  self.assertEqual({
      'AuthDBChange:AuthIPWhitelist$A list!3100': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_IPWL_DESCRIPTION_CHANGED,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
          'comment': u'Changed',
          'description': u'Another blah',
          'old_description': u'Blah',
          'target': u'AuthIPWhitelist$A list',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3200': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
          'comment': u'Changed',
          'subnets': [u'127.0.0.3/32'],
          'target': u'AuthIPWhitelist$A list',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3300': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
          'comment': u'Changed',
          'subnets': [u'127.0.0.2/32'],
          'target': u'AuthIPWhitelist$A list',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)

  # Rev 3: deletion reports every remaining subnet as removed, followed by
  # IPWL_DELETED carrying the final description.
  def delete():
    l = model.ip_whitelist_key('A list').get()
    l.record_deletion(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Deleted')
    l.key.delete()
  changes = self.grab_all(self.auth_db_transaction(delete))
  self.assertEqual({
      'AuthDBChange:AuthIPWhitelist$A list!3300': {
          'app_version': u'v1a',
          'auth_db_rev': 3,
          'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
          'comment': u'Deleted',
          'subnets': [u'127.0.0.1/32', u'127.0.0.3/32'],
          'target': u'AuthIPWhitelist$A list',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3400': {
          'app_version': u'v1a',
          'auth_db_rev': 3,
          'change_type': change_log.AuthDBChange.CHANGE_IPWL_DELETED,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
          'comment': u'Deleted',
          'old_description': u'Another blah',
          'target': u'AuthIPWhitelist$A list',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)
def test_ip_wl_assignments_diff(self):
  """Checks AuthDBChange entities for IP whitelist *assignment* mutations.

  Assignments are diffed per identity: setting or replacing an identity's
  whitelist emits IPWLASSIGN_SET; dropping an identity emits
  IPWLASSIGN_UNSET (carrying the whitelist it used to point at).
  """
  # Rev 1: two fresh assignments -> one IPWLASSIGN_SET per identity.
  def create():
    a = model.AuthIPWhitelistAssignments(
        key=model.ip_whitelist_assignments_key(),
        assignments=[
            model.AuthIPWhitelistAssignments.Assignment(
                identity=ident('a@example.com'),
                ip_whitelist='An IP whitelist'),
            model.AuthIPWhitelistAssignments.Assignment(
                identity=ident('b@example.com'),
                ip_whitelist='Another IP whitelist'),
        ])
    a.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='New assignment')
    a.put()
  changes = self.grab_all(self.auth_db_transaction(create))
  # Adjacent string literals below are concatenated to form the long keys.
  self.assertEqual({
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:a@example.com!5000': {
          'app_version': u'v1a',
          'auth_db_rev': 1,
          'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
          'comment': u'New assignment',
          'identity': model.Identity(kind='user', name='a@example.com'),
          'ip_whitelist': u'An IP whitelist',
          'target': u'AuthIPWhitelistAssignments$default$user:a@example.com',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:b@example.com!5000': {
          'app_version': u'v1a',
          'auth_db_rev': 1,
          'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
          'comment': u'New assignment',
          'identity': model.Identity(kind='user', name='b@example.com'),
          'ip_whitelist': u'Another IP whitelist',
          'target': u'AuthIPWhitelistAssignments$default$user:b@example.com',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)

  # Rev 2: a@ is re-pointed (SET), b@ disappears (UNSET), c@ is new (SET).
  def change():
    a = model.ip_whitelist_assignments_key().get()
    a.assignments = [
        model.AuthIPWhitelistAssignments.Assignment(
            identity=ident('a@example.com'),
            ip_whitelist='Another IP whitelist'),
        model.AuthIPWhitelistAssignments.Assignment(
            identity=ident('c@example.com'),
            ip_whitelist='IP whitelist'),
    ]
    a.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='change')
    a.put()
  changes = self.grab_all(self.auth_db_transaction(change))
  self.assertEqual({
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:a@example.com!5000': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
          'comment': u'change',
          'identity': model.Identity(kind='user', name='a@example.com'),
          'ip_whitelist': u'Another IP whitelist',
          'target': u'AuthIPWhitelistAssignments$default$user:a@example.com',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:b@example.com!5100': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_UNSET,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
          'comment': u'change',
          'identity': model.Identity(kind='user', name='b@example.com'),
          'ip_whitelist': u'Another IP whitelist',
          'target': u'AuthIPWhitelistAssignments$default$user:b@example.com',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:c@example.com!5000': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
          'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
          'comment': u'change',
          'identity': model.Identity(kind='user', name='c@example.com'),
          'ip_whitelist': u'IP whitelist',
          'target': u'AuthIPWhitelistAssignments$default$user:c@example.com',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)
def test_global_config_diff(self):
  """Checks AuthDBChange entities for AuthGlobalConfig mutations.

  Covers OAuth client changes, additional-client-id list diffs, token
  server URL changes and security config changes.
  """
  # Rev 1: fresh config -> CONF_OAUTH_CLIENT_CHANGED + CONF_CLIENT_IDS_ADDED.
  def create():
    c = model.AuthGlobalConfig(
        key=model.root_key(),
        oauth_client_id='client_id',
        oauth_client_secret='client_secret',
        oauth_additional_client_ids=['1', '2'])
    c.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Config change')
    c.put()
  changes = self.grab_all(self.auth_db_transaction(create))
  self.assertEqual({
      'AuthDBChange:AuthGlobalConfig$root!7000': {
          'app_version': u'v1a',
          'auth_db_rev': 1,
          'change_type': change_log.AuthDBChange.CHANGE_CONF_OAUTH_CLIENT_CHANGED,
          'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
          'comment': u'Config change',
          'oauth_client_id': u'client_id',
          'oauth_client_secret': u'client_secret',
          'target': u'AuthGlobalConfig$root',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7100': {
          'app_version': u'v1a',
          'auth_db_rev': 1,
          'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
          'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
          'comment': u'Config change',
          'oauth_additional_client_ids': [u'1', u'2'],
          'target': u'AuthGlobalConfig$root',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)

  # Rev 2: client id list diff ('3' added, '2' removed), plus token server
  # URL and security config changes.
  def modify():
    c = model.root_key().get()
    c.oauth_additional_client_ids = ['1', '3']
    c.token_server_url = 'https://token-server'
    c.security_config = security_config(['hi'])
    c.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Config change')
    c.put()
  changes = self.grab_all(self.auth_db_transaction(modify))
  self.assertEqual({
      'AuthDBChange:AuthGlobalConfig$root!7100': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
          'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
          'comment': u'Config change',
          'oauth_additional_client_ids': [u'3'],
          'target': u'AuthGlobalConfig$root',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7200': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_REMOVED,
          'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
          'comment': u'Config change',
          'oauth_additional_client_ids': [u'2'],
          'target': u'AuthGlobalConfig$root',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7300': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type':
              change_log.AuthDBChange.CHANGE_CONF_TOKEN_SERVER_URL_CHANGED,
          'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
          'comment': u'Config change',
          'target': u'AuthGlobalConfig$root',
          'token_server_url_new': u'https://token-server',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7400': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type':
              change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED,
          'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
          'comment': u'Config change',
          'security_config_new': security_config(['hi']),
          'target': u'AuthGlobalConfig$root',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)
def test_realms_globals_diff(self):
  """Checks the REALMS_GLOBALS_CHANGED entity for permission list changes.

  The creation transaction (rev 1) is executed but its change log output is
  intentionally not inspected; only the modification diff (permissions
  added/removed) is asserted.
  """
  def create():
    c = model.AuthRealmsGlobals(
        key=model.realms_globals_key(),
        permissions=[
            realms_pb2.Permission(name='luci.dev.p1'),
            realms_pb2.Permission(name='luci.dev.p2'),
            realms_pb2.Permission(name='luci.dev.p3'),
        ])
    c.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='New realms config')
    c.put()
  self.auth_db_transaction(create)

  # Rev 2: p4 added, p2 removed, p1/p3 unchanged -> a single change entity
  # carrying both the added and removed permission names.
  def modify():
    ent = model.realms_globals_key().get()
    ent.permissions = [
        realms_pb2.Permission(name='luci.dev.p1'),
        realms_pb2.Permission(name='luci.dev.p3'),
        realms_pb2.Permission(name='luci.dev.p4'),
    ]
    ent.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Realms config change')
    ent.put()
  changes = self.grab_all(self.auth_db_transaction(modify))
  self.assertEqual({
      'AuthDBChange:AuthRealmsGlobals$globals!9000': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type':
              change_log.AuthDBChange.CHANGE_REALMS_GLOBALS_CHANGED,
          'class_': [u'AuthDBChange', u'AuthRealmsGlobalsChange'],
          'comment': u'Realms config change',
          'permissions_added': [u'luci.dev.p4'],
          'permissions_removed': [u'luci.dev.p2'],
          'target': u'AuthRealmsGlobals$globals',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)
def test_project_realms_diff(self):
  """Checks AuthDBChange entities across the AuthProjectRealms lifecycle.

  Note: in reality Realms.api_version is fixed. We change it in this test
  since it is the simplest field to change.
  """
  # Rev 1: creation -> PROJECT_REALMS_CREATED with the initial revisions.
  def create():
    p = model.AuthProjectRealms(
        key=model.project_realms_key('proj1'),
        realms=realms_pb2.Realms(api_version=123),
        config_rev='config_rev1',
        perms_rev='perms_rev1')
    p.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Created')
    p.put()
  changes = self.grab_all(self.auth_db_transaction(create))
  self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10000': {
          'app_version': u'v1a',
          'auth_db_rev': 1,
          'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CREATED,
          'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
          'comment': u'Created',
          'config_rev_new': u'config_rev1',
          'perms_rev_new': u'perms_rev1',
          'target': u'AuthProjectRealms$proj1',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)

  # Shared helper for the three update scenarios below.
  def update(api_version, config_rev, perms_rev):
    p = model.project_realms_key('proj1').get()
    p.realms = realms_pb2.Realms(api_version=api_version)
    p.config_rev = config_rev
    p.perms_rev = perms_rev
    p.record_revision(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Updated')
    p.put()

  # Update everything (rev 2): realms body and both revisions change ->
  # PROJECT_REALMS_CHANGED + PROJECT_REALMS_REEVALUATED.
  changes = self.grab_all(self.auth_db_transaction(
      lambda: update(1234, 'config_rev2', 'perms_rev2')))
  self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10100': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED,
          'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
          'comment': u'Updated',
          'config_rev_new': u'config_rev2',
          'config_rev_old': u'config_rev1',
          'target': u'AuthProjectRealms$proj1',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
      'AuthDBChange:AuthProjectRealms$proj1!10200': {
          'app_version': u'v1a',
          'auth_db_rev': 2,
          'change_type':
              change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REEVALUATED,
          'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
          'comment': u'Updated',
          'perms_rev_new': u'perms_rev2',
          'perms_rev_old': u'perms_rev1',
          'target': u'AuthProjectRealms$proj1',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)

  # Update realms_pb2.Realms, but do not change revisions (rev 3): only
  # PROJECT_REALMS_CHANGED, with config_rev_old equal to config_rev_new.
  changes = self.grab_all(self.auth_db_transaction(
      lambda: update(12345, 'config_rev2', 'perms_rev2')))
  self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10100': {
          'app_version': u'v1a',
          'auth_db_rev': 3,
          'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED,
          'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
          'comment': u'Updated',
          'config_rev_new': u'config_rev2',
          'config_rev_old': u'config_rev2',
          'target': u'AuthProjectRealms$proj1',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)

  # Update revisions, but don't actually touch realms: no change log
  # entities at all (revision bumps alone are not logged), though the new
  # revisions are still stored on the entity.
  changes = self.grab_all(self.auth_db_transaction(
      lambda: update(12345, 'config_rev3', 'perms_rev3')))
  self.assertEqual({}, changes)

  # Final rev: deletion -> PROJECT_REALMS_REMOVED carrying the last stored
  # revisions (config_rev3/perms_rev3 from the silent update above).
  def delete():
    p = model.project_realms_key('proj1').get()
    p.record_deletion(
        modified_by=ident('me@example.com'),
        modified_ts=utils.utcnow(),
        comment='Deleted')
    p.key.delete()
  changes = self.grab_all(self.auth_db_transaction(delete))
  self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10300': {
          'app_version': u'v1a',
          'auth_db_rev': 5,
          'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REMOVED,
          'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
          'comment': u'Deleted',
          'config_rev_old': u'config_rev3',
          'perms_rev_old': u'perms_rev3',
          'target': u'AuthProjectRealms$proj1',
          'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
          'who': model.Identity(kind='user', name='me@example.com'),
      },
  }, changes)
class AuthDBChangeTest(test_case.TestCase):
  """Tests for AuthDBChange subclasses' to_jsonish serialization."""
  # Test to_jsonish for AuthDBGroupChange and AuthDBIPWhitelistAssignmentChange,
  # the rest are trivial.

  def test_group_change_to_jsonish(self):
    """Group change serializes identities/globs to strings, enum to name."""
    c = change_log.AuthDBGroupChange(
        change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
        target='AuthGroup$abc',
        auth_db_rev=123,
        who=ident('a@example.com'),
        when=datetime.datetime(2015, 1, 2, 3, 4, 5),
        comment='A comment',
        app_version='v123',
        description='abc',
        members=[ident('a@a.com')],
        globs=[glob('*@a.com')],
        nested=['A'],
        owners='abc',
        old_owners='def')
    self.assertEqual({
        'app_version': 'v123',
        'auth_db_rev': 123,
        'change_type': 'GROUP_MEMBERS_ADDED',
        'comment': 'A comment',
        'description': 'abc',
        'globs': ['user:*@a.com'],
        'members': ['user:a@a.com'],
        'nested': ['A'],
        'old_description': None,
        'old_owners': 'def',
        'owners': 'abc',
        'target': 'AuthGroup$abc',
        # 'when' appears to be microseconds since epoch — confirm in
        # change_log's to_jsonish implementation.
        'when': 1420167845000000,
        'who': 'user:a@example.com',
    }, c.to_jsonish())

  def test_wl_assignment_to_jsonish(self):
    """Whitelist assignment change serializes identity and whitelist name."""
    c = change_log.AuthDBIPWhitelistAssignmentChange(
        change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
        target='AuthIPWhitelistAssignments$default',
        auth_db_rev=123,
        who=ident('a@example.com'),
        when=datetime.datetime(2015, 1, 2, 3, 4, 5),
        comment='A comment',
        app_version='v123',
        identity=ident('b@example.com'),
        ip_whitelist='whitelist')
    self.assertEqual({
        'app_version': 'v123',
        'auth_db_rev': 123,
        'change_type': 'GROUP_MEMBERS_ADDED',
        'comment': 'A comment',
        'identity': 'user:b@example.com',
        'ip_whitelist': 'whitelist',
        'target': 'AuthIPWhitelistAssignments$default',
        'when': 1420167845000000,
        'who': 'user:a@example.com',
    }, c.to_jsonish())

  def test_security_config_change_to_jsonish(self):
    """Config change serializes the security config proto to a dict."""
    c = change_log.AuthDBConfigChange(
        change_type=change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED,
        target='AuthGlobalConfig$default',
        auth_db_rev=123,
        who=ident('a@example.com'),
        when=datetime.datetime(2015, 1, 2, 3, 4, 5),
        comment='A comment',
        app_version='v123',
        security_config_old=None,
        security_config_new=security_config(['hi']))
    self.assertEqual({
        'app_version': 'v123',
        'auth_db_rev': 123,
        'change_type': 'CONF_SECURITY_CONFIG_CHANGED',
        'comment': 'A comment',
        # Unset config fields still show up with empty/None values.
        'oauth_additional_client_ids': [],
        'oauth_client_id': None,
        'oauth_client_secret': None,
        'security_config_new': {'internal_service_regexp': [u'hi']},
        'security_config_old': None,
        'target': 'AuthGlobalConfig$default',
        'token_server_url_new': None,
        'token_server_url_old': None,
        'when': 1420167845000000,
        'who': 'user:a@example.com',
    }, c.to_jsonish())
class ChangeLogQueryTest(test_case.TestCase):
  """Tests for change log indexing flag and make_change_log_query ordering."""
  # We know that some indexes are required. But component can't declare them,
  # so don't check them.
  SKIP_INDEX_YAML_CHECK = True

  def test_is_changle_log_indexed(self):
    # Note: 'changle' is a typo baked into the public API name.
    self.assertTrue(change_log.is_changle_log_indexed())

  def test_make_change_log_query(self):
    """Queries can be filtered by auth_db_rev, target, both, or neither."""
    def mk_ch(tp, rev, target):
      # Store an AuthDBChange of the given type, revision and target.
      ch = change_log.AuthDBChange(
          change_type=getattr(change_log.AuthDBChange, 'CHANGE_%s' % tp),
          auth_db_rev=rev,
          target=target)
      ch.key = change_log.make_change_key(ch)
      ch.put()
    def key(c):
      # Readable '<rev>/<change id>' form used in the assertions below.
      return '%s/%s' % (c.key.parent().id(), c.key.id())
    mk_ch('GROUP_CREATED', 1, 'AuthGroup$abc')
    mk_ch('GROUP_MEMBERS_ADDED', 1, 'AuthGroup$abc')
    mk_ch('GROUP_CREATED', 1, 'AuthGroup$another')
    mk_ch('GROUP_DELETED', 2, 'AuthGroup$abc')
    mk_ch('GROUP_MEMBERS_ADDED', 2, 'AuthGroup$another')
    # All. Most recent first. Largest even types first.
    # List comprehensions are used instead of map() so the comparisons also
    # work on Python 3, where map() returns an iterator, not a list.
    q = change_log.make_change_log_query()
    self.assertEqual([
      '2/AuthGroup$another!1200',
      '2/AuthGroup$abc!1800',
      '1/AuthGroup$another!1000',
      '1/AuthGroup$abc!1200',
      '1/AuthGroup$abc!1000',
    ], [key(c) for c in q.fetch()])
    # Single revision only.
    q = change_log.make_change_log_query(auth_db_rev=1)
    self.assertEqual([
      '1/AuthGroup$another!1000',
      '1/AuthGroup$abc!1200',
      '1/AuthGroup$abc!1000',
    ], [key(c) for c in q.fetch()])
    # Single target only.
    q = change_log.make_change_log_query(target='AuthGroup$another')
    self.assertEqual([
      '2/AuthGroup$another!1200',
      '1/AuthGroup$another!1000',
    ], [key(c) for c in q.fetch()])
    # Single revision and single target.
    q = change_log.make_change_log_query(
        auth_db_rev=1, target='AuthGroup$another')
    self.assertEqual(['1/AuthGroup$another!1000'], [key(c) for c in q.fetch()])
if __name__ == '__main__':
  # With -v, also disable assertEqual diff truncation so the big
  # expected-vs-actual dict comparisons in these tests print in full.
  if '-v' in sys.argv:
    unittest.TestCase.maxDiff = None
  unittest.main()
| apache-2.0 |
killbug2004/volatility | volatility/plugins/linux/slab_info.py | 14 | 6997 | # Volatility
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Joe Sylve
@license: GNU General Public License 2.0
@contact: joe.sylve@gmail.com
@organization: Digital Forensics Solutions
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
class kmem_cache(obj.CType):
    """Base wrapper around a kernel kmem_cache object.

    Concrete subclasses report which allocator flavour they model via
    get_type().
    """

    def get_type(self):
        """Allocator flavour of this cache; implemented by subclasses."""
        raise NotImplementedError

    def get_name(self):
        """Return the cache's name as a plain string (at most 255 bytes)."""
        name_obj = self.name.dereference_as("String", length = 255)
        return str(name_obj)
class kmem_cache_slab(kmem_cache):
    """kmem_cache specialization for kernels built with the SLAB allocator.

    Iterating an instance yields objects of type 'struct_type' carved out of
    the cache's full and partial slabs (or the free/unused slots instead,
    when 'unalloc' is truthy).  Both 'unalloc' and 'struct_type' are attached
    externally via newattr() before iteration — see
    linux_slabinfo.get_kmem_cache.
    """

    def get_type(self):
        return "slab"

    # volatility does not support indexing pointers
    # and the definition of nodelists changes from array to pointer
    def _get_nodelist(self):
        # Returns the cache's kmem_list3 node, handling both kernel layouts.
        ent = self.nodelists
        if type(ent) == obj.Pointer:
            ret = obj.Object("kmem_list3", offset = ent.dereference(), vm = self.obj_vm)
        elif type(ent) == obj.Array:
            # Only node 0 is examined (single-node assumption).
            ret = ent[0]
        else:
            debug.error("Unknown nodelists types. %s" % type(ent))
        return ret

    def _get_free_list(self):
        # Yields slabs from the kmem_list3.slabs_free linked list.
        slablist = self._get_nodelist().slabs_free
        for slab in slablist.list_of_type("slab", "list"):
            yield slab

    def _get_partial_list(self):
        # Yields slabs from the kmem_list3.slabs_partial linked list.
        slablist = self._get_nodelist().slabs_partial
        for slab in slablist.list_of_type("slab", "list"):
            yield slab

    def _get_full_list(self):
        # Yields slabs from the kmem_list3.slabs_full linked list.
        slablist = self._get_nodelist().slabs_full
        for slab in slablist.list_of_type("slab", "list"):
            yield slab

    def _get_object(self, offset):
        # Instantiates one cached object of 'struct_type' at the given offset.
        return obj.Object(self.struct_type,
                          offset = offset,
                          vm = self.obj_vm,
                          parent = self.obj_parent,
                          name = self.struct_type)

    def __iter__(self):
        # Full slabs: every one of the 'num' slots is allocated, so they are
        # only relevant when enumerating allocated objects.
        if not self.unalloc:
            for slab in self._get_full_list():
                for i in range(self.num):
                    yield self._get_object(slab.s_mem.v() + i * self.buffer_size)

        # Partial slabs: walk the per-slab free chain to tell allocated slots
        # from free ones.
        for slab in self._get_partial_list():
            if not self.num or self.num == 0:
                return

            # The bufctl array sits directly after the slab header; each entry
            # links to the next free slot index.  0xFFFFFFFF appears to be the
            # BUFCTL_END terminator — confirm against kernel headers.
            bufctl = obj.Object("Array",
                               offset = slab.v() + slab.size(),
                               vm = self.obj_vm,
                               parent = self.obj_parent,
                               targetType = "unsigned int",
                               count = self.num)

            # Mark every slot reachable from slab.free as unallocated (1).
            unallocated = [0] * self.num

            i = slab.free
            while i != 0xFFFFFFFF:
                if i >= self.num:
                    break
                unallocated[i] = 1
                i = bufctl[i]

            # Yield free slots when self.unalloc is truthy, allocated slots
            # otherwise (the flag value is compared directly).
            for i in range(0, self.num):
                if unallocated[i] == self.unalloc:
                    yield self._get_object(slab.s_mem.v() + i * self.buffer_size)

        # Free slabs: every slot is unallocated, so they only matter when
        # enumerating free objects.
        if self.unalloc:
            for slab in self._get_free_list():
                for i in range(self.num):
                    yield self._get_object(slab.s_mem.v() + i * self.buffer_size)
class LinuxKmemCacheOverlay(obj.ProfileModification):
    """Installs the SLAB-specific kmem_cache wrapper on Linux profiles."""

    conditions = {'os': lambda os_name: os_name == 'linux'}
    before = ['BasicObjectClasses'] # , 'LinuxVTypes']

    def modification(self, profile):
        """Map 'kmem_cache' to the slab wrapper when the kernel uses SLAB."""
        # Only SLAB kernels export the 'cache_chain' symbol.
        if profile.get_symbol("cache_chain"):
            profile.object_classes.update({'kmem_cache': kmem_cache_slab})
class linux_slabinfo(linux_common.AbstractLinuxCommand):
    """Mimics /proc/slabinfo on a running machine"""

    def get_all_kmem_caches(self):
        """Returns a list of every kmem_cache in the kernel's cache chain.

        Supports SLAB kernels (via the 'cache_chain' symbol); SLUB kernels
        ('slab_caches' symbol) are detected but not yet implemented, in which
        case an empty list is returned.
        """
        linux_common.set_plugin_members(self)
        cache_chain = self.addr_space.profile.get_symbol("cache_chain")
        slab_caches = self.addr_space.profile.get_symbol("slab_caches")
        if cache_chain: #slab
            caches = obj.Object("list_head", offset = cache_chain, vm = self.addr_space)
            listm = "next"
            ret = [cache for cache in caches.list_of_type("kmem_cache", listm)]
        elif slab_caches: #slub
            debug.info("SLUB is currently unsupported.")
            ret = []
        else:
            debug.error("Unknown or unimplemented slab type.")
        return ret

    def get_kmem_cache(self, cache_name, unalloc, struct_name = ""):
        """Finds a cache by name and primes it for iteration.

        Attaches 'unalloc' (iterate free slots instead of allocated ones) and
        'struct_type' (object type yielded by iteration; defaults to the
        cache name) via newattr().  Returns [] if no such cache exists, so
        callers can iterate the result unconditionally.
        """
        if struct_name == "":
            struct_name = cache_name
        for cache in self.get_all_kmem_caches():
            if cache.get_name() == cache_name:
                cache.newattr("unalloc", unalloc)
                cache.newattr("struct_type", struct_name)
                return cache
        debug.debug("Invalid kmem_cache: {0}".format(cache_name))
        return []

    def calculate(self):
        """Yields per-cache stats rows resembling /proc/slabinfo."""
        linux_common.set_plugin_members(self)
        for cache in self.get_all_kmem_caches():
            if cache.get_type() == "slab":
                active_objs = 0
                active_slabs = 0
                num_slabs = 0
                # shared_avail = 0

                # Full slabs have all 'num' objects in use.
                for slab in cache._get_full_list():
                    active_objs += cache.num
                    active_slabs += 1

                # Partial slabs report their in-use count directly.
                for slab in cache._get_partial_list():
                    active_objs += slab.inuse
                    active_slabs += 1

                for slab in cache._get_free_list():
                    num_slabs += 1

                num_slabs += active_slabs
                num_objs = num_slabs * cache.num

                # 1 << gfporder is the pages-per-slab count.
                yield [cache.get_name(),
                       active_objs,
                       num_objs,
                       cache.buffer_size,
                       cache.num,
                       1 << cache.gfporder,
                       active_slabs,
                       num_slabs]

    def render_text(self, outfd, data):
        """Renders the calculate() rows as a fixed-width table."""
        self.table_header(outfd, [("<name>", "<30"),
                                  ("<active_objs>", "<13"),
                                  ("<num_objs>", "<10"),
                                  ("<objsize>", "<10"),
                                  ("<objperslab>", "<12"),
                                  ("<pagesperslab>", "<15"),
                                  ("<active_slabs>", "<14"),
                                  ("<num_slabs>", "<7"),
                                  ])
        for info in data:
            self.table_row(outfd, info[0], info[1], info[2], info[3], info[4], info[5], info[6], info[7])
| gpl-2.0 |
willingc/oh-mainline | vendor/packages/gdata/tests/atom_tests/token_store_test.py | 128 | 2896 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.token_store
import atom.http_interface
import atom.service
import atom.url
class TokenStoreTest(unittest.TestCase):
    """Tests for scope-based token lookup in atom.token_store.TokenStore.

    Modernized to use assertEqual/assertIsInstance/assertNotEqual: the
    ``assert_`` alias is deprecated and was removed in Python 3.12, and the
    specific assertions also produce much clearer failure messages.
    """

    def setUp(self):
        # A single token registered for two scopes; lookups for URLs under
        # either scope must return it.
        self.token = atom.service.BasicAuthToken('aaa1', scopes=[
            'http://example.com/', 'http://example.org'])
        self.tokens = atom.token_store.TokenStore()
        self.tokens.add_token(self.token)

    def testAddAndFindTokens(self):
        self.assertEqual(self.tokens.find_token('http://example.com/'),
                         self.token)
        self.assertEqual(self.tokens.find_token('http://example.org/'),
                         self.token)
        self.assertEqual(self.tokens.find_token('http://example.org/foo?ok=1'),
                         self.token)
        # URLs outside every registered scope get a generic (no-auth) token.
        self.assertIsInstance(self.tokens.find_token('http://example.net/'),
                              atom.http_interface.GenericToken)
        # A string without a scheme does not match the stored scopes.
        self.assertIsInstance(self.tokens.find_token('example.com/'),
                              atom.http_interface.GenericToken)

    def testFindTokenUsingMultipleUrls(self):
        self.assertEqual(self.tokens.find_token('http://example.com/'),
                         self.token)
        self.assertEqual(self.tokens.find_token('http://example.org/bar'),
                         self.token)
        self.assertIsInstance(self.tokens.find_token(''),
                              atom.http_interface.GenericToken)
        self.assertIsInstance(self.tokens.find_token('http://example.net/'),
                              atom.http_interface.GenericToken)

    def testFindTokenWithPartialScopes(self):
        # Scopes given as atom.url.Url objects: one restricted to a path
        # prefix, one covering a whole host.
        token = atom.service.BasicAuthToken('aaa1',
            scopes=[atom.url.Url(host='www.example.com', path='/foo'),
                    atom.url.Url(host='www.example.net')])
        token_store = atom.token_store.TokenStore()
        token_store.add_token(token)
        self.assertEqual(token_store.find_token('http://www.example.com/foobar'),
                         token)
        self.assertEqual(
            token_store.find_token('https://www.example.com:443/foobar'), token)
        self.assertEqual(token_store.find_token('http://www.example.net/xyz'),
                         token)
        self.assertNotEqual(token_store.find_token('http://www.example.org/'),
                            token)
        self.assertIsInstance(token_store.find_token('http://example.org/'),
                              atom.http_interface.GenericToken)
def suite():
    """Build the test suite for this module.

    Uses TestLoader.loadTestsFromTestCase instead of unittest.makeSuite,
    which is deprecated and removed in Python 3.13.
    """
    loader = unittest.TestLoader()
    # Preserve the original behavior of collecting methods prefixed 'test'.
    loader.testMethodPrefix = 'test'
    return unittest.TestSuite(
        (loader.loadTestsFromTestCase(TokenStoreTest),))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| agpl-3.0 |
JavML/django | django/contrib/gis/maps/google/gmap.py | 526 | 9223 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.gis.maps.google.overlays import (
GMarker, GPolygon, GPolyline,
)
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
class GoogleMapException(Exception):
    """Raised for Google Maps configuration errors (e.g. a missing API key)."""
    pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL = 'http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
    """A class for generating Google Maps JavaScript."""

    # String constants
    onunload = mark_safe('onunload="GUnload()"')  # Cleans up after Google Maps
    # Raw string: avoids the invalid '\:' escape-sequence warning.
    vml_css = mark_safe(r'v\:* {behavior:url(#default#VML);}')  # CSS for IE VML
    xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"')  # XML Namespace (for IE VML).

    def __init__(self, key=None, api_url=None, version=None,
                 center=None, zoom=None, dom_id='map',
                 kml_urls=None, polylines=None, polygons=None, markers=None,
                 template='gis/google/google-map.js',
                 js_module='geodjango',
                 extra_context=None):
        """
        Initialize the map.

        NOTE: ``kml_urls`` and ``extra_context`` previously used mutable
        default arguments ([] and {}), which are evaluated once and shared
        across all instances; they now default to None and are normalized
        below, preserving the old call signature for callers.
        """
        # The Google Maps API Key defined in the settings will be used
        # if not passed in as a parameter. The use of an API key is
        # _required_.
        if not key:
            try:
                self.key = settings.GOOGLE_MAPS_API_KEY
            except AttributeError:
                raise GoogleMapException(
                    'Google Maps API Key not found (try adding '
                    'GOOGLE_MAPS_API_KEY to your settings).'
                )
        else:
            self.key = key
        # Getting the Google Maps API version, defaults to using the latest
        # ("2.x"), this is not necessarily the most stable.
        if not version:
            self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
        else:
            self.version = version
        # Can specify the API URL in the `api_url` keyword.
        if not api_url:
            self.api_url = getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version
        else:
            self.api_url = api_url
        # Setting the DOM id of the map, the load function, the JavaScript
        # template, and the KML URLs array.  Fresh [] / {} per instance.
        self.dom_id = dom_id
        self.extra_context = {} if extra_context is None else extra_context
        self.js_module = js_module
        self.template = template
        self.kml_urls = [] if kml_urls is None else kml_urls
        # Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
        # Raw inputs (e.g. geometries) are coerced into overlay instances.
        overlay_info = [[GMarker, markers, 'markers'],
                        [GPolygon, polygons, 'polygons'],
                        [GPolyline, polylines, 'polylines']]
        for overlay_class, overlay_list, varname in overlay_info:
            setattr(self, varname, [])
            if overlay_list:
                for overlay in overlay_list:
                    if isinstance(overlay, overlay_class):
                        getattr(self, varname).append(overlay)
                    else:
                        getattr(self, varname).append(overlay_class(overlay))
        # If GMarker, GPolygons, and/or GPolylines are used the zoom will be
        # automatically calculated via the Google Maps API. If both a zoom
        # level and a center coordinate are provided with polygons/polylines,
        # no automatic determination will occur.
        self.calc_zoom = False
        if self.polygons or self.polylines or self.markers:
            if center is None or zoom is None:
                self.calc_zoom = True
        # Defaults for the zoom level and center coordinates if the zoom
        # is not automatically calculated.
        if zoom is None:
            zoom = 4
        self.zoom = zoom
        if center is None:
            center = (0, 0)
        self.center = center

    def render(self):
        """
        Generates the JavaScript necessary for displaying this Google Map.
        """
        params = {'calc_zoom': self.calc_zoom,
                  'center': self.center,
                  'dom_id': self.dom_id,
                  'js_module': self.js_module,
                  'kml_urls': self.kml_urls,
                  'zoom': self.zoom,
                  'polygons': self.polygons,
                  'polylines': self.polylines,
                  'icons': self.icons,
                  'markers': self.markers,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def body(self):
        "Returns HTML body tag for loading and unloading Google Maps javascript."
        return format_html('<body {} {}>', self.onload, self.onunload)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        return format_html('onload="{}.{}_load()"', self.js_module, self.dom_id)

    @property
    def api_script(self):
        "Returns the <script> tag for the Google Maps API javascript."
        return format_html('<script src="{}{}" type="text/javascript"></script>',
                           self.api_url, self.key)

    @property
    def js(self):
        "Returns only the generated Google Maps JavaScript (no <script> tags)."
        return self.render()

    @property
    def scripts(self):
        "Returns all <script></script> tags required with Google Maps JavaScript."
        return format_html('{}\n    <script type="text/javascript">\n//<![CDATA[\n{}//]]>\n    </script>',
                           self.api_script, mark_safe(self.js))

    @property
    def style(self):
        "Returns additional CSS styling needed for Google Maps on IE."
        return format_html('<style type="text/css">{}</style>', self.vml_css)

    @property
    def xhtml(self):
        "Returns XHTML information needed for IE VML overlays."
        return format_html('<html xmlns="http://www.w3.org/1999/xhtml" {}>', self.xmlns)

    @property
    def icons(self):
        "Returns a sequence of GIcon objects in this map."
        return set(marker.icon for marker in self.markers if marker.icon)
class GoogleMapSet(GoogleMap):
    """A set of Google Maps rendered together on the same page."""

    def __init__(self, *args, **kwargs):
        """
        A class for generating sets of Google Maps that will be shown on the
        same page together.

        Example:
            gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
            gmapset = GoogleMapSet( [ gmap1, gmap2] )
        """
        # The `google-multi.js` template is used instead of `google-single.js`
        # by default.
        template = kwargs.pop('template', 'gis/google/google-multi.js')
        # This is the template used to generate the GMap load JavaScript for
        # each map in the set.
        self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')
        # Running GoogleMap.__init__(), and resetting the template
        # value with default obtained above.
        super(GoogleMapSet, self).__init__(**kwargs)
        self.template = template
        # Fail with a clear error (instead of a bare IndexError) when no
        # maps were supplied at all.
        if not args:
            raise GoogleMapException('GoogleMapSet requires at least one GoogleMap.')
        # If a tuple/list was passed as the first positional argument, assume
        # it already contains the maps; otherwise, each positional argument
        # is an individual map.
        if isinstance(args[0], (tuple, list)):
            self.maps = args[0]
        else:
            self.maps = args
        # Generating DOM ids for each of the maps in the set.
        self.dom_ids = ['map%d' % i for i in range(len(self.maps))]

    def load_map_js(self):
        """
        Returns JavaScript containing all of the loading routines for each
        map in this set.
        """
        result = []
        for dom_id, gmap in zip(self.dom_ids, self.maps):
            # Backup copies the GoogleMap DOM id and template attributes.
            # They are overridden on each GoogleMap instance in the set so
            # that only the loading JavaScript (and not the header variables)
            # is used with the generated DOM ids.
            tmp = (gmap.template, gmap.dom_id)
            gmap.template = self.map_template
            gmap.dom_id = dom_id
            result.append(gmap.js)
            # Restoring the backup values.
            gmap.template, gmap.dom_id = tmp
        return mark_safe(''.join(result))

    def render(self):
        """
        Generates the JavaScript for the collection of Google Maps in
        this set.
        """
        params = {'js_module': self.js_module,
                  'dom_ids': self.dom_ids,
                  'load_map_js': self.load_map_js(),
                  'icons': self.icons,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        # Overloaded to use the `load` function defined in the
        # `google-multi.js`, which calls the load routines for
        # each one of the individual maps in the set.
        return mark_safe('onload="%s.load()"' % self.js_module)

    @property
    def icons(self):
        "Returns a sequence of all icons in each map of the set."
        icons = set()
        for map in self.maps:
            icons |= map.icons
        return icons
| bsd-3-clause |
alikins/ansible | lib/ansible/modules/network/onyx/onyx_config.py | 23 | 8198 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_config
extends_documentation_fragment: onyx
version_added: "2.5"
author: "Alex Tabachnik (@atabachnik), Samer Deeb (@samerd)"
short_description: Manage Mellanox ONYX configuration sections
description:
- Mellanox ONYX configurations uses a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ONYX configuration sections in
a deterministic way.
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
default: no
choices: ['yes', 'no']
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
default: no
choices: ['yes', 'no']
"""
EXAMPLES = """
---
- onyx_config:
lines:
- snmp-server community
- snmp-server host 10.2.2.2 traps version 2c
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/onyx_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.onyx.onyx import get_config
from ansible.module_utils.network.onyx.onyx import load_config
from ansible.module_utils.network.onyx.onyx import run_commands
def get_candidate(module):
    """Build the candidate configuration from either `src` or `lines`.

    Returns a NetworkConfig populated from the `src` file when provided,
    otherwise from the explicit `lines` (nested under `parents`, if any).
    """
    candidate = NetworkConfig(indent=1)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        candidate.add(module.params['lines'],
                      parents=module.params['parents'] or [])
    return candidate
def run(module, result):
    """Diff the candidate config against the device config and apply it.

    Mutates ``result`` in place: sets ``updates`` to the commands that
    will be (or were) pushed, and ``changed`` when the device config was
    (or would be) modified.  Honors check mode and the `save` option.
    """
    match = module.params['match']
    replace = module.params['replace']
    path = module.params['parents']
    candidate = get_candidate(module)
    if match != 'none':
        # Compare against the supplied `config` if given, otherwise fetch
        # the running config from the device.
        contents = module.params['config']
        if not contents:
            contents = get_config(module)
        config = NetworkConfig(indent=1, contents=contents)
        # Only the lines that differ (per match/replace policy) are pushed.
        configobjs = candidate.difference(config, path=path, match=match,
                                          replace=replace)
    else:
        # match=none: push the candidate verbatim, no comparison.
        configobjs = candidate.items
    if configobjs:
        commands = dumps(configobjs, 'commands').split('\n')
        # `before`/`after` wrapping only applies when explicit lines were
        # given (not when loading from `src`).
        if module.params['lines']:
            if module.params['before']:
                commands[:0] = module.params['before']
            if module.params['after']:
                commands.extend(module.params['after'])
        result['updates'] = commands
        # send the configuration commands to the device and merge
        # them with the current running config
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    if module.params['save']:
        # Persist running-config to startup-config; skipped in check mode.
        if not module.check_mode:
            run_commands(module, 'configuration write')
        result['changed'] = True
def main():
    """ main entry point for module execution
    """
    # Module argument schema; mirrors the options documented in
    # DOCUMENTATION above.
    argument_spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        config=dict(),
        backup=dict(type='bool', default=False),
        save=dict(type='bool', default=False),
    )
    # A raw config source (`src`) and explicit lines/parents are mutually
    # exclusive inputs.
    mutually_exclusive = [('lines', 'src'),
                          ('parents', 'src')]
    # strict/exact matching and block replace only make sense with `lines`.
    required_if = [('match', 'strict', ['lines']),
                   ('match', 'exact', ['lines']),
                   ('replace', 'block', ['lines'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)
    result = {'changed': False}
    if module.params['backup']:
        # Stash the running config so Ansible writes a backup file for it.
        result['__backup__'] = get_config(module)
    run(module, result)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
angelapper/edx-platform | lms/djangoapps/mobile_api/middleware.py | 7 | 6355 | """
Middleware for Mobile APIs
"""
from datetime import datetime
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse
from pytz import UTC
import request_cache
from mobile_api.mobile_platform import MobilePlatform
from mobile_api.models import AppVersionConfig
from mobile_api.utils import parsed_version
from openedx.core.lib.mobile_utils import is_request_from_mobile_app
class AppVersionUpgrade(object):
    """
    Middleware class to keep track of mobile application version being used.
    """
    # Response header names advertised to the mobile apps.
    LATEST_VERSION_HEADER = 'EDX-APP-LATEST-VERSION'
    LAST_SUPPORTED_DATE_HEADER = 'EDX-APP-VERSION-LAST-SUPPORTED-DATE'
    # Sentinel strings cached in place of missing values so that "no data"
    # is itself cacheable and does not get recomputed on every request.
    NO_LAST_SUPPORTED_DATE = 'NO_LAST_SUPPORTED_DATE'
    NO_LATEST_VERSION = 'NO_LATEST_VERSION'
    # Request-cache key for the version string parsed from the user agent.
    USER_APP_VERSION = 'USER_APP_VERSION'
    REQUEST_CACHE_NAME = 'app-version-info'
    CACHE_TIMEOUT = settings.APP_UPGRADE_CACHE_TIMEOUT

    def process_request(self, request):
        """
        Processes request to validate app version that is making request.
        Returns:
            Http response with status code 426 (i.e. Update Required) if request is from
            mobile native app and app version is no longer supported else returns None
        """
        version_data = self._get_version_info(request)
        if version_data:
            last_supported_date = version_data[self.LAST_SUPPORTED_DATE_HEADER]
            if last_supported_date != self.NO_LAST_SUPPORTED_DATE:
                # last_supported_date is a tz-aware datetime here; compare
                # against an aware UTC "now" to avoid naive/aware mix-ups.
                if datetime.now().replace(tzinfo=UTC) > last_supported_date:
                    return HttpResponse(status=426)  # Http status 426; Update Required

    def process_response(self, __, response):
        """
        If request is from mobile native app, then add version related info to response headers.
        Returns:
            Http response: with additional headers;
                1. EDX-APP-LATEST-VERSION; if user app version < latest available version
                2. EDX-APP-VERSION-LAST-SUPPORTED-DATE; if user app version < min supported version and
                   timestamp < expiry of that version
        """
        # The request cache was populated by process_request for mobile-app
        # requests; it is empty for all other traffic.
        request_cache_dict = request_cache.get_cache(self.REQUEST_CACHE_NAME)
        if request_cache_dict:
            last_supported_date = request_cache_dict[self.LAST_SUPPORTED_DATE_HEADER]
            if last_supported_date != self.NO_LAST_SUPPORTED_DATE:
                response[self.LAST_SUPPORTED_DATE_HEADER] = last_supported_date.isoformat()
            latest_version = request_cache_dict[self.LATEST_VERSION_HEADER]
            user_app_version = request_cache_dict[self.USER_APP_VERSION]
            # Only advertise an upgrade when the app is actually behind.
            if (latest_version != self.NO_LATEST_VERSION and
                    parsed_version(user_app_version) < parsed_version(latest_version)):
                response[self.LATEST_VERSION_HEADER] = latest_version
        return response

    def _get_cache_key_name(self, field, key):
        """
        Get key name to use to cache any property against field name and identification key.
        Arguments:
            field (str): The property name that needs to get cached.
            key (str): Unique identification for cache key (e.g. platform_name).
        Returns:
            string: Cache key to be used.
        """
        return "mobile_api.app_version_upgrade.{}.{}".format(field, key)

    def _get_version_info(self, request):
        """
        Gets and Sets version related info in mem cache and request cache; and returns a dict of it.
        It sets request cache data for last_supported_date and latest_version with memcached values if exists against
        user app properties else computes the values for specific platform and sets it in both memcache (for next
        server interaction from same app version/platform) and request cache
        Returns:
            dict: Containing app version info
        """
        user_agent = request.META.get('HTTP_USER_AGENT')
        if user_agent:
            platform = self._get_platform(request, user_agent)
            if platform:
                request_cache_dict = request_cache.get_cache(self.REQUEST_CACHE_NAME)
                request_cache_dict[self.USER_APP_VERSION] = platform.version
                # Two memcache keys: expiry date is per app version, latest
                # version is per platform.
                last_supported_date_cache_key = self._get_cache_key_name(
                    self.LAST_SUPPORTED_DATE_HEADER,
                    platform.version
                )
                latest_version_cache_key = self._get_cache_key_name(self.LATEST_VERSION_HEADER, platform.NAME)
                # Single round-trip for both keys; misses are recomputed
                # below and written back with CACHE_TIMEOUT.
                cached_data = cache.get_many([last_supported_date_cache_key, latest_version_cache_key])
                last_supported_date = cached_data.get(last_supported_date_cache_key)
                if not last_supported_date:
                    last_supported_date = self._get_last_supported_date(platform.NAME, platform.version)
                    cache.set(last_supported_date_cache_key, last_supported_date, self.CACHE_TIMEOUT)
                request_cache_dict[self.LAST_SUPPORTED_DATE_HEADER] = last_supported_date
                latest_version = cached_data.get(latest_version_cache_key)
                if not latest_version:
                    latest_version = self._get_latest_version(platform.NAME)
                    cache.set(latest_version_cache_key, latest_version, self.CACHE_TIMEOUT)
                request_cache_dict[self.LATEST_VERSION_HEADER] = latest_version
                return request_cache_dict

    def _get_platform(self, request, user_agent):
        """
        Determines the platform type for mobile app making the request against user_agent.
        Returns:
            None if request app does not belong to one of the supported mobile platforms
            else returns an instance of corresponding mobile platform.
        """
        if is_request_from_mobile_app(request):
            return MobilePlatform.get_instance(user_agent)

    def _get_last_supported_date(self, platform_name, platform_version):
        """ Get expiry date of app version for a platform. """
        # Falls back to the cacheable sentinel when no expiry is configured.
        return AppVersionConfig.last_supported_date(platform_name, platform_version) or self.NO_LAST_SUPPORTED_DATE

    def _get_latest_version(self, platform_name):
        """ Get latest app version available for platform. """
        # Falls back to the cacheable sentinel when no version is configured.
        return AppVersionConfig.latest_version(platform_name) or self.NO_LATEST_VERSION
| agpl-3.0 |
gcd0318/django | tests/template_tests/syntax_tests/test_list_index.py | 521 | 2694 | from django.test import SimpleTestCase
from ..utils import setup
class ListIndexTests(SimpleTestCase):
    """Template-engine tests for numeric (list-index) variable lookups."""

    def _assert_fails_silently(self, template_name, context):
        """Render and expect the engine's string_if_invalid, or '' if unset."""
        rendered = self.engine.render_to_string(template_name, context)
        expected = 'INVALID' if self.engine.string_if_invalid else ''
        self.assertEqual(rendered, expected)

    @setup({'list-index01': '{{ var.1 }}'})
    def test_list_index01(self):
        """
        List-index syntax allows a template to access a certain item of a
        subscriptable object.
        """
        rendered = self.engine.render_to_string(
            'list-index01', {'var': ['first item', 'second item']})
        self.assertEqual(rendered, 'second item')

    @setup({'list-index02': '{{ var.5 }}'})
    def test_list_index02(self):
        """
        Fail silently when the list index is out of range.
        """
        self._assert_fails_silently(
            'list-index02', {'var': ['first item', 'second item']})

    @setup({'list-index03': '{{ var.1 }}'})
    def test_list_index03(self):
        """
        Fail silently when the variable is None.
        """
        self._assert_fails_silently('list-index03', {'var': None})

    @setup({'list-index04': '{{ var.1 }}'})
    def test_list_index04(self):
        """
        Fail silently when variable is a dict without the specified key.
        """
        self._assert_fails_silently('list-index04', {'var': {}})

    @setup({'list-index05': '{{ var.1 }}'})
    def test_list_index05(self):
        """
        Dictionary lookup wins out when dict's key is a string.
        """
        rendered = self.engine.render_to_string(
            'list-index05', {'var': {'1': "hello"}})
        self.assertEqual(rendered, 'hello')

    @setup({'list-index06': '{{ var.1 }}'})
    def test_list_index06(self):
        """
        But list-index lookup wins out when dict's key is an int, which
        behind the scenes is really a dictionary lookup (for a dict)
        after converting the key to an int.
        """
        rendered = self.engine.render_to_string(
            'list-index06', {"var": {1: "hello"}})
        self.assertEqual(rendered, 'hello')

    @setup({'list-index07': '{{ var.1 }}'})
    def test_list_index07(self):
        """
        Dictionary lookup wins out when there is a string and int version
        of the key.
        """
        rendered = self.engine.render_to_string(
            'list-index07', {"var": {'1': "hello", 1: "world"}})
        self.assertEqual(rendered, 'hello')
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.