repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
PersianWikipedia/fawikibot | HujiBot/proxycheck.py | 1 | 6640 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot checks if any IP in a list may be an open proxy
The list must be a plain text file where each row is a single IP address
"""
#
# (C) User:Mensis Mirabilis, 2019
# (C) User:Huji, 2020
#
# Distributed under the terms of the CC-BY-SA license.
#
from __future__ import absolute_import
#
import config
import json
import requests
import re
import os
from datetime import datetime, timedelta
from ipwhois import IPWhois
from iptools import IpRange
from cidr_trie import PatriciaTrie
import argparse
class ProxyCheckBot():
    """Check a list of IP addresses against several open-proxy APIs.

    The result is printed to stdout as a MediaWiki sortable wikitable
    with one row per checked IP.
    """

    # Leading-octets pattern used to tell IPv4 from IPv6 addresses when
    # selecting which cache trie to use (shared by get_cache/set_cache).
    IPV4_PAT = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'

    def __init__(self, path=None, nowiki=None):
        # path: text file with one IP address per line.
        # nowiki: accepted for interface compatibility; currently unused.
        self.path = path
        self.IPQSkey = config.findproxy['IPQSkey']
        self.PCkey = config.findproxy['PCkey']
        self.GIIemail = config.findproxy['GIIemail']
        # Separate tries for IPv4 and IPv6 CIDR/country caching.
        self.IPv4cache = PatriciaTrie()
        self.IPv6cache = PatriciaTrie()

    def load_ip_list(self, path):
        """Return the list of IPs read from the plain-text file at ``path``."""
        # If path starts with ~ make it an absolute path
        path = os.path.expanduser(path)
        if not os.path.isfile(path):
            print('Provided path does not exist')
            exit()
        # Use a context manager so the handle is closed even on error.
        with open(path) as fh:
            return fh.read().splitlines()

    def is_ipv4(self, ip):
        """Return True when ``ip`` looks like an IPv4 address."""
        return re.match(self.IPV4_PAT, ip) is not None

    def get_cache(self, ip):
        """Return cached (cidr, country) entries covering ``ip``, if any."""
        if self.is_ipv4(ip):
            return self.IPv4cache.find_all(ip)
        # Temporary fix for
        # https://github.com/Figglewatts/cidr-trie/issues/2
        if self.IPv6cache.size == 0:
            return []
        return self.IPv6cache.find_all(ip)

    def set_cache(self, ip, cidr, country):
        """Cache the WHOIS country for the CIDR block containing ``ip``."""
        if self.is_ipv4(ip):
            self.IPv4cache.insert(cidr, country)
        else:
            self.IPv6cache.insert(cidr, country)

    def get_ip_info(self, ip):
        """
        Retrieves pertinent fields from IP WHOIS information
        """
        cached_info = self.get_cache(ip)
        if len(cached_info) == 0:
            try:
                request = IPWhois(ip)
                result = request.lookup_rdap(depth=1)
                cidr = result['asn_cidr']
                country = result['asn_country_code']
                self.set_cache(ip, cidr, country)
            except Exception:
                # WHOIS lookups can fail for many reasons; fall back to
                # blank values rather than aborting the whole run.
                cidr = ''
                country = ''
        else:
            cidr = cached_info[0][0]
            country = cached_info[0][1]
        return {
            'cidr': cidr,
            'country_code': country
        }

    def query_IPQualityScore(self, ip):
        """
        Queries the IPQualityScore API to check if an IP is a proxy.
        Returns 1/0 for a proxy verdict, or False if the query failed.
        """
        url = 'https://www.ipqualityscore.com/api/json/ip/%s/%s'
        request = requests.get(url % (self.IPQSkey, ip))
        result = request.json()
        if 'proxy' in result.keys():
            return 1 if result['proxy'] is True else 0
        else:
            return False

    def query_proxycheck(self, ip):
        """
        Queries the proxycheck.io API to check if an IP is a proxy.
        Returns 1/0 for a proxy verdict, or False if the query failed.
        """
        url = 'http://proxycheck.io/v2/%s?key=%s&vpn=1'
        request = requests.get(url % (ip, self.PCkey))
        result = request.json()
        if ip in result.keys() and 'proxy' in result[ip]:
            return 1 if result[ip]['proxy'] == 'yes' else 0
        else:
            return False

    def query_GetIPIntel(self, ip):
        """
        Queries the GetIPIntel API to check if an IP is a proxy.
        Returns 1/0 for a proxy verdict, or False if the query failed.
        """
        url = 'http://check.getipintel.net/check.php' + \
              '?ip=%s&contact=%s&format=json&flags=m'
        request = requests.get(url % (ip, self.GIIemail))
        result = request.json()
        if 'result' in result.keys():
            return 1 if result['result'] == '1' else 0
        else:
            return False

    def query_teoh_io(self, ip):
        """
        Queries the teoh.io API to check if an IP is a proxy.
        Returns 1/0 for a proxy verdict, or False if the query failed.
        """
        url = 'https://ip.teoh.io/api/vpn/%s'
        request = requests.get(url % ip)
        # Sadly, teoh.io sometimes generates PHP notices before the JSON
        # output. Therefore, we have to find the actual JSON output and
        # parse it starting at the first '{'.
        result = request.text
        if result[0] != '{':
            result = result[result.find('{'):]
        result = json.loads(result)
        if 'vpn_or_proxy' in result.keys():
            return 1 if result['vpn_or_proxy'] == 'yes' else 0
        else:
            return False

    def run_queries(self, ip):
        """Run the three proxy checks used in the report for ``ip``."""
        return [
            self.query_IPQualityScore(ip),
            self.query_proxycheck(ip),
            self.query_GetIPIntel(ip)
        ]

    def format_result(self, res):
        """Render a query result as a wikitable cell.

        Note: a failed query (False) compares equal to 0 and is
        therefore rendered as '{{no}}' as well.
        """
        if res == 1:
            return '{{yes}}'
        elif res == 0:
            return '{{no}}'

    def progress(self, message):
        """
        Displays a progress message to the user.
        The message is updated as IPs are checked or skipped.
        The message will be removed in the end, once final output is printed.
        """
        # Renamed parameter (was ``str``) to avoid shadowing the builtin.
        blank = ' '
        print(blank, end='\r')
        print(message, end='\r')

    def run(self):
        """Check every IP in the input list and print a wikitable report."""
        out = '{| class="wikitable sortable"\n'
        out += '! IP !! CIDR !! Country !! ' +\
            'IPQualityScore !! proxycheck !! GetIPIntel'
        if self.path is None:
            print('Error: no IP list provided!')
            exit()
        iplist = self.load_ip_list(self.path)
        rowtemplate = '\n|-\n| %s || %s || %s || %s || %s || %s'
        for ip in iplist:
            ipinfo = self.get_ip_info(ip)
            if ipinfo['country_code'] == 'IR':
                # IPs from Iran are almost never proxies, skip the checks
                self.progress('Skipping %s' % ip)
                continue
            self.progress('Checking %s' % ip)
            IPQS, PC, GII = self.run_queries(ip)
            row = rowtemplate % (
                ip,
                ipinfo['cidr'],
                ipinfo['country_code'],
                self.format_result(IPQS),
                self.format_result(PC),
                self.format_result(GII)
            )
            out += row
        out += '\n|}'
        print(out)
def _main():
    """Parse command-line arguments and run the proxy check bot."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', help='path to text file containing IP list')
    args = parser.parse_args()
    robot = ProxyCheckBot(args.path)
    robot.run()


if __name__ == '__main__':
    # Guarded so importing this module no longer parses sys.argv and
    # runs the bot as a side effect.
    _main()
| mit |
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/pip/commands/hash.py | 514 | 1597 | from __future__ import absolute_import
import hashlib
import logging
import sys
from pip.basecommand import Command
from pip.status_codes import ERROR
from pip.utils import read_chunks
from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES
logger = logging.getLogger(__name__)
class HashCommand(Command):
    """
    Compute a hash of a local package archive.

    These can be used with --hash in a requirements file to do repeatable
    installs.
    """
    name = 'hash'
    usage = '%prog [options] <file> ...'
    summary = 'Compute hashes of package archives.'

    def __init__(self, *args, **kw):
        super(HashCommand, self).__init__(*args, **kw)
        # Let the user pick any of pip's strong hash algorithms,
        # defaulting to the preferred one (FAVORITE_HASH).
        self.cmd_opts.add_option(
            '-a', '--algorithm',
            dest='algorithm',
            choices=STRONG_HASHES,
            action='store',
            default=FAVORITE_HASH,
            help='The hash algorithm to use: one of %s' %
                 ', '.join(STRONG_HASHES))
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Print a ``--hash=<algo>:<digest>`` line for each given archive."""
        # At least one archive path is required.
        if not args:
            self.parser.print_usage(sys.stderr)
            return ERROR

        algorithm = options.algorithm
        for path in args:
            logger.info('%s:\n--hash=%s:%s',
                        path, algorithm, _hash_of_file(path, algorithm))
def _hash_of_file(path, algorithm):
    """Return the hex digest of the file at ``path`` using ``algorithm``."""
    with open(path, 'rb') as archive:
        # Renamed local (was ``hash``) to avoid shadowing the builtin.
        digest = hashlib.new(algorithm)
        # Feed the file in chunks so large archives do not load into memory.
        for chunk in read_chunks(archive):
            digest.update(chunk)
    return digest.hexdigest()
| apache-2.0 |
avsaj/rtpmidi | rtpmidi/test/test_packets.py | 1 | 12588 | import sys
import time
from twisted.trial import unittest
from rtpmidi.protocols.rtp.packets import RTCPPacket
from rtpmidi.protocols.rtp.packets import RTCPCompound
from rtpmidi.protocols.rtp.packets import ext_32_out_of_64
from rtpmidi.protocols.rtp.packets import unformat_from_32
from rtpmidi.protocols.rtp.packets import RTPPacket
##################RTCP#####################
class TestRTCPPacket(unittest.TestCase):
    """Testing RTCPPacket class"""

    def setUp(self):
        # Template member record; the SR/RR tests copy it and override
        # only the reception-report fields they exercise.
        self.member = {'user_name': "name", 'cname': "user@host", 'tool': "none",
                       'addr': 0, 'rtp_port': 0, 'rtcp_port': 0,
                       'last_rtp_received': 0, 'last_rtcp_received': 0,
                       'total_received_bytes': 0, 'total_received_packets': 0,
                       'last_seq': 0, 'lost': 0, 'last_ts': 0, 'last_time': 0,
                       'jitter': 0, 'lsr': 0, 'dlsr': 0,
                       'rt_time': 0}

    def tearDown(self):
        pass

    def test_ext_32_out_of_64(self):
        # ext_32_out_of_64 keeps the middle 32 bits of a 64-bit NTP-style
        # value: for an integer input that is a left shift by 16.
        ref = 10
        res = ext_32_out_of_64(ref)
        ref = 10 << 16
        assert(res == ref), self.fail("Wrong encoding")

    def test_unformat_from_32(self):
        # Round trip: unformat_from_32 must invert ext_32_out_of_64 for
        # both integer and fractional inputs.
        ref = 10
        res = ext_32_out_of_64(ref)
        res_2 = unformat_from_32(res)
        assert(res_2 == ref), self.fail("Wrong encoding or decoding")

        ref = 10.245
        res = ext_32_out_of_64(ref)
        res_2 = unformat_from_32(res)
        assert(res_2 == ref), self.fail("Wrong encoding or decoding")

    def test_encode_SR(self):
        """Test the encode function for SR (sender report) packets."""
        ttt = time.time()
        cont \
            = (ssrc, ntp, rtp_ts, total_packets, total_bytes, ssrc_1,
               frac_lost, lost, highest, jitter, lsr, dlsr) \
            = (424242, ttt, 143, 100, 800, 424243, 1, 1, 65535, 15,
               int(time.time()-10), 10)

        members = {}
        new_member = self.member.copy()
        new_member['last_ts'] = rtp_ts
        new_member['last_seq'] = highest
        new_member['jitter'] = jitter
        new_member['lost'] = lost

        members_table = {}
        members_table[ssrc] = new_member

        arg_list = (ssrc, rtp_ts,
                    total_packets,
                    total_bytes, members_table)
        rtcp = RTCPPacket("SR", ptcode=200, contents=arg_list)
        rtcp_pac = rtcp.encode()

        # Testing result
        assert(type(rtcp_pac) == str), self.fail("Wrong type returned for SR" \
                                                 + " RTCP packet")
        assert(len(rtcp_pac) == 28), self.fail("Wrong size returned for SR" \
                                               + " RTCP packet")

    def test_decode_SR(self):
        """Decode SR packet with only one feed"""
        ttt = time.time()
        (ssrc, ntp, rtp_ts, total_packets, total_bytes, ssrc_1,
         frac_lost, lost, highest, jitter, lsr, dlsr) \
            = (424242, ttt, 143, 100, 800, 424243, 1, 1, 65535, 15,
               int(time.time()-10), 10)
        frac_lost = int(lost / float(total_packets + lost))

        members = {}
        new_member = self.member.copy()
        new_member['last_ts'] = rtp_ts
        new_member['last_seq'] = highest
        new_member['jitter'] = jitter
        new_member['lost'] = lost
        new_member['lsr'] = lsr
        new_member['dlsr'] = dlsr

        members_table = {}
        members_table[ssrc_1] = new_member

        arg_list = (ssrc, rtp_ts,
                    total_packets,
                    total_bytes, members_table)
        rtcp = RTCPPacket("SR", ptcode=200, contents=arg_list)
        rtcp_pac = rtcp.encode()

        # Unpacking
        # Decoding packet
        packet = RTCPCompound(rtcp_pac)

        # Getting content of the first block
        cont = packet._rtcp[0].getContents()

        # lsr/dlsr lose precision on the wire (middle 32 bits only), so
        # run the reference values through the same round trip before
        # comparing against the decoded fields.
        lsr = ext_32_out_of_64(lsr)
        lsr = unformat_from_32(lsr)
        dlsr = ext_32_out_of_64(dlsr)
        dlsr = unformat_from_32(dlsr)

        # Testing content decode
        assert(cont[0] == ssrc), self.fail("SSRC is not correctly encode or " \
                                           + "decode")
        assert(int(cont[1]['ntpTS']) == int(ntp)), \
            self.fail("NTP Timestamp is not correctly encode or decode")
        assert(cont[1]['packets'] == total_packets), \
            self.fail("Cumulative number of packets is not correctly encode or decode")
        assert(cont[1]['octets'] == total_bytes), \
            self.fail("Cumulative octets sum is not correctly encode or decode")
        assert(cont[1]['rtpTS'] == rtp_ts), \
            self.fail("RTP timestamp is not correctly encode or decode")
        assert(cont[2][0]['ssrc'] == ssrc_1), \
            self.fail("SSRC_1 is not correctly encode or decode")
        assert(cont[2][0]['jitter'] == jitter), \
            self.fail("Jitter is not correctly encode or decode")
        assert(cont[2][0]['fraclost'] == frac_lost), \
            self.fail("Frac lost is not correctly encode or decode")
        assert(cont[2][0]['lsr'] == lsr), \
            self.fail("Last received is not correctly encode or decode")
        assert(cont[2][0]['highest'] == highest), \
            self.fail("Highest seq num is not correctly encode or decode")

    def test_encode_RR(self):
        """Test the encode function for RR (receiver report) packets."""
        ttt = time.time()
        (ssrc, ntp, rtp_ts, total_packets, total_bytes, ssrc_1, \
         frac_lost, lost, highest, jitter, lsr, dlsr) \
            = (424242, ttt, 143, 100, 800, 424243, 1, 1, 65535, 15, \
               int(time.time()-10), 10)
        frac_lost = int(lost / float(total_packets + lost))

        members = {}
        new_member = self.member.copy()
        new_member['last_ts'] = rtp_ts
        new_member['last_seq'] = highest
        new_member['jitter'] = jitter
        new_member['lost'] = lost
        new_member['lsr'] = lsr
        new_member['dlsr'] = dlsr

        members_table = {}
        members_table[ssrc_1] = new_member

        arg_list = (ssrc, members_table)
        rtcp = RTCPPacket("RR", ptcode=201, contents=arg_list)
        rtcp_pac = rtcp.encode()

        # Testing result
        assert(type(rtcp_pac) == str), self.fail("Wrong type returned for RR" \
                                                 + " RTCP packet")
        assert(len(rtcp_pac) == 32), self.fail("Wrong size returned for RR" \
                                               + " RTCP packet")

    def test_decode_RR(self):
        """Decode RR packet with only one report block."""
        ttt = time.time()
        (ssrc, ntp, rtp_ts, total_packets, total_bytes, ssrc_1, \
         frac_lost, lost, highest, jitter, lsr, dlsr) \
            = (424242, ttt, 143, 100, 800, 424243, 1, 1, 65535, 15, \
               int(time.time()-10), 10)
        frac_lost = int(lost / float(total_packets + lost))

        members = {}
        new_member = self.member.copy()
        new_member['last_ts'] = rtp_ts
        new_member['last_seq'] = highest
        new_member['jitter'] = jitter
        new_member['lost'] = lost
        new_member['lsr'] = lsr
        new_member['dlsr'] = dlsr

        members_table = {}
        members_table[ssrc_1] = new_member

        arg_list = (ssrc, members_table)
        rtcp = RTCPPacket("RR", ptcode=201, contents=arg_list)
        rtcp_pac = rtcp.encode()

        # Unpacking
        # Decoding packet
        packet = RTCPCompound(rtcp_pac)

        # Getting content of the first block
        cont = packet._rtcp[0].getContents()

        # Same 32-bit round trip as in test_decode_SR before comparing.
        lsr = ext_32_out_of_64(lsr)
        lsr = unformat_from_32(lsr)
        dlsr = ext_32_out_of_64(dlsr)
        dlsr = unformat_from_32(dlsr)

        # Testing content decode
        assert(cont[0] == ssrc), self.fail("SSRC is not correctly encode or " \
                                           + "decode")
        assert(cont[1][0]['ssrc'] == ssrc_1), \
            self.fail("SSRC_1 is not correctly encode or decode")
        assert(cont[1][0]['jitter'] == jitter), \
            self.fail("Jitter is not correctly encode or decode")
        assert(cont[1][0]['fraclost'] == frac_lost), \
            self.fail("Frac lost is not correctly encode or decode")
        assert(cont[1][0]['lsr'] == lsr), \
            self.fail("Last received is not correctly encode or decode")
        assert(cont[1][0]['highest'] == highest), \
            self.fail("Highest seq num is not correctly encode or decode")

    def test_encode_BYE(self):
        """Test encode BYE packet with a single SSRC"""
        cont = (ssrc, reason) = ([4242], "because")
        rtcp = RTCPPacket("BYE", ptcode=203, contents=cont)
        rtcp_pac = rtcp.encode()

        assert(type(rtcp_pac) == str), \
            self.fail("Wrong type returned by encode RTCP BYE packet")
        # MUST be padd ??
        # NOTE(review): the expected 16-byte size presumably includes
        # 32-bit padding of the reason string — confirm against packets.py.
        assert(len(rtcp_pac) == 16), \
            self.fail("Wrong size returned by encode RTCP BYE packet")

    def test_decode_BYE(self):
        """Test decode BYE packet with a single SSRC"""
        cont = (ssrc, reason) = ([4242], "because")
        rtcp = RTCPPacket("BYE", ptcode=203, contents=cont)
        rtcp_pac = rtcp.encode()

        # Unpacking
        # Decoding packet
        packet = RTCPCompound(rtcp_pac)

        # Getting content of the first block
        cont = packet._rtcp[0].getContents()

        assert(cont[0][0] == ssrc[0]), \
            self.fail("SSRC is not correctly encode or decode")
        assert(cont[1] == reason), \
            self.fail("Reason is not correctly encode or decode")

    def test_encode_SDES(self):
        """Encode an SDES packet carrying CNAME/NAME/TOOL items."""
        item = []
        cont = []
        item.append(("CNAME", "me@myself.mine"))
        item.append(("NAME", "memyselfandi"))
        item.append(("TOOL", "sropulpof_test"))
        cont.append((424242, item))
        rtcp = RTCPPacket("SDES", ptcode=202, contents=cont)
        rtcp_pac = rtcp.encode()

        assert(type(rtcp_pac) == str), \
            self.fail("Wrong type returned by encode RTCP SDES packet")
        # MUST be padd
        # Expected size: 8 bytes of header/SSRC, 2 bytes of (type, length)
        # per item plus the item text, rounded up to a 32-bit boundary.
        length_wait = 8 + 2 + len("me@myself.mine") + 2 + len("memyselfandi") + 2 + len("sropulpof_test")
        pad = 4 - (length_wait % 4)
        length_wait += pad
        assert(len(rtcp_pac) == length_wait), \
            self.fail("Wrong size returned by encode RTCP SDES packet")

    def test_decode_SDES(self):
        """Round-trip an SDES packet and verify every item survives."""
        item = []
        cont = []
        item.append(("CNAME", "me@myself.mine"))
        item.append(("NAME", "memyselfandi"))
        item.append(("TOOL", "sropulpof_test"))
        cont.append((424242, item))
        rtcp = RTCPPacket("SDES", ptcode=202, contents=cont)
        rtcp_pac = rtcp.encode()

        packets = RTCPCompound(rtcp_pac)
        for packet in packets:
            packet_type = packet.getPT()
            cont = packet.getContents()
            assert(packet_type == "SDES"), self.fail("Wrong type select by RTCPCompound")
            assert(cont[0][0] == 424242), self.fail("Wrong ssrc")
            for elt in cont[0][1]:
                if elt[0] == "CNAME":
                    assert(elt[1] == "me@myself.mine"), self.fail("wrong encoded for Cname")
                elif elt[0] == "NAME":
                    assert(elt[1] == "memyselfandi"), self.fail("wrong encoded for Name")
                elif elt[0] == "TOOL":
                    assert(elt[1] == "sropulpof_test"), self.fail("wrong encoded for Tool")
class TestRTCPCompound(unittest.TestCase):
    """Placeholder tests for RTCPCompound; encode/decode not yet written."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_encode(self):
        # TODO: exercise RTCPCompound encoding once implemented.
        pass

    def test_decode(self):
        # TODO: exercise RTCPCompound decoding once implemented.
        pass
##################RTP#####################
class TestRTPPacket(unittest.TestCase):
    """Placeholder test fixture for RTPPacket; no tests written yet."""

    def setUp(self):
        pass

    def tearDown(self):
        pass
class TestHeader(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
#params: ssrc, pt, ct, seq, ts, marker=0, xhdrtype=None, xhdrdata=''
pass
def test_netbytes(self):
pass
def test_init(self):
#params: ssrc, seq, ts, data, pt=None, ct=None, marker=0,
# authtag='', xhdrtype=None, xhdrdata=''
#
test = "some data"
ssrc, seq, ts, data, marker = (424242, 1, 143, test, 1)
packet = RTPPacket(ssrc, seq, ts, data, marker=marker)
#assert(len(packet)==
def test_netbytes(self):
pass
def test_parse_rtppacket(self):
pass
class TestNTE(unittest.TestCase):
    """Placeholder tests for the NTE (telephone-event) payload class."""

    def test_init(self):
        pass

    def test_getKey(self):
        pass

    def test_getPayload(self):
        pass

    def test_isDone(self):
        pass
| gpl-3.0 |
google/contentbox | third_party/django/contrib/auth/tests/test_decorators.py | 112 | 1698 | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.tests.test_views import AuthViewsTestCase
from django.contrib.auth.tests.utils import skipIfCustomUser
@skipIfCustomUser
class LoginRequiredTestCase(AuthViewsTestCase):
    """
    Tests the login_required decorators
    """
    urls = 'django.contrib.auth.tests.urls'

    def testCallable(self):
        """
        Check that login_required is assignable to callable objects.
        """
        class CallableView(object):
            def __call__(self, *args, **kwargs):
                pass
        login_required(CallableView())

    def testView(self):
        """
        Check that login_required is assignable to normal views.
        """
        def normal_view(request):
            pass
        login_required(normal_view)

    def testLoginRequired(self, view_url='/login_required/', login_url=None):
        """
        Check that login_required works on a simple view wrapped in a
        login_required decorator.
        """
        if login_url is None:
            login_url = settings.LOGIN_URL
        # Anonymous access must redirect (302) to the login URL.
        response = self.client.get(view_url)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(login_url in response.url)
        # After logging in, the view must render normally.
        self.login()
        response = self.client.get(view_url)
        self.assertEqual(response.status_code, 200)

    def testLoginRequiredNextUrl(self):
        """
        Check that login_required works on a simple view wrapped in a
        login_required decorator with a login_url set.
        """
        self.testLoginRequired(view_url='/login_required_login_url/',
                               login_url='/somewhere/')
| apache-2.0 |
petewarden/tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/remove_init_variable_v1.py | 14 | 2981 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/remove_init_variable_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# Verify that the tf.versions attribute exists. It is difficult to enforce
# contents, since the version numbers change over time. The conversion logic
# itself is verified in the common graphdef converter, so here just assert
# it is being invoked.
# CHECK: module
# CHECK-SAME: tf.versions
# CHECK-SAME: bad_consumers
# CHECK-SAME: min_consumer
# CHECK-SAME: producer
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR:[a-zA-Z_0-9]+]]", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>} : () -> ()
# CHECK-NOT: session_initializer
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG0:%.*]]: tensor<3x1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @[[VAR]]})
# CHECK-SAME: -> (tensor<3x3xf32> {tf_saved_model.index_path = ["r"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK-NEXT: [[R0:%.*]] = "tf.ReadVariableOp"([[ARG1]]) {{{.*}}} : (tensor<!tf.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>
# CHECK-NEXT: [[R1:%.*]] = "tf.MatMul"([[ARG0]], [[R0]]) {{{.*}}} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>
# CHECK-NEXT: return [[R1]] : tensor<3x3xf32>
def Test():
  """Build a v1 model whose variable initializer should be removed.

  Returns the (signature_def dict, init_op, asset) triple expected by
  common_v1.do_test; the emitted MLIR is verified by the CHECK lines above.
  """
  x = tf.constant([[1.0], [1.0], [1.0]])
  y = tf.compat.v1.get_variable(
      name='y',
      shape=(1, 3),
      initializer=tf.random_normal_initializer(),
      trainable=True)
  r = tf.matmul(x, y)

  tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
  tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r)

  return {
      'key': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
          inputs={'x': tensor_info_x},
          outputs={'r': tensor_info_r},
          method_name='some_function'))
  }, tf.initializers.global_variables(), None
if __name__ == '__main__':
  # Run the model through the common v1 saved-model-to-MLIR driver; the
  # RUN/FileCheck lines above verify the emitted module.
  common_v1.set_tf_options()
  common_v1.do_test(Test, canonicalize=True)
| apache-2.0 |
tyndare/osmose-backend | plugins/Structural_WayOneNode.py | 1 | 2257 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Jocelyn Jaubert 2013 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from plugins.Plugin import Plugin
class Structural_WayOneNode(Plugin):
    """Flags OSM ways that contain only a single node reference."""

    def init(self, logger):
        Plugin.init(self, logger)
        # Error class 12401: degenerate geometry, easy manual fix.
        self.errors[12401] = { "item": 1240, "level": 2, "tag": ["geom", "fix:chair"], "desc": T_(u"Way with one node") }

    def way(self, data, tags, nds):
        # A way needs at least two node references to form a geometry.
        if len(nds) == 1:
            return {"class": 12401}
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
    def test(self):
        """One-node ways must be flagged; ways with >= 2 node refs must not."""
        a = Structural_WayOneNode(None)
        a.init(None)
        # Single-node ways: an error is expected.
        for n in [[1],
                  [2],
                  ]:
            self.check_err(a.way(None, {}, n), n)
        # Two or more node references (duplicates allowed): no error.
        for n in [[1, 2],
                  [1, 4, 1, 2],
                  [1] * 10 + [2],
                  ]:
            assert not a.way(None, {}, n), n
| gpl-3.0 |
themurph/openshift-tools | openshift/installer/vendored/openshift-ansible-git-2016-04-27/roles/os_firewall/library/os_firewall_manage_iptables.py | 68 | 10474 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
import subprocess
from subprocess import call, check_output
# Ansible module metadata strings, consumed by ansible-doc.
DOCUMENTATION = '''
---
module: os_firewall_manage_iptables
short_description: This module manages iptables rules for a given chain
author: Jason DeTiberus
requirements: [ ]
'''

EXAMPLES = '''
'''
class IpTablesError(Exception):
    """Base error for iptables operations; records the failed command."""

    def __init__(self, msg, cmd, exit_code, output):
        # Preserve Exception's str()/args behaviour based on the message.
        super(IpTablesError, self).__init__(msg)
        self.msg, self.cmd = msg, cmd
        self.exit_code, self.output = exit_code, output
class IpTablesAddRuleError(IpTablesError):
    """Raised when adding an iptables rule fails."""
    pass
class IpTablesRemoveRuleError(IpTablesError):
    """Raised when removing an iptables rule fails."""
    pass
class IpTablesSaveError(IpTablesError):
    """Raised when persisting the iptables rule set fails."""
    pass
class IpTablesCreateChainError(IpTablesError):
    """Raised when creating a chain (or a rule within it) fails."""
    def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
        super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
                                                       output)
        # Name of the chain the failed operation targeted.
        self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
    """Raised when creating the jump rule into the managed chain fails."""
    def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
        super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
                                                          output)
        # Name of the chain the jump rule was supposed to target.
        self.chain = chain
# TODO: impliment rollbacks for any events that where successful and an
# exception was thrown later. for example, when the chain is created
# successfully, but the add/remove rule fails.
class IpTablesManager(object):  # pylint: disable=too-many-instance-attributes
    """Manage iptables rules for one chain: create it, add/remove
    ACCEPT rules, wire it into the jump chain, and persist changes."""

    def __init__(self, module):
        self.module = module
        self.ip_version = module.params['ip_version']
        self.check_mode = module.check_mode
        self.chain = module.params['chain']
        self.create_jump_rule = module.params['create_jump_rule']
        self.jump_rule_chain = module.params['jump_rule_chain']
        # Pre-built command prefixes for iptables/ip6tables and for save.
        self.cmd = self.gen_cmd()
        self.save_cmd = self.gen_save_cmd()
        # Accumulated command output and change flag for the module result.
        self.output = []
        self.changed = False

    def save(self):
        """Persist the current rule set via the iptables init script."""
        try:
            # NOTE(review): this uses subprocess.STDOUT but the module only
            # imports call/check_output from subprocess -- verify that
            # `import subprocess` exists at module level.
            self.output.append(check_output(self.save_cmd,
                                            stderr=subprocess.STDOUT))
        except subprocess.CalledProcessError as ex:
            raise IpTablesSaveError(
                msg="Failed to save iptables rules",
                cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)

    def verify_chain(self):
        """Ensure the managed chain and (optionally) its jump rule exist."""
        if not self.chain_exists():
            self.create_chain()
        if self.create_jump_rule and not self.jump_rule_exists():
            self.create_jump()

    def add_rule(self, port, proto):
        """Add an ACCEPT rule for proto/port, creating the chain on demand."""
        rule = self.gen_rule(port, proto)
        if not self.rule_exists(rule):
            self.verify_chain()

            if self.check_mode:
                # Check mode: report the would-be change without executing.
                self.changed = True
                self.output.append("Create rule for %s %s" % (proto, port))
            else:
                cmd = self.cmd + ['-A'] + rule
                try:
                    self.output.append(check_output(cmd))
                    self.changed = True
                    self.save()
                except subprocess.CalledProcessError as ex:
                    raise IpTablesCreateChainError(
                        chain=self.chain,
                        msg="Failed to create rule for "
                            "%s %s" % (proto, port),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)

    def remove_rule(self, port, proto):
        """Remove the ACCEPT rule for proto/port if it is present."""
        rule = self.gen_rule(port, proto)
        if self.rule_exists(rule):
            if self.check_mode:
                self.changed = True
                self.output.append("Remove rule for %s %s" % (proto, port))
            else:
                cmd = self.cmd + ['-D'] + rule
                try:
                    self.output.append(check_output(cmd))
                    self.changed = True
                    self.save()
                except subprocess.CalledProcessError as ex:
                    raise IpTablesRemoveRuleError(
                        chain=self.chain,
                        msg="Failed to remove rule for %s %s" % (proto, port),
                        cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)

    def rule_exists(self, rule):
        # iptables -C exits 0 when the rule is already present.
        check_cmd = self.cmd + ['-C'] + rule
        return True if call(check_cmd) == 0 else False

    def gen_rule(self, port, proto):
        """Build the argument list for a NEW-state ACCEPT rule."""
        return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
                '-m', proto, '--dport', str(port), '-j', 'ACCEPT']

    def create_jump(self):
        """Insert (or append) a jump from jump_rule_chain into our chain."""
        if self.check_mode:
            self.changed = True
            self.output.append("Create jump rule for chain %s" % self.chain)
        else:
            try:
                cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
                output = check_output(cmd, stderr=subprocess.STDOUT)

                # break the input rules into rows and columns
                input_rules = [s.split() for s in output.split('\n')]

                # Find the last numbered rule
                last_rule_num = None
                last_rule_target = None
                for rule in input_rules[:-1]:
                    if rule:
                        try:
                            last_rule_num = int(rule[0])
                        except ValueError:
                            continue
                        last_rule_target = rule[1]

                # Naively assume that if the last row is a REJECT rule, then
                # we can add insert our rule right before it, otherwise we
                # assume that we can just append the rule.
                if (last_rule_num and last_rule_target
                        and last_rule_target == 'REJECT'):
                    # insert rule
                    cmd = self.cmd + ['-I', self.jump_rule_chain,
                                      str(last_rule_num)]
                else:
                    # append rule
                    cmd = self.cmd + ['-A', self.jump_rule_chain]
                cmd += ['-j', self.chain]
                output = check_output(cmd, stderr=subprocess.STDOUT)
                self.changed = True
                self.output.append(output)
                self.save()
            except subprocess.CalledProcessError as ex:
                # Distinguish a failed listing from a failed insert/append.
                if '--line-numbers' in ex.cmd:
                    raise IpTablesCreateJumpRuleError(
                        chain=self.chain,
                        msg=("Failed to query existing " +
                             self.jump_rule_chain +
                             " rules to determine jump rule location"),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)
                else:
                    raise IpTablesCreateJumpRuleError(
                        chain=self.chain,
                        msg=("Failed to create jump rule for chain " +
                             self.chain),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)

    def create_chain(self):
        """Create the managed chain (iptables -N)."""
        if self.check_mode:
            self.changed = True
            self.output.append("Create chain %s" % self.chain)
        else:
            try:
                cmd = self.cmd + ['-N', self.chain]
                self.output.append(check_output(cmd,
                                                stderr=subprocess.STDOUT))
                self.changed = True
                self.output.append("Successfully created chain %s" %
                                   self.chain)
                self.save()
            except subprocess.CalledProcessError as ex:
                raise IpTablesCreateChainError(
                    chain=self.chain,
                    msg="Failed to create chain: %s" % self.chain,
                    cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
                )

    def jump_rule_exists(self):
        # -C on the jump chain: exit 0 means the jump rule is present.
        cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
        return True if call(cmd) == 0 else False

    def chain_exists(self):
        # Listing a missing chain makes iptables exit non-zero.
        cmd = self.cmd + ['-L', self.chain]
        return True if call(cmd) == 0 else False

    def gen_cmd(self):
        """Return the iptables binary for the configured IP version."""
        cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
        return ["/usr/sbin/%s" % cmd]

    def gen_save_cmd(self): # pylint: disable=no-self-use
        """Return the command used to persist rules across reboots."""
        return ['/usr/libexec/iptables/iptables.init', 'save']
def main():
    """Ansible module entry point: validate params and dispatch the action."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            action=dict(required=True, choices=['add', 'remove',
                                                'verify_chain']),
            chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
            create_jump_rule=dict(required=False, type='bool', default=True),
            jump_rule_chain=dict(required=False, default='INPUT'),
            protocol=dict(required=False, choices=['tcp', 'udp']),
            port=dict(required=False, type='int'),
            ip_version=dict(required=False, default='ipv4',
                            choices=['ipv4', 'ipv6']),
        ),
        supports_check_mode=True
    )

    action = module.params['action']
    protocol = module.params['protocol']
    port = module.params['port']

    # add/remove need both a protocol and a port; verify_chain does not.
    if action in ['add', 'remove']:
        if not protocol:
            error = "protocol is required when action is %s" % action
            module.fail_json(msg=error)
        if not port:
            error = "port is required when action is %s" % action
            module.fail_json(msg=error)

    iptables_manager = IpTablesManager(module)

    try:
        if action == 'add':
            iptables_manager.add_rule(port, protocol)
        elif action == 'remove':
            iptables_manager.remove_rule(port, protocol)
        elif action == 'verify_chain':
            iptables_manager.verify_chain()
    except IpTablesError as ex:
        module.fail_json(msg=ex.msg)

    return module.exit_json(changed=iptables_manager.changed,
                            output=iptables_manager.output)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
# Script entry point when executed directly (as Ansible does).
if __name__ == '__main__':
    main()
| apache-2.0 |
AMechler/AliPhysics | PWGPP/EMCAL/ECSingleChMacros/paint_emcal.py | 39 | 18839 | ## @package paint_emcal
# paint_emcal creates the canvas and pad where the histogram output of calib_emcal can be displayed in as individual channels of the emcal and dcal in their correct real eta and phi position.
#!/usr/bin/env python
if __name__ != '__main__':
import math, array, ROOT
## Sets up ROOT
def set_root_style():
    """Configure a plain, white ROOT style with no statistics box.

    Order matters: SetStyle("Plain") resets gStyle, so the individual
    overrides must come after it.
    """
    ROOT.gROOT.SetStyle("Plain")
    ROOT.gStyle.SetCanvasColor(ROOT.kWhite)
    ROOT.gStyle.SetCanvasBorderMode(0)
    ROOT.gStyle.SetPadBorderMode(0)
    ROOT.gStyle.SetOptStat(0)
    # Many contour levels with palette 55 gives smooth 2D color maps.
    ROOT.gStyle.SetNumberContours(253)
    ROOT.gStyle.SetPalette(55, ROOT.nullptr)
## Creates a canvas in order plot the emcal and dcal channels
def multipanel_pad(canvas, pad_layout_unscaled, margin = None):
    """Divide ``canvas`` into a grid of transparent TPads.

    pad_layout_unscaled: pair of sequences giving the relative column
    widths and row heights; each axis is normalized to sum to 1.
    margin: optional (left, right, top, bottom) tuple; when None the
    canvas' own margins are used.
    Returns the list of created pads, column by column, top row first.
    """
    # Normalize each axis into a new list instead of mutating the
    # caller's nested lists in place (the original did `/=` on them).
    # float() also guards against integer division on Python 2 inputs.
    pad_layout = [[v / float(sum(axis)) for v in axis]
                  for axis in pad_layout_unscaled]
    pad_layout_1_reversed = list(reversed(pad_layout[1]))
    if margin == None:
        left_margin = canvas.GetLeftMargin()
        right_margin = canvas.GetRightMargin()
        top_margin = canvas.GetTopMargin()
        bottom_margin = canvas.GetBottomMargin()
    else:
        # Fixed: this previously unpacked the misspelled name "magin",
        # raising NameError whenever explicit margins were passed.
        left_margin, right_margin, top_margin, bottom_margin = margin
    lr = left_margin + right_margin
    tb = top_margin + bottom_margin
    pad = []
    for i in range(len(pad_layout[0])):
        x0 = sum(pad_layout[0][:i]) * (1 - lr)
        x1 = lr + sum(pad_layout[0][:i+1]) * (1 - lr)
        for j in reversed(range(len(pad_layout[1]))):
            y0 = sum(pad_layout_1_reversed[:j]) * (1 - tb)
            y1 = tb + sum(pad_layout_1_reversed[:j+1]) * (1 - tb)
            name = 'pad%d' % len(pad)
            canvas.cd()
            pad.append(ROOT.TPad(name, name, x0, y0, x1, y1))
            # Scale each pad's margins so the absolute margin size is
            # uniform across pads of different relative sizes.
            pad[-1].SetLeftMargin(
                left_margin / (lr + pad_layout[0][i] * (1 - lr)))
            pad[-1].SetRightMargin(
                right_margin / (lr + pad_layout[0][i] * (1 - lr)))
            pad[-1].SetTopMargin(
                top_margin / (tb + pad_layout_1_reversed[j] * (1 - tb)))
            pad[-1].SetBottomMargin(
                bottom_margin / (tb + pad_layout_1_reversed[j] * (1 - tb)))
            pad[-1].SetFillStyle(0)
            pad[-1].SetFillColor(0)
            pad[-1].Draw()
            pad[-1].cd()
    return pad
## Converts from channel ID to super module number, and phi and eta coordinates in the supermodule
## Converts from channel ID to super module number, and phi and eta coordinates in the supermodule
def to_sm_ieta_iphi(n):
    """Map a flat EMCal/DCal channel ID to (sm, ieta, iphi).

    n -- cell ID in [0, 17664)
    Returns the supermodule number and the (eta, phi) tower indices inside
    that supermodule.  Uses floor division throughout so the results stay
    integral on both Python 2 and Python 3 (plain '/' yields floats under
    Python 3, which would break the coefficient-table indexing in
    to_eta_phi).
    """
    if n < 11520:
        # Full-size EMCal supermodules: 1152 cells each, 24 towers in phi.
        sm = n // 1152
        n0 = sm * 1152
        nphi = 24
    elif n < 12288:
        # One-third-size EMCal supermodules: 384 cells, 8 towers in phi.
        sm = 10 + (n - 11520) // 384
        n0 = 11520 + (sm - 10) * 384
        nphi = 8
    elif n < 16896:
        # Two-thirds-size DCal supermodules: 768 cells, 24 towers in phi.
        sm = 12 + (n - 12288) // 768
        n0 = 12288 + (sm - 12) * 768
        nphi = 24
    else:
        # One-third-size DCal supermodules: 384 cells, 8 towers in phi.
        sm = 18 + (n - 16896) // 384
        n0 = 16896 + (sm - 18) * 384
        nphi = 8
    n1 = n - n0
    # Cells alternate in eta within a two-cell-wide strip running along phi.
    ieta = 2 * (n1 // (2 * nphi)) + 1 - (n1 % 2)
    iphi = (n1 // 2) % nphi
    return sm, ieta, iphi
## Converts local eta and phi, to real world eta and phi
## Converts local eta and phi, to real world eta and phi
def to_eta_phi(sm, ieta, iphi, ieta_int = None):
    """Convert supermodule-local tower indices to real-world (eta, phi).

    sm -- supermodule number (0-19), selects the fit coefficient row
    ieta, iphi -- tower indices inside the supermodule; may be
        half-integral when evaluating the corners of a cell
    ieta_int -- the integral tower index the (possibly shifted) ieta
        belongs to; defaults to round(ieta)
    Returns (eta, phi).
    """
    if ieta_int == None:
        ieta_int = round(ieta)
    # Per-supermodule polynomial coefficients for pseudorapidity, one row
    # per supermodule 0-19 (even/odd rows are the two sides of a pair;
    # several geometrically identical supermodules share a row verbatim).
    # Terms: 1, iphi, iphi^2, ieta, ieta*iphi, ieta*iphi^2.
    coeff_eta = [
        [ 0.6538761263736301, 0.0012502455282809458,
          -0.00005435850122961024, -0.013757648176291852,
          -0.000025569020644999157, 1.1116965497826313e-6 ],
        [ -0.007266662087911644, -0.000048501557965879184,
          2.108763389820745e-6, -0.013757648176291801,
          -0.000025569020645007092, 1.1116965497829259e-6 ],
        [ 0.6538761263736301, 0.0012502455282809458,
          -0.00005435850122961024, -0.013757648176291852,
          -0.000025569020644999157, 1.1116965497826313e-6 ],
        [ -0.007266662087911644, -0.000048501557965879184,
          2.108763389820745e-6, -0.013757648176291801,
          -0.000025569020645007092, 1.1116965497829259e-6 ],
        [ 0.6538761263736301, 0.0012502455282809458,
          -0.00005435850122961024, -0.013757648176291852,
          -0.000025569020644999157, 1.1116965497826313e-6 ],
        [ -0.007266662087911644, -0.000048501557965879184,
          2.108763389820745e-6, -0.013757648176291801,
          -0.000025569020645007092, 1.1116965497829259e-6 ],
        [ 0.6538761263736301, 0.0012502455282809458,
          -0.00005435850122961024, -0.013757648176291852,
          -0.000025569020644999157, 1.1116965497826313e-6 ],
        [ -0.007266662087911644, -0.000048501557965879184,
          2.108763389820745e-6, -0.013757648176291801,
          -0.000025569020645007092, 1.1116965497829259e-6 ],
        [ 0.6538761263736301, 0.0012502455282809458,
          -0.00005435850122961024, -0.013757648176291852,
          -0.000025569020644999157, 1.1116965497826313e-6 ],
        [ -0.007266662087911644, -0.000048501557965879184,
          2.108763389820745e-6, -0.013757648176291801,
          -0.000025569020645007092, 1.1116965497829259e-6 ],
        [ 0.6538955994897965, 0.0012327876984126996,
          -0.00005219165046974383, -0.013757946609494863,
          -0.000025332446808511946, 1.086380600873213e-6 ],
        [ -0.0072721088435373045, -0.00004216269841272758,
          1.131762228686001e-6, -0.01375794660949486,
          -0.00002533244680851135, 1.0863806008729037e-6 ],
        [ 0.6537747727272758, 0.0012291942148758277,
          -0.00005344322673373451, -0.01375054364989851,
          -0.00002391323211245261, 1.0397057440196336e-6 ],
        [ -0.22750791958042052, -0.0004878840193896497,
          0.000021212348669116138, -0.01375054364989848,
          -0.00002391323211246439, 1.0397057440201827e-6 ],
        [ 0.6537747727272758, 0.0012291942148758277,
          -0.00005344322673373451, -0.01375054364989851,
          -0.00002391323211245261, 1.0397057440196336e-6 ],
        [ -0.22750791958042052, -0.0004878840193896497,
          0.000021212348669116138, -0.01375054364989848,
          -0.00002391323211246439, 1.0397057440201827e-6 ],
        [ 0.6537747727272758, 0.0012291942148758277,
          -0.00005344322673373451, -0.01375054364989851,
          -0.00002391323211245261, 1.0397057440196336e-6 ],
        [ -0.22750791958042052, -0.0004878840193896497,
          0.000021212348669116138, -0.01375054364989848,
          -0.00002391323211246439, 1.0397057440201827e-6 ],
        [ 0.6538955994897965, 0.0012327876984126996,
          -0.00005219165046974383, -0.013757946609494863,
          -0.000025332446808511946, 1.086380600873213e-6 ],
        [ -0.0072721088435373045, -0.00004216269841272758,
          1.131762228686001e-6, -0.01375794660949486,
          -0.00002533244680851135, 1.0863806008729037e-6 ] ]
    # Per-supermodule coefficients for the azimuth.
    # Terms: 1, (ieta mod 2), iphi, iphi*(ieta mod 2) -- the parity term
    # accounts for the staggering of towers within a two-cell strip.
    coeff_phi = [
        [ 1.4153378750000025, 0.0006568888888901503,
          0.013518083333333313, -0.000057045289855166094 ],
        [ 1.4159947638888941, -0.0006568888888867641,
          0.013461038043478247, 0.00005704528985495655 ],
        [ 1.7644118472222259, 0.000646180555557041,
          0.01351747826086953, -0.00005638586956534543 ],
        [ 1.7650580277777765, -0.0006461805555546355,
          0.013461092391304308, 0.00005638586956517725 ],
        [ 2.1134724027777834, 0.0006524861111138665,
          0.013517768115941999, -0.00005654166666683834 ],
        [ 2.1141248888888984, -0.0006524861111081304,
          0.013461226449275318, 0.000056541666666427527 ],
        [ 2.462534333333343, 0.0006581805555594584,
          0.013518074275362234, -0.00005697644927563384 ],
        [ 2.463192513888897, -0.0006581805555524085,
          0.013461097826086869, 0.000056976449275180296 ],
        [ 2.8116099861111175, 0.0006471666666698442,
          0.013517443840579645, -0.00005650181159440986 ],
        [ 2.8122571527777906, -0.0006471666666621282,
          0.013460942028985465, 0.00005650181159390807 ],
        [ -3.12222569444445, 0.0006597222222129356,
          0.013424007936509517, -0.00005902777777630214 ],
        [ -3.121565972222227, -0.0006597222222309393,
          0.013364980158731608, 0.000059027777779336 ],
        [ -1.7262874166666704, 0.0008678541666655771,
          0.013520961956521787, -0.00007551086956511075 ],
        [ -1.7254195625000033, -0.000867854166667594,
          0.013445451086956564, 0.00007551086956536232 ],
        [ -1.3772168333333357, 0.000859541666666056,
          0.013520576086956563, -0.00007492391304337749 ],
        [ -1.3763572916666709, -0.000859541666667937,
          0.013445652173913077, 0.00007492391304362568 ],
        [ -1.0281504166666693, 0.0008688541666657723,
          0.013520394021739162, -0.00007519021739121045 ],
        [ -1.0272815625000018, -0.000868854166667404,
          0.013445203804347865, 0.00007519021739139448 ],
        [ -0.6787590277777789, 0.0006451388888869277,
          0.013420585317460642, -0.00005367063492026231 ],
        [ -0.67811388888889, -0.0006451388888908697,
          0.01336691468254001, 0.000053670634921005446 ] ]
    if abs(ieta - round(ieta)) >= 0.25:
        # ieta sits on a cell edge (half-integral): take the parity from
        # the parent tower index instead.
        ieta_mod_2 = ieta_int % 2
    else:
        ieta_mod_2 = ieta % 2
    eta = coeff_eta[sm][0] + coeff_eta[sm][1] * iphi + \
        coeff_eta[sm][2] * iphi * iphi + \
        coeff_eta[sm][3] * ieta + \
        coeff_eta[sm][4] * ieta * iphi + \
        coeff_eta[sm][5] * ieta * iphi * iphi
    phi = coeff_phi[sm][0] + coeff_phi[sm][1] * ieta_mod_2 + \
        coeff_phi[sm][2] * iphi + \
        coeff_phi[sm][3] * iphi * ieta_mod_2
    if phi < -2:
        # Shift by 2*pi for supermodules whose fit sits just below -pi --
        # presumably so they land in the plotted azimuth branch; confirm
        # against the pad ranges in alice_emcal_canvas_pad.
        phi += 2 * math.pi
    return eta, phi
## Creates the emcal and dcal on the pad with real eta and phi range
## Creates the emcal and dcal on the pad with real eta and phi range
def alice_emcal_canvas_pad(application_name, canvas_width = 1680):
    """Create the main canvas plus one pad per detector arm.

    application_name -- window title for the canvas
    canvas_width -- canvas width in pixels; the height is derived so that
        eta and phi are drawn with equal scale across both pads
    Returns (canvas, pads) where pads is the list from multipanel_pad().
    """
    left_margin = 0.03125 * 1.5
    right_margin = 0.03125 * 2.1875
    top_margin = 0.03125 * 1.5
    bottom_margin = 0.03125 * 2.75
    # Azimuth coverage (rad) of the two pads; pseudorapidity is shared.
    azimuth_range = [(-1.85, -0.5), (1.3, 3.35)]
    pseudorapidity_range = (-0.8, 0.8)
    # Bug fix: this used map(), which returns a one-shot, non-indexable
    # iterator on Python 3 while multipanel_pad() needs len() and
    # indexing; the list comprehension is identical under Python 2.
    pad_layout = ([abs(r[1] - r[0]) for r in azimuth_range],
                  [1.0])
    # Choose the height so one unit of eta has the same pixel size as one
    # unit of phi after the margins are accounted for.
    canvas_height = canvas_width * \
        (1 - left_margin - right_margin) / \
        sum(pad_layout[0]) * \
        abs(pseudorapidity_range[1] -
            pseudorapidity_range[0]) / \
        (1 - top_margin - bottom_margin)
    canvas = ROOT.TCanvas(
        'canvas%d' % 0, application_name,
        canvas_width + 4, int(round(canvas_height)) + 28)
    canvas.SetLeftMargin(left_margin)
    canvas.SetRightMargin(right_margin)
    canvas.SetTopMargin(top_margin)
    canvas.SetBottomMargin(bottom_margin)
    return canvas, multipanel_pad(canvas, pad_layout)
## Fills the emcal and dcal canvas with either channel residue, energy/hits, chi-square, and more based on input from calib_emcal
## Fills the emcal and dcal canvas with either channel residue, energy/hits, chi-square, and more based on input from calib_emcal
def update(canvas, pad, root_histogram, root_histogram_index = 0,
           outline = False, text_size_sm = 0.03125 * 0.875,
           offset_sm = 3.5, text_size_ieta_iphi = 0.03125 * 0.625,
           offset_ieta_iphi = (0.5, 1),
           azimuth_range = ((-1.85, -0.5), (1.3, 3.35)),
           pseudorapidity_range = (-0.8, 0.8)):
    """Draw the per-cell content of root_histogram[root_histogram_index].

    canvas, pad -- objects produced by alice_emcal_canvas_pad()
    root_histogram -- list whose leading entries are the per-cell TH1D
        histograms; this function appends its own graphics primitives
        (frame TH2Ds, cell polylines, labels) as trailing *list* entries
        and removes the ones left over from a previous call
    root_histogram_index -- which TH1D in root_histogram to display
    outline -- also draw each cell's outline, not just its fill
    text_size_sm / offset_sm -- size and offset of the 'SM<n>' labels
    text_size_ieta_iphi / offset_ieta_iphi -- size and offsets of the
        tower index labels
    azimuth_range / pseudorapidity_range -- axis ranges of the two pads
    """
    # Drop the graphics objects appended by any previous call: everything
    # from the first *list* entry onward.  Bug fix: the original assigned
    # the generator result to 'index_list' while testing 'list_index', so
    # stale graphics were never cleared; next(..., None) also replaces the
    # Python 2-only .next() call and the StopIteration handler.
    list_index = next(
        (i for i, v in enumerate(root_histogram) if isinstance(v, list)),
        None)
    if list_index is not None:
        del root_histogram[list_index:]
    root_histogram.append([])
    scale_y = []
    for i in range(2):
        # One single-bin TH2D per pad, used only to define the axis frame.
        root_histogram[-1].append(ROOT.TH2D(
            'root_histogram%d_%d' %
            (len(root_histogram), len(root_histogram[-1])), '',
            1, azimuth_range[i][0], azimuth_range[i][1],
            1, pseudorapidity_range[0], pseudorapidity_range[1]))
        # Aspect correction between this pad's drawable area and the
        # canvas, used to keep offsets/ticks visually consistent.
        scale_y.append(
            (1 - pad[i].GetTopMargin() - pad[i].GetBottomMargin()) *
            pad[i].GetHNDC() * canvas.GetWh() /
            ((1 - pad[i].GetLeftMargin() - pad[i].GetRightMargin()) *
             pad[i].GetWNDC() * canvas.GetWw()))
        if i == 0:
            root_histogram[-1][-1].GetXaxis().SetLabelFont(42)
            root_histogram[-1][-1].GetYaxis().SetLabelFont(42)
            root_histogram[-1][-1].GetYaxis().SetTitle('\\eta')
            root_histogram[-1][-1].GetYaxis().SetTitleOffset(
                1.125 * scale_y[i])
        elif i == 1:
            # Tiny negative fill forces the palette axis to be drawn.
            root_histogram[-1][-1].Fill(0, 0, -1e-12)
            root_histogram[-1][-1].GetXaxis().SetLabelFont(42)
            root_histogram[-1][-1].GetXaxis().SetTitle('\\varphi')
            root_histogram[-1][-1].GetYaxis().SetLabelSize(0)
            root_histogram[-1][-1].GetZaxis().SetLabelFont(42)
            root_histogram[-1][-1].GetYaxis().SetTickLength(
                root_histogram[-1][-1].GetYaxis().GetTickLength() *
                scale_y[-1])
            root_histogram[-1][-1].GetZaxis().SetTickLength(
                root_histogram[-1][-1].GetZaxis().GetTickLength() *
                scale_y[-1])
    # Common z (color) range, slightly padded at the top.
    u0 = root_histogram[root_histogram_index].GetMinimum()
    u1 = root_histogram[root_histogram_index].GetMaximum()
    u1 = u0 + (1 + 1e-3) * (u1 - u0)
    #u0, u1 = 200, 1000
    root_histogram[-1][-1].SetMinimum(u0)
    root_histogram[-1][-1].SetMaximum(u1)
    root_histogram[-1][-1].GetZaxis().SetRangeUser(u0, u1)
    root_histogram.append([])
    ncell = 17664
    ncell_emcal = 12288
    pad[1].cd()
    root_histogram[-2][1].Draw('colz')
    pad[1].Update()
    palette = root_histogram[-2][1].GetListOfFunctions().FindObject(
        'palette')
    if palette is not None:
        # Widen the palette bar by the pad aspect correction.
        palette.SetX2NDC(
            palette.GetX1NDC() + scale_y[1] *
            (palette.GetX2NDC() - palette.GetX1NDC()))
    pad_index = 1
    for i in range(ncell):
        if i == ncell_emcal:
            # All EMCal cells drawn; finalize and switch to the DCal pad.
            root_histogram[-2][1].Draw('zsame')
            pad[0].cd()
            root_histogram[-2][0].Draw('colz')
            pad[0].Update()
            pad_index = 0
        content = root_histogram[root_histogram_index].\
            GetBinContent(i + 1)
        sm, ieta, iphi = to_sm_ieta_iphi(i)
        # The cell's four corners in real (eta, phi); the first corner is
        # repeated to close the polygon.
        p = [
            to_eta_phi(sm, ieta - 0.5, iphi - 0.5, ieta),
            to_eta_phi(sm, ieta - 0.5, iphi + 0.5, ieta),
            to_eta_phi(sm, ieta + 0.5, iphi + 0.5, ieta),
            to_eta_phi(sm, ieta + 0.5, iphi - 0.5, ieta),
            to_eta_phi(sm, ieta - 0.5, iphi - 0.5, ieta)
        ]
        x = array.array('d', [p[k][1] for k in range(len(p))])
        y = array.array('d', [p[k][0] for k in range(len(p))])
        root_histogram[-1].append(ROOT.TPolyLine(len(p), x, y))
        if content != 0:
            u = content
            if pad[pad_index].GetLogz() != 0 and u > 0:
                u = math.log10(u)
            # NOTE(review): the clamp uses root_histogram[0] while the
            # plotted data comes from root_histogram_index; identical for
            # the default index 0 -- confirm if other indices are used.
            u = max(root_histogram[0].GetMinimum(), min(
                root_histogram[0].GetMaximum(), u))
            color = palette.GetValueColor(u)
            root_histogram[-1][-1].SetFillColor(color)
        root_histogram[-1][-1].SetLineColor(ROOT.kGray)
        root_histogram[-1][-1].SetLineWidth(1)
        if content != 0:
            root_histogram[-1][-1].Draw('f')
        if outline:
            root_histogram[-1][-1].Draw()
        neta = sm >= 12 and sm < 18 and 32 or 48
        nphi = sm in (10, 11, 18, 19) and 8 or 24
        if ieta == 0 and iphi == 0:
            # Label each supermodule once, at its first cell.
            # -0.5 .. neta - 0.5
            y, x = to_eta_phi(
                sm, -(offset_sm + 0.5) + (neta + 2 * offset_sm) *
                (sm % 2),
                0.5 * (nphi - 1))
            root_histogram[-1].append(ROOT.TText(x, y, 'SM%d' % sm))
            root_histogram[-1][-1].SetTextAlign(
                sm % 2 == 0 and 21 or 23)
            root_histogram[-1][-1].SetTextFont(42)
            root_histogram[-1][-1].SetTextSize(text_size_sm)
            root_histogram[-1][-1].Draw()
            if sm % 2 == 1:
                # Tick labels for iphi along the odd supermodule's edge.
                if nphi == 8:
                    phi_list = 0, nphi - 1
                else:
                    phi_list = 0, 10, 20, nphi - 1
                for phi in phi_list:
                    y, x = to_eta_phi(
                        sm, neta + offset_ieta_iphi[0], phi)
                    root_histogram[-1].append(
                        ROOT.TText(x, y, '%d' % phi))
                    root_histogram[-1][-1].SetTextAlign(23)
                    root_histogram[-1][-1].SetTextFont(42)
                    root_histogram[-1][-1].SetTextSize(
                        text_size_ieta_iphi)
                    root_histogram[-1][-1].Draw()
            if sm in (0, 1, 12, 13):
                # Tick labels for ieta along the first supermodule pair.
                if neta == 32:
                    eta_list = 0, 10, 20, neta - 1
                else:
                    eta_list = 0, 10, 20, 30, 40, neta - 1
                for eta in eta_list:
                    eta_shifted = eta
                    if sm == 1 and eta == 0:
                        eta_shifted += 0.5
                    elif sm == 0 and eta == neta - 1:
                        eta_shifted -= 0.5
                    y, x = to_eta_phi(
                        sm, eta_shifted, -offset_ieta_iphi[1])
                    root_histogram[-1].append(
                        ROOT.TText(x, y, '%d' % eta))
                    root_histogram[-1][-1].SetTextAlign(32)
                    root_histogram[-1][-1].SetTextFont(42)
                    root_histogram[-1][-1].SetTextSize(
                        text_size_ieta_iphi)
                    root_histogram[-1][-1].Draw()
    canvas.Update()
#Main block
if __name__ == '__main__':
    import os, sys
    # Make PyROOT importable from the ROOT installation before importing.
    sys.path.append(os.path.join(os.environ['ROOTSYS'], 'lib'))
    import math, array, re, ROOT
    application_name = ' '.join(sys.argv)
    set_root_style()
    canvas, pad = alice_emcal_canvas_pad(application_name)
    ncell = 17664
    root_histogram = []
    log_z = False
    filename_list = []
    plot_range = None
    # Command line: -l/--log-z selects a log color scale, "min:max" fixes
    # the z range, everything else is an input file name.
    for f in sys.argv[1:]:
        if f in ('-l', '--log-z'):
            log_z = True
        elif f.find(':') != -1:
            # list() keeps the result indexable under Python 3 as well.
            plot_range = list(map(float, f.split(':')))
        else:
            filename_list.append(f)
    for filename in filename_list:
        root_histogram.append(ROOT.TH1D(
            'root_histogram%d' % len(root_histogram), '',
            ncell, -0.5, ncell + 0.5))
        f = open(filename, 'r')
        line = f.readline()
        while line != "":
            # Bug fix: the original re.sub("+", ...) chain used an invalid
            # regular expression ("nothing to repeat") and raised at
            # runtime.  str.split() with no argument strips both ends and
            # collapses runs of whitespace, which is what was intended.
            line_split = line.split()
            # Skip blank/short lines instead of raising IndexError.
            if len(line_split) >= 2:
                cell_id = int(line_split[0]) - 1
                count = float(line_split[1])
                root_histogram[-1].Fill(cell_id, count)
            line = f.readline()
        # Close the input file (was leaked before).
        f.close()
        # Every file after the first divides into the first histogram,
        # e.g. to form energy-per-hit style ratios.
        if filename != filename_list[0]:
            root_histogram[0].Divide(root_histogram[0], root_histogram[-1])
    if plot_range is not None:
        root_histogram[0].SetMinimum(plot_range[0])
        root_histogram[0].SetMaximum(plot_range[1])
    canvas.cd()
    for i in range(len(pad)):
        pad[i].Draw()
        if log_z:
            pad[i].SetLogz()
    update(canvas, pad, root_histogram)
    ROOT.gApplication.Run()
| bsd-3-clause |
taotie12010/bigfour | common/lib/xmodule/xmodule/modulestore/tests/test_publish.py | 74 | 58785 | """
Test the publish code (mostly testing that publishing doesn't result in orphans)
"""
import ddt
import itertools
import os
import re
import unittest
import uuid
import xml.etree.ElementTree as ET
from contextlib import contextmanager
from nose.plugins.attrib import attr
from shutil import rmtree
from tempfile import mkdtemp
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.tests.test_split_w_old_mongo import SplitWMongoCourseBootstrapper
from xmodule.modulestore.tests.factories import check_mongo_calls, mongo_uses_error_check, CourseFactory, ItemFactory
from xmodule.modulestore.tests.test_cross_modulestore_import_export import (
MongoContentstoreBuilder, MODULESTORE_SETUPS,
DRAFT_MODULESTORE_SETUP, SPLIT_MODULESTORE_SETUP, MongoModulestoreBuilder,
)
@attr('mongo')
class TestPublish(SplitWMongoCourseBootstrapper):
    """
    Test the publish code (primarily the cases that used to cause orphans).
    """
    def _create_course(self):
        """
        Create the course, publish all verticals
        * some detached items

        Each ``check_mongo_calls(finds, sends)`` context below asserts the
        exact number of Mongo queries the wrapped operations may perform.
        """
        # There are 12 created items and 7 parent updates
        # create course: finds: 1 to verify uniqueness, 1 to find parents
        # sends: 1 to create course, 1 to create overview
        with check_mongo_calls(4, 2):
            super(TestPublish, self)._create_course(split=False)  # 2 inserts (course and overview)

        # with bulk will delay all inheritance computations which won't be added into the mongo_calls
        with self.draft_mongo.bulk_operations(self.old_course_key):
            # finds: 1 for parent to add child and 2 to get ancestors
            # sends: 1 for insert, 1 for parent (add child)
            with check_mongo_calls(3, 2):
                self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)

            with check_mongo_calls(4, 2):
                self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)
            # For each vertical (2) created:
            #   - load draft
            #   - load non-draft
            #   - get last error
            #   - load parent
            #   - get ancestors
            #   - load inheritable data
            with check_mongo_calls(15, 6):
                self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)
                self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)
            # For each (4) item created
            #   - try to find draft
            #   - try to find non-draft
            #   - compute what is parent
            #   - load draft parent again & compute its parent chain up to course
            # count for updates increased to 16 b/c of edit_info updating
            with check_mongo_calls(36, 16):
                self._create_item('html', 'Html1', "<p>Goodbye</p>", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)
                self._create_item(
                    'discussion', 'Discussion1',
                    "discussion discussion_category=\"Lecture 1\" discussion_id=\"a08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 1\"/>\n",
                    {
                        "discussion_category": "Lecture 1",
                        "discussion_target": "Lecture 1",
                        "display_name": "Lecture 1 Discussion",
                        "discussion_id": "a08bfd89b2aa40fa81f2c650a9332846"
                    },
                    'vertical', 'Vert1',
                    split=False
                )
                self._create_item('html', 'Html2', "<p>Hello</p>", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)
                self._create_item(
                    'discussion', 'Discussion2',
                    "discussion discussion_category=\"Lecture 2\" discussion_id=\"b08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 2\"/>\n",
                    {
                        "discussion_category": "Lecture 2",
                        "discussion_target": "Lecture 2",
                        "display_name": "Lecture 2 Discussion",
                        "discussion_id": "b08bfd89b2aa40fa81f2c650a9332846"
                    },
                    'vertical', 'Vert2',
                    split=False
                )
            with check_mongo_calls(2, 2):
                # 2 finds b/c looking for non-existent parents
                self._create_item('static_tab', 'staticuno', "<p>tab</p>", {'display_name': 'Tab uno'}, None, None, split=False)
                self._create_item('course_info', 'updates', "<ol><li><h2>Sep 22</h2><p>test</p></li></ol>", {}, None, None, split=False)

    def test_publish_draft_delete(self):
        """
        To reproduce a bug (STUD-811) publish a vertical, convert to draft, delete a child, move a child, publish.
        See if deleted and moved children still is connected or exists in db (bug was disconnected but existed)
        """
        vert_location = self.old_course_key.make_usage_key('vertical', block_id='Vert1')
        item = self.draft_mongo.get_item(vert_location, 2)
        # Finds:
        #   1 get draft vert,
        #   2 compute parent
        #   3-14 for each child: (3 children x 4 queries each)
        #      get draft, compute parent, and then published child
        #      compute inheritance
        #   15 get published vert
        #   16-18 get ancestor chain
        #   19 compute inheritance
        #   20-22 get draft and published vert, compute parent
        # Sends:
        #   delete the subtree of drafts (1 call),
        #   update the published version of each node in subtree (4 calls),
        #   update the ancestors up to course (2 calls)
        if mongo_uses_error_check(self.draft_mongo):
            max_find = 23
        else:
            max_find = 22
        with check_mongo_calls(max_find, 7):
            self.draft_mongo.publish(item.location, self.user_id)

        # verify status
        item = self.draft_mongo.get_item(vert_location, 0)
        self.assertFalse(getattr(item, 'is_draft', False), "Item was published. Draft should not exist")
        # however, children are still draft, but I'm not sure that's by design

        # delete the draft version of the discussion
        location = self.old_course_key.make_usage_key('discussion', block_id='Discussion1')
        self.draft_mongo.delete_item(location, self.user_id)

        draft_vert = self.draft_mongo.get_item(vert_location, 0)
        self.assertTrue(getattr(draft_vert, 'is_draft', False), "Deletion didn't convert parent to draft")
        self.assertNotIn(location, draft_vert.children)
        # move the other child
        other_child_loc = self.old_course_key.make_usage_key('html', block_id='Html2')
        draft_vert.children.remove(other_child_loc)
        other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', block_id='Vert2'), 0)
        other_vert.children.append(other_child_loc)
        self.draft_mongo.update_item(draft_vert, self.user_id)
        self.draft_mongo.update_item(other_vert, self.user_id)
        # publish
        self.draft_mongo.publish(vert_location, self.user_id)
        item = self.draft_mongo.get_item(draft_vert.location, revision=ModuleStoreEnum.RevisionOption.published_only)
        self.assertNotIn(location, item.children)
        self.assertIsNone(self.draft_mongo.get_parent_location(location))
        with self.assertRaises(ItemNotFoundError):
            self.draft_mongo.get_item(location)
        self.assertNotIn(other_child_loc, item.children)
        self.assertTrue(self.draft_mongo.has_item(other_child_loc), "Oops, lost moved item")
class DraftPublishedOpTestCourseSetup(unittest.TestCase):
    """
    This class exists to test XML import and export between different modulestore
    classes.
    """

    def _create_course(self, store):
        """
        Create the course that'll be published below. The course has a binary structure, meaning:
        The course has two chapters (chapter_0 & chapter_1),
        each of which has two sequentials (sequential_0/1 & sequential_2/3),
        each of which has two verticals (vertical_0/1 - vertical_6/7),
        each of which has two units (unit_0/1 - unit_14/15).

        Side effects: sets one attribute on ``self`` per created block
        (e.g. ``self.chapter00``) and fills ``self.course_db``,
        ``self.all_verticals`` and ``self.all_units``.
        """
        def _make_block_id(block_type, num):
            """
            Given a block_type/num, return a block id.
            """
            return '{}{:02d}'.format(block_type, num)

        def _make_course_db_entry(parent_type, parent_id, block_id, idx, child_block_type, child_block_id_base):
            """
            Make a single entry for the course DB.
            """
            return {
                'parent_type': parent_type,
                'parent_id': parent_id,
                # Two children per parent, so the index alternates 0/1.
                'index_in_children_list': idx % 2,
                'filename': block_id,
                'child_ids': (
                    (child_block_type, _make_block_id(child_block_id_base, idx * 2)),
                    (child_block_type, _make_block_id(child_block_id_base, idx * 2 + 1)),
                )
            }

        def _add_course_db_entry(parent_type, parent_id, block_id, block_type, idx, child_type, child_base):
            """
            Add a single entry for the course DB referenced by the tests below.
            """
            self.course_db.update(
                {
                    (block_type, block_id): _make_course_db_entry(
                        parent_type, parent_id, block_id, idx, child_type, child_base
                    )
                }
            )

        def _create_binary_structure_items(parent_type, block_type, num_items, child_block_type):
            """
            Add a level of the binary course structure by creating the items as children of the proper parents.
            """
            parent_id = 'course'
            for idx in xrange(0, num_items):
                if parent_type != 'course':
                    # Binary tree: item idx hangs off parent idx / 2
                    # (Python 2 integer division).
                    parent_id = _make_block_id(parent_type, idx / 2)
                parent_item = getattr(self, parent_id)
                block_id = _make_block_id(block_type, idx)
                setattr(self, block_id, ItemFactory.create(
                    parent_location=parent_item.location,
                    category=block_type,
                    modulestore=store,
                    publish_item=False,
                    location=self.course.id.make_usage_key(block_type, block_id)
                ))
                _add_course_db_entry(
                    parent_type, parent_id, block_id, block_type, idx, child_block_type, child_block_type
                )

        # Create all the course items on the draft branch.
        with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
            # Create course.
            self.course = CourseFactory.create(
                org='test_org',
                number='999',
                run='test_run',
                display_name='My Test Course',
                modulestore=store
            )
            with store.bulk_operations(self.course.id):
                # Create chapters.
                _create_binary_structure_items('course', 'chapter', 2, 'sequential')
                _create_binary_structure_items('chapter', 'sequential', 4, 'vertical')
                _create_binary_structure_items('sequential', 'vertical', 8, 'html')
                _create_binary_structure_items('vertical', 'html', 16, '')

        # Create a list of all verticals for convenience.
        block_type = 'vertical'
        for idx in xrange(0, 8):
            block_id = _make_block_id(block_type, idx)
            self.all_verticals.append((block_type, block_id))

        # Create a list of all html units for convenience.
        block_type = 'html'
        for idx in xrange(0, 16):
            block_id = _make_block_id(block_type, idx)
            self.all_units.append((block_type, block_id))

    def setUp(self):
        self.user_id = -3
        self.course = None

        # For convenience, maintain a list of (block_type, block_id) pairs for all verticals/units.
        self.all_verticals = []
        self.all_units = []

        # Course block database is keyed on (block_type, block_id) pairs.
        # It's built during the course creation below and contains all the parent/child
        # data needed to check the OLX.
        self.course_db = {}

        super(DraftPublishedOpTestCourseSetup, self).setUp()
class OLXFormatChecker(unittest.TestCase):
    """
    Examines the on-disk course export to verify that specific items are present/missing
    in the course export.
    Currently assumes that the course is broken up into different subdirs.

    Requires from subclasses:
    self.root_export_dir - absolute root directory of course exports
    self.export_dir - top-level course export directory name
    self._ensure_exported() - A method that will export the course under test
    to self.export_dir.
    """
    # NOTE(review): this assigns to the shared unittest.TestCase class
    # attribute, enabling long assertion messages for every TestCase in
    # the process, not just subclasses of this one.
    unittest.TestCase.longMessage = True

    def _ensure_exported(self):
        """
        Method to ensure a course export - defined by subclass.
        """
        raise NotImplementedError()

    def _get_course_export_dir(self):
        """
        Ensure that the course has been exported and return course export dir.
        """
        self._ensure_exported()

        block_path = os.path.join(self.root_export_dir, self.export_dir)  # pylint: disable=no-member
        self.assertTrue(
            os.path.isdir(block_path),
            msg='{} is not a dir.'.format(block_path)
        )
        return block_path

    def _get_block_type_path(self, course_export_dir, block_type, draft):
        """
        Return the path to the block type subdirectory, factoring in drafts.
        """
        block_path = course_export_dir
        if draft:
            # Draft blocks are exported under an extra 'drafts' directory.
            block_path = os.path.join(block_path, 'drafts')
        return os.path.join(block_path, block_type)

    def _get_block_filename(self, block_id):
        """
        Return the course export filename for a block.
        """
        return '{}.xml'.format(block_id)

    def _get_block_contents(self, block_subdir_path, block_id):
        """
        Determine the filename containing the block info.
        Return the file contents.
        """
        self._ensure_exported()

        block_file = self._get_block_filename(block_id)
        block_file_path = os.path.join(block_subdir_path, block_file)
        self.assertTrue(
            os.path.isfile(block_file_path),
            msg='{} is not an existing file.'.format(block_file_path)
        )
        with open(block_file_path, "r") as file_handle:
            return file_handle.read()

    def assertElementTag(self, element, tag):
        """
        Assert than an XML element has a specific tag.

        Arguments:
            element (ElementTree.Element): the element to check.
            tag (str): The tag to validate.
        """
        self.assertEqual(element.tag, tag)

    def assertElementAttrsSubset(self, element, attrs):
        """
        Assert that an XML element has at least the specified set of
        attributes.

        Arguments:
            element (ElementTree.Element): the element to check.
            attrs (dict): A dict mapping {attr: regex} where
                each value in the dict is a regular expression
                to match against the named attribute.
        """
        for attribute, regex in attrs.items():
            self.assertRegexpMatches(element.get(attribute), regex)

    def parse_olx(self, block_type, block_id, **kwargs):
        """
        Parse and return the OLX of one exported block as an XML element.

        Arguments:
            block_type (str): The block-type of the XBlock to check.
            block_id (str): The block-id of the XBlock to check.
            draft (bool): If ``True``, run the assertions against the draft version of the
                identified XBlock.
        """
        course_export_dir = self._get_course_export_dir()
        is_draft = kwargs.pop('draft', False)

        block_path = self._get_block_type_path(course_export_dir, block_type, is_draft)
        block_contents = self._get_block_contents(block_path, block_id)
        return ET.fromstring(block_contents)

    def assertOLXMissing(self, block_type, block_id, **kwargs):
        """
        Assert that a particular block does not exist in a particular draft/published location.

        Arguments:
            block_type (str): The block-type of the XBlock to check.
            block_id (str): The block-id of the XBlock to check.
            draft (bool): If ``True``, assert that the block identified by ``block_type``
                ``block_id`` isn't a draft in the exported OLX.
        """
        course_export_dir = self._get_course_export_dir()
        is_draft = kwargs.pop('draft', False)
        block_path = self._get_block_type_path(course_export_dir, block_type, is_draft)
        block_file_path = os.path.join(block_path, self._get_block_filename(block_id))
        self.assertFalse(
            os.path.exists(block_file_path),
            msg='{} exists but should not!'.format(block_file_path)
        )

    def assertParentReferences(self, element, course_key, parent_type, parent_id, index_in_children_list):
        """
        Assert that the supplied element references the supplied parents.

        Arguments:
            element: The element to check.
            course_key: The course the element is from.
            parent_type: The block_type of the expected parent node.
            parent_id: The block_id of the expected parent node.
            index_in_children_list: The expected index in the parent.
        """
        parent_key = course_key.make_usage_key(parent_type, parent_id)
        self.assertElementAttrsSubset(element, {
            'parent_url': re.escape(unicode(parent_key)),
            'index_in_children_list': re.escape(str(index_in_children_list)),
        })

    def assertOLXProperties(self, element, block_type, course_key, draft, **kwargs):
        """
        Assert that OLX properties (parent and child references) are satisfied.
        """
        child_types_ids = kwargs.pop('child_ids', None)
        filename = kwargs.pop('filename', None)

        self.assertElementTag(element, block_type)
        # Form the checked attributes based on the block type.
        if block_type == 'html':
            self.assertElementAttrsSubset(element, {'filename': filename})
        elif draft:
            # Draft items are expected to have certain XML attributes.
            self.assertParentReferences(
                element,
                course_key,
                **kwargs
            )

        # If children exist, construct regular expressions to check them.
        child_id_regex = None
        child_type = None
        if child_types_ids:
            # Grab the type of the first child as the type of all the children.
            child_type = child_types_ids[0][0]
            # Construct regex out of all the child_ids that are included.
            child_id_regex = '|'.join([child[1] for child in child_types_ids])

        for child in element:
            self.assertElementTag(child, child_type)
            self.assertElementAttrsSubset(child, {'url_name': child_id_regex})

    def _assertOLXBase(self, block_list, draft, published):  # pylint: disable=invalid-name
        """
        Check that all blocks in the list are draft blocks in the OLX format when the course is exported.
        """
        for block_data in block_list:
            block_params = self.course_db.get(block_data)
            self.assertIsNotNone(block_params)
            (block_type, block_id) = block_data
            if draft:
                element = self.parse_olx(block_type, block_id, draft=True)
                self.assertOLXProperties(element, block_type, self.course.id, draft=True, **block_params)
            else:
                self.assertOLXMissing(block_type, block_id, draft=True)
            if published:
                element = self.parse_olx(block_type, block_id, draft=False)
                self.assertOLXProperties(element, block_type, self.course.id, draft=False, **block_params)
            else:
                self.assertOLXMissing(block_type, block_id, draft=False)

    def assertOLXIsDraftOnly(self, block_list):
        """
        Check that all blocks in the list are only draft blocks in the OLX format when the course is exported.
        """
        self._assertOLXBase(block_list, draft=True, published=False)

    def assertOLXIsPublishedOnly(self, block_list):
        """
        Check that all blocks in the list are only published blocks in the OLX format when the course is exported.
        """
        self._assertOLXBase(block_list, draft=False, published=True)

    def assertOLXIsDraftAndPublished(self, block_list):
        """
        Check that all blocks in the list are both draft and published in the OLX format when the course is exported.
        """
        self._assertOLXBase(block_list, draft=True, published=True)

    def assertOLXIsDeleted(self, block_list):
        """
        Check that all blocks in the list are no longer in the OLX format when the course is exported.
        """
        for block_data in block_list:
            (block_type, block_id) = block_data
            self.assertOLXMissing(block_type, block_id, draft=True)
            self.assertOLXMissing(block_type, block_id, draft=False)
class DraftPublishedOpBaseTestSetup(OLXFormatChecker, DraftPublishedOpTestCourseSetup):
    """
    Setup base class for draft/published/OLX tests.

    Provides per-test export/content/module stores plus helpers that perform
    elemental draft/published operations (publish, unpublish, delete,
    convert-to-draft, revert-to-published) on lists of (block_type, block_id)
    pairs.  After each elemental operation the export directory name is
    shifted so the next export (and OLX check) reflects the new course state.
    """
    EXPORTED_COURSE_BEFORE_DIR_NAME = 'exported_course_before'
    EXPORTED_COURSE_AFTER_DIR_NAME = 'exported_course_after_{}'

    def setUp(self):
        super(DraftPublishedOpBaseTestSetup, self).setUp()
        # Name of the directory (under root_export_dir) used for the next export.
        self.export_dir = self.EXPORTED_COURSE_BEFORE_DIR_NAME
        # These are (re-)created per-test by _setup_test().
        self.root_export_dir = None
        self.contentstore = None
        self.store = None

    @contextmanager
    def _create_export_dir(self):
        """
        Create a temporary export dir - and clean it up when done.
        """
        try:
            export_dir = mkdtemp()
            yield export_dir
        finally:
            rmtree(export_dir, ignore_errors=True)

    @contextmanager
    def _setup_test(self, modulestore_builder):
        """
        Create the export dir, contentstore, and modulestore for a test.
        """
        with self._create_export_dir() as self.root_export_dir:
            # Construct the contentstore for storing the first import.
            with MongoContentstoreBuilder().build() as self.contentstore:
                # Construct the modulestore for storing the first import
                # (using the previously created contentstore).
                with modulestore_builder.build(contentstore=self.contentstore) as self.store:
                    # Create the course.
                    self._create_course(self.store)
                    yield

    def _ensure_exported(self):
        """
        Check that the course has been exported. If not, export it.
        """
        exported_course_path = os.path.join(self.root_export_dir, self.export_dir)
        if not (os.path.exists(exported_course_path) and os.path.isdir(exported_course_path)):
            # Export the course.
            export_course_to_xml(
                self.store,
                self.contentstore,
                self.course.id,
                self.root_export_dir,
                self.export_dir,
            )

    @property
    def is_split_modulestore(self):
        """
        ``True`` when modulestore under test is a SplitMongoModuleStore.
        """
        return self.store.get_modulestore_type(self.course.id) == ModuleStoreEnum.Type.split

    @property
    def is_old_mongo_modulestore(self):
        """
        ``True`` when modulestore under test is a MongoModuleStore.
        """
        return self.store.get_modulestore_type(self.course.id) == ModuleStoreEnum.Type.mongo

    def _make_new_export_dir_name(self):
        """
        Make a unique name for the new export dir.
        """
        return self.EXPORTED_COURSE_AFTER_DIR_NAME.format(unicode(uuid.uuid4())[:8])

    def _shift_export_dir(self):
        """
        Shift to a fresh post-operation export dir name, so the next export
        reflects the just-completed elemental operation.
        """
        self.export_dir = self._make_new_export_dir_name()

    def _get_block_item(self, branch, block_type, block_id):
        """
        Retrieve the test item identified by (block_type, block_id) from the
        given modulestore branch.  The branch setting is active only for the
        retrieval itself, matching the original inline pattern.
        """
        with self.store.branch_setting(branch):
            return self.store.get_item(
                self.course.id.make_usage_key(block_type=block_type, block_id=block_id)
            )

    def publish(self, block_list):
        """
        Get each item, publish it, and shift to a new course export dir.
        """
        for (block_type, block_id) in block_list:
            # Get the specified test item from the draft branch.
            test_item = self._get_block_item(ModuleStoreEnum.Branch.draft_preferred, block_type, block_id)
            # Publish the draft item to the published branch.
            self.store.publish(test_item.location, self.user_id)
        # The elemental operation is complete; shift to the post-operation export dir.
        self._shift_export_dir()

    def unpublish(self, block_list):
        """
        Get each item, unpublish it, and shift to a new course export dir.
        """
        for (block_type, block_id) in block_list:
            # Get the specified test item from the published branch.
            test_item = self._get_block_item(ModuleStoreEnum.Branch.published_only, block_type, block_id)
            # Unpublish the item from the published branch.
            self.store.unpublish(test_item.location, self.user_id)
        # The elemental operation is complete; shift to the post-operation export dir.
        self._shift_export_dir()

    def delete_item(self, block_list, revision):
        """
        Get each item, delete it with the given revision option, and shift to
        a new course export dir.
        """
        for (block_type, block_id) in block_list:
            # Get the specified test item from the draft branch.
            test_item = self._get_block_item(ModuleStoreEnum.Branch.draft_preferred, block_type, block_id)
            # Delete the item using the specified revision option.
            self.store.delete_item(test_item.location, self.user_id, revision=revision)
        # The elemental operation is complete; shift to the post-operation export dir.
        self._shift_export_dir()

    def convert_to_draft(self, block_list):
        """
        Get each item, convert it to draft, and shift to a new course export dir.
        """
        for (block_type, block_id) in block_list:
            # Get the specified test item from the published branch.
            test_item = self._get_block_item(ModuleStoreEnum.Branch.published_only, block_type, block_id)
            # Convert the published item to draft.
            self.store.convert_to_draft(test_item.location, self.user_id)
        # The elemental operation is complete; shift to the post-operation export dir.
        self._shift_export_dir()

    def revert_to_published(self, block_list):
        """
        Get each item, revert it to published, and shift to a new course export dir.
        """
        for (block_type, block_id) in block_list:
            # Get the specified test item from the draft branch.
            test_item = self._get_block_item(ModuleStoreEnum.Branch.draft_preferred, block_type, block_id)
            # Revert the draft item to its published version.
            self.store.revert_to_published(test_item.location, self.user_id)
        # The elemental operation is complete; shift to the post-operation export dir.
        self._shift_export_dir()
@ddt.ddt
class ElementalPublishingTests(DraftPublishedOpBaseTestSetup):
    """
    Tests for the publish() operation.
    """

    @staticmethod
    def _blocks(block_type, indexes):
        """
        Build a tuple of (block_type, block_id) pairs, where each block_id is
        the block_type followed by a zero-padded two-digit index.
        """
        return tuple((block_type, '%s%02d' % (block_type, index)) for index in indexes)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_autopublished_chapters_sequentials(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            # Creating a course from chapters/sequentials/verticals/units
            # auto-publishes the chapters/sequentials, while the
            # verticals/units remain drafts.  Verify that via the OLX.
            autopublished = (
                self._blocks('chapter', range(2)) +
                self._blocks('sequential', range(4))
            )
            drafts = self.all_verticals + self.all_units
            self.assertOLXIsPublishedOnly(autopublished)
            self.assertOLXIsDraftOnly(drafts)

    @ddt.data(DRAFT_MODULESTORE_SETUP, MongoModulestoreBuilder())
    def test_publish_old_mongo_unit(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            # MODULESTORE_DIFFERENCE:
            # Old Mongo allows publishing an item whose parent is unpublished.
            self.publish(self._blocks('html', (0,)))

    @ddt.data(SPLIT_MODULESTORE_SETUP)
    def test_publish_split_unit(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            # MODULESTORE_DIFFERENCE:
            # Split refuses to publish an item whose parents are unpublished:
            # it raises when the item's parent(s) are absent from the
            # published branch.
            with self.assertRaises(ItemNotFoundError):
                self.publish(self._blocks('html', (0,)))

    @ddt.data(*MODULESTORE_SETUPS)
    def test_publish_multiple_verticals(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            parents_to_publish = self._blocks('vertical', (3, 4))
            expected_published = parents_to_publish + self._blocks('html', range(6, 10))
            expected_untouched = (
                self._blocks('vertical', (0, 1, 2, 5, 6, 7)) +
                self._blocks('html', tuple(range(6)) + tuple(range(10, 16)))
            )
            # Both groups of verticals and children start out as drafts.
            self.assertOLXIsDraftOnly(expected_published)
            self.assertOLXIsDraftOnly(expected_untouched)
            # Publish both vertical03 and vertical04.
            self.publish(parents_to_publish)
            # The published verticals and their children are now published.
            self.assertOLXIsPublishedOnly(expected_published)
            # The remaining verticals and children are unaffected.
            self.assertOLXIsDraftOnly(expected_untouched)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_publish_single_sequential(self, modulestore_builder):
        """
        Sequentials are auto-published. But publishing them explictly publishes their children,
        changing the OLX of each sequential - the vertical children are in the sequential post-publish.
        """
        with self._setup_test(modulestore_builder):
            autopublished = self._blocks('sequential', (0,))
            descendants = (
                self._blocks('vertical', range(2)) +
                self._blocks('html', range(4))
            )
            # The autopublished sequential appears as published in the OLX.
            self.assertOLXIsPublishedOnly(autopublished)
            # Its verticals and their children are still drafts.
            self.assertOLXIsDraftOnly(descendants)
            # Explicitly publish the sequential.
            self.publish(autopublished)
            # The sequential remains published.
            self.assertOLXIsPublishedOnly(autopublished)
            # Its verticals and their children are now published too.
            self.assertOLXIsPublishedOnly(descendants)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_publish_single_chapter(self, modulestore_builder):
        """
        Chapters are auto-published.
        """
        with self._setup_test(modulestore_builder):
            autopublished = self._blocks('chapter', (0,))
            expected_published = (
                self._blocks('vertical', range(4)) +
                self._blocks('html', range(8))
            )
            expected_untouched = (
                self._blocks('vertical', range(4, 8)) +
                self._blocks('html', range(8, 16))
            )
            # The autopublished chapter appears as published in the OLX.
            self.assertOLXIsPublishedOnly(autopublished)
            # All verticals and their children are still drafts.
            self.assertOLXIsDraftOnly(expected_published)
            self.assertOLXIsDraftOnly(expected_untouched)
            # Explicitly publish the chapter.
            self.publish(autopublished)
            # The chapter remains published.
            self.assertOLXIsPublishedOnly(autopublished)
            # The chapter's descendants are now published.
            self.assertOLXIsPublishedOnly(expected_published)
            # Descendants of the other chapter are unaffected.
            self.assertOLXIsDraftOnly(expected_untouched)
@ddt.ddt
class ElementalUnpublishingTests(DraftPublishedOpBaseTestSetup):
    """
    Tests for the unpublish() operation.
    """

    @staticmethod
    def _blocks(block_type, indexes):
        """
        Build a tuple of (block_type, block_id) pairs, where each block_id is
        the block_type followed by a zero-padded two-digit index.
        """
        return tuple((block_type, '%s%02d' % (block_type, index)) for index in indexes)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_unpublish_draft_unit(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = self._blocks('html', (8,))
            # The unit starts out as a draft.
            self.assertOLXIsDraftOnly(targets)
            # With no published version, unpublishing raises.
            with self.assertRaises(ItemNotFoundError):
                self.unpublish(targets)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_unpublish_published_units(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = self._blocks('html', (8, 9))
            parent = self._blocks('vertical', (4,))
            # The units and their parent start out as drafts.
            self.assertOLXIsDraftOnly(targets)
            self.assertOLXIsDraftOnly(parent)
            # Publishing the *parent* of the units also publishes the units.
            self.publish(parent)
            self.assertOLXIsPublishedOnly(parent)
            self.assertOLXIsPublishedOnly(targets)
            # Unpublish the child units; they become drafts again.
            self.unpublish(targets)
            self.assertOLXIsDraftOnly(targets)
            # MODULESTORE_DIFFERENCE:
            if self.is_split_modulestore:
                # Split: the parent now has a draft *and* a published item.
                self.assertOLXIsDraftAndPublished(parent)
            elif self.is_old_mongo_modulestore:
                # Old Mongo: the parent remains published-only.
                self.assertOLXIsPublishedOnly(parent)
            else:
                raise Exception("Must test either Old Mongo or Split modulestore!")

    @ddt.data(*MODULESTORE_SETUPS)
    def test_unpublish_draft_vertical(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = self._blocks('vertical', (2,))
            # The vertical starts out as a draft.
            self.assertOLXIsDraftOnly(targets)
            # With no published version, unpublishing raises.
            with self.assertRaises(ItemNotFoundError):
                self.unpublish(targets)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_unpublish_published_vertical(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = self._blocks('vertical', (2,))
            children = self._blocks('html', (4, 5))
            untouched = (
                self._blocks('vertical', range(4, 8)) +
                self._blocks('html', range(8, 16))
            )
            # Initially nothing is published.
            self.assertOLXIsDraftOnly(targets)
            self.assertOLXIsDraftOnly(children)
            self.assertOLXIsDraftOnly(untouched)
            # Publish the vertical; it and its children become published.
            self.publish(targets)
            self.assertOLXIsPublishedOnly(targets)
            self.assertOLXIsPublishedOnly(children)
            self.assertOLXIsDraftOnly(untouched)
            # Unpublish the same vertical; it and its children revert to drafts.
            self.unpublish(targets)
            self.assertOLXIsDraftOnly(targets)
            self.assertOLXIsDraftOnly(children)
            self.assertOLXIsDraftOnly(untouched)

    @ddt.data(DRAFT_MODULESTORE_SETUP, MongoModulestoreBuilder())
    def test_unpublish_old_mongo_draft_sequential(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            # MODULESTORE_DIFFERENCE:
            # Old Mongo refuses to unpublish an autopublished sequential and
            # raises instead.
            targets = self._blocks('sequential', (3,))
            with self.assertRaises(InvalidVersionError):
                self.unpublish(targets)

    @ddt.data(SPLIT_MODULESTORE_SETUP)
    def test_unpublish_split_draft_sequential(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            # MODULESTORE_DIFFERENCE:
            # In Split, unpublishing an autopublished sequential deletes it.
            # Its children are orphaned but keep their prior draft state.
            targets = self._blocks('sequential', (3,))
            children = (
                self._blocks('vertical', (6, 7)) +
                self._blocks('html', range(12, 16))
            )
            # The autopublished sequential is published; its children are drafts.
            self.assertOLXIsPublishedOnly(targets)
            self.assertOLXIsDraftOnly(children)
            # Unpublish the sequential.
            self.unpublish(targets)
            # Since the sequential was autopublished, no draft version of it
            # ever existed - so unpublishing deletes it outright rather than
            # turning it into a draft.
            self.assertOLXIsDeleted(targets)
            # Its children are orphaned and remain drafts.
            self.assertOLXIsDraftOnly(children)
@ddt.ddt
class ElementalDeleteItemTests(DraftPublishedOpBaseTestSetup):
    """
    Tests for the delete_item() operation.
    Each test is parametrized (via ddt) with a modulestore builder plus a
    (revision, expected_result) pair, where expected_result names the OLX
    assertion method that must hold for the deleted blocks afterwards.
    """
    def _check_for_item_deletion(self, block_list, expected_result):
        """
        Based on the expected result, verify that OLX for the listed blocks is correct.
        expected_result is the name of an assertion method on this class,
        e.g. 'assertOLXIsDeleted' or 'assertOLXIsDraftOnly'.
        """
        assert_method = getattr(self, expected_result)
        assert_method(block_list)
    # Delete a draft unit under each revision option; the expected OLX state
    # afterwards depends on the revision and on the modulestore under test.
    @ddt.data(*itertools.product(
        MODULESTORE_SETUPS,
        (
            (ModuleStoreEnum.RevisionOption.published_only, 'assertOLXIsDraftOnly'),
            (ModuleStoreEnum.RevisionOption.all, 'assertOLXIsDeleted'),
            (None, 'assertOLXIsDeleted'),
        )
    ))
    @ddt.unpack
    def test_delete_draft_unit(self, modulestore_builder, revision_and_result):
        """
        Deleting a draft-only html unit: Old Mongo tolerates all revision
        options; Split raises ValueError for published-branch deletions.
        """
        with self._setup_test(modulestore_builder):
            block_list_to_delete = (
                ('html', 'html08'),
            )
            (revision, result) = revision_and_result
            # The unit is a draft.
            self.assertOLXIsDraftOnly(block_list_to_delete)
            # MODULESTORE_DIFFERENCE:
            if self.is_old_mongo_modulestore:
                # Old Mongo throws no exception when trying to delete an item from the published branch
                # that isn't yet published.
                self.delete_item(block_list_to_delete, revision=revision)
                self._check_for_item_deletion(block_list_to_delete, result)
            elif self.is_split_modulestore:
                if revision in (ModuleStoreEnum.RevisionOption.published_only, ModuleStoreEnum.RevisionOption.all):
                    # Split throws an exception when trying to delete an item from the published branch
                    # that isn't yet published.
                    with self.assertRaises(ValueError):
                        self.delete_item(block_list_to_delete, revision=revision)
                else:
                    self.delete_item(block_list_to_delete, revision=revision)
                    self._check_for_item_deletion(block_list_to_delete, result)
            else:
                raise Exception("Must test either Old Mongo or Split modulestore!")
    @ddt.data(*itertools.product(
        (DRAFT_MODULESTORE_SETUP, MongoModulestoreBuilder()),
        (
            # MODULESTORE_DIFFERENCE: This first line is different between old Mongo and Split for verticals.
            # Old Mongo deletes the draft vertical even when published_only is specified.
            (ModuleStoreEnum.RevisionOption.published_only, 'assertOLXIsDeleted'),
            (ModuleStoreEnum.RevisionOption.all, 'assertOLXIsDeleted'),
            (None, 'assertOLXIsDeleted'),
        )
    ))
    @ddt.unpack
    def test_old_mongo_delete_draft_vertical(self, modulestore_builder, revision_and_result):
        """
        Old Mongo only: deleting a draft vertical deletes it under every
        revision option; what happens to its draft children varies.
        """
        with self._setup_test(modulestore_builder):
            block_list_to_delete = (
                ('vertical', 'vertical03'),
            )
            block_list_children = (
                ('html', 'html06'),
                ('html', 'html07'),
            )
            (revision, result) = revision_and_result
            # The vertical is a draft.
            self.assertOLXIsDraftOnly(block_list_to_delete)
            # MODULESTORE_DIFFERENCE:
            # Old Mongo throws no exception when trying to delete an item from the published branch
            # that isn't yet published.
            self.delete_item(block_list_to_delete, revision=revision)
            self._check_for_item_deletion(block_list_to_delete, result)
            # MODULESTORE_DIFFERENCE:
            # Weirdly, this is a difference between old Mongo -and- old Mongo wrapped with a mixed modulestore.
            # When the code attempts and fails to delete the draft vertical using the published_only revision,
            # the draft children are still around in one case and not in the other? Needs investigation.
            # pylint: disable=bad-continuation
            if (
                isinstance(modulestore_builder, MongoModulestoreBuilder) and
                revision == ModuleStoreEnum.RevisionOption.published_only
            ):
                self.assertOLXIsDraftOnly(block_list_children)
            else:
                self.assertOLXIsDeleted(block_list_children)
    @ddt.data(*itertools.product(
        (SPLIT_MODULESTORE_SETUP,),
        (
            # MODULESTORE_DIFFERENCE: This first line is different between old Mongo and Split for verticals.
            # Split does not delete the draft vertical when a published_only revision is specified.
            (ModuleStoreEnum.RevisionOption.published_only, 'assertOLXIsDraftOnly'),
            (ModuleStoreEnum.RevisionOption.all, 'assertOLXIsDeleted'),
            (None, 'assertOLXIsDeleted'),
        )
    ))
    @ddt.unpack
    def test_split_delete_draft_vertical(self, modulestore_builder, revision_and_result):
        """
        Split only: deleting a draft vertical raises for published-branch
        revisions; otherwise it deletes the vertical and its children.
        """
        with self._setup_test(modulestore_builder):
            block_list_to_delete = (
                ('vertical', 'vertical03'),
            )
            block_list_children = (
                ('html', 'html06'),
                ('html', 'html07'),
            )
            (revision, result) = revision_and_result
            # The vertical is a draft.
            self.assertOLXIsDraftOnly(block_list_to_delete)
            if revision in (ModuleStoreEnum.RevisionOption.published_only, ModuleStoreEnum.RevisionOption.all):
                # MODULESTORE_DIFFERENCE:
                # Split throws an exception when trying to delete an item from the published branch
                # that isn't yet published.
                with self.assertRaises(ValueError):
                    self.delete_item(block_list_to_delete, revision=revision)
            else:
                self.delete_item(block_list_to_delete, revision=revision)
                self._check_for_item_deletion(block_list_to_delete, result)
                self.assertOLXIsDeleted(block_list_children)
    @ddt.data(*itertools.product(
        MODULESTORE_SETUPS,
        (
            (ModuleStoreEnum.RevisionOption.published_only, 'assertOLXIsDeleted'),
            (ModuleStoreEnum.RevisionOption.all, 'assertOLXIsDeleted'),
            (None, 'assertOLXIsDeleted'),
        )
    ))
    @ddt.unpack
    def test_delete_sequential(self, modulestore_builder, revision_and_result):
        """
        Deleting an autopublished sequential deletes it under every revision
        option; its draft children may survive in Split for published_only.
        """
        with self._setup_test(modulestore_builder):
            block_list_to_delete = (
                ('sequential', 'sequential03'),
            )
            block_list_children = (
                ('vertical', 'vertical06'),
                ('vertical', 'vertical07'),
                ('html', 'html12'),
                ('html', 'html13'),
                ('html', 'html14'),
                ('html', 'html15'),
            )
            (revision, result) = revision_and_result
            # Sequentials are auto-published.
            self.assertOLXIsPublishedOnly(block_list_to_delete)
            self.delete_item(block_list_to_delete, revision=revision)
            self._check_for_item_deletion(block_list_to_delete, result)
            # MODULESTORE_DIFFERENCE
            if self.is_split_modulestore:
                # Split:
                if revision == ModuleStoreEnum.RevisionOption.published_only:
                    # If deleting published_only items, the children that are drafts remain.
                    self.assertOLXIsDraftOnly(block_list_children)
                else:
                    self.assertOLXIsDeleted(block_list_children)
            elif self.is_old_mongo_modulestore:
                # Old Mongo:
                # If deleting draft_only or both items, the drafts will be deleted.
                self.assertOLXIsDeleted(block_list_children)
            else:
                raise Exception("Must test either Old Mongo or Split modulestore!")
    @ddt.data(*itertools.product(
        MODULESTORE_SETUPS,
        (
            (ModuleStoreEnum.RevisionOption.published_only, 'assertOLXIsDeleted'),
            (ModuleStoreEnum.RevisionOption.all, 'assertOLXIsDeleted'),
            (None, 'assertOLXIsDeleted'),
        )
    ))
    @ddt.unpack
    def test_delete_chapter(self, modulestore_builder, revision_and_result):
        """
        Deleting an autopublished chapter deletes it and its autopublished
        sequentials; the fate of the draft descendants varies by modulestore.
        """
        with self._setup_test(modulestore_builder):
            block_list_to_delete = (
                ('chapter', 'chapter01'),
            )
            autopublished_children = (
                ('sequential', 'sequential02'),
                ('sequential', 'sequential03'),
            )
            block_list_draft_children = (
                ('vertical', 'vertical04'),
                ('vertical', 'vertical05'),
                ('vertical', 'vertical06'),
                ('vertical', 'vertical07'),
                ('html', 'html08'),
                ('html', 'html09'),
                ('html', 'html10'),
                ('html', 'html11'),
                ('html', 'html12'),
                ('html', 'html13'),
                ('html', 'html14'),
                ('html', 'html15'),
            )
            (revision, result) = revision_and_result
            # Chapters are auto-published.
            self.assertOLXIsPublishedOnly(block_list_to_delete)
            self.delete_item(block_list_to_delete, revision=revision)
            self._check_for_item_deletion(block_list_to_delete, result)
            self.assertOLXIsDeleted(autopublished_children)
            # MODULESTORE_DIFFERENCE
            if self.is_split_modulestore:
                # Split:
                if revision == ModuleStoreEnum.RevisionOption.published_only:
                    # If deleting published_only items, the children that are drafts remain.
                    self.assertOLXIsDraftOnly(block_list_draft_children)
                else:
                    self.assertOLXIsDeleted(block_list_draft_children)
            elif self.is_old_mongo_modulestore:
                # Old Mongo:
                # If deleting draft_only or both items, the drafts will be deleted.
                self.assertOLXIsDeleted(block_list_draft_children)
            else:
                raise Exception("Must test either Old Mongo or Split modulestore!")
@ddt.ddt
class ElementalConvertToDraftTests(DraftPublishedOpBaseTestSetup):
    """
    Tests for the convert_to_draft() operation.
    """

    @ddt.data(*MODULESTORE_SETUPS)
    def test_convert_to_draft_published_vertical(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = (('vertical', 'vertical02'),)
            # The vertical starts out unpublished.
            self.assertOLXIsDraftOnly(targets)
            # Publish it.
            self.publish(targets)
            self.assertOLXIsPublishedOnly(targets)
            # Convert the published vertical back to draft.
            self.convert_to_draft(targets)
            # MODULESTORE_DIFFERENCE:
            if self.is_split_modulestore:
                # Split: a no-op, since Split always maintains a draft version.
                self.assertOLXIsPublishedOnly(targets)
            elif self.is_old_mongo_modulestore:
                # Old Mongo: both a draft and a published block now exist.
                self.assertOLXIsDraftAndPublished(targets)
            else:
                raise Exception("Must test either Old Mongo or Split modulestore!")

    @ddt.data(*MODULESTORE_SETUPS)
    def test_convert_to_draft_autopublished_sequential(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = (('sequential', 'sequential03'),)
            # Sequentials are auto-published.
            self.assertOLXIsPublishedOnly(targets)
            # MODULESTORE_DIFFERENCE:
            if self.is_split_modulestore:
                # Split: converting to draft is a no-op, since Split always
                # maintains a draft version.
                self.convert_to_draft(targets)
                self.assertOLXIsPublishedOnly(targets)
            elif self.is_old_mongo_modulestore:
                # Old Mongo: direct-only categories are never allowed to be
                # converted to draft.
                with self.assertRaises(InvalidVersionError):
                    self.convert_to_draft(targets)
            else:
                raise Exception("Must test either Old Mongo or Split modulestore!")
@ddt.ddt
class ElementalRevertToPublishedTests(DraftPublishedOpBaseTestSetup):
    """
    Tests for the revert_to_published() operation.
    """

    @ddt.data(*MODULESTORE_SETUPS)
    def test_revert_to_published_unpublished_vertical(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = (('vertical', 'vertical02'),)
            # The vertical starts out unpublished.
            self.assertOLXIsDraftOnly(targets)
            # Reverting without ever publishing raises, since no published
            # version exists.
            with self.assertRaises(InvalidVersionError):
                self.revert_to_published(targets)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_revert_to_published_published_vertical(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = (('vertical', 'vertical02'),)
            # The vertical starts out unpublished.
            self.assertOLXIsDraftOnly(targets)
            # Publish it.
            self.publish(targets)
            self.assertOLXIsPublishedOnly(targets)
            # Reverting now is effectively a no-op - there is no draft
            # version to discard.
            self.revert_to_published(targets)
            self.assertOLXIsPublishedOnly(targets)

    @ddt.data(*MODULESTORE_SETUPS)
    def test_revert_to_published_vertical(self, modulestore_builder):
        with self._setup_test(modulestore_builder):
            targets = (('vertical', 'vertical02'),)
            # The vertical starts out unpublished.
            self.assertOLXIsDraftOnly(targets)
            # Publish it.
            self.publish(targets)
            self.assertOLXIsPublishedOnly(targets)
            # Modify the draft version of the item and save it.
            with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
                item = self.store.get_item(
                    self.course.id.make_usage_key(block_type='vertical', block_id='vertical02')
                )
                item.display_name = 'SNAFU'
                self.store.update_item(item, self.user_id)
            self.export_dir = self._make_new_export_dir_name()
            # The vertical now has both a draft and a published version.
            self.assertOLXIsDraftAndPublished(targets)
            # Reverting discards the draft version.
            self.revert_to_published(targets)
            self.assertOLXIsPublishedOnly(targets)
| agpl-3.0 |
Yoplitein/gadget | commands/frequencies.py | 1 | 1770 | #!/usr/bin/env python
import sys
import random
# Frequency unit suffixes, appended at random to bare numbers.
units = [
    "Hz",
    "kHz",
    "MHz",
    "GHz",
    "THz",
]


def randrange(lo, hi):
    """Return a random integer N such that lo <= N <= hi (both inclusive).

    Thin wrapper over random.randrange with an inclusive upper bound.
    Defined with ``def`` instead of an assigned lambda (PEP 8 E731), and
    without shadowing the ``min``/``max`` builtins.
    """
    return random.randrange(lo, hi + 1)
def format_frequencies(string):
    """Fill every '%d' placeholder in *string* with a random digit.

    Zeros are stripped from both ends of the substituted string, and a
    random unit from ``units`` is appended unless the string already ends
    in some spelling of "hz".
    """
    placeholders = string.count("%d")
    digits = tuple(randrange(0, 9) for _ in range(placeholders))
    string = (string % digits).strip("0")
    if not string.lower().endswith("hz"):
        string = "%s %s" % (string, random.choice(units))
    return string
def generate_frequencies(count):
    """Return a list of *count* randomly generated frequency strings.

    Each frequency template has 1-6 random digits before the decimal point
    and 1-4 after it, then gets rendered by format_frequencies().
    """
    results = []
    for _ in range(count):
        int_digits = randrange(1, 6)
        frac_digits = randrange(1, 4)
        template = "%d" * int_digits + "." + "%d" * frac_digits
        results.append(format_frequencies(template))
    return results
def main():
    """Command-line entry point.

    Usage patterns:
      * no args: print 5 random frequencies
      * <template containing %d>: render the template 3 times
      * <template containing %d> <count>: render the template <count> times
      * <count>: print <count> random frequencies
    """
    # NOTE: uses print() calls (single argument) so the script parses and
    # behaves identically under both Python 2 and Python 3.
    args = sys.argv[1:]
    if len(args) > 0:
        if "%d" in args[0]:
            # A template was supplied; an optional second arg is the count.
            if len(args) > 1:
                try:
                    count = int(args[1])
                    for _ in range(count):
                        print(format_frequencies(args[0]))
                except ValueError:
                    print("%s isn't a number, ya dingus" % (args[1],))
            else:
                for _ in range(3):
                    print(format_frequencies(args[0]))
        else:
            try:
                count = int(args[0])
                print("\n".join(generate_frequencies(count)))
            except ValueError:
                print("%s yourself" % (args[0],))
    else:
        print("\n".join(generate_frequencies(5)))


if __name__ == '__main__':
    main()
| bsd-2-clause |
yuma-m/pychord | test/test_quality.py | 1 | 2253 | # -*- coding: utf-8 -*-
import unittest
from pychord import QualityManager, Chord, note_to_chord
class TestQuality(unittest.TestCase):
    """Equality semantics of Quality objects and their aliases."""

    def setUp(self):
        self.quality_manager = QualityManager()

    def _quality(self, name):
        """Shortcut: look up a quality by name."""
        return self.quality_manager.get_quality(name)

    def test_eq(self):
        self.assertEqual(self._quality("m7-5"), self._quality("m7-5"))

    def test_eq_alias_maj9(self):
        self.assertEqual(self._quality("M9"), self._quality("maj9"))

    def test_eq_alias_m7b5(self):
        self.assertEqual(self._quality("m7-5"), self._quality("m7b5"))

    def test_eq_alias_min(self):
        minor = self._quality("m")
        self.assertEqual(minor, self._quality("min"))
        self.assertEqual(minor, self._quality("-"))

    def test_invalid_eq(self):
        # Comparing a quality against a non-quality must raise TypeError.
        quality = self._quality("m7")
        with self.assertRaises(TypeError):
            print(quality == 0)
class TestQualityManager(unittest.TestCase):
    """QualityManager must behave as a singleton."""

    def test_singleton(self):
        first = QualityManager()
        second = QualityManager()
        self.assertIs(first, second)
class TestOverwriteQuality(unittest.TestCase):
    """Behavior of QualityManager.set_quality() overrides."""

    def setUp(self):
        self.quality_manager = QualityManager()

    def tearDown(self):
        # Restore the built-in quality table so other tests are unaffected.
        self.quality_manager.load_default_qualities()

    def test_overwrite(self):
        # Overriding the "11" quality changes how new chords are spelled.
        self.quality_manager.set_quality("11", (0, 4, 7, 10, 14, 17))
        rebuilt = Chord("C11")
        self.assertEqual(rebuilt.components(), ['C', 'E', 'G', 'Bb', 'D', 'F'])

    def test_find_from_components(self):
        # A newly registered quality is found when matching note lists.
        self.quality_manager.set_quality("13", (0, 4, 7, 10, 14, 17, 21))
        matches = note_to_chord(['C', 'E', 'G', 'Bb', 'D', 'F', 'A'])
        self.assertEqual(matches, [Chord("C13")])

    def test_keep_existing_chord(self):
        # A chord created before the override keeps its original spelling.
        existing = Chord("C11")
        self.quality_manager.set_quality("11", (0, 4, 7, 10, 14, 17))
        self.assertEqual(existing.components(), ['C', 'G', 'Bb', 'D', 'F'])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| mit |
ricardogsilva/QGIS | tests/src/python/test_provider_gdal.py | 13 | 4902 | # -*- coding: utf-8 -*-
"""Generic Unit tests for the GDAL provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2018-30-10'
__copyright__ = 'Copyright 2018, Nyall Dawson'
import os
from qgis.core import (
QgsProviderRegistry,
QgsDataProvider,
QgsRasterLayer,
QgsRectangle,
)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtGui import qRed
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class PyQgsGdalProvider(unittest.TestCase):
    """Generic unit tests for the GDAL raster data provider."""
    def checkBlockContents(self, block, expected):
        """Assert that *block*'s values, read row by row, equal *expected*."""
        res = []
        for r in range(block.height()):
            res.extend([block.value(r, c) for c in range(block.width())])
        self.assertEqual(res, expected)
    def testCapabilities(self):
        """The gdal provider advertises file, directory and network support."""
        self.assertTrue(QgsProviderRegistry.instance().providerCapabilities("gdal") & QgsDataProvider.File)
        self.assertTrue(QgsProviderRegistry.instance().providerCapabilities("gdal") & QgsDataProvider.Dir)
        self.assertTrue(QgsProviderRegistry.instance().providerCapabilities("gdal") & QgsDataProvider.Net)
    def testRasterBlock(self):
        """Test raster block with extent"""
        path = os.path.join(unitTestDataPath(), 'landsat_4326.tif')
        raster_layer = QgsRasterLayer(path, 'test')
        self.assertTrue(raster_layer.isValid())
        # Read a 2x3 block from a small sub-extent of the raster.
        extent = QgsRectangle(17.94284482577178252, 30.23021770271909503, 17.94407867909909626, 30.23154272264058307)
        block = raster_layer.dataProvider().block(1, extent, 2, 3)
        self.checkBlockContents(block, [
            125.0, 125.0,
            125.0, 125.0,
            125.0, 124.0,
        ])
        # Expected pixel values for the full raster, read at 3x4 resolution.
        full_content = [
            125.0, 125.0, 125.0,
            125.0, 125.0, 125.0,
            125.0, 124.0, 125.0,
            126.0, 127.0, 127.0,
        ]
        # Reading the layer's full extent yields the whole raster.
        extent = raster_layer.extent()
        block = raster_layer.dataProvider().block(1, extent, 3, 4)
        self.checkBlockContents(block, full_content)
        # A slightly shrunk extent still returns the same 3x4 content.
        extent = raster_layer.extent()
        extent.grow(-0.0001)
        block = raster_layer.dataProvider().block(1, extent, 3, 4)
        self.checkBlockContents(block, full_content)
        # Read the raster one row at a time and compare against full_content.
        row_height = raster_layer.extent().height() / raster_layer.height()
        for row in range(raster_layer.height()):
            extent = raster_layer.extent()
            extent.setYMaximum(extent.yMaximum() - row_height * row)
            extent.setYMinimum(extent.yMaximum() - row_height)
            block = raster_layer.dataProvider().block(1, extent, 3, 1)
            self.checkBlockContents(block, full_content[row * 3:row * 3 + 3])
    def testDecodeEncodeUriGpkg(self):
        """Test decodeUri/encodeUri geopackage support"""
        uri = '/my/raster.gpkg'
        parts = QgsProviderRegistry.instance().decodeUri('gdal', uri)
        self.assertEqual(parts, {'path': '/my/raster.gpkg', 'layerName': None})
        encodedUri = QgsProviderRegistry.instance().encodeUri('gdal', parts)
        self.assertEqual(encodedUri, uri)
        # A GPKG: prefix without a layer name round-trips to the bare path.
        uri = 'GPKG:/my/raster.gpkg'
        parts = QgsProviderRegistry.instance().decodeUri('gdal', uri)
        self.assertEqual(parts, {'path': '/my/raster.gpkg', 'layerName': None})
        encodedUri = QgsProviderRegistry.instance().encodeUri('gdal', parts)
        self.assertEqual(encodedUri, '/my/raster.gpkg')
        # A GPKG: prefix with a layer name is preserved on re-encode.
        uri = 'GPKG:/my/raster.gpkg:mylayer'
        parts = QgsProviderRegistry.instance().decodeUri('gdal', uri)
        self.assertEqual(parts, {'path': '/my/raster.gpkg', 'layerName': 'mylayer'})
        encodedUri = QgsProviderRegistry.instance().encodeUri('gdal', parts)
        self.assertEqual(encodedUri, uri)
    def testDecodeEncodeUriOptions(self):
        """Test decodeUri/encodeUri options support"""
        uri = '/my/raster.pdf|option:DPI=300|option:GIVEME=TWO'
        parts = QgsProviderRegistry.instance().decodeUri('gdal', uri)
        self.assertEqual(parts, {'path': '/my/raster.pdf', 'layerName': None, 'openOptions': ['DPI=300', 'GIVEME=TWO']})
        encodedUri = QgsProviderRegistry.instance().encodeUri('gdal', parts)
        self.assertEqual(encodedUri, uri)
    def testDecodeEncodeUriVsizip(self):
        """Test decodeUri/encodeUri for /vsizip/ prefixed URIs"""
        uri = '/vsizip//my/file.zip/image.tif'
        parts = QgsProviderRegistry.instance().decodeUri('gdal', uri)
        self.assertEqual(parts, {'path': '/my/file.zip', 'layerName': None, 'vsiPrefix': '/vsizip/', 'vsiSuffix': '/image.tif'})
        encodedUri = QgsProviderRegistry.instance().encodeUri('gdal', parts)
        self.assertEqual(encodedUri, uri)
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
h3/django-project-template | fabfile.py | 1 | 4374 | import os
from fabric.api import *
from fabric.contrib import files
from dukeclient.fabric.utils import get_role, get_conf, get_project_path
from dukeclient.fabric.tasks import *
# Absolute path of the directory containing this fabfile.
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Map fabric role names to their "user@host:port" SSH targets.
env.roledefs.update({
    'example': ['user@beta.myproject.com:2022'],
})
# Project identity; these keys feed the %(...)s substitutions used by the
# role configurations (see the commented examples in this file).
env.site = {
    'domain': 'example.com',
    'package': 'example-project',
    'project': 'example',
    'repos': 'git+git@gitserver.com:user/example-project.git',
}
# Per-role deployment settings, keyed by role name; empty in this template.
env.roleconfs = {}
# Example config using UWSGI with Supervisor behind Nginx
"""
env.roleconfs['example'] = {
'hosts': env.roledefs['example'],
'user': 'www-data',
'group': 'www-data',
'document-root': '/var/www/vhosts/%(domain)s/',
'media-root': '/var/www/vhosts/%(domain)s/media',
'static-root': '/var/www/vhosts/%(domain)s/static',
'nginx-conf': '/etc/nginx/sites-available/%(domain)s',
# Unless you are not using dajaxice, you want to use static-copy.
# https://github.com/jorgebastida/django-dajaxice/issues/66
'static-copy': True,
'virtualenv': True,
'virtualenv-root': '/var/www/vhosts/%(domain)s/virtualenv/%(project)s',
'on-deploy': [
'mkdir -p /var/www/vhosts/%(domain)s/media/',
],
'on-deploy-done': [
'ln -sf /etc/nginx/sites-available/%(domain)s /etc/nginx/sites-enabled/%(domain)s',
'ln -sf /var/www/vhosts/%(domain)s/%(package)s/deploy/prod.%(domain)s.ini /etc/uwsgi/apps-enabled/%(domain)s.ini',
'ln -sf /var/www/vhosts/%(domain)s/%(package)s/deploy/prod_supervisord.conf /etc/supervisor/conf.d/clients.%(domain)s.conf',
'touch /etc/uwsgi/apps-enabled/%(domain)s.ini',
'/etc/init.d/nginx reload',
'supervisorctl reread && supervisorctl restart %(package)s',
],
}
"""
# Example config for Plesk
"""
env.roleconfs['beta'] = {
'hosts': env.roledefs['beta'],
'user': 'ncXXXXX',
'group': 'psaserv',
'document-root': '/var/www/vhosts/%(domain)s/httpdocs/',
'media-root': '/var/www/vhosts/%(domain)s/httpdocs/media/',
'static-root': '/var/www/vhosts/%(domain)s/httpdocs/static/',
'vhost-conf': '/var/www/vhosts/%(domain)s/conf/vhost.conf',
'wsgi-processes': 1,
'wsgi-threads': 5,
'virtualenv': True,
'static-copy': True,
'on-apache-reload': [
'/usr/local/psa/admin/sbin/websrvmng --reconfigure-vhost --vhost-name=%(domain)s',
],
'on-deploy-done': [
'ln -sf /var/www/vhosts/%(domain)s/httpdocs/%(domain)s/%(project)s/media /var/www/vhosts/%(domain)s/httpdocs/media',
],
}
"""
# Example config for CPanel. But seriously, don't host any django on CPanel. It
# will ruin your life and leave you with PTSD. Seriously. Don't.
"""
env.roleconfs['alpha'] = {
'hosts': env.roledefs['alpha'],
'user': 'ncXXXXX',
'group': 'ncXXXXX',
'document-root': '/home/ncXXXXX/public_html/',
'media-root': '/home/ncXXXXX/public_html/media/',
'static-root': '/home/ncXXXXX/public_html/static/',
# You will have to do some preparation on the server, like activating
# dynamic virtual hosts. Good luck with that.
'vhost-conf': '/usr/local/apache/conf/userdata/std/2/ncXXXXX/%(domain)s/vhost.conf',
'virtualenv': True,
'static-copy': True,
'on-deploy': [
# CPHulk will blacklist your user systematically after a certain amount
# of sudo, regardless if your script disconnect/reconnect or not. Fail.
'/usr/local/cpanel/bin/cphulk_pam_ctl --disable',
# One have to respect the conciseness and implacable logic of
# CPanel's choice of directory structure..
'mkdir -p /usr/local/apache/conf/userdata/std/2/ncXXXXX/%(domain)s',
],
'on-deploy-done': [
'sudo /scripts/verify_vhost_includes',
'sudo /scripts/ensure_vhost_includes --user=ncXXXXX',
'ln -sf /home/ncXXXXX/public_html/%(domain)s/%(project)s/media /home/ncXXXXX/public_html/media',
# Here we re-enable our friendly fire prone buddy (CPHulk)
'/usr/local/cpanel/bin/cphulk_pam_ctl --enable',
# The Frontpage extension will fuck randomly with your .htaccess every
# now and then.. the only reliable workaround I've found is to
# unsetup/setup it.
'/scripts/unsetupfp4 %(domain)s && /scripts/setupfp4 %(domain)s',
],
'wsgi-processes': 1,
'wsgi-threads': 5,
}
"""
| mit |
gautam1858/tensorflow | tensorflow/contrib/model_pruning/__init__.py | 27 | 2666 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model pruning implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.model_pruning.python.layers.layers import masked_conv2d
from tensorflow.contrib.model_pruning.python.layers.layers import masked_convolution
from tensorflow.contrib.model_pruning.python.layers.layers import masked_fully_connected
from tensorflow.contrib.model_pruning.python.layers.rnn_cells import MaskedBasicLSTMCell
from tensorflow.contrib.model_pruning.python.layers.rnn_cells import MaskedLSTMCell
from tensorflow.contrib.model_pruning.python.learning import train
from tensorflow.contrib.model_pruning.python.pruning import apply_mask
from tensorflow.contrib.model_pruning.python.pruning import get_masked_weights
from tensorflow.contrib.model_pruning.python.pruning import get_masks
from tensorflow.contrib.model_pruning.python.pruning import get_pruning_hparams
from tensorflow.contrib.model_pruning.python.pruning import get_thresholds
from tensorflow.contrib.model_pruning.python.pruning import get_weight_sparsity
from tensorflow.contrib.model_pruning.python.pruning import get_weights
from tensorflow.contrib.model_pruning.python.pruning import Pruning
from tensorflow.contrib.model_pruning.python.strip_pruning_vars_lib import graph_def_from_checkpoint
from tensorflow.contrib.model_pruning.python.strip_pruning_vars_lib import strip_pruning_vars_fn
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
# Names re-exported as the public API of this package; everything else
# imported above is stripped from the module by remove_undocumented().
_allowed_symbols = [
    'masked_convolution', 'masked_conv2d', 'masked_fully_connected',
    'MaskedBasicLSTMCell', 'MaskedLSTMCell', 'train', 'apply_mask',
    'get_masked_weights', 'get_masks', 'get_pruning_hparams', 'get_thresholds',
    'get_weights', 'get_weight_sparsity', 'Pruning', 'strip_pruning_vars_fn',
    'graph_def_from_checkpoint'
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
Jonekee/chromium.src | build/android/gyp/write_ordered_libraries.py | 36 | 4035 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes dependency ordered list of native libraries.
The list excludes any Android system libraries, as those are not bundled with
the APK.
This list of libraries is used for several steps of building an APK.
In the component build, the --input-libraries only needs to be the top-level
library (i.e. libcontent_shell_content_view). This will then use readelf to
inspect the shared libraries and determine the full list of (non-system)
libraries that should be included in the APK.
"""
# TODO(cjhopman): See if we can expose the list of library dependencies from
# gyp, rather than calculating it ourselves.
# http://crbug.com/225558
import optparse
import os
import re
import sys
from util import build_utils
_readelf = None
_library_dirs = None
_library_re = re.compile(
'.*NEEDED.*Shared library: \[(?P<library_name>[\w/.]+)\]')
def SetReadelfPath(path):
  """Sets the path of the readelf binary used by CallReadElf()."""
  global _readelf
  _readelf = path
def SetLibraryDirs(dirs):
  """Sets the list of directories searched by FullLibraryPath()."""
  global _library_dirs
  _library_dirs = dirs
def FullLibraryPath(library_name):
  """Returns the first on-disk path for |library_name| within the configured
  library directories, or the bare name if it is found in none of them."""
  assert _library_dirs is not None
  candidates = ('%s/%s' % (directory, library_name)
                for directory in _library_dirs)
  for candidate in candidates:
    if os.path.exists(candidate):
      return candidate
  return library_name
def IsSystemLibrary(library_name):
  """Returns True if |library_name| should be treated as a system library."""
  # If the library doesn't exist in the libraries directory, assume that it is
  # an Android system library.
  return not os.path.exists(FullLibraryPath(library_name))
def CallReadElf(library_or_executable):
  """Runs `readelf -d` on the given file and returns its stdout as a string.

  Requires SetReadelfPath() to have been called first.
  """
  assert _readelf is not None
  readelf_cmd = [_readelf,
                 '-d',
                 FullLibraryPath(library_or_executable)]
  return build_utils.CheckOutput(readelf_cmd)
def GetDependencies(library_or_executable):
  """Returns the set of shared-library names listed in the file's
  (NEEDED) dynamic-section entries."""
  return set(_library_re.findall(CallReadElf(library_or_executable)))
def GetNonSystemDependencies(library_name):
  """Returns the direct non-system dependencies of |library_name|."""
  direct_deps = GetDependencies(FullLibraryPath(library_name))
  return {lib for lib in direct_deps if not IsSystemLibrary(lib)}
def GetSortedTransitiveDependencies(libraries):
  """Returns all transitive library dependencies in dependency order."""
  # The graph expansion and topological sort live in build_utils; this module
  # only supplies the per-node dependency function.
  return build_utils.GetSortedTransitiveDependencies(
      libraries, GetNonSystemDependencies)
def GetSortedTransitiveDependenciesForBinaries(binaries):
  """Returns the dependency-ordered non-system libraries for |binaries|.

  |binaries| is either a list of shared libraries, or a single executable
  (component build vs. static build entry points).
  """
  if binaries[0].endswith('.so'):
    # Shared libraries: strip directories, keep just the base names.
    libraries = [os.path.basename(lib) for lib in binaries]
  else:
    # A single executable: seed with its direct non-system dependencies.
    assert len(binaries) == 1
    libraries = [lib for lib in GetDependencies(binaries[0])
                 if not IsSystemLibrary(lib)]
  return GetSortedTransitiveDependencies(libraries)
def main():
  """Parses options, computes the ordered library list and writes outputs.

  Writes a JSON file with the ordered libraries plus the derived Java
  libraries list, optionally touches a stamp file and writes a depfile.
  """
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)
  parser.add_option('--input-libraries',
                    help='A list of top-level input libraries.')
  parser.add_option('--libraries-dir',
                    help='The directory which contains shared libraries.')
  parser.add_option('--readelf', help='Path to the readelf binary.')
  parser.add_option('--output', help='Path to the generated .json file.')
  parser.add_option('--stamp', help='Path to touch on success.')
  options, _ = parser.parse_args()
  SetReadelfPath(options.readelf)
  SetLibraryDirs(options.libraries_dir.split(','))
  libraries = build_utils.ParseGypList(options.input_libraries)
  if libraries:
    libraries = GetSortedTransitiveDependenciesForBinaries(libraries)
  # Convert to "base" library names: e.g. libfoo.so -> foo
  java_libraries_list = (
      '{%s}' % ','.join(['"%s"' % s[3:-3] for s in libraries]))
  build_utils.WriteJson(
      {'libraries': libraries, 'java_libraries_list': java_libraries_list},
      options.output,
      only_if_changed=True)
  if options.stamp:
    build_utils.Touch(options.stamp)
  if options.depfile:
    # Note: removed a stray debug `print libraries` that polluted the
    # build step's stdout before the depfile was written.
    build_utils.WriteDepfile(
        options.depfile,
        libraries + build_utils.GetPythonDependencies())
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
pradyu1993/scikit-learn | examples/neighbors/plot_classification.py | 7 | 1724 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
# Echo the example's module docstring when run (Python 2 print statement).
print __doc__
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
# Number of neighbors used by the k-NN classifier.
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh
# Create color maps: light shades for decision regions, bold for samples.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    pl.figure()
    pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    pl.title("3-Class classification (k = %i, weights = '%s')"
             % (n_neighbors, weights))
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
dan1/horizon-proto | openstack_dashboard/dashboards/project/routers/ports/forms.py | 35 | 8126 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class AddInterface(forms.SelfHandlingForm):
    """Form that attaches a new interface (port) to a Neutron router."""
    subnet_id = forms.ChoiceField(label=_("Subnet"))
    ip_address = forms.IPField(
        label=_("IP Address (optional)"), required=False, initial="",
        help_text=_("Specify an IP address for the interface "
                    "created (e.g. 192.168.0.254)."),
        version=forms.IPv4 | forms.IPv6, mask=False)
    # Router name/ID are display-only; the calling view pre-populates them.
    router_name = forms.CharField(label=_("Router Name"),
                                  widget=forms.TextInput(
                                  attrs={'readonly': 'readonly'}))
    router_id = forms.CharField(label=_("Router ID"),
                                widget=forms.TextInput(
                                attrs={'readonly': 'readonly'}))
    failure_url = 'horizon:project:routers:detail'
    def __init__(self, request, *args, **kwargs):
        super(AddInterface, self).__init__(request, *args, **kwargs)
        c = self.populate_subnet_id_choices(request)
        self.fields['subnet_id'].choices = c
    def populate_subnet_id_choices(self, request):
        """Build the subnet choices: the tenant's subnets minus any subnet
        already attached to this router."""
        tenant_id = self.request.user.tenant_id
        networks = []
        router_subnet_ids = []
        router_id = request.REQUEST.get('router_id',
                                        self.initial.get('router_id'))
        try:
            networks = api.neutron.network_list_for_tenant(request, tenant_id)
            if router_id:
                # Subnets already wired to the router, via its ports.
                ports = api.neutron.port_list(request, device_id=router_id)
                router_subnet_ids = [fixed_ip["subnet_id"] for port in ports
                                     for fixed_ip in port.fixed_ips]
        except Exception as e:
            msg = _('Failed to get network list %s') % e
            LOG.info(msg)
            messages.error(request, msg)
            if router_id:
                redirect = reverse(self.failure_url, args=[router_id])
            else:
                redirect = reverse('horizon:project:routers:index')
            # NOTE(review): exceptions.handle() with a redirect is expected
            # to raise, so the bare `return` below is normally unreachable.
            exceptions.handle(request, msg, redirect=redirect)
            return
        choices = []
        for n in networks:
            net_name = n.name + ': ' if n.name else ''
            choices += [(subnet.id,
                         '%s%s (%s)' % (net_name, subnet.cidr,
                                        subnet.name or subnet.id))
                        for subnet in n['subnets']
                        if subnet.id not in router_subnet_ids]
        if choices:
            choices.insert(0, ("", _("Select Subnet")))
        else:
            choices.insert(0, ("", _("No subnets available")))
        return choices
    def handle(self, request, data):
        """Create the interface.  With an explicit IP a port is created
        first; otherwise Neutron picks an address on the chosen subnet."""
        if data['ip_address']:
            port = self._add_interface_by_port(request, data)
        else:
            port = self._add_interface_by_subnet(request, data)
        msg = _('Interface added')
        if port:
            msg += ' ' + port.fixed_ips[0]['ip_address']
        LOG.debug(msg)
        messages.success(request, msg)
        return True
    def _add_interface_by_subnet(self, request, data):
        """Attach the router to the subnet; return the new port or None."""
        router_id = data['router_id']
        try:
            router_inf = api.neutron.router_add_interface(
                request, router_id, subnet_id=data['subnet_id'])
        except Exception as e:
            # _handle_error() is expected to raise (redirect); otherwise
            # router_inf would be unbound below — verify if refactoring.
            self._handle_error(request, router_id, e)
        try:
            port = api.neutron.port_get(request, router_inf['port_id'])
        except Exception:
            # Ignore an error when port_get() since it is just
            # to get an IP address for the interface.
            port = None
        return port
    def _add_interface_by_port(self, request, data):
        """Create a port with the requested fixed IP and attach it to the
        router; the port is deleted again if the attach step fails."""
        router_id = data['router_id']
        subnet_id = data['subnet_id']
        try:
            subnet = api.neutron.subnet_get(request, subnet_id)
        except Exception:
            msg = _('Unable to get subnet "%s"') % subnet_id
            self._handle_error(request, router_id, msg)
        try:
            ip_address = data['ip_address']
            body = {'network_id': subnet.network_id,
                    'fixed_ips': [{'subnet_id': subnet.id,
                                   'ip_address': ip_address}]}
            port = api.neutron.port_create(request, **body)
        except Exception as e:
            self._handle_error(request, router_id, e)
        try:
            api.neutron.router_add_interface(request, router_id,
                                             port_id=port.id)
        except Exception as e:
            # Don't leak the port we just created if the attach failed.
            self._delete_port(request, port)
            self._handle_error(request, router_id, e)
        return port
    def _handle_error(self, request, router_id, reason):
        """Log the failure and redirect to the router detail page."""
        msg = _('Failed to add_interface: %s') % reason
        LOG.info(msg)
        redirect = reverse(self.failure_url, args=[router_id])
        exceptions.handle(request, msg, redirect=redirect)
    def _delete_port(self, request, port):
        """Best-effort removal of a port created by this form."""
        try:
            api.neutron.port_delete(request, port.id)
        except Exception:
            msg = _('Failed to delete port %s') % port.id
            LOG.info(msg)
            exceptions.handle(request, msg)
class SetGatewayForm(forms.SelfHandlingForm):
    """Form that sets an external network as a router's gateway."""
    network_id = forms.ChoiceField(label=_("External Network"))
    # Router name/ID are display-only; the calling view pre-populates them.
    router_name = forms.CharField(label=_("Router Name"),
                                  widget=forms.TextInput(
                                  attrs={'readonly': 'readonly'}))
    router_id = forms.CharField(label=_("Router ID"),
                                widget=forms.TextInput(
                                attrs={'readonly': 'readonly'}))
    failure_url = 'horizon:project:routers:index'
    def __init__(self, request, *args, **kwargs):
        super(SetGatewayForm, self).__init__(request, *args, **kwargs)
        c = self.populate_network_id_choices(request)
        self.fields['network_id'].choices = c
    def populate_network_id_choices(self, request):
        """Offer only networks flagged 'router:external' as candidates."""
        search_opts = {'router:external': True}
        try:
            networks = api.neutron.network_list(request, **search_opts)
        except Exception as e:
            msg = _('Failed to get network list %s') % e
            LOG.info(msg)
            messages.error(request, msg)
            redirect = reverse(self.failure_url)
            # NOTE(review): exceptions.handle() with a redirect is expected
            # to raise, so the bare `return` below is normally unreachable.
            exceptions.handle(request, msg, redirect=redirect)
            return
        choices = [(network.id, network.name or network.id)
                   for network in networks]
        if choices:
            choices.insert(0, ("", _("Select network")))
        else:
            choices.insert(0, ("", _("No networks available")))
        return choices
    def handle(self, request, data):
        """Set the gateway; on failure redirect back to the router index."""
        try:
            api.neutron.router_add_gateway(request,
                                           data['router_id'],
                                           data['network_id'])
            msg = _('Gateway interface is added')
            LOG.debug(msg)
            messages.success(request, msg)
            return True
        except Exception as e:
            msg = _('Failed to set gateway %s') % e
            LOG.info(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
| apache-2.0 |
boothead/karl | karl/evolve/zodb/evolve2.py | 1 | 2263 | from datetime import datetime
from zope.component import getSiteManager
from repoze.bfg.traversal import model_path
from repoze.folder.interfaces import IObjectWillBeAddedEvent
from repoze.lemonade.content import create_content
from karl.content.interfaces import ICalendar
from karl.content.interfaces import ICalendarLayer
from karl.content.interfaces import ICalendarCategory
from karl.models.interfaces import ICatalogSearch
from karl.models.interfaces import IContent
from karl.models.subscribers import set_created
def evolve(context):
    """Add the default category and layer to every ICalendar in the site,
    without disturbing the communities' "Last Activity" timestamps."""
    # add default category and layer to all calendars
    # Prevent 'set_created' event handler from being called since it will,
    # in turn, set the content_modified attribute of community which is used
    # as the "Last Activity" in the user interface. We don't want this tweak
    # to impact a community's last activity. This means we need to set created
    # and modified on the new layers and categories ourselves.
    registry = getSiteManager()
    registry.adapters.unsubscribe(
        (IContent, IObjectWillBeAddedEvent), None, set_created)
    try:
        search = ICatalogSearch(context)
        default_category_name = ICalendarCategory.getTaggedValue('default_name')
        default_layer_name = ICalendarLayer.getTaggedValue('default_name')
        now = datetime.now()
        cnt, docids, resolver = search(interfaces=[ICalendar])
        for docid in docids:
            calendar = resolver(docid)
            # NOTE(review): category/layer objects are created unconditionally
            # but only stored when the calendar lacks them already.
            default_category = create_content(ICalendarCategory, 'Default')
            default_category.created = default_category.modified = now
            if not default_category_name in calendar:
                calendar[default_category_name] = default_category
            local_layer = create_content(ICalendarLayer,
                                         "This Calendar's Events Only", 'blue',
                                         [model_path(default_category)])
            local_layer.created = local_layer.modified = now
            if not default_layer_name in calendar:
                calendar[default_layer_name] = local_layer
    finally:
        # Always restore the handler, even if the migration fails part-way.
        registry.adapters.subscribe(
            (IContent, IObjectWillBeAddedEvent), None, set_created)
| gpl-2.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/keras/python/keras/__init__.py | 17 | 1926 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Keras API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import applications
from tensorflow.contrib.keras.python.keras import backend
from tensorflow.contrib.keras.python.keras import callbacks
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import datasets
from tensorflow.contrib.keras.python.keras import engine
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import layers
from tensorflow.contrib.keras.python.keras import losses
from tensorflow.contrib.keras.python.keras import metrics
from tensorflow.contrib.keras.python.keras import models
from tensorflow.contrib.keras.python.keras import optimizers
from tensorflow.contrib.keras.python.keras import preprocessing
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras import utils
from tensorflow.contrib.keras.python.keras import wrappers
from tensorflow.contrib.keras.python.keras.layers import Input
__version__ = '2.0.6-tf'
| mit |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/AMD/conservative_depth.py | 9 | 1568 | '''OpenGL extension AMD.conservative_depth
This module customises the behaviour of the
OpenGL.raw.GL.AMD.conservative_depth to provide a more
Python-friendly API
Overview (from the spec)
There is a common optimization for hardware accelerated implementation of
OpenGL which relies on an early depth test to be run before the fragment
shader so that the shader evaluation can be skipped if the fragment ends
up being discarded because it is occluded.
This optimization does not affect the final rendering, and is typically
possible when the fragment does not change the depth programmatically.
(i.e.: it does not write to the built-in gl_FragDepth output). There are,
however a class of operations on the depth in the shader which could
still be performed while allowing the early depth test to operate.
This extension allows the application to pass enough information to the
GL implementation to activate such optimizations safely.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/AMD/conservative_depth.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.conservative_depth import *
from OpenGL.raw.GL.AMD.conservative_depth import _EXTENSION_NAME
def glInitConservativeDepthAMD():
    '''Return boolean indicating whether this extension is available'''
    # Local import mirrors the auto-generated template; resolved at call time.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | lgpl-3.0 |
StackPointCloud/libcloud | libcloud/container/providers.py | 7 | 1876 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.types import Provider
from libcloud.common.providers import get_driver as _get_provider_driver
from libcloud.common.providers import set_driver as _set_provider_driver
# Maps each Provider constant to (module path, driver class name).
# Drivers are imported lazily by get_driver(), not at module load time.
DRIVERS = {
    Provider.DUMMY:
    ('libcloud.container.drivers.dummy', 'DummyContainerDriver'),
    Provider.DOCKER:
    ('libcloud.container.drivers.docker', 'DockerContainerDriver'),
    Provider.JOYENT:
    ('libcloud.container.drivers.joyent', 'JoyentContainerDriver'),
    Provider.ECS:
    ('libcloud.container.drivers.ecs', 'ElasticContainerDriver'),
    Provider.KUBERNETES:
    ('libcloud.container.drivers.kubernetes', 'KubernetesContainerDriver'),
    Provider.RANCHER:
    ('libcloud.container.drivers.rancher', 'RancherContainerDriver'),
    Provider.GKE:
    ('libcloud.container.drivers.gke', 'GKEContainerDriver')
}
def get_driver(provider):
    """Return the container driver class registered for *provider*."""
    return _get_provider_driver(drivers=DRIVERS, provider=provider)
def set_driver(provider, module, klass):
    """Register (or override) the driver class for *provider*."""
    return _set_provider_driver(drivers=DRIVERS, provider=provider,
                                module=module, klass=klass)
| apache-2.0 |
fialakarel/haut-server | brain/logic/cmds.py | 1 | 1365 | #!/usr/bin/python3
# author: Karel Fiala
# email: fiala.karel@gmail.com
# commands
# testing
#self.setcmd("dev.haut.local", "temp", "temp.py", "")
#self.setcmd("dev.haut.local", "temp2", "temperaturex.py", "5 5")
#self.setcmd("dev.haut.local", "dev-cool", "cool.py", "")
#self.setcmd("dev.haut.local", "dev-cool2", "cool2.py", "aaa")
# real
# NOTE(review): this file appears to be executed in a context where `self`
# is already bound (see the `cmds` naming) — confirm before running standalone.
# setcmd(host, command-name, handler, handler-arguments)
# Commands for the 'dev' node.
self.setcmd("dev.haut.local", "temperature", "temperature", "28-000005e6d5be")
self.setcmd("dev.haut.local", "pir-18", "pir", "18")
#self.setcmd("dev.haut.local", "gpio-17-on", "gpio.py", "17 1")
#self.setcmd("dev.haut.local", "gpio-17-off", "gpio.py", "17 0")
self.setcmd("dev.haut.local", "gpio-17-on", "blaster", "17 0.2")
self.setcmd("dev.haut.local", "gpio-17-fadein", "fade", "17 0 0.8")
self.setcmd("dev.haut.local", "gpio-17-half", "blaster", "17 0.25")
self.setcmd("dev.haut.local", "gpio-17-off", "blaster", "17 0")
self.setcmd("dev.haut.local", "gpio-17-fadeout", "fade", "17 0.2 0")
self.setcmd("dev.haut.local", "gpio-17-fadehalf", "fade", "17 0.8 0.2")
self.setcmd("dev.haut.local", "dev-status", "status", "")
# Commands for the 'bedroom1' node.
self.setcmd("bedroom1.haut.local", "bedroom1-status", "status", "")
self.setcmd("bedroom1.haut.local", "bedroom1-temp1", "temperature", "28-000006dc1ec1")
self.setcmd("bedroom1.haut.local", "heatbed-on", "gpio", "17 1")
self.setcmd("bedroom1.haut.local", "heatbed-off", "gpio", "17 0")
| mit |
hkernbach/arangodb | 3rdParty/V8/v5.7.492.77/tools/try_perf.py | 4 | 3438 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
# Maps command-line flags to the perf trybot each one enables.
BOTS = {
  '--arm32': 'v8_arm32_perf_try',
  '--linux32': 'v8_linux32_perf_try',
  '--linux64': 'v8_linux64_perf_try',
  '--linux64_atom': 'v8_linux64_atom_perf_try',
  '--linux64_haswell': 'v8_linux64_haswell_perf_try',
  '--nexus5': 'v8_nexus5_perf_try',
  '--nexus7': 'v8_nexus7_perf_try',
  '--nexus9': 'v8_nexus9_perf_try',
  '--nexus10': 'v8_nexus10_perf_try',
}
# Trybots used when no bot flag is given on the command line.
DEFAULT_BOTS = [
  'v8_arm32_perf_try',
  'v8_linux32_perf_try',
  'v8_linux64_haswell_perf_try',
  'v8_nexus10_perf_try',
]
# Benchmarks known to exist on the trybots; anything else triggers an
# interactive confirmation in main().
PUBLIC_BENCHMARKS = [
  'arewefastyet',
  'embenchen',
  'emscripten',
  'compile',
  'jetstream',
  'jetstream-ignition',
  'jsbench',
  'jstests',
  'kraken_orig',
  'kraken_orig-ignition',
  'massive',
  'memory',
  'octane',
  'octane-noopt',
  'octane-ignition',
  'octane-pr',
  'octane-tf',
  'octane-tf-pr',
  'simdjs',
  'sunspider',
  'sunspider-ignition',
  'unity',
  'wasm',
]
# Checkout root (parent of tools/); used as cwd for git/gclient calls.
V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
  """Parse benchmark/bot arguments and send a perf try job via `git cl try`.

  Returns a non-zero exit status when no benchmarks are given or the user
  declines to run an unknown benchmark. NOTE: this is Python 2 code
  (print statements).
  """
  parser = argparse.ArgumentParser(description='')
  parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
  parser.add_argument('--extra-flags', default='',
                      help='Extra flags to be passed to the executable.')
  parser.add_argument('-r', '--revision', type=str, default=None,
                      help='Revision (use full hash!) to use for the try job; '
                           'default: the revision will be determined by the '
                           'try server; see its waterfall for more info')
  # One flag per known trybot; each appends its bot name to options.bots.
  for option in sorted(BOTS):
    parser.add_argument(
        option, dest='bots', action='append_const', const=BOTS[option],
        help='Add %s trybot.' % BOTS[option])
  options = parser.parse_args()
  if not options.bots:
    print 'No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS)
    options.bots = DEFAULT_BOTS
  if not options.benchmarks:
    print 'Please specify the benchmarks to run as arguments.'
    return 1
  # Warn about (and confirm) benchmarks we do not know about.
  for benchmark in options.benchmarks:
    if benchmark not in PUBLIC_BENCHMARKS:
      print ('%s not found in our benchmark list. The respective trybot might '
             'fail, unless you run something this script isn\'t aware of. '
             'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
      print 'Proceed anyways? [Y/n] ',
      answer = sys.stdin.readline().strip()
      if answer != "" and answer != "Y" and answer != "y":
        return 1
  # Quotes would break the shell command assembled below.
  assert '"' not in options.extra_flags and '\'' not in options.extra_flags, (
      'Invalid flag specification.')
  # Ensure depot_tools are updated.
  subprocess.check_output(
      'gclient', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
  cmd = ['git cl try -m internal.client.v8']
  cmd += ['-b %s' % bot for bot in options.bots]
  if options.revision: cmd += ['-r %s' % options.revision]
  benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
  cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
  if options.extra_flags:
    cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
  subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
if __name__ == '__main__': # pragma: no cover
sys.exit(main())
| apache-2.0 |
napkindrawing/ansible | lib/ansible/modules/database/misc/kibana_plugin.py | 21 | 7064 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to manage elasticsearch shield role
# (c) 2016, Thierno IB. BARRY @barryib
# Sponsored by Polyconseil http://polyconseil.fr.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kibana_plugin
short_description: Manage Kibana plugins
description:
- Manages Kibana plugins.
version_added: "2.2"
author: Thierno IB. BARRY (@barryib)
options:
name:
description:
- Name of the plugin to install
required: True
state:
description:
- Desired state of a plugin.
required: False
choices: ["present", "absent"]
default: present
url:
description:
- Set exact URL to download the plugin from.
For local file, prefix its absolute path with file://
required: False
default: None
timeout:
description:
- "Timeout setting: 30s, 1m, 1h..."
required: False
default: 1m
plugin_bin:
description:
- Location of the plugin binary
required: False
default: /opt/kibana/bin/kibana
plugin_dir:
description:
- Your configured plugin directory specified in Kibana
required: False
default: /opt/kibana/installedPlugins/
version:
description:
- Version of the plugin to be installed.
If plugin exists with previous version, it will NOT be updated if C(force) is not set to yes
required: False
default: None
force:
description:
- Delete and re-install the plugin. Can be useful for plugins update
required: False
choices: ["yes", "no"]
default: no
'''
EXAMPLES = '''
- name: Install Elasticsearch head plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
- name: Install specific version of a plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
version: '2.3.3'
- name: Uninstall Elasticsearch head plugin
kibana_plugin:
state: absent
name: elasticsearch/marvel
'''
RETURN = '''
cmd:
description: the launched command during plugin mangement (install / remove)
returned: success
type: string
name:
description: the plugin name to install or remove
returned: success
type: string
url:
description: the url from where the plugin is installed from
returned: success
type: string
timeout:
description: the timeout for plugin download
returned: success
type: string
stdout:
description: the command stdout
returned: success
type: string
stderr:
description: the command stderr
returned: success
type: string
state:
description: the state for the managed plugin
returned: success
type: string
'''
import os
# Maps the module's 'state' parameter to the kibana plugin binary's CLI flag.
PACKAGE_STATE_MAP = dict(
    present="--install",
    absent="--remove"
)
def parse_plugin_repo(string):
    """Extract the bare plugin name from a plugin specification.

    Accepts ``pluginname`` or ``username/pluginname`` and strips a leading
    ``elasticsearch-`` or ``es-`` prefix from the result.

    :param string: plugin specification, e.g. ``elasticsearch/marvel``
    :returns: the normalized plugin name, e.g. ``marvel``
    """
    elements = string.split("/")
    # Simplest form is just the plugin name; with "username/pluginname" the
    # part after the first slash is the plugin name.
    repo = elements[1] if len(elements) > 1 else elements[0]
    # Strip well-known prefixes. Use a dedicated loop variable instead of
    # shadowing the 'string' parameter as the original code did.
    for prefix in ("elasticsearch-", "es-"):
        if repo.startswith(prefix):
            return repo[len(prefix):]
    return repo
def is_plugin_present(plugin_dir, working_dir):
    """Return True when the plugin's directory exists under working_dir."""
    candidate = os.path.join(working_dir, plugin_dir)
    return os.path.isdir(candidate)
def parse_error(string):
    """Return the text after the first 'reason: ' marker, or the input
    unchanged when no marker is present."""
    marker = "reason: "
    _, found, tail = string.partition(marker)
    if found:
        return tail.strip()
    return string
def install_plugin(module, plugin_bin, plugin_name, url, timeout):
    """Install a Kibana plugin via the plugin binary.

    Returns a (changed, cmd, stdout, stderr) tuple. In check mode the
    command is built but not executed. On a non-zero return code the
    failure reason is extracted and module.fail_json() is called.
    """
    cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
    if url:
        cmd_args.append("--url %s" % url)
    if timeout:
        cmd_args.append("--timeout %s" % timeout)
    cmd = " ".join(cmd_args)
    if module.check_mode:
        # Report what would run without touching the system.
        return True, cmd, "check mode", ""
    rc, out, err = module.run_command(cmd)
    if rc != 0:
        reason = parse_error(out)
        module.fail_json(msg=reason)
    return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
    """Remove a Kibana plugin via the plugin binary.

    Returns a (changed, cmd, stdout, stderr) tuple; check mode only
    reports the command. Failures are routed through module.fail_json().
    """
    cmd = " ".join([plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name])
    if module.check_mode:
        return True, cmd, "check mode", ""
    rc, out, err = module.run_command(cmd)
    if rc == 0:
        return True, cmd, out, err
    # Non-zero exit: surface the parsed failure reason to Ansible.
    module.fail_json(msg=parse_error(out))
    return True, cmd, out, err
def main():
    """Module entry point: install or remove a Kibana plugin.

    Reads the module parameters, exits early when the plugin is already in
    the requested state (unless force is set), then runs the install or
    remove command and reports the result back to Ansible.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
            url=dict(default=None),
            timeout=dict(default="1m"),
            plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
            plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
            version=dict(default=None),
            force=dict(default="no", type="bool")
        ),
        supports_check_mode=True,
    )
    name = module.params["name"]
    state = module.params["state"]
    url = module.params["url"]
    timeout = module.params["timeout"]
    plugin_bin = module.params["plugin_bin"]
    plugin_dir = module.params["plugin_dir"]
    version = module.params["version"]
    force = module.params["force"]
    present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
    # skip if the state is correct
    if (present and state == "present" and not force) or (state == "absent" and not present and not force):
        module.exit_json(changed=False, name=name, state=state)
    # A specific version is requested by appending it to the plugin name.
    if (version):
        name = name + '/' + version
    if state == "present":
        if force:
            # Force reinstall: drop any existing copy first.
            remove_plugin(module, plugin_bin, name)
        changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout)
    elif state == "absent":
        changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
    module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ThePletch/ansible | test/runner/lib/git.py | 41 | 1869 | """Wrapper around git command-line tools."""
from __future__ import absolute_import, print_function
from lib.util import (
CommonConfig,
run_command,
)
class Git(object):
    """Wrapper around git command-line tools."""
    def __init__(self, args):
        """
        :type args: CommonConfig
        """
        self.args = args
        self.git = 'git'

    def get_diff_names(self, args):
        """Return the paths changed according to `git diff`.
        :type args: list[str]
        :rtype: list[str]
        """
        cmd = ['diff', '--name-only', '--no-renames', '-z'] + args
        return self.run_git_split(cmd, '\0')

    def get_file_names(self, args):
        """Return the tracked paths reported by `git ls-files`.
        :type args: list[str]
        :rtype: list[str]
        """
        cmd = ['ls-files', '-z'] + args
        return self.run_git_split(cmd, '\0')

    def get_branches(self):
        """Return the names of all local branches.
        :rtype: list[str]
        """
        cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
        return self.run_git_split(cmd)

    def get_branch(self):
        """Return the name of the currently checked-out branch.
        :rtype: str
        """
        cmd = ['symbolic-ref', '--short', 'HEAD']
        return self.run_git(cmd).strip()

    def get_branch_fork_point(self, branch):
        """Return the commit at which the current history forked from branch.
        :type branch: str
        :rtype: str
        """
        cmd = ['merge-base', '--fork-point', branch]
        return self.run_git(cmd).strip()

    def run_git_split(self, cmd, separator=None):
        """Run a git command and split its output on separator.
        :type cmd: list[str]
        :param separator: str | None
        :rtype: list[str]
        """
        # strip(None) strips whitespace, which matches split(None)'s
        # whitespace-splitting behavior for the default separator.
        output = self.run_git(cmd).strip(separator)
        if not output:  # idiomatic emptiness check (was: len(output) == 0)
            return []
        return output.split(separator)

    def run_git(self, cmd):
        """Run a git command and return its captured stdout.
        :type cmd: list[str]
        :rtype: str
        """
        return run_command(self.args, [self.git] + cmd, capture=True, always=True)[0]
| gpl-3.0 |
birryree/servo | python/servo/bootstrap_commands.py | 15 | 16812 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
from socket import error as socket_error
import base64
import json
import os
import os.path as path
import re
import shutil
import sys
import StringIO
import tarfile
import zipfile
import urllib2
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, host_triple, BIN_SUFFIX
def download(desc, src, writer, start_byte=0):
    """Stream the resource at src into writer, printing progress to stdout.

    :param desc: human-readable description used in progress messages
    :param src: URL to fetch
    :param writer: file-like object that receives the downloaded bytes
    :param start_byte: if non-zero, resume via an HTTP Range request
    Exits the process on HTTP/URL/socket errors; re-raises
    KeyboardInterrupt after flushing writer so the partial file can be
    resumed later. NOTE: Python 2 (urllib2, `except X, e` syntax).
    """
    if start_byte:
        print("Resuming download of %s..." % desc)
    else:
        print("Downloading %s..." % desc)
    # Suppress the progress meter on dumb terminals or when piped.
    dumb = (os.environ.get("TERM") == "dumb") or (not sys.stdout.isatty())
    try:
        req = urllib2.Request(src)
        if start_byte:
            # Ask the server for only the remaining bytes.
            req = urllib2.Request(src, headers={'Range': 'bytes={}-'.format(start_byte)})
        resp = urllib2.urlopen(req)
        fsize = None
        if resp.info().getheader('Content-Length'):
            fsize = int(resp.info().getheader('Content-Length').strip()) + start_byte
        recved = start_byte
        chunk_size = 8192
        while True:
            chunk = resp.read(chunk_size)
            if not chunk:
                break
            recved += len(chunk)
            if not dumb:
                if fsize is not None:
                    pct = recved * 100.0 / fsize
                    print("\rDownloading %s: %5.1f%%" % (desc, pct), end="")
                sys.stdout.flush()
            writer.write(chunk)
        if not dumb:
            print()
    except urllib2.HTTPError, e:
        print("Download failed (%d): %s - %s" % (e.code, e.reason, src))
        if e.code == 403:
            print("No Rust compiler binary available for this platform. "
                  "Please see https://github.com/servo/servo/#prerequisites")
        sys.exit(1)
    except urllib2.URLError, e:
        print("Error downloading Rust compiler: %s. The failing URL was: %s" % (e.reason, src))
        sys.exit(1)
    except socket_error, e:
        print("Looks like there's a connectivity issue, check your Internet connection. %s" % (e))
        sys.exit(1)
    except KeyboardInterrupt:
        # Flush what we have so a later run can resume from it.
        writer.flush()
        raise
def download_file(desc, src, dst):
    """Download src into dst, resuming a partial ".part" file if present."""
    partial = dst + ".part"
    try:
        # A pre-existing partial file lets us resume from its current size.
        resume_from = os.path.getsize(partial)
        with open(partial, 'ab') as out:
            download(desc, src, out, start_byte=resume_from)
    except os.error:
        # No usable partial file: start the download from scratch.
        with open(partial, 'wb') as out:
            download(desc, src, out)
    os.rename(partial, dst)
def download_bytes(desc, src):
    """Fetch src and return its entire contents as an in-memory string."""
    buf = StringIO.StringIO()
    download(desc, src, buf)
    return buf.getvalue()
def extract(src, dst, movedir=None):
    """Unpack the archive at src into dst, then delete src.

    Zip archives are detected by their ".zip" suffix; anything else is
    treated as a tarball. If movedir is given, its contents are moved up
    into dst and the (then empty) directory is removed.

    :param src: path to the archive file; removed after extraction
    :param dst: directory to extract into
    :param movedir: optional intermediate directory to flatten into dst
    """
    # Use context managers so the archive handles are closed even when
    # extraction fails (the original leaked both file handles).
    if src.endswith(".zip"):
        with zipfile.ZipFile(src) as archive:
            archive.extractall(dst)
    else:
        with tarfile.open(src) as archive:
            archive.extractall(dst)
    if movedir:
        for f in os.listdir(movedir):
            frm = path.join(movedir, f)
            to = path.join(dst, f)
            os.rename(frm, to)
        os.rmdir(movedir)
    os.remove(src)
@CommandProvider
class MachCommands(CommandBase):
    """Bootstrap-category mach commands: toolchain downloads (Rust, Cargo,
    docs) and refreshes of bundled data files (HSTS preload list, public
    suffix list). NOTE: Python 2 code (`except ValueError, e` below)."""
    @Command('env',
             description='Print environment setup commands',
             category='bootstrap')
    def env(self):
        """Print shell export statements for the build environment."""
        env = self.build_env()
        print("export PATH=%s" % env["PATH"])
        if sys.platform == "darwin":
            print("export DYLD_LIBRARY_PATH=%s" % env["DYLD_LIBRARY_PATH"])
        else:
            print("export LD_LIBRARY_PATH=%s" % env["LD_LIBRARY_PATH"])
    @Command('bootstrap',
             description='Install required packages for building.',
             category='bootstrap')
    @CommandArgument('--interactive', "-i",
                     action='store_true',
                     help='Need to answer any (Y/n) interactive prompts.')
    @CommandArgument('--android',
                     action='store_true',
                     help='Install required packages for Android')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Force reinstall packages')
    def bootstrap(self, android=False, interactive=False, force=False):
        """Delegate to the platform bootstrapper to install build packages."""
        from servo.bootstrapper.bootstrap import Bootstrapper
        bootstrapper = Bootstrapper(self.context)
        bootstrapper.bootstrap(android=android, interactive=interactive, force=force)
    @Command('bootstrap-rust',
             description='Download the Rust compiler',
             category='bootstrap')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Force download even if a copy already exists')
    @CommandArgument('--target',
                     action='append',
                     default=[],
                     help='Download rust stdlib for specified target')
    @CommandArgument('--stable',
                     action='store_true',
                     help='Use stable rustc version')
    def bootstrap_rustc(self, force=False, target=[], stable=False):
        """Download the Rust compiler and stdlibs for the requested targets."""
        self.set_use_stable_rust(stable)
        version = self.rust_version()
        rust_path = self.rust_path()
        rust_dir = path.join(self.context.sharedir, "rust", rust_path)
        install_dir = path.join(self.context.sharedir, "rust", version)
        if not force and path.exists(path.join(rust_dir, "rustc", "bin", "rustc" + BIN_SUFFIX)):
            print("Rust compiler already downloaded.", end=" ")
            print("Use |bootstrap-rust --force| to download again.")
        else:
            if path.isdir(rust_dir):
                shutil.rmtree(rust_dir)
            os.makedirs(rust_dir)
            # The nightly Rust compiler is hosted on the nightly server under the date with a name
            # rustc-nightly-HOST-TRIPLE.tar.gz, whereas the stable compiler is named
            # rustc-VERSION-HOST-TRIPLE.tar.gz. We just need to pull down and extract it,
            # giving a directory name that will be the same as the tarball name (rustc is
            # in that directory).
            if stable:
                tarball = "rustc-%s-%s.tar.gz" % (version, host_triple())
            else:
                tarball = "%s/rustc-nightly-%s.tar.gz" % (version, host_triple())
            rustc_url = "https://static-rust-lang-org.s3.amazonaws.com/dist/" + tarball
            tgz_file = rust_dir + '-rustc.tar.gz'
            download_file("Rust compiler", rustc_url, tgz_file)
            print("Extracting Rust compiler...")
            extract(tgz_file, install_dir)
            print("Rust compiler ready.")
        # Each Rust stdlib has a name of the form `rust-std-nightly-TRIPLE.tar.gz` for the nightly
        # releases, or rust-std-VERSION-TRIPLE.tar.gz for stable releases, with
        # a directory of the name `rust-std-TRIPLE` inside and then a `lib` directory.
        # This `lib` directory needs to be extracted and merged with the `rustc/lib`
        # directory from the host compiler above.
        nightly_suffix = "" if stable else "-nightly"
        stable_version = "-{}".format(version) if stable else ""
        lib_dir = path.join(install_dir,
                            "rustc{}{}-{}".format(nightly_suffix, stable_version, host_triple()),
                            "rustc", "lib", "rustlib")
        # ensure that the libs for the host's target is downloaded
        host_target = host_triple()
        if host_target not in target:
            target.append(host_target)
        for target_triple in target:
            target_lib_dir = path.join(lib_dir, target_triple)
            if path.exists(target_lib_dir):
                # No need to check for force. If --force the directory is already deleted
                print("Rust lib for target {} already downloaded.".format(target_triple), end=" ")
                print("Use |bootstrap-rust --force| to download again.")
                continue
            if self.use_stable_rust():
                std_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/rust-std-%s-%s.tar.gz"
                           % (version, target_triple))
                tgz_file = install_dir + ('rust-std-%s-%s.tar.gz' % (version, target_triple))
            else:
                std_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/%s/rust-std-nightly-%s.tar.gz"
                           % (version, target_triple))
                tgz_file = install_dir + ('rust-std-nightly-%s.tar.gz' % target_triple)
            download_file("Host rust library for target %s" % target_triple, std_url, tgz_file)
            print("Extracting Rust stdlib for target %s..." % target_triple)
            extract(tgz_file, install_dir)
            shutil.copytree(path.join(install_dir,
                                      "rust-std%s%s-%s" % (nightly_suffix, stable_version, target_triple),
                                      "rust-std-%s" % target_triple, "lib", "rustlib", target_triple),
                            path.join(install_dir,
                                      "rustc%s%s-%s" % (nightly_suffix, stable_version, host_triple()),
                                      "rustc", "lib", "rustlib", target_triple))
            shutil.rmtree(path.join(install_dir,
                                    "rust-std%s%s-%s" % (nightly_suffix, stable_version, target_triple)))
            print("Rust {} libs ready.".format(target_triple))
    @Command('bootstrap-rust-docs',
             description='Download the Rust documentation',
             category='bootstrap')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Force download even if docs already exist')
    def bootstrap_rustc_docs(self, force=False):
        """Download and unpack the Rust HTML documentation."""
        self.ensure_bootstrapped()
        rust_root = self.config["tools"]["rust-root"]
        docs_dir = path.join(rust_root, "doc")
        if not force and path.exists(docs_dir):
            print("Rust docs already downloaded.", end=" ")
            print("Use |bootstrap-rust-docs --force| to download again.")
            return
        if path.isdir(docs_dir):
            shutil.rmtree(docs_dir)
        docs_name = self.rust_path().replace("rustc-", "rust-docs-")
        docs_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/rust-docs-nightly-%s.tar.gz"
                    % host_triple())
        tgz_file = path.join(rust_root, 'doc.tar.gz')
        download_file("Rust docs", docs_url, tgz_file)
        print("Extracting Rust docs...")
        temp_dir = path.join(rust_root, "temp_docs")
        if path.isdir(temp_dir):
            shutil.rmtree(temp_dir)
        extract(tgz_file, temp_dir)
        shutil.move(path.join(temp_dir, docs_name.split("/")[1],
                              "rust-docs", "share", "doc", "rust", "html"),
                    docs_dir)
        shutil.rmtree(temp_dir)
        print("Rust docs ready.")
    @Command('bootstrap-cargo',
             description='Download the Cargo build tool',
             category='bootstrap')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Force download even if cargo already exists')
    def bootstrap_cargo(self, force=False):
        """Download and unpack a nightly Cargo build."""
        cargo_dir = path.join(self.context.sharedir, "cargo",
                              self.cargo_build_id())
        if not force and path.exists(path.join(cargo_dir, "cargo", "bin", "cargo" + BIN_SUFFIX)):
            print("Cargo already downloaded.", end=" ")
            print("Use |bootstrap-cargo --force| to download again.")
            return
        if path.isdir(cargo_dir):
            shutil.rmtree(cargo_dir)
        os.makedirs(cargo_dir)
        tgz_file = "cargo-nightly-%s.tar.gz" % host_triple()
        nightly_url = "https://static-rust-lang-org.s3.amazonaws.com/cargo-dist/%s/%s" % \
            (self.cargo_build_id(), tgz_file)
        download_file("Cargo nightly", nightly_url, tgz_file)
        print("Extracting Cargo nightly...")
        nightly_dir = path.join(cargo_dir,
                                path.basename(tgz_file).replace(".tar.gz", ""))
        extract(tgz_file, cargo_dir, movedir=nightly_dir)
        print("Cargo ready.")
    @Command('update-hsts-preload',
             description='Download the HSTS preload list',
             category='bootstrap')
    def bootstrap_hsts_preload(self, force=False):
        """Regenerate resources/hsts_preload.json from Chromium's list."""
        preload_filename = "hsts_preload.json"
        preload_path = path.join(self.context.topdir, "resources")
        chromium_hsts_url = "https://chromium.googlesource.com/chromium/src" + \
            "/net/+/master/http/transport_security_state_static.json?format=TEXT"
        try:
            content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)
        except urllib2.URLError:
            print("Unable to download chromium HSTS preload list; are you connected to the internet?")
            sys.exit(1)
        content_decoded = base64.b64decode(content_base64)
        # The chromium "json" has single line comments in it which, of course,
        # are non-standard/non-valid json. Simply strip them out before parsing
        content_json = re.sub(r'(^|\s+)//.*$', '', content_decoded, flags=re.MULTILINE)
        try:
            pins_and_static_preloads = json.loads(content_json)
            entries = {
                "entries": [
                    {
                        "host": e["name"],
                        "include_subdomains": e.get("include_subdomains", False)
                    }
                    for e in pins_and_static_preloads["entries"]
                ]
            }
            with open(path.join(preload_path, preload_filename), 'w') as fd:
                json.dump(entries, fd, indent=4)
        except ValueError, e:
            print("Unable to parse chromium HSTS preload list, has the format changed?")
            sys.exit(1)
    @Command('update-pub-domains',
             description='Download the public domains list and update resources/public_domains.txt',
             category='bootstrap')
    def bootstrap_pub_suffix(self, force=False):
        """Regenerate resources/public_domains.txt from publicsuffix.org."""
        list_url = "https://publicsuffix.org/list/public_suffix_list.dat"
        dst_filename = path.join(self.context.topdir, "resources", "public_domains.txt")
        not_implemented_case = re.compile(r'^[^*]+\*')
        try:
            content = download_bytes("Public suffix list", list_url)
        except urllib2.URLError:
            print("Unable to download the public suffix list; are you connected to the internet?")
            sys.exit(1)
        lines = [l.strip() for l in content.decode("utf8").split("\n")]
        suffixes = [l for l in lines if not l.startswith("//") and not l == ""]
        with open(dst_filename, "wb") as fo:
            for suffix in suffixes:
                if not_implemented_case.match(suffix):
                    print("Warning: the new list contains a case that servo can't handle: %s" % suffix)
                fo.write(suffix.encode("idna") + "\n")
    @Command('clean-nightlies',
             description='Clean unused nightly builds of Rust and Cargo',
             category='bootstrap')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Actually remove stuff')
    def clean_nightlies(self, force=False):
        """Remove Rust/Cargo nightlies other than the currently-used ones.

        Without --force this only lists what would be removed.
        """
        rust_current = self.rust_path().split('/')[0]
        cargo_current = self.cargo_build_id()
        print("Current Rust version: " + rust_current)
        print("Current Cargo version: " + cargo_current)
        removing_anything = False
        for current, base in [(rust_current, "rust"), (cargo_current, "cargo")]:
            base = path.join(self.context.sharedir, base)
            for name in os.listdir(base):
                if name != current:
                    removing_anything = True
                    name = path.join(base, name)
                    if force:
                        print("Removing " + name)
                        if os.path.isdir(name):
                            shutil.rmtree(name)
                        else:
                            os.remove(name)
                    else:
                        print("Would remove " + name)
        if not removing_anything:
            print("Nothing to remove.")
        elif not force:
            print("Nothing done. "
                  "Run `./mach clean-nightlies -f` to actually remove.")
| mpl-2.0 |
kellinm/blivet | blivet/populator.py | 1 | 70277 | # populator.py
# Backend code for populating a DeviceTree.
#
# Copyright (C) 2009-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU Lesser General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY expressed or implied, including the implied
# warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU Lesser General Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with this
# program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks
# that are incorporated in the source code or documentation are not subject
# to the GNU Lesser General Public License and may only be used or
# replicated with the express permission of Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import os
import re
import shutil
import pprint
import copy
import parted
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
from .errors import CorruptGPTError, DeviceError, DeviceTreeError, DiskLabelScanError, DuplicateVGError, FSError, InvalidDiskLabelError, LUKSError
from .devices import BTRFSSubVolumeDevice, BTRFSVolumeDevice, BTRFSSnapShotDevice
from .devices import DASDDevice, DMDevice, DMLinearDevice, DMRaidArrayDevice, DiskDevice
from .devices import FcoeDiskDevice, FileDevice, LoopDevice, LUKSDevice
from .devices import LVMLogicalVolumeDevice, LVMVolumeGroupDevice
from .devices import LVMThinPoolDevice, LVMThinLogicalVolumeDevice
from .devices import LVMSnapShotDevice, LVMThinSnapShotDevice
from .devices import MDRaidArrayDevice, MDBiosRaidArrayDevice
from .devices import MDContainerDevice
from .devices import MultipathDevice, OpticalDevice
from .devices import PartitionDevice, ZFCPDiskDevice, iScsiDiskDevice
from .devices import devicePathToName
from .devices.lvm import get_internal_lv_class
from . import formats
from .devicelibs import lvm
from .devicelibs import raid
from . import udev
from . import util
from .util import open # pylint: disable=redefined-builtin
from .flags import flags
from .storage_log import log_exception_info, log_method_call
from .i18n import _
from .size import Size
import logging
log = logging.getLogger("blivet")
def parted_exn_handler(exn_type, exn_options, exn_msg):
    """ Answer any of parted's yes/no questions in the affirmative.
        This allows us to proceed with partially corrupt gpt disklabels.
    """
    log.info("parted exception: %s", exn_msg)
    is_yes_no_error = (exn_type == parted.EXCEPTION_TYPE_ERROR and
                       exn_options == parted.EXCEPTION_OPT_YES_NO)
    if is_yes_no_error:
        return parted.EXCEPTION_RESOLVE_YES
    # Anything other than an error-level yes/no question is left for
    # parted's default handling.
    return parted.EXCEPTION_RESOLVE_UNHANDLED
class Populator(object):
def __init__(self, devicetree=None, conf=None, passphrase=None,
luksDict=None, iscsi=None, dasd=None):
"""
:keyword conf: storage discovery configuration
:type conf: :class:`~.StorageDiscoveryConfig`
:keyword passphrase: default LUKS passphrase
:keyword luksDict: a dict with UUID keys and passphrase values
:type luksDict: dict
:keyword iscsi: ISCSI control object
:type iscsi: :class:`~.iscsi.iscsi`
:keyword dasd: DASD control object
:type dasd: :class:`~.dasd.DASD`
"""
self.devicetree = devicetree
# indicates whether or not the tree has been fully populated
self.populated = False
self.exclusiveDisks = getattr(conf, "exclusiveDisks", [])
self.ignoredDisks = getattr(conf, "ignoredDisks", [])
self.iscsi = iscsi
self.dasd = dasd
self.diskImages = {}
images = getattr(conf, "diskImages", {})
if images:
# this will overwrite self.exclusiveDisks
self.setDiskImages(images)
# protected device specs as provided by the user
self.protectedDevSpecs = getattr(conf, "protectedDevSpecs", [])
self.liveBackingDevice = None
# names of protected devices at the time of tree population
self.protectedDevNames = []
self.unusedRaidMembers = []
self.__passphrases = []
if passphrase:
self.__passphrases.append(passphrase)
self.__luksDevs = {}
if luksDict and isinstance(luksDict, dict):
self.__luksDevs = luksDict
self.__passphrases.extend([p for p in luksDict.values() if p])
self._cleanup = False
def setDiskImages(self, images):
""" Set the disk images and reflect them in exclusiveDisks.
:param images: dict with image name keys and filename values
:type images: dict
.. note::
Disk images are automatically exclusive. That means that, in the
presence of disk images, any local storage not associated with
the disk images is ignored.
"""
self.diskImages = images
# disk image files are automatically exclusive
self.exclusiveDisks = list(self.diskImages.keys())
def addIgnoredDisk(self, disk):
self.ignoredDisks.append(disk)
lvm.lvm_cc_addFilterRejectRegexp(disk)
def isIgnored(self, info):
""" Return True if info is a device we should ignore.
:param info: udevdb device entry
:type info: dict
:returns: whether the device will be ignored
:rtype: bool
"""
sysfs_path = udev.device_get_sysfs_path(info)
name = udev.device_get_name(info)
if not sysfs_path:
return None
# Special handling for mdraid external metadata sets (mdraid BIOSRAID):
# 1) The containers are intermediate devices which will never be
# in exclusiveDisks
# 2) Sets get added to exclusive disks with their dmraid set name by
# the filter ui. Note that making the ui use md names instead is not
# possible as the md names are simpy md# and we cannot predict the #
if udev.device_is_md(info) and \
udev.device_get_md_level(info) == "container":
return False
if udev.device_get_md_container(info) and \
udev.device_is_md(info) and \
udev.device_get_md_name(info):
md_name = udev.device_get_md_name(info)
# mdadm may have appended _<digit>+ if the current hostname
# does not match the one in the array metadata
alt_name = re.sub(r"_\d+$", "", md_name)
raw_pattern = "isw_[a-z]*_%s"
# XXX FIXME: This is completely insane.
for i in range(0, len(self.exclusiveDisks)):
if re.match(raw_pattern % md_name, self.exclusiveDisks[i]) or \
re.match(raw_pattern % alt_name, self.exclusiveDisks[i]):
self.exclusiveDisks[i] = name
return False
# never ignore mapped disk images. if you don't want to use them,
# don't specify them in the first place
if udev.device_is_dm_anaconda(info) or udev.device_is_dm_livecd(info):
return False
# Ignore loop and ram devices, we normally already skip these in
# udev.py: enumerate_block_devices(), but we can still end up trying
# to add them to the tree when they are slaves of other devices, this
# happens for example with the livecd
if name.startswith("ram"):
return True
if name.startswith("loop"):
# ignore loop devices unless they're backed by a file
return (not blockdev.loop.get_backing_file(name))
# FIXME: check for virtual devices whose slaves are on the ignore list
def _isIgnoredDisk(self, disk):
return self.devicetree._isIgnoredDisk(disk)
def udevDeviceIsDisk(self, info):
""" Return True if the udev device looks like a disk.
:param info: udevdb device entry
:type info: dict
:returns: whether the device is a disk
:rtype: bool
We want exclusiveDisks to operate on anything that could be
considered a directly usable disk, ie: fwraid array, mpath, or disk.
Unfortunately, since so many things are represented as disks by
udev/sysfs, we have to define what is a disk in terms of what is
not a disk.
"""
return (udev.device_is_disk(info) and
not (udev.device_is_cdrom(info) or
udev.device_is_partition(info) or
udev.device_is_dm_partition(info) or
udev.device_is_dm_lvm(info) or
udev.device_is_dm_crypt(info) or
(udev.device_is_md(info) and
not udev.device_get_md_container(info))))
def _addSlaveDevices(self, info):
""" Add all slaves of a device, raising DeviceTreeError on failure.
:param :class:`pyudev.Device` info: the device's udev info
:raises: :class:`~.errors.DeviceTreeError if no slaves are found or
if we fail to add any slave
:returns: a list of slave devices
:rtype: list of :class:`~.StorageDevice`
"""
name = udev.device_get_name(info)
sysfs_path = udev.device_get_sysfs_path(info)
slave_dir = os.path.normpath("%s/slaves" % sysfs_path)
slave_names = os.listdir(slave_dir)
slave_devices = []
if not slave_names:
log.error("no slaves found for %s", name)
raise DeviceTreeError("no slaves found for device %s" % name)
for slave_name in slave_names:
path = os.path.normpath("%s/%s" % (slave_dir, slave_name))
slave_info = udev.get_device(os.path.realpath(path))
# cciss in sysfs is "cciss!cXdYpZ" but we need "cciss/cXdYpZ"
slave_name = udev.device_get_name(slave_info).replace("!", "/")
if not slave_info:
log.warning("unable to get udev info for %s", slave_name)
slave_dev = self.getDeviceByName(slave_name)
if not slave_dev and slave_info:
# we haven't scanned the slave yet, so do it now
self.addUdevDevice(slave_info)
slave_dev = self.getDeviceByName(slave_name)
if slave_dev is None:
if udev.device_is_dm_lvm(info):
if slave_name not in self.devicetree.lvInfo:
# we do not expect hidden lvs to be in the tree
continue
# if the current slave is still not in
# the tree, something has gone wrong
log.error("failure scanning device %s: could not add slave %s", name, slave_name)
msg = "failed to add slave %s of device %s" % (slave_name,
name)
raise DeviceTreeError(msg)
slave_devices.append(slave_dev)
return slave_devices
def addUdevLVDevice(self, info):
    """ Look up (and, if necessary, scan in) an LVM logical volume.

        Scans the LV's slave (PV) devices so the containing VG gets
        added to the tree, then returns the LV device by name.

        :param :class:`pyudev.Device` info: the device's udev info
        :returns: the LV device, or None if it could not be found
    """
    lv_name = udev.device_get_name(info)
    log_method_call(self, name=lv_name)

    vg_name = udev.device_get_lv_vg_name(info)
    maybe_vg = self.getDeviceByName(vg_name, hidden=True)
    if maybe_vg and not isinstance(maybe_vg, LVMVolumeGroupDevice):
        log.warning("found non-vg device with name %s", vg_name)
        maybe_vg = None

    # pull in the PVs backing this LV; that should create the VG
    self._addSlaveDevices(info)

    # LVM provides no means to resolve conflicts caused by duplicated VG
    # names, so we're just being optimistic here. Woo!
    vg_name = udev.device_get_lv_vg_name(info)
    if not self.getDeviceByName(vg_name):
        log.error("failed to find vg '%s' after scanning pvs", vg_name)

    return self.getDeviceByName(lv_name)
def addUdevDMDevice(self, info):
    """ Resolve a device-mapper device, fixing up LUKS map names if needed.

        :param :class:`pyudev.Device` info: the device's udev info
        :returns: the resolved DM device, or None if it cannot be placed
                  in the tree
    """
    name = udev.device_get_name(info)
    log_method_call(self, name=name)
    sysfs_path = udev.device_get_sysfs_path(info)
    slave_devices = self._addSlaveDevices(info)
    device = self.getDeviceByName(name)

    # if this is a luks device whose map name is not what we expect,
    # fix up the map name and see if that sorts us out
    handle_luks = (udev.device_is_dm_luks(info) and
                   (self._cleanup or not flags.installer_mode))
    if device is None and handle_luks and slave_devices:
        slave_dev = slave_devices[0]
        slave_dev.format.mapName = name
        slave_info = udev.get_device(slave_dev.sysfsPath)
        self.handleUdevLUKSFormat(slave_info, slave_dev)

        # try once more to get the device
        device = self.getDeviceByName(name)

    # create a device for the livecd OS image(s)
    if device is None and udev.device_is_dm_livecd(info):
        device = DMDevice(name, dmUuid=info.get('DM_UUID'),
                          sysfsPath=sysfs_path, exists=True,
                          parents=[slave_devices[0]])
        # live image backing devices must not be touched by the installer
        device.protected = True
        device.controllable = False
        self.devicetree._addDevice(device)

    # if we get here, we found all of the slave devices and
    # something must be wrong -- if all of the slaves are in
    # the tree, this device should be as well
    if device is None:
        # tell lvm to ignore this node so it is not scanned again later
        lvm.lvm_cc_addFilterRejectRegexp(name)
        log.warning("ignoring dm device %s", name)

    return device
def addUdevMultiPathDevice(self, info):
    """ Create and add a MultipathDevice built from its slave devices.

        :param :class:`pyudev.Device` info: the device's udev info
        :raises: :class:`~.errors.DeviceTreeError` if the device carries
                 no usable DM_UUID
        :returns: the new MultipathDevice, or None if no slaves were found
    """
    name = udev.device_get_name(info)
    log_method_call(self, name=name)
    slave_devices = self._addSlaveDevices(info)
    device = None
    if slave_devices:
        try:
            # DM_UUID is of the form "mpath-<serial>"; keep the part
            # after the first dash as the serial number
            serial = info["DM_UUID"].split("-", 1)[1]
        except (KeyError, IndexError, AttributeError):
            # FIX: KeyError (DM_UUID absent entirely) previously escaped
            # this handler and crashed instead of raising DeviceTreeError
            log.error("multipath device %s has no DM_UUID", name)
            raise DeviceTreeError("multipath %s has no DM_UUID" % name)

        device = MultipathDevice(name, parents=slave_devices,
                                 sysfsPath=udev.device_get_sysfs_path(info),
                                 serial=serial)
        self.devicetree._addDevice(device)

    return device
def addUdevMDDevice(self, info):
    """ Resolve an md array device, looking it up by name and then uuid.

        :param :class:`pyudev.Device` info: the device's udev info
        :returns: the md array device, or None if it could not be scanned
    """
    name = udev.device_get_md_name(info)
    log_method_call(self, name=name)
    self._addSlaveDevices(info)

    # try to get the device again now that we've got all the slaves
    device = self.getDeviceByName(name, incomplete=flags.allow_imperfect_devices)

    if device is None:
        # fall back to a lookup by array uuid
        try:
            uuid = udev.device_get_md_uuid(info)
        except KeyError:
            log.warning("failed to obtain uuid for mdraid device")
        else:
            device = self.getDeviceByUuid(uuid, incomplete=flags.allow_imperfect_devices)

    if device:
        # update the device instance with the real name in case we had to
        # look it up by something other than name
        device.name = name
    else:
        # if we get here, we found all of the slave devices and
        # something must be wrong -- if all of the slaves are in
        # the tree, this device should be as well
        if name is None:
            name = udev.device_get_name(info)
            path = "/dev/" + name
        else:
            path = "/dev/md/" + name

        log.error("failed to scan md array %s", name)
        try:
            # stop the array so the broken device does not linger active
            blockdev.md.deactivate(path)
        except blockdev.MDRaidError:
            log.error("failed to stop broken md array %s", name)

    return device
def addUdevPartitionDevice(self, info, disk=None):
    """ Create and add a PartitionDevice, resolving its disk if needed.

        :param :class:`pyudev.Device` info: the partition's udev info
        :keyword disk: the partition's disk device, if already known
        :raises: CorruptGPTError or DiskLabelScanError when the disk
                 should be partitioned but carries no usable disklabel
        :returns: the new PartitionDevice, or None if it was skipped
    """
    name = udev.device_get_name(info)
    log_method_call(self, name=name)
    sysfs_path = udev.device_get_sysfs_path(info)

    # md partitions show up under their node name; translate to md name
    if name.startswith("md"):
        name = blockdev.md.name_from_node(name)
        device = self.getDeviceByName(name)
        if device:
            return device

    if disk is None:
        # derive the disk's name from the partition's sysfs parent dir
        disk_name = os.path.basename(os.path.dirname(sysfs_path))
        disk_name = disk_name.replace('!','/')
        if disk_name.startswith("md"):
            disk_name = blockdev.md.name_from_node(disk_name)

        disk = self.getDeviceByName(disk_name)

        if disk is None:
            # create a device instance for the disk
            new_info = udev.get_device(os.path.dirname(sysfs_path))
            if new_info:
                self.addUdevDevice(new_info)
                disk = self.getDeviceByName(disk_name)

            if disk is None:
                # if the current device is still not in
                # the tree, something has gone wrong
                log.error("failure scanning device %s", disk_name)
                lvm.lvm_cc_addFilterRejectRegexp(name)
                return

    if not disk.partitioned:
        # Ignore partitions on:
        #  - devices we do not support partitioning of, like logical volumes
        #  - devices that do not have a usable disklabel
        #  - devices that contain disklabels made by isohybrid
        #
        if disk.partitionable and \
           disk.format.type != "iso9660" and \
           not disk.format.hidden and \
           not self._isIgnoredDisk(disk):
            if info.get("ID_PART_TABLE_TYPE") == "gpt":
                msg = "corrupt gpt disklabel on disk %s" % disk.name
                cls = CorruptGPTError
            else:
                msg = "failed to scan disk %s" % disk.name
                cls = DiskLabelScanError

            raise cls(msg)

        # there's no need to filter partitions on members of multipaths or
        # fwraid members from lvm since multipath and dmraid are already
        # active and lvm should therefore know to ignore them
        if not disk.format.hidden:
            lvm.lvm_cc_addFilterRejectRegexp(name)

        log.debug("ignoring partition %s on %s", name, disk.format.type)
        return

    device = None
    try:
        device = PartitionDevice(name, sysfsPath=sysfs_path,
                                 major=udev.device_get_major(info),
                                 minor=udev.device_get_minor(info),
                                 exists=True, parents=[disk])
    except DeviceError as e:
        # corner case sometime the kernel accepts a partition table
        # which gets rejected by parted, in this case we will
        # prompt to re-initialize the disk, so simply skip the
        # faulty partitions.
        # XXX not sure about this
        log.error("Failed to instantiate PartitionDevice: %s", e)
        return

    self.devicetree._addDevice(device)
    return device
def addUdevDiskDevice(self, info):
    """ Create and add the appropriate disk device for the udev info.

        Dispatches on transport/type (iscsi, fcoe, md bios-raid array,
        dasd, zfcp) and falls back to a plain DiskDevice.

        :param :class:`pyudev.Device` info: the device's udev info
        :returns: the new disk device, or None on failure
    """
    name = udev.device_get_name(info)
    log_method_call(self, name=name)
    sysfs_path = udev.device_get_sysfs_path(info)
    serial = udev.device_get_serial(info)
    bus = udev.device_get_bus(info)

    vendor = util.get_sysfs_attr(sysfs_path, "device/vendor")
    model = util.get_sysfs_attr(sysfs_path, "device/model")

    kwargs = { "serial": serial, "vendor": vendor, "model": model, "bus": bus }
    if udev.device_is_iscsi(info) and not self._cleanup:
        diskType = iScsiDiskDevice
        initiator = udev.device_get_iscsi_initiator(info)
        target = udev.device_get_iscsi_name(info)
        address = udev.device_get_iscsi_address(info)
        port = udev.device_get_iscsi_port(info)
        nic = udev.device_get_iscsi_nic(info)
        kwargs["initiator"] = initiator
        if initiator == self.iscsi.initiator:
            # software iscsi: we own the session, so we know the node
            node = self.iscsi.getNode(target, address, port, nic)
            kwargs["node"] = node
            kwargs["ibft"] = node in self.iscsi.ibftNodes
            kwargs["nic"] = self.iscsi.ifaces.get(node.iface, node.iface)
            log.info("%s is an iscsi disk", name)
        else:
            # qla4xxx partial offload
            kwargs["node"] = None
            kwargs["ibft"] = False
            kwargs["nic"] = "offload:not_accessible_via_iscsiadm"
            kwargs["fw_address"] = address
            kwargs["fw_port"] = port
            kwargs["fw_name"] = name
    elif udev.device_is_fcoe(info):
        diskType = FcoeDiskDevice
        kwargs["nic"] = udev.device_get_fcoe_nic(info)
        kwargs["identifier"] = udev.device_get_fcoe_identifier(info)
        log.info("%s is an fcoe disk", name)
    elif udev.device_get_md_container(info):
        # member of a bios raid container; scan the container first
        name = udev.device_get_md_name(info)
        diskType = MDBiosRaidArrayDevice
        parentPath = udev.device_get_md_container(info)
        parentName = devicePathToName(parentPath)
        container = self.getDeviceByName(parentName)
        if not container:
            parentSysName = blockdev.md.node_from_name(parentName)
            container_sysfs = "/sys/class/block/" + parentSysName
            container_info = udev.get_device(container_sysfs)
            if not container_info:
                log.error("failed to find md container %s at %s",
                          parentName, container_sysfs)
                return

            self.addUdevDevice(container_info)
            container = self.getDeviceByName(parentName)
            if not container:
                log.error("failed to scan md container %s", parentName)
                return

        kwargs["parents"] = [container]
        kwargs["level"] = udev.device_get_md_level(info)
        kwargs["memberDevices"] = udev.device_get_md_devices(info)
        kwargs["uuid"] = udev.device_get_md_uuid(info)
        kwargs["exists"] = True
        # these generic disk attributes do not apply to an md array
        del kwargs["model"]
        del kwargs["serial"]
        del kwargs["vendor"]
        del kwargs["bus"]
    elif udev.device_is_dasd(info) and not self._cleanup:
        diskType = DASDDevice
        kwargs["busid"] = udev.device_get_dasd_bus_id(info)
        kwargs["opts"] = {}
        for attr in ['readonly', 'use_diag', 'erplog', 'failfast']:
            kwargs["opts"][attr] = udev.device_get_dasd_flag(info, attr)

        log.info("%s is a dasd device", name)
    elif udev.device_is_zfcp(info):
        diskType = ZFCPDiskDevice
        for attr in ['hba_id', 'wwpn', 'fcp_lun']:
            kwargs[attr] = udev.device_get_zfcp_attribute(info, attr=attr)

        log.info("%s is a zfcp device", name)
    else:
        diskType = DiskDevice
        log.info("%s is a disk", name)

    device = diskType(name,
                      major=udev.device_get_major(info),
                      minor=udev.device_get_minor(info),
                      sysfsPath=sysfs_path, **kwargs)

    if diskType == DASDDevice:
        # keep a separate list of dasds for later configuration
        self.dasd.append(device)

    self.devicetree._addDevice(device)
    return device
def addUdevOpticalDevice(self, info):
    """ Create and add an optical (cdrom/dvd) device to the tree.

        :param :class:`pyudev.Device` info: the device's udev info
        :returns: the new OpticalDevice
    """
    log_method_call(self)
    # XXX should this be RemovableDevice instead?
    #
    # Looks like if it has ID_INSTANCE=0:1 we can ignore it.
    optical = OpticalDevice(
        udev.device_get_name(info),
        major=udev.device_get_major(info),
        minor=udev.device_get_minor(info),
        sysfsPath=udev.device_get_sysfs_path(info),
        vendor=udev.device_get_vendor(info),
        model=udev.device_get_model(info),
    )
    self.devicetree._addDevice(optical)
    return optical
def addUdevLoopDevice(self, info):
    """ Create and add a LoopDevice together with its backing FileDevice.

        :param :class:`pyudev.Device` info: the device's udev info
        :returns: the new LoopDevice
    """
    name = udev.device_get_name(info)
    log_method_call(self, name=name)
    sysfs_path = udev.device_get_sysfs_path(info)
    sys_file = "%s/loop/backing_file" % sysfs_path
    # FIX: use a context manager so the sysfs file handle is not leaked
    with open(sys_file) as fh:
        backing_file = fh.read().strip()

    file_device = self.getDeviceByName(backing_file)
    if not file_device:
        file_device = FileDevice(backing_file, exists=True)
        self.devicetree._addDevice(file_device)

    device = LoopDevice(name,
                        parents=[file_device],
                        sysfsPath=sysfs_path,
                        exists=True)
    if not self._cleanup or file_device not in self.diskImages.values():
        # don't allow manipulation of loop devices other than those
        # associated with disk images, and then only during cleanup
        file_device.controllable = False
        device.controllable = False
    self.devicetree._addDevice(device)
    return device
def addUdevDevice(self, info, updateOrigFmt=False):
    """
        :param :class:`pyudev.Device` info: udev info for the device
        :keyword bool updateOrigFmt: update original format unconditionally

        If a device is added to the tree based on info its original format
        will be saved after the format has been detected.

        If the device that corresponds to info is already in the tree, its
        original format will not be updated unless updateOrigFmt is True.
    """
    name = udev.device_get_name(info)
    log_method_call(self, name=name, info=pprint.pformat(dict(info)))
    uuid = udev.device_get_uuid(info)
    sysfs_path = udev.device_get_sysfs_path(info)

    # make sure this device was not scheduled for removal and also has not
    # been hidden
    removed = [a.device for a in self.devicetree.actions.find(
        action_type="destroy",
        object_type="device")]
    for ignored in removed + self.devicetree._hidden:
        if (sysfs_path and ignored.sysfsPath == sysfs_path) or \
           (uuid and uuid in (ignored.uuid, ignored.format.uuid)):
            if ignored in removed:
                reason = "removed"
            else:
                reason = "hidden"

            log.debug("skipping %s device %s", reason, name)
            return

    # make sure we note the name of every device we see
    if name not in self.names:
        self.names.append(name)

    if self.isIgnored(info):
        log.info("ignoring %s (%s)", name, sysfs_path)
        if name not in self.ignoredDisks:
            self.addIgnoredDisk(name)

        return

    log.info("scanning %s (%s)...", name, sysfs_path)
    device = self.getDeviceByName(name)
    if device is None and udev.device_is_md(info):

        # If the md name is None, then some udev info is missing. Likely,
        # this is because the array is degraded, and mdadm has deactivated
        # it. Try to activate it and re-get the udev info.
        if flags.allow_imperfect_devices and udev.device_get_md_name(info) is None:
            devname = udev.device_get_devname(info)
            if devname:
                try:
                    blockdev.md.run(devname)
                except blockdev.MDRaidError as e:
                    log.warning("Failed to start possibly degraded md array: %s", e)
                else:
                    udev.settle()
                    info = udev.get_device(sysfs_path)
            else:
                log.warning("Failed to get devname for possibly degraded md array.")

        md_name = udev.device_get_md_name(info)
        if md_name is None:
            log.warning("No name for possibly degraded md array.")
        else:
            device = self.getDeviceByName(md_name, incomplete=flags.allow_imperfect_devices)

        if device and not isinstance(device, MDRaidArrayDevice):
            log.warning("Found device %s, but it turns out not be an md array device after all.", device.name)
            device = None

    if device and device.isDisk and \
       blockdev.mpath.is_mpath_member(device.path):
        # newly added device (eg iSCSI) could make this one a multipath member
        if device.format and device.format.type != "multipath_member":
            log.debug("%s newly detected as multipath member, dropping old format and removing kids", device.name)
            # remove children from tree so that we don't stumble upon them later
            for child in self.devicetree.getChildren(device):
                self.devicetree.recursiveRemove(child, actions=False)

            device.format = None

    #
    # The first step is to either look up or create the device
    #
    device_added = True
    if device:
        device_added = False
    elif udev.device_is_loop(info):
        log.info("%s is a loop device", name)
        device = self.addUdevLoopDevice(info)
    elif udev.device_is_dm_mpath(info) and \
         not udev.device_is_dm_partition(info):
        log.info("%s is a multipath device", name)
        device = self.addUdevMultiPathDevice(info)
    elif udev.device_is_dm_lvm(info):
        log.info("%s is an lvm logical volume", name)
        device = self.addUdevLVDevice(info)
    elif udev.device_is_dm(info):
        log.info("%s is a device-mapper device", name)
        device = self.addUdevDMDevice(info)
    elif udev.device_is_md(info) and not udev.device_get_md_container(info):
        log.info("%s is an md device", name)
        device = self.addUdevMDDevice(info)
    elif udev.device_is_cdrom(info):
        log.info("%s is a cdrom", name)
        device = self.addUdevOpticalDevice(info)
    elif udev.device_is_disk(info):
        device = self.addUdevDiskDevice(info)
    elif udev.device_is_partition(info):
        log.info("%s is a partition", name)
        device = self.addUdevPartitionDevice(info)
    else:
        log.error("Unknown block device type for: %s", name)
        return

    if not device:
        log.debug("no device obtained for %s", name)
        return

    # If this device is read-only, mark it as such now.
    if self.udevDeviceIsDisk(info) and \
       util.get_sysfs_attr(udev.device_get_sysfs_path(info), 'ro') == '1':
        device.readonly = True

    # If this device is protected, mark it as such now. Once the tree
    # has been populated, devices' protected attribute is how we will
    # identify protected devices.
    if device.name in self.protectedDevNames:
        device.protected = True
        # if this is the live backing device we want to mark its parents
        # as protected also
        if device.name == self.liveBackingDevice:
            for parent in device.parents:
                parent.protected = True

    # If we just added a multipath or fwraid disk that is in exclusiveDisks
    # we have to make sure all of its members are in the list too.
    mdclasses = (DMRaidArrayDevice, MDRaidArrayDevice, MultipathDevice)
    if device.isDisk and isinstance(device, mdclasses):
        if device.name in self.exclusiveDisks:
            for parent in device.parents:
                if parent.name not in self.exclusiveDisks:
                    self.exclusiveDisks.append(parent.name)

    log.info("got device: %r", device)

    # now handle the device's formatting
    self.handleUdevDeviceFormat(info, device)
    if device_added or updateOrigFmt:
        device.originalFormat = copy.deepcopy(device.format)

    device.deviceLinks = udev.device_get_symlinks(info)
def handleUdevDiskLabelFormat(self, info, device):
    """ Detect and attach a disklabel format to the device, if present.

        :param :class:`pyudev.Device` info: the device's udev info
        :param device: the :class:`~.StorageDevice` being scanned
    """
    disklabel_type = udev.device_get_disklabel_type(info)
    log_method_call(self, device=device.name, label_type=disklabel_type)
    # if there is no disklabel on the device
    # blkid doesn't understand dasd disklabels, so bypass for dasd
    if disklabel_type is None and not \
       (device.isDisk and udev.device_is_dasd(info)):
        log.debug("device %s does not contain a disklabel", device.name)
        return

    if device.partitioned:
        # this device is already set up
        log.debug("disklabel format on %s already set up", device.name)
        return

    try:
        device.setup()
    except Exception: # pylint: disable=broad-except
        log_exception_info(log.warning, "setup of %s failed, aborting disklabel handler", [device.name])
        return

    # special handling for unsupported partitioned devices
    if not device.partitionable:
        try:
            fmt = formats.getFormat("disklabel",
                                    device=device.path,
                                    labelType=disklabel_type,
                                    exists=True)
        except InvalidDiskLabelError:
            log.warning("disklabel detected but not usable on %s",
                        device.name)
        else:
            device.format = fmt
        return

    try:
        fmt = formats.getFormat("disklabel",
                                device=device.path,
                                exists=True)
    except InvalidDiskLabelError as e:
        log.info("no usable disklabel on %s", device.name)
        if disklabel_type == "gpt":
            # flag the disk with a placeholder format so the UI can offer
            # to re-initialize it
            log.debug(e)
            device.format = formats.getFormat(_("Invalid Disk Label"))
    else:
        device.format = fmt
def handleUdevLUKSFormat(self, info, device):
    """ Set up the mapped (decrypted) device for a LUKS-formatted device.

        :param info: udev info (unused; kept for handler signature parity)
        :param device: the LUKS-formatted :class:`~.StorageDevice`
    """
    # pylint: disable=unused-argument
    log_method_call(self, name=device.name, type=device.format.type)
    if not device.format.uuid:
        log.info("luks device %s has no uuid", device.path)
        return

    # look up or create the mapped device
    if not self.getDeviceByName(device.format.mapName):
        passphrase = self.__luksDevs.get(device.format.uuid)
        if device.format.configured:
            pass
        elif passphrase:
            device.format.passphrase = passphrase
        elif device.format.uuid in self.__luksDevs:
            log.info("skipping previously-skipped luks device %s",
                     device.name)
        elif self._cleanup or flags.testing:
            # if we're only building the devicetree so that we can
            # tear down all of the devices we don't need a passphrase
            if device.format.status:
                # this makes device.configured return True
                device.format.passphrase = 'yabbadabbadoo'
        else:
            # Try each known passphrase. Include luksDevs values in case a
            # passphrase has been set for a specific device without a full
            # reset/populate, in which case the new passphrase would not be
            # in self.__passphrases.
            for passphrase in self.__passphrases + list(self.__luksDevs.values()):
                device.format.passphrase = passphrase
                try:
                    device.format.setup()
                except blockdev.BlockDevError:
                    # wrong passphrase; clear it and try the next one
                    device.format.passphrase = None
                else:
                    break

        luks_device = LUKSDevice(device.format.mapName,
                                 parents=[device],
                                 exists=True)
        try:
            luks_device.setup()
        except (LUKSError, blockdev.CryptoError, DeviceError) as e:
            log.info("setup of %s failed: %s", device.format.mapName, e)
            device.removeChild()
        else:
            luks_device.updateSysfsPath()
            self.devicetree._addDevice(luks_device)
            luks_info = udev.get_device(luks_device.sysfsPath)
            if not luks_info:
                log.error("failed to get udev data for %s", luks_device.name)
                return

            # scan the new mapped device so its contents get handled too
            self.addUdevDevice(luks_info, updateOrigFmt=True)
    else:
        log.warning("luks device %s already in the tree",
                    device.format.mapName)
def handleVgLvs(self, vg_device):
    """ Handle setup of the LV's in the vg_device.

        :param vg_device: the :class:`~.LVMVolumeGroupDevice` whose LVs
                          should be instantiated and added to the tree
        :returns: None
    """
    vg_name = vg_device.name
    # restrict the cached lv info to LVs belonging to this VG
    lv_info = dict((k, v) for (k, v) in iter(self.devicetree.lvInfo.items())
                   if v.vg_name == vg_name)

    self.names.extend(n for n in lv_info.keys() if n not in self.names)

    if not vg_device.complete:
        log.warning("Skipping LVs for incomplete VG %s", vg_name)
        return

    if not lv_info:
        log.debug("no LVs listed for VG %s", vg_name)
        return

    all_lvs = []        # every LV device added during this call
    internal_lvs = []   # names of internal LVs, handled after regular LVs

    def addRequiredLV(name, msg):
        """ Add a prerequisite/parent LV.

            The parent is strictly required in order to be able to add
            some other LV that depends on it. For this reason, failure to
            add the specified LV results in a DeviceTreeError with the
            message string specified in the msg parameter.

            :param str name: the full name of the LV (including vgname)
            :param str msg: message to pass DeviceTreeError ctor on error
            :returns: None
            :raises: :class:`~.errors.DeviceTreeError` on failure
        """
        vol = self.getDeviceByName(name)
        if vol is None:
            new_lv = addLV(lv_info[name])
            if new_lv:
                all_lvs.append(new_lv)
            vol = self.getDeviceByName(name)

            if vol is None:
                log.error("%s: %s", msg, name)
                raise DeviceTreeError(msg)

    def addLV(lv):
        """ Instantiate and add an LV based on data from the VG. """
        lv_name = lv.lv_name
        lv_uuid = lv.uuid
        lv_attr = lv.attr
        lv_size = Size(lv.size)
        lv_type = lv.segtype

        lv_class = LVMLogicalVolumeDevice
        lv_parents = [vg_device]
        lv_kwargs = {}
        name = "%s-%s" % (vg_name, lv_name)

        if self.getDeviceByName(name):
            # some lvs may have been added on demand below
            log.debug("already added %s", name)
            return

        # dispatch on the first lv_attr character (lvs attribute string)
        if lv_attr[0] in 'Ss':
            log.info("found lvm snapshot volume '%s'", name)
            origin_name = blockdev.lvm.lvorigin(vg_name, lv_name)
            if not origin_name:
                log.error("lvm snapshot '%s-%s' has unknown origin",
                          vg_name, lv_name)
                return

            if origin_name.endswith("_vorigin]"):
                lv_kwargs["vorigin"] = True
                origin = None
            else:
                origin_device_name = "%s-%s" % (vg_name, origin_name)
                addRequiredLV(origin_device_name,
                              "failed to locate origin lv")
                origin = self.getDeviceByName(origin_device_name)

            lv_kwargs["origin"] = origin
            lv_class = LVMSnapShotDevice
        elif lv_attr[0] == 'v':
            # skip vorigins
            return
        elif lv_attr[0] in 'IielTCo' and lv_name.endswith(']'):
            # an internal LV, add the an instance of the appropriate class
            # to internal_lvs for later processing when non-internal LVs are
            # processed
            internal_lvs.append(lv_name)
            return
        elif lv_attr[0] == 't':
            # thin pool
            lv_class = LVMThinPoolDevice
        elif lv_attr[0] == 'V':
            # thin volume
            pool_name = blockdev.lvm.thlvpoolname(vg_name, lv_name)
            pool_device_name = "%s-%s" % (vg_name, pool_name)
            addRequiredLV(pool_device_name, "failed to look up thin pool")

            origin_name = blockdev.lvm.lvorigin(vg_name, lv_name)
            if origin_name:
                origin_device_name = "%s-%s" % (vg_name, origin_name)
                addRequiredLV(origin_device_name, "failed to locate origin lv")
                origin = self.getDeviceByName(origin_device_name)
                lv_kwargs["origin"] = origin
                lv_class = LVMThinSnapShotDevice
            else:
                lv_class = LVMThinLogicalVolumeDevice

            lv_parents = [self.getDeviceByName(pool_device_name)]
        elif lv_name.endswith(']'):
            # unrecognized Internal LVM2 device
            return
        elif lv_attr[0] not in '-mMrRoOC':
            # Ignore anything else except for the following:
            #   - normal lv
            #   m mirrored
            #   M mirrored without initial sync
            #   r raid
            #   R raid without initial sync
            #   o origin
            #   O origin with merging snapshot
            #   C cached LV
            return

        lv_dev = self.getDeviceByUuid(lv_uuid)
        if lv_dev is None:
            lv_device = lv_class(lv_name, parents=lv_parents,
                                 uuid=lv_uuid, size=lv_size, segType=lv_type,
                                 exists=True, **lv_kwargs)
            self.devicetree._addDevice(lv_device)
            if flags.installer_mode:
                lv_device.setup()

            if lv_device.status:
                lv_device.updateSysfsPath()
                lv_device.updateSize()
                # NOTE: this assignment makes lv_info local to addLV,
                # shadowing the enclosing lv_info dict within this function
                lv_info = udev.get_device(lv_device.sysfsPath)
                if not lv_info:
                    log.error("failed to get udev data for lv %s", lv_device.name)
                    return lv_device

                # do format handling now
                self.addUdevDevice(lv_info, updateOrigFmt=True)

            return lv_device

        return None

    def createInternalLV(lv):
        """ Instantiate an internal LV (not added to the tree here).

            :raises: :class:`~.errors.DeviceTreeError` if no internal LV
                     class matches the LV's attr string
        """
        lv_name = lv.lv_name
        lv_uuid = lv.uuid
        lv_attr = lv.attr
        lv_size = Size(lv.size)
        lv_type = lv.segtype

        matching_cls = get_internal_lv_class(lv_attr)
        if matching_cls is None:
            raise DeviceTreeError("No internal LV class supported for type '%s'" % lv_attr[0])

        # strip the "[]"s marking the LV as internal
        lv_name = lv_name.strip("[]")

        # don't know the parent LV yet, will be set later
        new_lv = matching_cls(lv_name, vg_device, parent_lv=None, size=lv_size, uuid=lv_uuid, exists=True, segType=lv_type)

        if new_lv.status:
            new_lv.updateSysfsPath()
            new_lv.updateSize()

            lv_info = udev.get_device(new_lv.sysfsPath)
            if not lv_info:
                log.error("failed to get udev data for lv %s", new_lv.name)
                return new_lv

        return new_lv

    # add LVs
    for lv in lv_info.values():
        # add the LV to the DeviceTree
        new_lv = addLV(lv)

        if new_lv:
            # save the reference for later use
            all_lvs.append(new_lv)

    # Instead of doing a topological sort on internal LVs to make sure the
    # parent LV is always created before its internal LVs (an internal LV
    # can have internal LVs), we just create all the instances here and
    # assign their parents later. Those who are not assinged a parent (which
    # would hold a reference to them) will get eaten by the garbage
    # collector.

    # create device instances for the internal LVs
    orphan_lvs = dict()
    for lv_name in internal_lvs:
        full_name = "%s-%s" % (vg_name, lv_name)
        try:
            new_lv = createInternalLV(lv_info[full_name])
        except DeviceTreeError as e:
            log.warning("Failed to process an internal LV '%s': %s", full_name, e)
        else:
            orphan_lvs[full_name] = new_lv
            all_lvs.append(new_lv)

    # assign parents to internal LVs (and vice versa, see
    # :class:`~.devices.lvm.LVMInternalLogicalVolumeDevice`)
    for lv in orphan_lvs.values():
        parent_lv = lvm.determine_parent_lv(vg_name, lv, all_lvs)
        if parent_lv:
            lv.parent_lv = parent_lv
        else:
            log.warning("Failed to determine parent LV for an internal LV '%s'", lv.name)
def handleUdevLVMPVFormat(self, info, device):
    """ Add this PV to its VG, creating the VG device if necessary.

        :param info: udev info (unused; kept for handler signature parity)
        :param device: the PV-formatted :class:`~.StorageDevice`
        :raises: DuplicateVGError when two distinct VGs share one name
    """
    # pylint: disable=unused-argument
    log_method_call(self, name=device.name, type=device.format.type)
    pv_info = self.devicetree.pvInfo.get(device.path, None)
    if pv_info:
        vg_name = pv_info.vg_name
        vg_uuid = pv_info.vg_uuid
    else:
        # no info about the PV -> we're done
        return

    if not vg_name:
        log.info("lvm pv %s has no vg", device.name)
        return

    vg_device = self.getDeviceByUuid(vg_uuid, incomplete=True)
    if vg_device:
        vg_device.parents.append(device)
    else:
        same_name = self.getDeviceByName(vg_name)
        # a same-named VG is only an error if both VGs live on disks we
        # are actually using
        if isinstance(same_name, LVMVolumeGroupDevice) and \
           not (all(self._isIgnoredDisk(d) for d in same_name.disks) or
                all(self._isIgnoredDisk(d) for d in device.disks)):
            raise DuplicateVGError("multiple LVM volume groups with the same name (%s)" % vg_name)

        try:
            vg_size = Size(pv_info.vg_size)
            vg_free = Size(pv_info.vg_free)
            pe_size = Size(pv_info.vg_extent_size)
            pe_count = pv_info.vg_extent_count
            pe_free = pv_info.vg_free_count
            pv_count = pv_info.vg_pv_count
        except (KeyError, ValueError) as e:
            log.warning("invalid data for %s: %s", device.name, e)
            return

        vg_device = LVMVolumeGroupDevice(vg_name,
                                         parents=[device],
                                         uuid=vg_uuid,
                                         size=vg_size,
                                         free=vg_free,
                                         peSize=pe_size,
                                         peCount=pe_count,
                                         peFree=pe_free,
                                         pvCount=pv_count,
                                         exists=True)
        self.devicetree._addDevice(vg_device)

    # scan the VG's logical volumes now that the VG is in the tree
    self.handleVgLvs(vg_device)
def handleUdevMDMemberFormat(self, info, device):
    """ Add this md member to its array, creating the array if necessary.

        :param info: udev info (unused; kept for handler signature parity)
        :param device: the md-member-formatted :class:`~.StorageDevice`
    """
    # pylint: disable=unused-argument
    log_method_call(self, name=device.name, type=device.format.type)
    md_info = blockdev.md.examine(device.path)

    # Use mdadm info if udev info is missing
    md_uuid = md_info.uuid
    device.format.mdUuid = device.format.mdUuid or md_uuid
    md_array = self.getDeviceByUuid(device.format.mdUuid, incomplete=True)

    if md_array:
        md_array.parents.append(device)
    else:
        # create the array with just this one member
        # level is reported as, eg: "raid1"
        md_level = md_info.level
        md_devices = md_info.num_devices

        if md_level is None:
            log.warning("invalid data for %s: no RAID level", device.name)
            return

        # md_examine yields metadata (MD_METADATA) only for metadata version > 0.90
        # if MD_METADATA is missing, assume metadata version is 0.90
        md_metadata = md_info.metadata or "0.90"
        md_name = None

        md_path = md_info.device or ""
        if md_path:
            md_name = devicePathToName(md_path)
            if re.match(r'md\d+$', md_name):
                # md0 -> 0
                md_name = md_name[2:]

            if md_name:
                array = self.getDeviceByName(md_name, incomplete=True)
                if array and array.uuid != md_uuid:
                    log.error("found multiple devices with the name %s", md_name)

        if md_name:
            log.info("using name %s for md array containing member %s",
                     md_name, device.name)
        else:
            log.error("failed to determine name for the md array %s", (md_uuid or "unknown"))
            return

        array_type = MDRaidArrayDevice
        try:
            # a container-level array formed from biosraid members gets
            # the container class
            if raid.getRaidLevel(md_level) is raid.Container and \
               getattr(device.format, "biosraid", False):
                array_type = MDContainerDevice
        except raid.RaidError as e:
            log.error("failed to create md array: %s", e)
            return

        try:
            md_array = array_type(
                md_name,
                level=md_level,
                memberDevices=md_devices,
                uuid=md_uuid,
                metadataVersion=md_metadata,
                exists=True
            )
        except (ValueError, DeviceError) as e:
            log.error("failed to create md array: %s", e)
            return

        md_array.updateSysfsPath()
        md_array.parents.append(device)
        self.devicetree._addDevice(md_array)
        if md_array.status:
            array_info = udev.get_device(md_array.sysfsPath)
            if not array_info:
                log.error("failed to get udev data for %s", md_array.name)
                return

            # scan the now-active array so its contents get handled too
            self.addUdevDevice(array_info, updateOrigFmt=True)
def handleUdevDMRaidMemberFormat(self, info, device):
    """ Add this dmraid member to its raid set(s), activating and
        creating the set device(s) if necessary.

        :param :class:`pyudev.Device` info: the member's udev info
        :param device: the dmraid-member-formatted :class:`~.StorageDevice`
    """
    # if dmraid usage is disabled skip any dmraid set activation
    if not flags.dmraid:
        return

    log_method_call(self, name=device.name, type=device.format.type)
    name = udev.device_get_name(info)
    uuid = udev.device_get_uuid(info)
    major = udev.device_get_major(info)
    minor = udev.device_get_minor(info)

    # Have we already created the DMRaidArrayDevice?
    rs_names = blockdev.dm.get_member_raid_sets(uuid, name, major, minor)
    if len(rs_names) == 0:
        log.warning("dmraid member %s does not appear to belong to any "
                    "array", device.name)
        return

    for rs_name in rs_names:
        dm_array = self.getDeviceByName(rs_name, incomplete=True)
        if dm_array is not None:
            # We add the new device.
            dm_array.parents.append(device)
        else:
            # Activate the Raid set.
            blockdev.dm.activate_raid_set(rs_name)
            dm_array = DMRaidArrayDevice(rs_name,
                                         parents=[device])

            self.devicetree._addDevice(dm_array)

            # Wait for udev to scan the just created nodes, to avoid a race
            # with the udev.get_device() call below.
            udev.settle()

            # Get the DMRaidArrayDevice a DiskLabel format *now*, in case
            # its partitions get scanned before it does.
            dm_array.updateSysfsPath()
            dm_array_info = udev.get_device(dm_array.sysfsPath)
            self.handleUdevDiskLabelFormat(dm_array_info, dm_array)

            # Use the rs's object on the device.
            # pyblock can return the memebers of a set and the
            # device has the attribute to hold it. But ATM we
            # are not really using it. Commenting this out until
            # we really need it.
            #device.format.raidmem = block.getMemFromRaidSet(dm_array,
            #        major=major, minor=minor, uuid=uuid, name=name)
def handleBTRFSFormat(self, info, device):
    """ Add this btrfs member to its volume, creating the volume device
        and its subvolume/snapshot devices if necessary.

        :param :class:`pyudev.Device` info: the member's udev info
        :param device: the btrfs-formatted :class:`~.StorageDevice`
        :raises: :class:`~.errors.DeviceTreeError` if a subvolume's
                 parent cannot be found
    """
    log_method_call(self, name=device.name)
    uuid = udev.device_get_uuid(info)

    # find an existing volume device with a matching uuid
    btrfs_dev = None
    for d in self.devicetree.devices:
        if isinstance(d, BTRFSVolumeDevice) and d.uuid == uuid:
            btrfs_dev = d
            break

    if btrfs_dev:
        log.info("found btrfs volume %s", btrfs_dev.name)
        btrfs_dev.parents.append(device)
    else:
        label = udev.device_get_label(info)
        log.info("creating btrfs volume btrfs.%s", label)
        btrfs_dev = BTRFSVolumeDevice(label, parents=[device], uuid=uuid,
                                      exists=True)
        self.devicetree._addDevice(btrfs_dev)

    # only enumerate subvolumes once per volume
    if not btrfs_dev.subvolumes:
        snapshots = btrfs_dev.listSubVolumes(snapshotsOnly=True)
        snapshot_ids = [s.id for s in snapshots]
        for subvol_dict in btrfs_dev.listSubVolumes():
            vol_id = subvol_dict.id
            vol_path = subvol_dict.path
            parent_id = subvol_dict.parent_id
            if vol_path in [sv.name for sv in btrfs_dev.subvolumes]:
                continue

            # look up the parent subvol
            parent = None
            subvols = [btrfs_dev] + btrfs_dev.subvolumes
            for sv in subvols:
                if sv.vol_id == parent_id:
                    parent = sv
                    break

            if parent is None:
                log.error("failed to find parent (%d) for subvol %s",
                          parent_id, vol_path)
                raise DeviceTreeError("could not find parent for subvol")

            fmt = formats.getFormat("btrfs",
                                    device=btrfs_dev.path,
                                    exists=True,
                                    volUUID=btrfs_dev.format.volUUID,
                                    subvolspec=vol_path,
                                    mountopts="subvol=%s" % vol_path)
            # snapshots get their own device class
            if vol_id in snapshot_ids:
                device_class = BTRFSSnapShotDevice
            else:
                device_class = BTRFSSubVolumeDevice

            subvol = device_class(vol_path,
                                  vol_id=vol_id,
                                  fmt=fmt,
                                  parents=[parent],
                                  exists=True)
            self.devicetree._addDevice(subvol)
def handleUdevDeviceFormat(self, info, device):
# Determine and instantiate the formatting on `device` from its udev
# data: probe name/uuid/label/type, special-case disklabels and
# multipath members, build the format object, then dispatch to the
# per-format handlers (LUKS, md member, dmraid member, LVM PV, btrfs).
log_method_call(self, name=getattr(device, "name", None))
if not info:
log.debug("no information for device %s", device.name)
return
if not device.mediaPresent:
log.debug("no media present for device %s", device.name)
return
name = udev.device_get_name(info)
uuid = udev.device_get_uuid(info)
label = udev.device_get_label(info)
format_type = udev.device_get_format(info)
serial = udev.device_get_serial(info)
# multipath membership overrides whatever udev reported as the format
is_multipath_member = blockdev.mpath.is_mpath_member(device.path)
if is_multipath_member:
format_type = "multipath_member"
# Now, if the device is a disk, see if there is a usable disklabel.
# If not, see if the user would like to create one.
# XXX ignore disklabels on multipath or biosraid member disks
if not udev.device_is_biosraid_member(info) and \
   not is_multipath_member and \
   format_type != "iso9660":
self.handleUdevDiskLabelFormat(info, device)
if device.partitioned or self.isIgnored(info) or \
   (not device.partitionable and
    device.format.type == "disklabel"):
# If the device has a disklabel, or the user chose not to
# create one, we are finished with this device. Otherwise
# it must have some non-disklabel formatting, in which case
# we fall through to handle that.
return
if (not device) or (not format_type) or device.format.type:
# this device has no formatting or it has already been set up
# FIXME: this probably needs something special for disklabels
log.debug("no type or existing type for %s, bailing", name)
return
# set up the common arguments for the format constructor
format_designator = format_type
kwargs = {"uuid": uuid,
          "label": label,
          "device": device.path,
          "serial": serial,
          "exists": True}
# set up type-specific arguments for the format constructor
if format_type == "crypto_LUKS":
# luks/dmcrypt
kwargs["name"] = "luks-%s" % uuid
elif format_type in formats.mdraid.MDRaidMember._udevTypes:
# mdraid
try:
# ID_FS_UUID contains the array UUID
kwargs["mdUuid"] = udev.device_get_uuid(info)
except KeyError:
log.warning("mdraid member %s has no md uuid", name)
# reset the uuid to the member-specific value
# this will be None for members of v0 metadata arrays
kwargs["uuid"] = udev.device_get_md_device_uuid(info)
kwargs["biosraid"] = udev.device_is_biosraid_member(info)
elif format_type == "LVM2_member":
# lvm
pv_info = self.devicetree.pvInfo.get(device.path, None)
if pv_info:
if pv_info.vg_name:
kwargs["vgName"] = pv_info.vg_name
else:
log.warning("PV %s has no vg_name", name)
if pv_info.vg_uuid:
kwargs["vgUuid"] = pv_info.vg_uuid
else:
log.warning("PV %s has no vg_uuid", name)
if pv_info.pe_start:
kwargs["peStart"] = Size(pv_info.pe_start)
else:
log.warning("PV %s has no pe_start", name)
elif format_type == "vfat":
# efi magic
if isinstance(device, PartitionDevice) and device.bootable:
efi = formats.getFormat("efi")
if efi.minSize <= device.size <= efi.maxSize:
format_designator = "efi"
elif format_type == "hfsplus":
if isinstance(device, PartitionDevice):
macefi = formats.getFormat("macefi")
if macefi.minSize <= device.size <= macefi.maxSize and \
   device.partedPartition.name == macefi.name:
format_designator = "macefi"
elif format_type == "hfs":
# apple bootstrap magic
if isinstance(device, PartitionDevice) and device.bootable:
apple = formats.getFormat("appleboot")
if apple.minSize <= device.size <= apple.maxSize:
format_designator = "appleboot"
elif format_type == "btrfs":
# the format's uuid attr will contain the UUID_SUB, while the
# overarching volume UUID will be stored as volUUID
kwargs["uuid"] = info["ID_FS_UUID_SUB"]
kwargs["volUUID"] = uuid
try:
log.info("type detected on '%s' is '%s'", name, format_designator)
device.format = formats.getFormat(format_designator, **kwargs)
if device.format.type:
log.info("got format: %s", device.format)
except FSError:
# an invalid/unsupported type falls back to a blank DeviceFormat
log.warning("type '%s' on '%s' invalid, assuming no format",
            format_designator, name)
device.format = formats.DeviceFormat()
return
#
# now do any special handling required for the device's format
#
if device.format.type == "luks":
self.handleUdevLUKSFormat(info, device)
elif device.format.type == "mdmember":
self.handleUdevMDMemberFormat(info, device)
elif device.format.type == "dmraidmember":
self.handleUdevDMRaidMemberFormat(info, device)
elif device.format.type == "lvmpv":
self.handleUdevLVMPVFormat(info, device)
elif device.format.type == "btrfs":
self.handleBTRFSFormat(info, device)
def updateDeviceFormat(self, device):
    """ Re-probe a device and refresh its format information.

        Pokes the kernel to re-read the device, waits for udev to
        settle, then re-runs udev format handling on the fresh data.
    """
    log.info("updating format of device: %s", device)
    try:
        util.notify_kernel(device.sysfsPath)
    except (ValueError, IOError) as exc:
        # A failed notification is non-fatal; log it and continue.
        log.warning("failed to notify kernel of change: %s", exc)

    udev.settle()
    udev_info = udev.get_device(device.sysfsPath)
    self.handleUdevDeviceFormat(udev_info, device)
def _handleInconsistencies(self):
    """ Resolve or hide system inconsistencies found after population. """
    incomplete_vgs = [dev for dev in self.devicetree.devices
                      if dev.type == "lvmvg" and not dev.complete]
    for vg in incomplete_vgs:
        # Make sure lvm doesn't get confused by PVs that belong to
        # incomplete VGs. We will remove the PVs from the blacklist when/if
        # the time comes to remove the incomplete VG and its PVs.
        for pv in vg.pvs:
            lvm.lvm_cc_addFilterRejectRegexp(pv.name)
def setupDiskImages(self):
""" Set up devices to represent the disk image files. """
# For each configured image file, build (or reuse) the stack
# FileDevice -> LoopDevice -> DMLinearDevice and add it to the tree.
for (name, path) in self.diskImages.items():
log.info("setting up disk image file '%s' as '%s'", path, name)
dmdev = self.getDeviceByName(name)
# reuse an existing dm device if it is already backed by this file
if dmdev and isinstance(dmdev, DMLinearDevice) and \
   path in (d.path for d in dmdev.ancestors):
log.debug("using %s", dmdev)
dmdev.setup()
continue
try:
filedev = FileDevice(path, exists=True)
filedev.setup()
log.debug("%s", filedev)
loop_name = blockdev.loop.get_loop_name(filedev.path)
loop_sysfs = None
if loop_name:
loop_sysfs = "/class/block/%s" % loop_name
loopdev = LoopDevice(name=loop_name,
                     parents=[filedev],
                     sysfsPath=loop_sysfs,
                     exists=True)
loopdev.setup()
log.debug("%s", loopdev)
dmdev = DMLinearDevice(name,
                       dmUuid="ANACONDA-%s" % name,
                       parents=[loopdev],
                       exists=True)
dmdev.setup()
dmdev.updateSysfsPath()
log.debug("%s", dmdev)
except (ValueError, DeviceError) as e:
log.error("failed to set up disk image: %s", e)
else:
# only reached when the whole stack was set up without error:
# register all three devices and process the dm device's udev info
self.devicetree._addDevice(filedev)
self.devicetree._addDevice(loopdev)
self.devicetree._addDevice(dmdev)
info = udev.get_device(dmdev.sysfsPath)
self.addUdevDevice(info, updateOrigFmt=True)
def teardownDiskImages(self):
    """ Tear down any disk image stacks. """
    # Deactivate each image's device-mapper device, then detach the
    # loop device directly underneath it.
    for image_name in self.diskImages:
        dm_dev = self.getDeviceByName(image_name)
        if not dm_dev:
            continue
        dm_dev.deactivate()
        backing_loop = dm_dev.parents[0]
        backing_loop.teardown()
def backupConfigs(self, restore=False):
""" Create a backup copies of some storage config files. """
# With restore=False, each config is copied aside to <cfg>.anacbak
# (the original stays in place). With restore=True, the .anacbak copy
# is renamed back over the config, replacing whatever is there.
configs = ["/etc/mdadm.conf"]
for cfg in configs:
if restore:
src = cfg + ".anacbak"
dst = cfg
func = os.rename
op = "restore from backup"
else:
src = cfg
dst = cfg + ".anacbak"
func = shutil.copy2
op = "create backup copy"
# remove any stale destination first so rename/copy is clean
if os.access(dst, os.W_OK):
try:
os.unlink(dst)
except OSError as e:
msg = str(e)
log.info("failed to remove %s: %s", dst, msg)
if os.access(src, os.W_OK):
# copy the config to a backup with extension ".anacbak"
try:
func(src, dst)
except (IOError, OSError) as e:
msg = str(e)
log.error("failed to %s of %s: %s", op, cfg, msg)
elif restore and os.access(cfg, os.W_OK):
# remove the config since we created it
# (no backup exists, so the file appeared during installation)
log.info("removing anaconda-created %s", cfg)
try:
os.unlink(cfg)
except OSError as e:
msg = str(e)
log.error("failed to remove %s: %s", cfg, msg)
else:
# don't try to backup non-existent configs
log.info("not going to %s of non-existent %s", op, cfg)
def restoreConfigs(self):
    """ Restore the storage config backups made by backupConfigs(). """
    self.backupConfigs(restore=True)
def saveLUKSpassphrase(self, device):
""" Save a device's LUKS passphrase in case of reset. """
# Reaches into the format's name-mangled private attribute
# (LUKS.__passphrase) -- apparently there is no public accessor.
passphrase = device.format._LUKS__passphrase
# index by format UUID and also keep a flat list of all passphrases
self.__luksDevs[device.format.uuid] = passphrase
self.__passphrases.append(passphrase)
def populate(self, cleanupOnly=False):
    """ Locate all storage devices.

        Everything should already be active. We just go through and gather
        details as needed and set up the relations between various devices.

        Devices excluded via disk filtering (or because of disk images) are
        scanned just like the rest, but then they are hidden at the end of
        this process.

        :param cleanupOnly: if True, only set the internal cleanup flag
                            before scanning.
    """
    self.backupConfigs()
    if cleanupOnly:
        self._cleanup = True

    parted.register_exn_handler(parted_exn_handler)
    # NOTE: the previous version had a redundant "except Exception: raise"
    # clause here, which only re-raised; removing it does not change
    # behavior. The finally block still runs on both success and failure.
    try:
        self._populate()
    finally:
        # Always unregister the parted exception handler and restore the
        # backed-up config files, even if the scan raised.
        parted.clear_exn_handler()
        self.restoreConfigs()
def _populate(self):
# Core device scan: prepare caches/disk images, resolve protected
# devices, then repeatedly pull udev's device list and add devices
# until a pass discovers nothing new (a fixed point).
log.info("DeviceTree.populate: ignoredDisks is %s ; exclusiveDisks is %s",
self.ignoredDisks, self.exclusiveDisks)
self.devicetree.dropLVMCache()
if flags.installer_mode and not flags.image_install:
blockdev.mpath.set_friendly_names(flags.multipath_friendly_names)
self.setupDiskImages()
# mark the tree as unpopulated so exception handlers can tell the
# exception originated while finding storage devices
self.populated = False
# resolve the protected device specs to device names
for spec in self.protectedDevSpecs:
name = udev.resolve_devspec(spec)
log.debug("protected device spec %s resolved to %s", spec, name)
if name:
self.protectedDevNames.append(name)
# FIXME: the backing dev for the live image can't be used as an
# install target. note that this is a little bit of a hack
# since we're assuming that /run/initramfs/live will exist
for mnt in open("/proc/mounts").readlines():
if " /run/initramfs/live " not in mnt:
continue
live_device_name = mnt.split()[0].split("/")[-1]
log.info("%s looks to be the live device; marking as protected",
live_device_name)
self.protectedDevNames.append(live_device_name)
self.liveBackingDevice = live_device_name
break
old_devices = {}
# Now, loop and scan for devices that have appeared since the two above
# blocks or since previous iterations.
while True:
devices = []
new_devices = udev.get_devices()
# keep only devices we have not seen in any earlier iteration
for new_device in new_devices:
new_name = udev.device_get_name(new_device)
if new_name not in old_devices:
old_devices[new_name] = new_device
devices.append(new_device)
if len(devices) == 0:
# nothing is changing -- we are finished building devices
break
log.info("devices to scan: %s", [udev.device_get_name(d) for d in devices])
for dev in devices:
self.addUdevDevice(dev)
self.populated = True
# After having the complete tree we make sure that the system
# inconsistencies are ignored or resolved.
self._handleInconsistencies()
@property
def names(self):
# Device names known to the backing devicetree.
return self.devicetree.names
def getDeviceByName(self, *args, **kwargs):
# Thin delegation to DeviceTree.getDeviceByName.
return self.devicetree.getDeviceByName(*args, **kwargs)
def getDeviceByUuid(self, *args, **kwargs):
# Thin delegation to DeviceTree.getDeviceByUuid.
return self.devicetree.getDeviceByUuid(*args, **kwargs)
| gpl-2.0 |
iradul/qtwebkit | Tools/Scripts/webkitpy/style/checkers/watchlist.py | 134 | 2280 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for the watchlist file."""
from webkitpy.common.watchlist.watchlistparser import WatchListParser
class WatchListChecker(object):
    """Style checker for the WebKit watch list file."""

    def __init__(self, file_path, handle_style_error):
        # file_path is unused; it is accepted to match the common checker
        # constructor interface.
        self._handle_style_error = handle_style_error
        self._handle_style_error.turn_off_line_filtering()

    def check(self, lines):
        """Parse the watch list, reporting parser errors as style errors."""
        def report(message):
            # The parser gives no line information, so every error is
            # attributed to line 0.
            self._handle_style_error(0, 'watchlist/general', 5, message)

        parser = WatchListParser(log_error=report)
        parser.parse('\n'.join(lines))
| gpl-2.0 |
MicroMappers/Pybossa | test/test_uploader/test_local_uploader.py | 5 | 7479 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
"""This module tests the Uploader class."""
import os
import tempfile
from default import Test
from pybossa.uploader.local import LocalUploader
from mock import patch
from werkzeug.datastructures import FileStorage
from nose.tools import assert_raises
class TestLocalUploader(Test):
"""Test PyBossa Uploader module."""
# Exercises LocalUploader against the test Flask app's config, using
# patch.dict to override UPLOAD_FOLDER / ALLOWED_EXTENSIONS per test and
# patching FileStorage.save so nothing is actually written to disk.
def test_local_uploader_relative_directory_init(self):
"""Test LOCAL UPLOADER init works with relative path."""
new_upload_folder = 'uploads'
new_config_uf = {'UPLOAD_FOLDER': new_upload_folder}
with patch.dict(self.flask_app.config, new_config_uf):
new_uploader = LocalUploader()
new_uploader.init_app(self.flask_app)
err_msg = "Upload folder should be absolute not relative"
assert os.path.isabs(new_uploader.upload_folder) is True, err_msg
err_msg = "Upload folder uploads should be existing"
assert os.path.isdir(new_uploader.upload_folder) is True, err_msg
def test_wrong_local_uploader_relative_directory_init(self):
"""Test LOCAL UPLOADER init with wrong relative path."""
new_upload_folder = 'iamnotexisting'
err_msg = "Uploadfolder ./iamnotexisting should not exist"
assert os.path.isdir(new_upload_folder) is False, err_msg
new_config_uf = {'UPLOAD_FOLDER': new_upload_folder}
with patch.dict(self.flask_app.config, new_config_uf):
new_uploader = LocalUploader()
assert_raises(IOError, new_uploader.init_app, self.flask_app) # Should raise IOError
err_msg = "wrong upload folder ./iamnotexisting should not exist"
assert os.path.isdir(new_upload_folder) is False, err_msg
def test_local_uploader_standard_directory_existing(self):
"""Test if local uploads directory existing"""
uploads_path = os.path.join(os.path.dirname(self.flask_app.root_path), 'uploads') # ../uploads
err_msg = "./uploads folder is not existing"
assert os.path.isdir(uploads_path) is True, err_msg
context_uploads_path = os.path.join(self.flask_app.root_path, 'uploads') # pybossa/uploads
err_msg = "pybossa/uploads should not exist"
assert os.path.isdir(context_uploads_path) is False, err_msg
def test_local_uploader_init(self):
"""Test LOCAL UPLOADER init works."""
u = LocalUploader()
u.init_app(self.flask_app)
new_extensions = ['pdf', 'doe']
new_upload_folder = '/tmp/'
new_config_ext = {'ALLOWED_EXTENSIONS': new_extensions}
new_config_uf = {'UPLOAD_FOLDER': new_upload_folder}
with patch.dict(self.flask_app.config, new_config_ext):
with patch.dict(self.flask_app.config, new_config_uf):
new_uploader = LocalUploader()
new_uploader.init_app(self.flask_app)
# configured extensions extend (not replace) the defaults
expected_extensions = set.union(u.allowed_extensions,
new_extensions)
err_msg = "The new uploader should support two extra extensions"
assert expected_extensions == new_uploader.allowed_extensions, err_msg
err_msg = "Upload folder /tmp should be existing"
assert os.path.isdir(new_uploader.upload_folder) is True, err_msg
err_msg = "Upload folder by default is /tmp/"
assert new_uploader.upload_folder == '/tmp/', err_msg
# save is patched to raise, so upload_file must report failure
@patch('werkzeug.datastructures.FileStorage.save', side_effect=IOError)
def test_local_uploader_upload_fails(self, mock):
"""Test LOCAL UPLOADER upload fails."""
u = LocalUploader()
file = FileStorage(filename='test.jpg')
res = u.upload_file(file, container='user_3')
err_msg = ("Upload file should return False, \
as there is an exception")
assert res is False, err_msg
@patch('werkzeug.datastructures.FileStorage.save', return_value=None)
def test_local_uploader_upload_correct_file(self, mock):
"""Test LOCAL UPLOADER upload works."""
mock.save.return_value = None
u = LocalUploader()
file = FileStorage(filename='test.jpg')
res = u.upload_file(file, container='user_3')
err_msg = ("Upload file should return True, \
as this extension is allowed")
assert res is True, err_msg
@patch('werkzeug.datastructures.FileStorage.save', return_value=None)
def test_local_uploader_upload_wrong_file(self, mock):
"""Test LOCAL UPLOADER upload works with wrong extension."""
mock.save.return_value = None
u = LocalUploader()
file = FileStorage(filename='test.txt')
res = u.upload_file(file, container='user_3')
err_msg = ("Upload file should return False, \
as this extension is not allowed")
assert res is False, err_msg
# save is patched away, so only the container directory gets created
@patch('werkzeug.datastructures.FileStorage.save', return_value=None)
def test_local_folder_is_created(self, mock):
"""Test LOCAL UPLOADER folder creation works."""
mock.save.return_value = True
u = LocalUploader()
u.upload_folder = tempfile.mkdtemp()
file = FileStorage(filename='test.jpg')
container = 'mycontainer'
res = u.upload_file(file, container=container)
path = os.path.join(u.upload_folder, container)
err_msg = "This local path should exist: %s" % path
assert os.path.isdir(path) is True, err_msg
@patch('os.remove', return_value=None)
def test_local_folder_delete(self, mock):
"""Test LOCAL UPLOADER delete works."""
u = LocalUploader()
err_msg = "Delete should return true"
assert u.delete_file('file', 'container') is True, err_msg
@patch('os.remove', side_effect=OSError)
def test_local_folder_delete_fails(self, mock):
"""Test LOCAL UPLOADER delete fail works."""
u = LocalUploader()
err_msg = "Delete should return False"
assert u.delete_file('file', 'container') is False, err_msg
def test_file_exists_for_missing_file(self):
"""Test LOCAL UPLOADER file_exists returns False if the file does not exist"""
u = LocalUploader()
container = 'mycontainer'
assert u.file_exists('noexist.txt', container) is False
def test_file_exists_for_real_file(self):
"""Test LOCAL UPLOADER file_exists returns True if the file exists"""
u = LocalUploader()
u.upload_folder = tempfile.mkdtemp()
file = FileStorage(filename='test.jpg')
container = 'mycontainer'
u.upload_file(file, container=container)
assert u.file_exists('test.jpg', container) is True
| agpl-3.0 |
thumbimigwe/golber | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/scripts/reindent.py | 194 | 9926 | #! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
# -B option added by Georg Brandl, 2006.
"""reindent [-d][-r][-v] [ path ... ]
-d (--dryrun) Dry run. Analyze, but don't make any changes to files.
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
-B (--no-backup) Don't write .bak backup files.
-v (--verbose) Verbose. Print informative msgs; else only names of changed files.
-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files. Also ensure the last line ends with a newline.
If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output. In this case, the -d, -r and -v flags are
ignored.
You can pass one or more file and/or directory paths. When a directory
path, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.
If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension. If it finds nothing to
change, the file is left alone. If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
"""
__version__ = "1"
import tokenize
import os
import sys
# Command-line option flags, incremented (not just set) in main(), so a
# repeated option like -v -v bumps the counter.
verbose = 0
recurse = 0
dryrun = 0
no_backup = 0
def usage(msg=None):
# Print an optional error message followed by the module docstring (the
# usage text) to stderr. Uses Python 2 "print >>" redirection syntax.
if msg is not None:
print >> sys.stderr, msg
print >> sys.stderr, __doc__
def errprint(*args):
    """Write args to stderr, space-separated and newline-terminated."""
    sys.stderr.write(" ".join(str(arg) for arg in args) + "\n")
def main():
# Parse command-line options into the module-level flags, then either
# run as a stdin->stdout filter (no paths given) or check each path.
import getopt
global verbose, recurse, dryrun, no_backup
try:
opts, args = getopt.getopt(sys.argv[1:], "drvhB",
["dryrun", "recurse", "verbose", "help",
"no-backup"])
except getopt.error, msg:
# Python 2 except syntax; bad options print usage and exit quietly
usage(msg)
return
for o, a in opts:
if o in ('-d', '--dryrun'):
dryrun += 1
elif o in ('-r', '--recurse'):
recurse += 1
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-B', '--no-backup'):
no_backup += 1
elif o in ('-h', '--help'):
usage()
return
if not args:
# filter mode: reindent stdin to stdout; -d/-r/-v are ignored here
r = Reindenter(sys.stdin)
r.run()
r.write(sys.stdout)
return
for arg in args:
check(arg)
def check(file):
# Reindent a single .py file in place, or scan a directory for .py
# files (recursively with -r). Honors the dryrun/no_backup/verbose
# flags. NOTE: the parameter name shadows the Python 2 builtin `file`.
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "listing directory", file
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(fullname) and
     not os.path.islink(fullname))
    or name.lower().endswith(".py")):
check(fullname)
return
if verbose:
print "checking", file, "...",
try:
f = open(file)
except IOError, msg:
errprint("%s: I/O Error: %s" % (file, str(msg)))
return
r = Reindenter(f)
f.close()
# run() returns True when the reindented text differs from the input
if r.run():
if verbose:
print "changed."
if dryrun:
print "But this is a dry run, so leaving it alone."
else:
print "reindented", file, (dryrun and "(dry run => not really)" or "")
if not dryrun:
# unless -B was given, keep the original as <file>.bak
if not no_backup:
bak = file + ".bak"
if os.path.exists(bak):
os.remove(bak)
os.rename(file, bak)
if verbose:
print "renamed", file, "to", bak
f = open(file, "w")
r.write(f)
f.close()
if verbose:
print "wrote new", file
else:
if verbose:
print "unchanged."
class Reindenter:
# Rewrites a Python source stream to 4-space indents. Tokenizes the
# input to record an (input line, indent level) pair per statement (and
# -1 per comment line), then rebuilds the text, mapping each statement's
# existing leading-space count to level*4 and shifting comments to match.
def __init__(self, f):
self.find_stmt = 1 # next token begins a fresh stmt?
self.level = 0 # current indent level
# Raw file lines.
self.raw = f.readlines()
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
self.lines = [line.rstrip('\n \t').expandtabs() + "\n"
              for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
# List of (lineno, indentlevel) pairs, one for each stmt and
# comment line. indentlevel is -1 for comment lines, as a
# signal that tokenize doesn't know what to do about them;
# indeed, they're our headache!
self.stats = []
def run(self):
# Tokenize the whole input (fills self.stats via tokeneater), then
# rebuild the reindented text into self.after. Returns True if the
# output differs from the input.
tokenize.tokenize(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == "\n":
lines.pop()
# Sentinel.
stats = self.stats
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = self.after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats)-1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i+1][0]
have = getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in xrange(i+1, len(stats)-1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == getlspace(lines[jline]):
want = jlevel * 4
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in xrange(i-1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = have + getlspace(after[jline-1]) - \
       getlspace(lines[jline])
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
# shift every line of the statement by the same amount
for line in lines[thisstmt:nextstmt]:
if diff > 0:
if line == "\n":
after.append(line)
else:
after.append(" " * diff + line)
else:
remove = min(getlspace(line), -diff)
after.append(line[remove:])
return self.raw != self.after
def write(self, f):
f.writelines(self.after)
# Line-getter for tokenize.
def getline(self):
if self.index >= len(self.lines):
line = ""
else:
line = self.lines[self.index]
self.index += 1
return line
# Line-eater for tokenize.
# NOTE: uses Python 2 tuple-parameter unpacking for (sline, scol); the
# token-type constants are bound as keyword defaults for fast lookup.
def tokeneater(self, type, token, (sline, scol), end, line,
               INDENT=tokenize.INDENT,
               DEDENT=tokenize.DEDENT,
               NEWLINE=tokenize.NEWLINE,
               COMMENT=tokenize.COMMENT,
               NL=tokenize.NL):
if type == NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
self.find_stmt = 1
elif type == INDENT:
self.find_stmt = 1
self.level += 1
elif type == DEDENT:
self.find_stmt = 1
self.level -= 1
elif type == COMMENT:
if self.find_stmt:
self.stats.append((sline, -1))
# but we're still looking for a new stmt, so leave
# find_stmt alone
elif type == NL:
pass
elif self.find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
self.find_stmt = 0
if line: # not endmarker
self.stats.append((sline, self.level))
def getlspace(line):
    """Return the number of leading space characters in *line*."""
    # lstrip(" ") removes only spaces, so the length delta is exactly
    # the count of leading blanks (tabs stop the count, as before).
    return len(line) - len(line.lstrip(" "))
# Script entry point: parse options and reindent the given paths, or act
# as a stdin->stdout filter (see the module docstring for usage).
if __name__ == '__main__':
main()
| mit |
xinhunbie/NS3- | src/antenna/bindings/modulegen__gcc_ILP32.py | 38 | 97360 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
# Downgrade wrapper-generation failures to warnings; returning True
# signals pybindgen that the error was handled so generation can
# continue with the remaining wrappers.
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
# Install the lenient handler globally for this binding-generation run.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for ns.antenna."""
    return Module('ns.antenna', cpp_namespace='::ns3')
def register_types(module):
    """Declare every wrapped C++ type for the ns.antenna binding module.

    Auto-generated by pybindgen's modulegen; the ## comments name the
    originating header.  Core types are imported from 'ns.core', antenna
    types are declared locally, Vector3D aliases are added, and nested
    namespaces (FatalImpl, Hash) are registered last.
    """
    root_module = module.get_root()
    ## angles.h (module 'antenna'): ns3::Angles [struct]
    module.add_class('Angles')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## vector.h (module 'core'): ns3::Vector2D [class]
    module.add_class('Vector2D', import_from_module='ns.core')
    ## vector.h (module 'core'): ns3::Vector3D [class]
    module.add_class('Vector3D', import_from_module='ns.core')
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## antenna-model.h (module 'antenna'): ns3::AntennaModel [class]
    module.add_class('AntennaModel', parent=root_module['ns3::Object'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel [class]
    module.add_class('CosineAntennaModel', parent=root_module['ns3::AntennaModel'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel [class]
    module.add_class('IsotropicAntennaModel', parent=root_module['ns3::AntennaModel'])
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel [class]
    module.add_class('ParabolicAntennaModel', parent=root_module['ns3::AntennaModel'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## vector.h (module 'core'): ns3::Vector2DChecker [class]
    module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## vector.h (module 'core'): ns3::Vector2DValue [class]
    module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## vector.h (module 'core'): ns3::Vector3DChecker [class]
    module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## vector.h (module 'core'): ns3::Vector3DValue [class]
    module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
    typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
    typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
    module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
    typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
    typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
    module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
    module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
    """Type-registration hook for ns3::FatalImpl (no types to declare)."""
    # The generated pattern fetches the root module even when unused.
    module.get_root()
def register_types_ns3_Hash(module):
    """Register types in the ns3::Hash namespace and its function-pointer aliases."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations in ns3::Hash::Function."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
    """Dispatch to the per-class method-registration helpers defined below."""
    register_Ns3Angles_methods(root_module, root_module['ns3::Angles'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
    register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3AntennaModel_methods(root_module, root_module['ns3::AntennaModel'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3CosineAntennaModel_methods(root_module, root_module['ns3::CosineAntennaModel'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3IsotropicAntennaModel_methods(root_module, root_module['ns3::IsotropicAntennaModel'])
    register_Ns3ParabolicAntennaModel_methods(root_module, root_module['ns3::ParabolicAntennaModel'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
    register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
    register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
    register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3Angles_methods(root_module, cls):
    """Register constructors, attributes and operators of ns3::Angles."""
    cls.add_output_stream_operator()
    ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Angles const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Angles const &', 'arg0')])
    ## angles.h (module 'antenna'): ns3::Angles::Angles() [constructor]
    cls.add_constructor([])
    ## angles.h (module 'antenna'): ns3::Angles::Angles(double phi, double theta) [constructor]
    cls.add_constructor([param('double', 'phi'), param('double', 'theta')])
    ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v) [constructor]
    cls.add_constructor([param('ns3::Vector', 'v')])
    ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v, ns3::Vector o) [constructor]
    cls.add_constructor([param('ns3::Vector', 'v'), param('ns3::Vector', 'o')])
    ## angles.h (module 'antenna'): ns3::Angles::phi [variable]
    cls.add_instance_attribute('phi', 'double', is_const=False)
    ## angles.h (module 'antenna'): ns3::Angles::theta [variable]
    cls.add_instance_attribute('theta', 'double', is_const=False)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register constructors and methods of ns3::AttributeConstructionList."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register constructors and attributes of ns3::AttributeConstructionList::Item."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register constructors and methods of ns3::CallbackBase."""
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register constructors and hashing methods of ns3::Hasher."""
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear',
                   'ns3::Hasher &',
                   [])
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register constructors, attribute accessors and trace hooks of ns3::ObjectBase."""
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register constructors and the static Delete method of ns3::ObjectDeleter."""
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete',
                   'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register constructors and Cleanup for the Object SimpleRefCount instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId, the run-time type-metadata
    class: comparison/stream operators, constructors, and all public member
    functions (attribute and trace-source registration, lookup, and setters).

    NOTE: generated binding code -- the call order and the exact C++ type
    strings below are produced by the binding generator; do not hand-edit.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
                   deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint32_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint32_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash',
                   'ns3::TypeId',
                   [param('uint32_t', 'hash')],
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe',
                   'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize',
                   'ns3::TypeId',
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'uid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register the ns3::TypeId::AttributeInformation struct: its two
    constructors and every public data member."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Public fields, registered in the same order the generator emits them.
    for field_name, field_type in (
            ('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >'),
            ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
            ('flags', 'uint32_t'),
            ('help', 'std::string'),
            ('initialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
            ('name', 'std::string'),
            ('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
            ('supportLevel', 'ns3::TypeId::SupportLevel'),
            ('supportMsg', 'std::string'),
    ):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register the ns3::TypeId::TraceSourceInformation struct: its two
    constructors and every public data member."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public fields, registered in the same order the generator emits them.
    for field_name, field_type in (
            ('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >'),
            ('callback', 'std::string'),
            ('help', 'std::string'),
            ('name', 'std::string'),
            ('supportLevel', 'ns3::TypeId::SupportLevel'),
            ('supportMsg', 'std::string'),
    ):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3Vector2D_methods(root_module, cls):
    """Register ns3::Vector2D: stream output, constructors, and x/y members."""
    cls.add_output_stream_operator()
    # Copy constructor, (x, y) constructor, then the default constructor.
    cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
    cls.add_constructor([param('double', '_x'), param('double', '_y')])
    cls.add_constructor([])
    # Coordinates are plain mutable doubles.
    for axis in ('x', 'y'):
        cls.add_instance_attribute(axis, 'double', is_const=False)
    return
def register_Ns3Vector3D_methods(root_module, cls):
    """Register ns3::Vector3D: stream output, constructors, and x/y/z members."""
    cls.add_output_stream_operator()
    # Copy constructor, (x, y, z) constructor, then the default constructor.
    cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
    cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
    cls.add_constructor([])
    # Coordinates are plain mutable doubles.
    for axis in ('x', 'y', 'z'):
        cls.add_instance_attribute(axis, 'double', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register ns3::empty, the placeholder type used to fill unused
    template slots in ns3::Callback."""
    # Only a default and a copy constructor are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register ns3::Object, the base class providing object aggregation and
    the Initialize/Dispose lifecycle; protected hooks keep 'protected'
    visibility in the bindings.

    NOTE: generated binding code -- call order and type strings come from the
    binding generator; do not hand-edit.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize',
                   'void',
                   [])
    ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
    cls.add_method('IsInitialized',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register ns3::Object::AggregateIterator, the cursor over a set of
    aggregated objects."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    cls.add_constructor([])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # Ptr<Object const> Next()
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeAccessor, ...> specialization:
    default/copy constructors and static Cleanup()."""
    ref_t = 'ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &'
    cls.add_constructor([])
    cls.add_constructor([param(ref_t, 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeChecker, ...> specialization:
    default/copy constructors and static Cleanup()."""
    ref_t = 'ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &'
    cls.add_constructor([])
    cls.add_constructor([param(ref_t, 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeValue, ...> specialization:
    default/copy constructors and static Cleanup()."""
    ref_t = 'ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &'
    cls.add_constructor([])
    cls.add_constructor([param(ref_t, 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register the SimpleRefCount<CallbackImplBase, ...> specialization:
    default/copy constructors and static Cleanup()."""
    ref_t = 'ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &'
    cls.add_constructor([])
    cls.add_constructor([param(ref_t, 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register the SimpleRefCount<Hash::Implementation, ...> specialization:
    default/copy constructors and static Cleanup()."""
    ref_t = 'ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &'
    cls.add_constructor([])
    cls.add_constructor([param(ref_t, 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<TraceSourceAccessor, ...> specialization:
    default/copy constructors and static Cleanup()."""
    ref_t = 'ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &'
    cls.add_constructor([])
    cls.add_constructor([param(ref_t, 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register ns3::TraceSourceAccessor, the abstract interface for hooking
    callbacks to trace sources (with and without a context string)."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # All four hooks are pure virtual const methods returning bool. The two
    # context-taking variants insert a std::string context argument between
    # 'obj' and 'cb'; fresh param objects are built on every iteration.
    for method_name, takes_context in (('Connect', True),
                                       ('ConnectWithoutContext', False),
                                       ('Disconnect', True),
                                       ('DisconnectWithoutContext', False)):
        args = [param('ns3::ObjectBase *', 'obj', transfer_ownership=False)]
        if takes_context:
            args.append(param('std::string', 'context'))
        args.append(param('ns3::CallbackBase const &', 'cb'))
        cls.add_method(method_name, 'bool', args,
                       is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AntennaModel_methods(root_module, cls):
    """Register ns3::AntennaModel: constructors, the pure-virtual GetGainDb()
    and the static GetTypeId()."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::AntennaModel const &', 'arg0')])
    cls.add_constructor([])
    # double GetGainDb(ns3::Angles a) -- pure virtual in the C++ class.
    cls.add_method('GetGainDb', 'double',
                   [param('ns3::Angles', 'a')],
                   is_pure_virtual=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register ns3::AttributeAccessor, the abstract get/set interface used
    by the attribute system; all four methods are pure virtual and const.

    NOTE: generated binding code -- call order and type strings come from the
    binding generator; do not hand-edit.
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register ns3::AttributeChecker, the abstract validation interface of
    the attribute system (Check/Copy/Create plus type-information queries).

    NOTE: generated binding code -- call order and type strings come from the
    binding generator; do not hand-edit.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register ns3::AttributeValue, the abstract base for attribute values:
    Copy() plus string (de)serialization, all pure virtual.

    NOTE: generated binding code -- call order and type strings come from the
    binding generator; do not hand-edit.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker; only default and copy constructors are
    exposed."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase constructors and methods on the wrapped class *cls*."""
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register ns3::CallbackValue constructors and methods on the wrapped class *cls*."""
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3CosineAntennaModel_methods(root_module, cls):
    """Register ns3::CosineAntennaModel constructors and methods on the wrapped class *cls*."""
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel() [constructor]
    cls.add_constructor([])
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel(ns3::CosineAntennaModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CosineAntennaModel const &', 'arg0')])
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetBeamwidth() const [member function]
    cls.add_method('GetBeamwidth',
                   'double',
                   [],
                   is_const=True)
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_virtual=True)
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetOrientation() const [member function]
    cls.add_method('GetOrientation',
                   'double',
                   [],
                   is_const=True)
    ## cosine-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::CosineAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
    cls.add_method('SetBeamwidth',
                   'void',
                   [param('double', 'beamwidthDegrees')])
    ## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetOrientation(double orientationDegrees) [member function]
    cls.add_method('SetOrientation',
                   'void',
                   [param('double', 'orientationDegrees')])
    return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    """Register ns3::EmptyAttributeAccessor constructors and methods on the wrapped class *cls*."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    """Register ns3::EmptyAttributeChecker constructors and methods on the wrapped class *cls*."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue constructors and (private-visibility) methods on the wrapped class *cls*."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3IsotropicAntennaModel_methods(root_module, cls):
    """Register ns3::IsotropicAntennaModel constructors and methods on the wrapped class *cls*."""
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel(ns3::IsotropicAntennaModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IsotropicAntennaModel const &', 'arg0')])
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel() [constructor]
    cls.add_constructor([])
    ## isotropic-antenna-model.h (module 'antenna'): double ns3::IsotropicAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_virtual=True)
    ## isotropic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::IsotropicAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3ParabolicAntennaModel_methods(root_module, cls):
    """Register ns3::ParabolicAntennaModel constructors and methods on the wrapped class *cls*."""
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel() [constructor]
    cls.add_constructor([])
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel(ns3::ParabolicAntennaModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ParabolicAntennaModel const &', 'arg0')])
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetBeamwidth() const [member function]
    cls.add_method('GetBeamwidth',
                   'double',
                   [],
                   is_const=True)
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_virtual=True)
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetOrientation() const [member function]
    cls.add_method('GetOrientation',
                   'double',
                   [],
                   is_const=True)
    ## parabolic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::ParabolicAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
    cls.add_method('SetBeamwidth',
                   'void',
                   [param('double', 'beamwidthDegrees')])
    ## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetOrientation(double orientationDegrees) [member function]
    cls.add_method('SetOrientation',
                   'void',
                   [param('double', 'orientationDegrees')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker constructors on the wrapped class *cls*."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue constructors and methods on the wrapped class *cls*."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3Vector2DChecker_methods(root_module, cls):
    """Register ns3::Vector2DChecker constructors on the wrapped class *cls*."""
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
    return
def register_Ns3Vector2DValue_methods(root_module, cls):
    """Register ns3::Vector2DValue constructors and methods on the wrapped class *cls*."""
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector2D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector2D const &', 'value')])
    return
def register_Ns3Vector3DChecker_methods(root_module, cls):
    """Register ns3::Vector3DChecker constructors on the wrapped class *cls*."""
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
    return
def register_Ns3Vector3DValue_methods(root_module, cls):
    """Register ns3::Vector3DValue constructors and methods on the wrapped class *cls*."""
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'value')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector3D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector3D const &', 'value')])
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register ns3::Hash::Implementation constructors and methods on the wrapped class *cls*."""
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register ns3::Hash::Function::Fnv1a constructors and methods on the wrapped class *cls*."""
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
    cls.add_constructor([])
    ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash32 constructors and methods on the wrapped class *cls*."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash64 constructors and methods on the wrapped class *cls*."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register ns3::Hash::Function::Murmur3 constructors and methods on the wrapped class *cls*."""
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_functions(root_module):
    """Register the module's free functions and recurse into submodule registration helpers."""
    module = root_module
    ## angles.h (module 'antenna'): extern double ns3::DegreesToRadians(double degrees) [free function]
    module.add_function('DegreesToRadians',
                        'double',
                        [param('double', 'degrees')])
    ## angles.h (module 'antenna'): extern double ns3::RadiansToDegrees(double radians) [free function]
    module.add_function('RadiansToDegrees',
                        'double',
                        [param('double', 'radians')])
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register for the ns3::FatalImpl submodule."""
    return
def register_functions_ns3_Hash(module, root_module):
    """Recurse into the ns3::Hash::Function submodule registration helper."""
    register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to register for the ns3::Hash::Function submodule."""
    return
def main():
    """Build the module description and emit the generated binding code to stdout."""
    # FileCodeSink/module_init/register_* are defined earlier in this file.
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)
# Run the binding generator when executed as a script.
if __name__ == '__main__':
    main()
| gpl-2.0 |
chouseknecht/ansible | lib/ansible/modules/network/fortios/fortios_system_csf.py | 13 | 15155 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Python 2/3 compatibility: make module-level classes new-style under Python 2.
__metaclass__ = type

# Module maturity/support metadata consumed by Ansible tooling (ansible-doc, CI).
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_csf
short_description: Add this FortiGate to a Security Fabric or set up a new Security Fabric on this FortiGate in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and csf category.
      Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
system_csf:
description:
- Add this FortiGate to a Security Fabric or set up a new Security Fabric on this FortiGate.
default: null
type: dict
suboptions:
configuration_sync:
description:
- Configuration sync mode.
type: str
choices:
- default
- local
fabric_device:
description:
- Fabric device configuration.
type: list
suboptions:
device_ip:
description:
- Device IP.
type: str
device_type:
description:
- Device type.
type: str
choices:
- fortimail
login:
description:
- Device login name.
type: str
name:
description:
- Device name.
required: true
type: str
password:
description:
- Device login password.
type: str
fixed_key:
description:
- Auto-generated fixed key used when this device is the root. (Will automatically be generated if not set.)
type: str
group_name:
description:
- Security Fabric group name. All FortiGates in a Security Fabric must have the same group name.
type: str
group_password:
description:
- Security Fabric group password. All FortiGates in a Security Fabric must have the same group password.
type: str
management_ip:
description:
- Management IP address of this FortiGate. Used to log into this FortiGate from another FortiGate in the Security Fabric.
type: str
management_port:
description:
- Overriding port for management connection (Overrides admin port).
type: int
status:
description:
- Enable/disable Security Fabric.
type: str
choices:
- enable
- disable
trusted_list:
description:
- Pre-authorized and blocked security fabric nodes.
type: list
suboptions:
action:
description:
- Security fabric authorization action.
type: str
choices:
- accept
- deny
downstream_authorization:
description:
- Trust authorizations by this node's administrator.
type: str
choices:
- enable
- disable
ha_members:
description:
- HA members.
type: str
serial:
description:
- Serial.
required: true
type: str
upstream_ip:
description:
- IP address of the FortiGate upstream from this FortiGate in the Security Fabric.
type: str
upstream_port:
description:
                - The port number to use to communicate with the FortiGate upstream from this FortiGate in the Security Fabric.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Add this FortiGate to a Security Fabric or set up a new Security Fabric on this FortiGate.
fortios_system_csf:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_csf:
configuration_sync: "default"
fabric_device:
-
device_ip: "<your_own_value>"
device_type: "fortimail"
login: "<your_own_value>"
name: "default_name_8"
password: "<your_own_value>"
fixed_key: "<your_own_value>"
group_name: "<your_own_value>"
group_password: "<your_own_value>"
management_ip: "<your_own_value>"
management_port: "14"
status: "enable"
trusted_list:
-
action: "accept"
downstream_authorization: "enable"
ha_members: "<your_own_value>"
serial: "<your_own_value>"
upstream_ip: "<your_own_value>"
upstream_port: "22"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a FortiOS session with *fos* using the credentials in *data*.

    Enables debug output, selects HTTP vs HTTPS transport (HTTPS unless the
    'https' key is present and falsy) and performs the actual login call.
    """
    host, username = data['host'], data['username']
    password, ssl_verify = data['password'], data['ssl_verify']

    fos.debug('on')
    # HTTPS is the default; only an explicit falsy 'https' flag disables it.
    fos.https('off' if ('https' in data and not data['https']) else 'on')
    fos.login(host, username, password, verify=ssl_verify)
def filter_system_csf_data(json):
    """Keep only the recognized system_csf option keys from *json*.

    Drops unknown keys and keys whose value is None, so the payload sent to
    FortiOS contains only explicitly-set parameters.

    :param json: dict of module parameters (may contain extra/None entries)
    :return: new dict restricted to known, non-None system_csf options
    """
    option_list = ['configuration_sync', 'fabric_device', 'fixed_key',
                   'group_name', 'group_password', 'management_ip',
                   'management_port', 'status', 'trusted_list',
                   'upstream_ip', 'upstream_port']

    # Idiomatic dict comprehension replacing the manual loop-and-assign.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively replace '_' with '-' in every dict key of *data*.

    FortiOS API field names use hyphens while Ansible options use underscores,
    so the payload keys must be converted before being sent to the device.
    Lists and nested dicts are converted recursively; scalars are returned
    unchanged.

    :param data: arbitrarily nested structure of dicts/lists/scalars
    :return: the converted structure
    """
    if isinstance(data, list):
        # Bug fix: the original rebound the loop variable only
        # (``for elem in data: elem = ...``), so converted dicts nested in
        # lists were discarded. Store the result back by index instead.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def system_csf(data, fos):
    """Apply the system_csf configuration in *data* to the device via *fos*.

    Filters the module parameters down to known options, converts the key
    style to the hyphenated FortiOS form, and issues the ``set`` call.
    """
    vdom = data['vdom']
    payload = underscore_to_hyphen(filter_system_csf_data(data['system_csf']))
    return fos.set('system', 'csf', data=payload, vdom=vdom)
def is_successful_status(status):
    """Return True when the FortiOS response *status* indicates success.

    A DELETE that came back 404 also counts as success: the object being
    removed is already gone.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Dispatch the system_csf change and report (is_error, changed, response)."""
    if data['system_csf']:
        resp = system_csf(data, fos)
    # NOTE(review): if data['system_csf'] is empty/None, `resp` is unbound and
    # a NameError is raised — same as the original flow; confirm callers always
    # supply a non-empty system_csf.
    failed = not is_successful_status(resp)
    changed = resp['status'] == "success"
    return failed, changed, resp
def main():
    """Ansible entry point: parse arguments and apply the system_csf config."""
    # Argument spec.  host/username/password are only used in legacy
    # (fortiosapi) mode; with HTTPAPI the connection comes from the socket.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "system_csf": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "configuration_sync": {"required": False, "type": "str",
                                       "choices": ["default", "local"]},
                "fabric_device": {"required": False, "type": "list",
                                  "options": {
                                      "device_ip": {"required": False, "type": "str"},
                                      "device_type": {"required": False, "type": "str",
                                                      "choices": ["fortimail"]},
                                      "login": {"required": False, "type": "str"},
                                      "name": {"required": True, "type": "str"},
                                      "password": {"required": False, "type": "str"}
                                  }},
                "fixed_key": {"required": False, "type": "str"},
                "group_name": {"required": False, "type": "str"},
                "group_password": {"required": False, "type": "str"},
                "management_ip": {"required": False, "type": "str"},
                "management_port": {"required": False, "type": "int"},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]},
                "trusted_list": {"required": False, "type": "list",
                                 "options": {
                                     "action": {"required": False, "type": "str",
                                                "choices": ["accept", "deny"]},
                                     "downstream_authorization": {"required": False, "type": "str",
                                                                  "choices": ["enable", "disable"]},
                                     "ha_members": {"required": False, "type": "str"},
                                     "serial": {"required": True, "type": "str"}
                                 }},
                "upstream_ip": {"required": False, "type": "str"},
                "upstream_port": {"required": False, "type": "int"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection Ansible established.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: talk to the device directly through fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" is upstream boilerplate; the message
        # is misleading but kept byte-identical here.
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
mbox/django | django/db/migrations/questioner.py | 5 | 6006 | import importlib
import os
import sys
from django.apps import apps
from django.utils import datetime_safe, six
from django.utils.six.moves import input
from .loader import MIGRATIONS_MODULE_NAME
class MigrationQuestioner(object):
    """
    Gives the autodetector responses to questions it might have.
    This base class has a built-in noninteractive mode, but the
    interactive subclass is what the command-line arguments will use.
    """

    def __init__(self, defaults=None, specified_apps=None):
        # defaults: canned answers keyed by question name (e.g. "ask_rename");
        # specified_apps: app labels explicitly named on the command line.
        self.defaults = defaults or {}
        self.specified_apps = specified_apps or set()

    def ask_initial(self, app_label):
        "Should we create an initial migration for the app?"
        # If it was specified on the command line, definitely true
        if app_label in self.specified_apps:
            return True
        # Otherwise, we look to see if it has a migrations module
        # without any Python files in it, apart from __init__.py.
        # Apps from the new app template will have these; the python
        # file check will ensure we skip South ones.
        try:
            app_config = apps.get_app_config(app_label)
        except LookupError:  # It's a fake app.
            return self.defaults.get("ask_initial", False)
        migrations_import_path = "%s.%s" % (app_config.name, MIGRATIONS_MODULE_NAME)
        try:
            migrations_module = importlib.import_module(migrations_import_path)
        except ImportError:
            return self.defaults.get("ask_initial", False)
        else:
            if hasattr(migrations_module, "__file__"):
                filenames = os.listdir(os.path.dirname(migrations_module.__file__))
            elif hasattr(migrations_module, "__path__"):
                # Namespace packages can span several directories; only the
                # single-directory case is considered.
                if len(migrations_module.__path__) > 1:
                    return False
                filenames = os.listdir(list(migrations_module.__path__)[0])
            # NOTE(review): if the module has neither __file__ nor __path__,
            # 'filenames' is unbound here and a NameError would be raised --
            # presumably unreachable for importable modules; confirm.
            # "Initial" only if the migrations dir has no .py besides __init__.py.
            return not any(x.endswith(".py") for x in filenames if x != "__init__.py")

    def ask_not_null_addition(self, field_name, model_name):
        "Adding a NOT NULL field to a model"
        # None means quit
        return None

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        "Was this field really renamed?"
        return self.defaults.get("ask_rename", False)

    def ask_rename_model(self, old_model_state, new_model_state):
        "Was this model really renamed?"
        return self.defaults.get("ask_rename_model", False)

    def ask_merge(self, app_label):
        "Do you really want to merge these migrations?"
        return self.defaults.get("ask_merge", False)
class InteractiveMigrationQuestioner(MigrationQuestioner):
    """Questioner that prompts the developer on the console for each answer."""

    def _boolean_input(self, question, default=None):
        # Prompt until a y/n answer arrives; an empty answer returns the
        # default when one was supplied.
        result = input("%s " % question)
        if not result and default is not None:
            return default
        while len(result) < 1 or result[0].lower() not in "yn":
            result = input("Please answer yes or no: ")
        return result[0].lower() == "y"

    def _choice_input(self, question, choices):
        # Show a numbered menu and loop until a valid 1-based index is
        # entered; returns the chosen number, not the choice text.
        print(question)
        for i, choice in enumerate(choices):
            print(" %s) %s" % (i + 1, choice))
        result = input("Select an option: ")
        while True:
            try:
                value = int(result)
                if 0 < value <= len(choices):
                    return value
            except ValueError:
                pass
            result = input("Please select a valid option: ")

    def ask_not_null_addition(self, field_name, model_name):
        "Adding a NOT NULL field to a model"
        choice = self._choice_input(
            "You are trying to add a non-nullable field '%s' to %s without a default;\n" % (field_name, model_name) +
            "we can't do that (the database needs something to populate existing rows).\n" +
            "Please select a fix:",
            [
                "Provide a one-off default now (will be set on all existing rows)",
                "Quit, and let me add a default in models.py",
            ]
        )
        if choice == 2:
            # Exit code 3 signals "user chose to quit and edit models.py".
            sys.exit(3)
        else:
            print("Please enter the default value now, as valid Python")
            print("The datetime module is available, so you can do e.g. datetime.date.today()")
            while True:
                if six.PY3:
                    # Six does not correctly abstract over the fact that
                    # py3 input returns a unicode string, while py2 raw_input
                    # returns a bytestring.
                    code = input(">>> ")
                else:
                    code = input(">>> ").decode(sys.stdin.encoding)
                if not code:
                    print("Please enter some code, or 'exit' (with no quotes) to exit.")
                elif code == "exit":
                    sys.exit(1)
                else:
                    try:
                        # eval() of developer-typed input is deliberate: this
                        # prompt only runs interactively for the person
                        # generating migrations, never on untrusted data.
                        return eval(code, {}, {"datetime": datetime_safe})
                    except (SyntaxError, NameError) as e:
                        print("Invalid input: %s" % e)

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        "Was this field really renamed?"
        return self._boolean_input("Did you rename %s.%s to %s.%s (a %s)? [y/N]" % (model_name, old_name, model_name, new_name, field_instance.__class__.__name__), False)

    def ask_rename_model(self, old_model_state, new_model_state):
        "Was this model really renamed?"
        return self._boolean_input("Did you rename the %s.%s model to %s? [y/N]" % (old_model_state.app_label, old_model_state.name, new_model_state.name), False)

    def ask_merge(self, app_label):
        # Defaults to No on an empty answer.
        return self._boolean_input(
            "\nMerging will only work if the operations printed above do not conflict\n" +
            "with each other (working on different fields or models)\n" +
            "Do you want to merge these migration branches? [y/N]",
            False,
        )
| bsd-3-clause |
twilio/twilio-python | twilio/rest/accounts/v1/credential/__init__.py | 2 | 3596 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.accounts.v1.credential.aws import AwsList
from twilio.rest.accounts.v1.credential.public_key import PublicKeyList
class CredentialList(ListResource):
    """Container for the Accounts v1 credential sub-resources."""

    def __init__(self, version):
        """
        Initialize the CredentialList.

        :param Version version: Version that contains the resource
        """
        super(CredentialList, self).__init__(version)

        # Path solution (no placeholders for this resource).
        self._solution = {}

        # Sub-resource lists, created lazily on first access.
        self._public_key = None
        self._aws = None

    @property
    def public_key(self):
        """
        Access the public_key sub-resource list, building it on first use.

        :rtype: twilio.rest.accounts.v1.credential.public_key.PublicKeyList
        """
        if self._public_key is None:
            self._public_key = PublicKeyList(self._version)
        return self._public_key

    @property
    def aws(self):
        """
        Access the aws sub-resource list, building it on first use.

        :rtype: twilio.rest.accounts.v1.credential.aws.AwsList
        """
        if self._aws is None:
            self._aws = AwsList(self._version)
        return self._aws

    def __repr__(self):
        """Machine-friendly representation."""
        return '<Twilio.Accounts.V1.CredentialList>'
class CredentialPage(Page):
    """A single page of Credential results returned by the API."""

    def __init__(self, version, response, solution):
        """
        Initialize the CredentialPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        """
        super(CredentialPage, self).__init__(version, response)

        # Path solution carried over from the owning list.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build a CredentialInstance from a raw API payload dict.

        :rtype: twilio.rest.accounts.v1.credential.CredentialInstance
        """
        return CredentialInstance(self._version, payload)

    def __repr__(self):
        """Machine-friendly representation."""
        return '<Twilio.Accounts.V1.CredentialPage>'
class CredentialInstance(InstanceResource):
    """Instance representation of a Credential resource."""

    def __init__(self, version, payload):
        """
        Initialize the CredentialInstance.
        """
        super(CredentialInstance, self).__init__(version)

        # This resource has no instance context or path placeholders.
        self._context = None
        self._solution = {}

    def __repr__(self):
        """Machine-friendly representation."""
        return '<Twilio.Accounts.V1.CredentialInstance>'
| mit |
rupran/ansible | lib/ansible/modules/cloud/webfaction/webfaction_app.py | 6 | 6427 | #!/usr/bin/python
#
# Create a Webfaction application using Ansible and the Webfaction API
#
# Valid application types can be found by looking here:
# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
# * Andy Baker
# * Federico Tarantini
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_app
short_description: Add or remove applications on a Webfaction host
description:
- Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the application
required: true
state:
description:
- Whether the application should exist
required: false
choices: ['present', 'absent']
default: "present"
type:
description:
- The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list.
required: true
autostart:
description:
- Whether the app should restart with an autostart.cgi script
required: false
default: "no"
extra_info:
description:
- Any extra parameters required by the app
required: false
default: null
port_open:
description:
- IF the port should be opened
required: false
default: false
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
machine:
description:
- The machine name to use (optional for accounts with only one machine)
required: false
'''
EXAMPLES = '''
- name: Create a test app
webfaction_app:
name="my_wsgi_app1"
state=present
type=mod_wsgi35-python27
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
machine={{webfaction_machine}}
'''
import xmlrpclib

# Module-level XML-RPC client for the Webfaction API, created at import time.
# NOTE(review): xmlrpclib is Python 2 only -- on Python 3 this would be
# xmlrpc.client; confirm the module's supported interpreter versions.
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
    """Ansible entry point: create or delete a Webfaction application."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(required=False, choices=['present', 'absent'], default='present'),
            type = dict(required=True),
            autostart = dict(required=False, type='bool', default=False),
            extra_info = dict(required=False, default=""),
            port_open = dict(required=False, type='bool', default=False),
            login_name = dict(required=True),
            login_password = dict(required=True, no_log=True),
            machine = dict(required=False, default=False),
        ),
        supports_check_mode=True
    )
    app_name = module.params['name']
    app_type = module.params['type']
    app_state = module.params['state']

    # Log in once; the returned session id authenticates every later call.
    if module.params['machine']:
        session_id, account = webfaction.login(
            module.params['login_name'],
            module.params['login_password'],
            module.params['machine']
        )
    else:
        session_id, account = webfaction.login(
            module.params['login_name'],
            module.params['login_password']
        )

    app_list = webfaction.list_apps(session_id)
    app_map = dict([(i['name'], i) for i in app_list])
    existing_app = app_map.get(app_name)

    result = {}

    # Here's where the real stuff happens

    if app_state == 'present':

        # Does an app with this name already exist?
        if existing_app:
            if existing_app['type'] != app_type:
                module.fail_json(msg="App already exists with different type. Please fix by hand.")

            # If it exists with the right type, we don't change it
            # Should check other parameters.
            module.exit_json(
                changed = False,
            )

        if not module.check_mode:
            # If this isn't a dry run, create the app
            result.update(
                webfaction.create_app(
                    session_id, app_name, app_type,
                    module.boolean(module.params['autostart']),
                    module.params['extra_info'],
                    module.boolean(module.params['port_open'])
                )
            )

    elif app_state == 'absent':

        # If the app's already not there, nothing changed.
        if not existing_app:
            module.exit_json(
                changed = False,
            )

        if not module.check_mode:
            # If this isn't a dry run, delete the app
            result.update(
                webfaction.delete_app(session_id, app_name)
            )

    else:
        module.fail_json(msg="Unknown state specified: {}".format(app_state))

    # NOTE(review): in check mode this still reports changed=True with an
    # empty result -- presumably meaning "would change"; confirm intent.
    module.exit_json(
        changed = True,
        result = result
    )
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
avikivity/scylla | tools/scyllatop/views/aggregate.py | 2 | 1522 | from . import groups
from . import table
from . import base
from . import helpers
class Aggregate(base.Base):
    """Curses view rendering one aggregated row (average/total) per metric group."""

    def update(self, liveData):
        # Redraw the whole screen from the latest measurements.
        self.clearScreen()
        self.writeStatusLine(liveData.measurements)
        metricGroups = groups.Groups(liveData.measurements)
        visible = metricGroups.all()
        tableForm = self._prepareTable(visible)
        for row in tableForm.rows():
            self.writeLine(row)
        self.refresh()

    def _prepareTable(self, groups):
        # NOTE: the parameter shadows the module name 'groups'; harmless here
        # because the module is not referenced inside this method.
        result = table.Table('lr')
        for group in groups:
            formatted = 'avg[{0}] tot[{1}]'.format(
                helpers.formatValues(group.aggregate(self._mean)),
                helpers.formatValues(group.aggregate(self._sum)))
            result.add(self._label(group), formatted)
        return result

    def _mean(self, values):
        """Arithmetic mean of the parseable values, or a placeholder string."""
        valid = self._valid(values)
        if len(valid) == 0:
            return 'not available'
        # Idiom fix: sum(valid) instead of the redundant sum(x for x in valid).
        return sum(valid) / len(valid)

    def _sum(self, values):
        """Total of the parseable values (0 when none parse)."""
        return sum(self._valid(values))

    def _valid(self, values):
        """Floats parsed from *values*, silently dropping non-numeric entries."""
        # Single pass instead of building the intermediate list of
        # float-or-None results first.
        return [v for v in (self._float(x) for x in values) if v is not None]

    def _float(self, value):
        # Returns None when the value cannot be parsed as a float.
        try:
            return float(value)
        except ValueError:
            return None

    def _label(self, group):
        """Display label: 'name(size)'."""
        return '{label}({size})'.format(label=group.label, size=group.size)
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_zipfile64.py | 158 | 4421 | # Tests of the full ZIP64 functionality of zipfile
# The test_support.requires call is the only reason for keeping this separate
# from test_zipfile
from test import test_support
# XXX(nnorwitz): disable this test by looking for extra largfile resource
# which doesn't exist. This test takes over 30 minutes to run in general
# and requires more disk space than most of the buildbots.
test_support.requires(
'extralargefile',
'test requires loads of disk-space bytes and a long time to run'
)
# We can test part of the module without zlib.
try:
import zlib
except ImportError:
zlib = None
import zipfile, os, unittest
import time
import sys
from tempfile import TemporaryFile
from test.test_support import TESTFN, run_unittest
TESTFN2 = TESTFN + "2"
# How much time in seconds can pass before we print a 'Still working' message.
_PRINT_WORKING_MSG_INTERVAL = 5 * 60
class TestsWithSourceFile(unittest.TestCase):
    # ZIP64 round-trip tests: each run writes roughly 6 GB of raw data, which
    # is why the module requires the 'extralargefile' resource up front.

    def setUp(self):
        # Create test data.
        # xrange() is important here -- don't want to create immortal space
        # for a million ints.
        line_gen = ("Test of zipfile line %d." % i for i in xrange(1000000))
        self.data = '\n'.join(line_gen)

        # And write it to a file.
        fp = open(TESTFN, "wb")
        fp.write(self.data)
        fp.close()

    def zipTest(self, f, compression):
        # Write enough copies of self.data to exceed the ZIP64 threshold,
        # then read every member back and verify it.
        # Create the ZIP archive.
        zipfp = zipfile.ZipFile(f, "w", compression, allowZip64=True)

        # It will contain enough copies of self.data to reach about 6GB of
        # raw data to store.
        filecount = 6*1024**3 // len(self.data)

        next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
        for num in range(filecount):
            zipfp.writestr("testfn%d" % num, self.data)
            # Print still working message since this test can be really slow
            if next_time <= time.time():
                next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
                print >>sys.__stdout__, (
                   ' zipTest still writing %d of %d, be patient...' %
                   (num, filecount))
                sys.__stdout__.flush()
        zipfp.close()

        # Read the ZIP archive
        zipfp = zipfile.ZipFile(f, "r", compression)
        for num in range(filecount):
            self.assertEqual(zipfp.read("testfn%d" % num), self.data)
            # Print still working message since this test can be really slow
            if next_time <= time.time():
                next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
                print >>sys.__stdout__, (
                   ' zipTest still reading %d of %d, be patient...' %
                   (num, filecount))
                sys.__stdout__.flush()
        zipfp.close()

    def testStored(self):
        # Try the temp file first. If we do TESTFN2 first, then it hogs
        # gigabytes of disk space for the duration of the test.
        for f in TemporaryFile(), TESTFN2:
            self.zipTest(f, zipfile.ZIP_STORED)

    if zlib:
        def testDeflated(self):
            # Try the temp file first. If we do TESTFN2 first, then it hogs
            # gigabytes of disk space for the duration of the test.
            for f in TemporaryFile(), TESTFN2:
                self.zipTest(f, zipfile.ZIP_DEFLATED)

    def tearDown(self):
        # Remove any on-disk archives the tests left behind.
        for fname in TESTFN, TESTFN2:
            if os.path.exists(fname):
                os.remove(fname)
class OtherTests(unittest.TestCase):
    def testMoreThan64kFiles(self):
        # This test checks that more than 64k files can be added to an archive,
        # and that the resulting archive can be read properly by ZipFile
        zipf = zipfile.ZipFile(TESTFN, mode="w")
        zipf.debug = 100
        numfiles = (1 << 16) * 3/2
        for i in xrange(numfiles):
            zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
        self.assertEqual(len(zipf.namelist()), numfiles)
        zipf.close()

        zipf2 = zipfile.ZipFile(TESTFN, mode="r")
        self.assertEqual(len(zipf2.namelist()), numfiles)
        for i in xrange(numfiles):
            self.assertEqual(zipf2.read("foo%08d" % i), "%d" % (i**3 % 57))
        # Bug fix: close the handle opened for reading.  The original called
        # zipf.close() again (zipf was already closed above) and leaked
        # zipf2, which keeps TESTFN open and breaks tearDown's unlink on
        # platforms that forbid deleting open files.
        zipf2.close()

    def tearDown(self):
        # Best-effort cleanup of the archive files.
        test_support.unlink(TESTFN)
        test_support.unlink(TESTFN2)
def test_main():
    """Entry point used by the regression-test driver to run both suites."""
    run_unittest(TestsWithSourceFile, OtherTests)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
resmo/ansible | lib/ansible/modules/network/fortios/fortios_webfilter_content.py | 13 | 13586 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_content
short_description: Configure Web filter banned word table in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and content category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
webfilter_content:
description:
- Configure Web filter banned word table.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
comment:
description:
- Optional comments.
type: str
entries:
description:
- Configure banned word entries.
type: list
suboptions:
action:
description:
- Block or exempt word when a match is found.
type: str
choices:
- block
- exempt
lang:
description:
- Language of banned word.
type: str
choices:
- western
- simch
- trach
- japanese
- korean
- french
- thai
- spanish
- cyrillic
name:
description:
- Banned word.
required: true
type: str
pattern_type:
description:
- "Banned word pattern type: wildcard pattern or Perl regular expression."
type: str
choices:
- wildcard
- regexp
score:
description:
- Score, to be applied every time the word appears on a web page (0 - 4294967295).
type: int
status:
description:
- Enable/disable banned word.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
name:
description:
- Name of table.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure Web filter banned word table.
fortios_webfilter_content:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
webfilter_content:
comment: "Optional comments."
entries:
-
action: "block"
lang: "western"
name: "default_name_7"
pattern_type: "wildcard"
score: "9"
status: "enable"
id: "11"
name: "default_name_12"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate using the legacy fortiosapi client.

    Enables debug output, selects HTTPS (on unless explicitly disabled via
    https=False) and performs the login with the supplied credentials.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS defaults to on; only an explicit https=False turns it off.
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(host, username, password, verify=ssl_verify)
def filter_webfilter_content_data(json):
option_list = ['comment', 'entries', 'id',
'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from foo_bar to foo-bar (FortiOS API style).

    Lists are converted in place, dicts are rebuilt, scalars are returned
    unchanged; the (possibly new) structure is returned.
    """
    if isinstance(data, list):
        # Bug fix: write the converted element back into the list.  The
        # original assigned only to the loop variable, so dicts nested
        # inside lists were never actually converted.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def webfilter_content(data, fos):
    """Create or delete a webfilter content table based on the desired state."""
    vdom = data['vdom']
    # Resolve the desired state: the top-level 'state' wins; otherwise fall
    # back to the legacy per-resource 'state' nested in webfilter_content.
    if 'state' in data and data['state']:
        state = data['state']
    elif 'state' in data['webfilter_content'] and data['webfilter_content']:
        state = data['webfilter_content']['state']
    else:
        state = True
    webfilter_content_data = data['webfilter_content']
    filtered_data = underscore_to_hyphen(filter_webfilter_content_data(webfilter_content_data))

    # NOTE(review): if 'state' resolves to anything other than "present" or
    # "absent" (e.g. the True fallback above, or a nested state of None),
    # neither branch runs and this function returns None, which the caller
    # then subscripts -- verify this path cannot be reached in practice.
    if state == "present":
        return fos.set('webfilter',
                       'content',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        # 'id' is the mkey for this table and is a required option.
        return fos.delete('webfilter',
                          'content',
                          mkey=filtered_data['id'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True for an API success, or for a DELETE that returned 404.

    A 404 on DELETE means the object was already absent, which callers treat
    as success.  Preserves short-circuiting: 'http_method'/'http_status' are
    only examined when the status is not already "success".
    """
    succeeded = status['status'] == "success"
    if not succeeded:
        succeeded = (status['http_method'] == "DELETE"
                     and status['http_status'] == 404)
    return succeeded
def fortios_webfilter(data, fos):
    """Dispatch the webfilter_content request and summarise the outcome.

    Returns (is_error, has_changed, raw_response).
    """
    if data['webfilter_content']:
        resp = webfilter_content(data, fos)

    # NOTE(review): if 'webfilter_content' is empty/None, 'resp' is never
    # bound and the line below raises NameError -- preserved from original.
    failed = not is_successful_status(resp)
    changed = resp['status'] == "success"
    return failed, changed, resp
def main():
    """Ansible entry point: parse arguments and apply the webfilter_content config."""
    # Argument spec.  host/username/password are only used in legacy
    # (fortiosapi) mode; with HTTPAPI the connection comes from the socket.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "webfilter_content": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "comment": {"required": False, "type": "str"},
                "entries": {"required": False, "type": "list",
                            "options": {
                                "action": {"required": False, "type": "str",
                                           "choices": ["block", "exempt"]},
                                "lang": {"required": False, "type": "str",
                                         "choices": ["western", "simch", "trach",
                                                     "japanese", "korean", "french",
                                                     "thai", "spanish", "cyrillic"]},
                                "name": {"required": True, "type": "str"},
                                "pattern_type": {"required": False, "type": "str",
                                                 "choices": ["wildcard", "regexp"]},
                                "score": {"required": False, "type": "int"},
                                "status": {"required": False, "type": "str",
                                           "choices": ["enable", "disable"]}
                            }},
                "id": {"required": True, "type": "int"},
                "name": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection Ansible established.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_webfilter(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: talk to the device directly through fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_webfilter(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" is upstream boilerplate; the message
        # is misleading but kept byte-identical here.
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
bac/horizon | openstack_dashboard/dashboards/admin/aggregates/tests.py | 3 | 20879 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.aggregates import constants
from openstack_dashboard.dashboards.admin.aggregates import workflows
from openstack_dashboard.test import helpers as test
class BaseAggregateWorkflowTests(test.BaseAdminViewTests):
    """Shared helpers for building aggregate workflow form data in tests."""

    # Form field that carries the selected hosts in both workflows.
    _HOST_FIELD = 'add_host_to_aggregate_role_member'

    def _compute_host_names(self, hosts):
        # Only hosts running the 'compute' service may join an aggregate.
        return [host.host_name for host in hosts if host.service == 'compute']

    def _get_create_workflow_data(self, aggregate, hosts=None):
        """Form data for the create-aggregate workflow (name + AZ + hosts)."""
        aggregate_info = {"name": aggregate.name,
                          "availability_zone": aggregate.availability_zone}

        if hosts:
            aggregate_info[self._HOST_FIELD] = self._compute_host_names(hosts)

        return aggregate_info

    def _get_manage_workflow_data(self, aggregate, hosts=None):
        """Form data for the manage-hosts workflow (aggregate id + hosts)."""
        aggregate_info = {"id": aggregate.id}

        if hosts:
            # Deduplicated: the compute-host filtering used to be copy-pasted
            # in both helpers.
            aggregate_info[self._HOST_FIELD] = self._compute_host_names(hosts)

        return aggregate_info
class CreateAggregateWorkflowTests(BaseAggregateWorkflowTests):
    """Tests for the host-aggregate creation workflow (mox record/replay)."""
    @test.create_stubs({api.nova: ('host_list', ), })
    def test_workflow_get(self):
        """GET renders the workflow with its two expected steps."""
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.get(url)
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, constants.AGGREGATES_CREATE_VIEW_TEMPLATE)
        self.assertEqual(workflow.name, workflows.CreateAggregateWorkflow.name)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<SetAggregateInfoStep: set_aggregate_info>',
             '<AddHostsToAggregateStep: add_host_to_aggregate>'])
    @test.create_stubs({api.nova: ('host_list', 'aggregate_details_list',
                                   'aggregate_create'), })
    def _test_generic_create_aggregate(self, workflow_data, aggregate,
                                       error_count=0,
                                       expected_error_message=None):
        """POST the workflow; expect success or the given form error.

        aggregate_create is only recorded on the success path because the
        form should fail validation before reaching the API otherwise.
        """
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        api.nova.aggregate_details_list(IsA(http.HttpRequest)).AndReturn([])
        if not expected_error_message:
            api.nova.aggregate_create(
                IsA(http.HttpRequest),
                name=workflow_data['name'],
                availability_zone=workflow_data['availability_zone'],
            ).AndReturn(aggregate)
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.post(url, workflow_data)
        if not expected_error_message:
            self.assertNoFormErrors(res)
            self.assertRedirectsNoFollow(
                res, reverse(constants.AGGREGATES_INDEX_URL))
        else:
            self.assertFormErrors(res, error_count, expected_error_message)
    def test_create_aggregate(self):
        """Creating an aggregate with valid data succeeds."""
        aggregate = self.aggregates.first()
        workflow_data = self._get_create_workflow_data(aggregate)
        self._test_generic_create_aggregate(workflow_data, aggregate)
    def test_create_aggregate_fails_missing_fields(self):
        """Blank name/availability zone raise a required-field form error."""
        aggregate = self.aggregates.first()
        workflow_data = self._get_create_workflow_data(aggregate)
        workflow_data['name'] = ''
        workflow_data['availability_zone'] = ''
        self._test_generic_create_aggregate(workflow_data, aggregate, 1,
                                            u'This field is required')
    @test.create_stubs({api.nova: ('host_list',
                                   'aggregate_details_list',
                                   'aggregate_create',
                                   'add_host_to_aggregate'), })
    def test_create_aggregate_with_hosts(self):
        """Creating an aggregate also adds each selected compute host."""
        aggregate = self.aggregates.first()
        hosts = self.hosts.list()
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        api.nova.aggregate_details_list(IsA(http.HttpRequest)).AndReturn([])
        workflow_data = self._get_create_workflow_data(aggregate, hosts)
        api.nova.aggregate_create(
            IsA(http.HttpRequest),
            name=workflow_data['name'],
            availability_zone=workflow_data['availability_zone'],
        ).AndReturn(aggregate)
        compute_hosts = []
        for host in hosts:
            if host.service == 'compute':
                compute_hosts.append(host)
        # Host additions may happen in any order, so record them as such.
        for host in compute_hosts:
            api.nova.add_host_to_aggregate(
                IsA(http.HttpRequest),
                aggregate.id, host.host_name).InAnyOrder()
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))
    @test.create_stubs({api.nova: ('host_list', 'aggregate_details_list', ), })
    def test_host_list_nova_compute(self):
        """The host step only offers hosts running the compute service."""
        hosts = self.hosts.list()
        compute_hosts = []
        for host in hosts:
            if host.service == 'compute':
                compute_hosts.append(host)
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.get(url)
        workflow = res.context['workflow']
        step = workflow.get_step("add_host_to_aggregate")
        field_name = step.get_member_field_name('member')
        self.assertEqual(len(step.action.fields[field_name].choices),
                         len(compute_hosts))
class AggregatesViewTests(test.BaseAdminViewTests):
    """Tests for the aggregates index view and the update form."""
    @mock.patch('openstack_dashboard.api.nova.extension_supported',
                mock.Mock(return_value=False))
    @test.create_stubs({api.nova: ('aggregate_details_list',
                                   'availability_zone_list',
                                   'tenant_absolute_limits',),
                        api.cinder: ('tenant_absolute_limits',),
                        api.neutron: ('is_extension_supported',),
                        api.network: ('tenant_floating_ip_list',
                                      'security_group_list'),
                        api.keystone: ('tenant_list',)})
    def test_panel_not_available(self):
        """With the panel patcher stopped, the overview omits aggregates."""
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(self.limits['absolute'])
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(self.cinder_limits['absolute'])
        api.neutron.\
            is_extension_supported(IsA(http.HttpRequest), 'security-group'). \
            MultipleTimes().AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.keystone.tenant_list(IsA(http.HttpRequest)) \
            .AndReturn(self.tenants.list())
        self.mox.ReplayAll()
        self.patchers['aggregates'].stop()
        res = self.client.get(reverse('horizon:admin:overview:index'))
        self.assertNotIn(b'Host Aggregates', res.content)
    @test.create_stubs({api.nova: ('aggregate_details_list',
                                   'availability_zone_list',)})
    def test_index(self):
        """Index view lists aggregates and availability zones."""
        api.nova.aggregate_details_list(IsA(http.HttpRequest)) \
            .AndReturn(self.aggregates.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest), detailed=True) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse(constants.AGGREGATES_INDEX_URL))
        self.assertTemplateUsed(res, constants.AGGREGATES_INDEX_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['host_aggregates_table'].data,
                              self.aggregates.list())
        self.assertItemsEqual(res.context['availability_zones_table'].data,
                              self.availability_zones.list())
    @test.create_stubs({api.nova: ('aggregate_update', 'aggregate_get',), })
    def _test_generic_update_aggregate(self, form_data, aggregate,
                                       error_count=0,
                                       expected_error_message=None):
        """POST the update form; expect success or the given form error."""
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id))\
            .AndReturn(aggregate)
        if not expected_error_message:
            az = form_data['availability_zone']
            aggregate_data = {'name': form_data['name'],
                              'availability_zone': az}
            api.nova.aggregate_update(IsA(http.HttpRequest), str(aggregate.id),
                                      aggregate_data)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_UPDATE_URL,
                                       args=[aggregate.id]),
                               form_data)
        if not expected_error_message:
            self.assertNoFormErrors(res)
            self.assertRedirectsNoFollow(
                res, reverse(constants.AGGREGATES_INDEX_URL))
        else:
            self.assertFormErrors(res, error_count, expected_error_message)
    def test_update_aggregate(self):
        """Updating name and availability zone succeeds."""
        aggregate = self.aggregates.first()
        form_data = {'id': aggregate.id,
                     'name': 'my_new_name',
                     'availability_zone': 'my_new_zone'}
        self._test_generic_update_aggregate(form_data, aggregate)
    def test_update_aggregate_fails_missing_fields(self):
        """Missing name/availability zone raise a required-field error."""
        aggregate = self.aggregates.first()
        form_data = {'id': aggregate.id}
        self._test_generic_update_aggregate(form_data, aggregate, 1,
                                            u'This field is required')
class ManageHostsTests(test.BaseAdminViewTests):
    """Tests for adding/removing hosts to/from an aggregate."""
    @test.create_stubs({api.nova: ('aggregate_get', 'host_list')})
    def test_manage_hosts(self):
        """GET renders the manage-hosts workflow page."""
        aggregate = self.aggregates.first()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                      args=[aggregate.id]))
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res,
                                constants.AGGREGATES_MANAGE_HOSTS_TEMPLATE)
    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_add_remove_not_empty_aggregate(self):
        """Submitting a new member set removes old hosts and adds new ones."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host1', 'host2']
        host = self.hosts.list()[0]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        # Removals may be issued in any order by the workflow.
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host2').InAnyOrder()
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host1').InAnyOrder()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                       str(aggregate.id), host.host_name)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))
    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_add_not_empty_aggregate_should_fail(self):
        """An API failure while adding a host surfaces as error messages."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['devstack001']
        host1 = self.hosts.list()[0]
        host3 = self.hosts.list()[2]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host1.host_name, host3.host_name]}
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .InAnyOrder().AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .InAnyOrder().AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .InAnyOrder().AndReturn(aggregate)
        api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                       str(aggregate.id), host3.host_name) \
            .InAnyOrder().AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))
    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_clean_not_empty_aggregate_should_fail(self):
        """An API failure while removing a host surfaces as error messages."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host2']
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host2')\
            .AndRaise(self.exceptions.nova)
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))
    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def _test_manage_hosts_update(self,
                                  host,
                                  aggregate,
                                  form_data,
                                  addAggregate=False,
                                  cleanAggregates=False):
        """Shared driver: record removals/additions, POST, expect success.

        cleanAggregates records removal of the fixed hosts host1-3;
        addAggregate records a single add of *host*.
        """
        if cleanAggregates:
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host3').InAnyOrder()
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host2').InAnyOrder()
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host1').InAnyOrder()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        if addAggregate:
            api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                           str(aggregate.id),
                                           host.host_name)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))
    def test_manage_hosts_update_nothing_not_empty_aggregate(self):
        """Re-submitting the current membership triggers no API calls."""
        aggregate = self.aggregates.first()
        host = self.hosts.list()[0]
        aggregate.hosts = [host.host_name]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        self._test_manage_hosts_update(host,
                                       aggregate,
                                       form_data,
                                       addAggregate=False)
    def test_manage_hosts_update_nothing_empty_aggregate(self):
        """Empty membership on an empty aggregate is a no-op."""
        aggregate = self.aggregates.first()
        aggregate.hosts = []
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        self._test_manage_hosts_update(None,
                                       aggregate,
                                       form_data,
                                       addAggregate=False)
    def test_manage_hosts_update_add_empty_aggregate(self):
        """Adding one host to an empty aggregate records one add call."""
        aggregate = self.aggregates.first()
        aggregate.hosts = []
        host = self.hosts.list()[0]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        self._test_manage_hosts_update(host,
                                       aggregate,
                                       form_data,
                                       addAggregate=True)
    def test_manage_hosts_update_add_not_empty_aggregate(self):
        """Adding a host to a non-empty aggregate only adds the new one."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['devstack001']
        host1 = self.hosts.list()[0]
        host3 = self.hosts.list()[2]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host1.host_name, host3.host_name]}
        self._test_manage_hosts_update(host3,
                                       aggregate,
                                       form_data,
                                       addAggregate=True)
    def test_manage_hosts_update_clean_not_empty_aggregate(self):
        """Submitting an empty membership removes every current host."""
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host1', 'host2', 'host3']
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        self._test_manage_hosts_update(None,
                                       aggregate,
                                       form_data,
                                       addAggregate=False,
                                       cleanAggregates=True)
| apache-2.0 |
esander91/zulip | zerver/lib/bulk_create.py | 121 | 5432 | from __future__ import absolute_import
from zerver.lib.initial_password import initial_password
from zerver.models import Realm, Stream, UserProfile, Huddle, \
Subscription, Recipient, Client, get_huddle_hash, resolve_email_to_domain
from zerver.lib.create_user import create_user_profile
def bulk_create_realms(realm_list):
    """Insert a Realm row for every domain in realm_list that does not
    already exist, using a single bulk INSERT."""
    seen_domains = set(realm.domain
                       for realm in Realm.objects.select_related().all())
    pending = []
    for candidate in realm_list:
        if candidate in seen_domains:
            continue
        seen_domains.add(candidate)
        pending.append(Realm(domain=candidate, name=candidate))
    Realm.objects.bulk_create(pending)
def bulk_create_users(realms, users_raw, bot=False):
    """
    Creates and saves a UserProfile with the given email.
    Has some code based off of UserManage.create_user, but doesn't .save()
    """
    # Filter out emails that already have a UserProfile; also dedups
    # within users_raw itself.
    users = []
    existing_users = set(u.email for u in UserProfile.objects.all())
    for (email, full_name, short_name, active) in users_raw:
        if email in existing_users:
            continue
        users.append((email, full_name, short_name, active))
        existing_users.add(email)
    # Now create user_profiles
    profiles_to_create = []
    for (email, full_name, short_name, active) in users:
        domain = resolve_email_to_domain(email)
        profile = create_user_profile(realms[domain], email,
                                      initial_password(email), active, bot,
                                      full_name, short_name, None, False)
        profiles_to_create.append(profile)
    UserProfile.objects.bulk_create(profiles_to_create)
    # Re-query so the freshly inserted profiles carry database ids, which
    # the Recipient rows below need.
    profiles_by_email = {}
    profiles_by_id = {}
    for profile in UserProfile.objects.select_related().all():
        profiles_by_email[profile.email] = profile
        profiles_by_id[profile.id] = profile
    # One PERSONAL Recipient per newly created user.
    recipients_to_create = []
    for (email, _, _, _) in users:
        recipients_to_create.append(Recipient(type_id=profiles_by_email[email].id,
                                              type=Recipient.PERSONAL))
    Recipient.objects.bulk_create(recipients_to_create)
    # Re-query again for the Recipient ids, then subscribe each new user
    # to their own personal recipient.
    recipients_by_email = {}
    for recipient in Recipient.objects.filter(type=Recipient.PERSONAL):
        recipients_by_email[profiles_by_id[recipient.type_id].email] = recipient
    subscriptions_to_create = []
    for (email, _, _, _) in users:
        subscriptions_to_create.append(
            Subscription(user_profile_id=profiles_by_email[email].id,
                         recipient=recipients_by_email[email]))
    Subscription.objects.bulk_create(subscriptions_to_create)
def bulk_create_streams(realms, stream_list):
    """Bulk-insert Stream rows (and their STREAM Recipients) for each
    (domain, name) pair that does not already exist."""
    known = set((stream.realm.domain, stream.name.lower())
                for stream in Stream.objects.select_related().all())
    new_streams = [Stream(realm=realms[domain], name=name)
                   for (domain, name) in stream_list
                   if (domain, name.lower()) not in known]
    Stream.objects.bulk_create(new_streams)
    # Re-query so the freshly inserted streams have database ids for
    # their Recipient rows.
    new_recipients = [Recipient(type_id=stream.id, type=Recipient.STREAM)
                      for stream in Stream.objects.select_related().all()
                      if (stream.realm.domain, stream.name.lower()) not in known]
    Recipient.objects.bulk_create(new_recipients)
def bulk_create_clients(client_list):
    """Insert a Client row for every name in client_list that is not
    already present, using a single bulk INSERT."""
    known_names = set(client.name
                      for client in Client.objects.select_related().all())
    pending = []
    for client_name in client_list:
        if client_name in known_names:
            continue
        known_names.add(client_name)
        pending.append(Client(name=client_name))
    Client.objects.bulk_create(pending)
def bulk_create_huddles(users, huddle_user_list):
    """Bulk-create Huddle rows (plus Recipients and Subscriptions) for each
    group of emails in huddle_user_list that does not already exist.

    users maps email -> UserProfile; huddles are keyed by a hash of the
    sorted participant ids.
    """
    huddles = {}
    huddles_by_id = {}
    huddle_set = set()
    existing_huddles = set()
    for huddle in Huddle.objects.all():
        existing_huddles.add(huddle.huddle_hash)
    for huddle_users in huddle_user_list:
        user_ids = [users[email].id for email in huddle_users]
        huddle_hash = get_huddle_hash(user_ids)
        if huddle_hash in existing_huddles:
            continue
        huddle_set.add((huddle_hash, tuple(sorted(user_ids))))
    huddles_to_create = []
    for (huddle_hash, _) in huddle_set:
        huddles_to_create.append(Huddle(huddle_hash=huddle_hash))
    Huddle.objects.bulk_create(huddles_to_create)
    # Re-query so the new huddles carry database ids for the Recipients.
    for huddle in Huddle.objects.all():
        huddles[huddle.huddle_hash] = huddle
        huddles_by_id[huddle.id] = huddle
    recipients_to_create = []
    for (huddle_hash, _) in huddle_set:
        recipients_to_create.append(Recipient(type_id=huddles[huddle_hash].id, type=Recipient.HUDDLE))
    Recipient.objects.bulk_create(recipients_to_create)
    # Re-query once more for Recipient ids, then subscribe every member
    # of each new huddle.
    huddle_recipients = {}
    for recipient in Recipient.objects.filter(type=Recipient.HUDDLE):
        huddle_recipients[huddles_by_id[recipient.type_id].huddle_hash] = recipient
    subscriptions_to_create = []
    for (huddle_hash, huddle_user_ids) in huddle_set:
        for user_id in huddle_user_ids:
            subscriptions_to_create.append(Subscription(active=True, user_profile_id=user_id,
                                                        recipient=huddle_recipients[huddle_hash]))
    Subscription.objects.bulk_create(subscriptions_to_create)
| apache-2.0 |
Liuftvafas/python-face-client | face_client/multipart.py | 2 | 5098 | '''
Classes for using multipart form data from Python, which does not (at the
time of writing) support this directly.
To use this, make an instance of Multipart and add parts to it via the factory
methods field and file. When you are done, get the content via the get method.
@author: Stacy Prowell (http://stacyprowell.com)
'''
import mimetypes
class Part(object):
    '''
    Class holding a single part of the form. You should never need to use
    this class directly; instead, use the factory methods in Multipart:
    field and file.
    '''

    # The boundary to use. This is shamelessly taken from the standard.
    BOUNDARY = '----------AaB03x'
    CRLF = '\r\n'
    # Common headers.
    CONTENT_TYPE = 'Content-Type'
    CONTENT_DISPOSITION = 'Content-Disposition'
    # The default content type for parts.
    DEFAULT_CONTENT_TYPE = 'application/octet-stream'

    def __init__(self, name, filename, body, headers):
        '''
        Make a new part. The part will have the given headers added initially.

        @param name: The part name.
        @type name: str
        @param filename: If this is a file, the name of the file. Otherwise
                         None.
        @type filename: str
        @param body: The body of the part.
        @type body: str
        @param headers: Additional headers, or overrides, for this part.
                        You can override Content-Type here.
        @type headers: dict
        '''
        self._headers = headers.copy()
        self._name = name
        self._filename = filename
        self._body = body
        # We respect any content type passed in, but otherwise set it here.
        # We set the content disposition now, overwriting any prior value.
        if self._filename is None:
            self._headers[Part.CONTENT_DISPOSITION] = (
                'form-data; name="%s"' % self._name)
            self._headers.setdefault(Part.CONTENT_TYPE,
                                     Part.DEFAULT_CONTENT_TYPE)
        else:
            self._headers[Part.CONTENT_DISPOSITION] = (
                'form-data; name="%s"; filename="%s"' %
                (self._name, self._filename))
            # Guess the type from the filename, falling back to the default.
            self._headers.setdefault(Part.CONTENT_TYPE,
                                     mimetypes.guess_type(filename)[0]
                                     or Part.DEFAULT_CONTENT_TYPE)

    def get(self):
        '''
        Convert the part into a list of lines for output. This includes
        the boundary lines, part header lines, and the part itself. A
        blank line is included between the header and the body.

        @return: Lines of this part.
        @rtype: list
        '''
        lines = []
        lines.append('--' + Part.BOUNDARY)
        for (key, val) in self._headers.items():
            lines.append(str('%s: %s' % (key, val)))
        lines.append('')
        lines.append(self._body)
        return lines
class Multipart(object):
    '''
    Encapsulate multipart form data. To use this, make an instance and then
    add parts to it via the two methods (field and file). When done, you can
    get the result via the get method.

    See http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2 for
    details on multipart/form-data.

    Watch http://bugs.python.org/issue3244 to see if this is fixed in the
    Python libraries.
    '''

    def __init__(self):
        # Ordered list of Part instances composing the form.
        self.parts = []

    def field(self, name, value, headers=None):
        '''
        Create and append a field part. This kind of part has a field name
        and value.

        @param name: The field name.
        @type name: str
        @param value: The field value.
        @type value: str
        @param headers: Headers to set in addition to disposition.
        @type headers: dict
        '''
        # Note: default is None (not {}) to avoid a shared mutable default.
        self.parts.append(Part(name, None, value, headers or {}))

    def file(self, name, filename, value, headers=None):
        '''
        Create and append a file part. This kind of part has a field name,
        a filename, and a value.

        @param name: The field name.
        @type name: str
        @param filename: The file name reported for the part.
        @type filename: str
        @param value: The field value.
        @type value: str
        @param headers: Headers to set in addition to disposition.
        @type headers: dict
        '''
        self.parts.append(Part(name, filename, value, headers or {}))

    def get(self):
        '''
        Get the multipart form data. This returns the content type, which
        specifies the boundary marker, and also returns the body containing
        all parts and boundary markers.

        @return: content type, body
        @rtype: tuple
        '''
        lines = []
        for part in self.parts:
            lines += part.get()
        lines.append('--' + Part.BOUNDARY + '--')
        lines.append('')
        # We have to return the content type, since it specifies the boundary.
        content_type = 'multipart/form-data; boundary=%s' % Part.BOUNDARY
        return content_type, Part.CRLF.join(lines)
| bsd-3-clause |
spiceqa/virt-test | v2v/get_started.py | 3 | 1593 | #!/usr/bin/python
"""
Program to help setup kvm test environment
:copyright: Red Hat 2010
"""
import os
import sys
import logging
import common
from virttest import data_dir, bootstrap
# Configuration for bootstrap.bootstrap() below.
test_name = "v2v"
# Absolute directory containing this module.
test_dir = os.path.dirname(sys.modules[__name__].__file__)
test_dir = os.path.abspath(test_dir)
# Base data dir (ISOs, images) from the virttest configuration.
base_dir = data_dir.get_data_dir()
# Binaries that must be present for the v2v tests to run.
default_userspace_paths = ["/usr/bin/virt-v2v"]
check_modules = None
online_docs_url = None
interactive = True
if __name__ == "__main__":
    # Parse command-line options and run the kvm/v2v environment bootstrap.
    import optparse
    option_parser = optparse.OptionParser()
    option_parser.add_option("-v", "--verbose",
                             action="store_true", dest="verbose",
                             help="Exhibit debug messages")
    option_parser.add_option("-r", "--restore-image",
                             action="store_true", dest="restore",
                             help="Restore image from pristine image")
    option_parser.add_option("--data-dir", action="store", dest="datadir",
                             help="Path to a data dir (that locates ISOS and images)")
    options, args = option_parser.parse_args()
    if options.datadir:
        data_dir.set_backing_data_dir(options.datadir)
    try:
        bootstrap.bootstrap(test_name, test_dir, base_dir,
                            default_userspace_paths, check_modules,
                            online_docs_url, interactive=interactive,
                            restore_image=options.restore,
                            verbose=options.verbose)
    # 'except E as e' (valid since Python 2.6) instead of the Python-2-only
    # 'except E, e' form, which is a syntax error on Python 3.
    except Exception as details:
        logging.error("Setup error: %s", details)
| gpl-2.0 |
googleapis/python-asset | google/cloud/asset_v1/services/asset_service/transports/__init__.py | 5 | 1176 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import AssetServiceTransport
from .grpc import AssetServiceGrpcTransport
from .grpc_asyncio import AssetServiceGrpcAsyncIOTransport
# Compile a registry of transports, keyed by transport name, so callers can
# look up the concrete transport class (sync gRPC vs. asyncio gRPC).
_transport_registry = OrderedDict()  # type: Dict[str, Type[AssetServiceTransport]]
_transport_registry["grpc"] = AssetServiceGrpcTransport
_transport_registry["grpc_asyncio"] = AssetServiceGrpcAsyncIOTransport
# Public surface of this sub-package.
__all__ = (
    "AssetServiceTransport",
    "AssetServiceGrpcTransport",
    "AssetServiceGrpcAsyncIOTransport",
)
| apache-2.0 |
lht142934/vnpy | vn.lts/vnltsl2/test/l2test.py | 47 | 7178 | # encoding: UTF-8
import sys
from time import sleep
from PyQt4 import QtGui
from vnltsl2 import *
#----------------------------------------------------------------------
def print_dict(d):
    """Print a dictionary, one 'key:value' pair per line."""
    for key, value in d.items():
        # Parenthesized print works on both Python 2 and Python 3
        # (the rest of this script is Python 2).
        print(key + ':' + str(value))
#----------------------------------------------------------------------
def simple_log(func):
    """Decorator that prints a blank line and the function name before
    each call, then delegates to the wrapped function."""
    import functools

    @functools.wraps(func)  # preserve __name__/__doc__ of the callback
    def wrapper(*args, **kw):
        # Parenthesized print works on both Python 2 and Python 3.
        print("")
        print(func.__name__)
        return func(*args, **kw)
    return wrapper
########################################################################
class TestL2MdApi(L2MdApi):
    """Test instance: prints every callback it receives from the L2 API."""
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(TestL2MdApi, self).__init__()
        self.brokerID = None
        self.userID = None
    #----------------------------------------------------------------------
    @simple_log
    def onFrontConnected(self):
        """Callback: connected to the front server."""
        pass
    #----------------------------------------------------------------------
    @simple_log
    def onFrontDisconnected(self, n):
        """Callback: disconnected from the front server."""
        print n
    #----------------------------------------------------------------------
    @simple_log
    def onHeartBeatWarning(self, n):
        """Heartbeat warning."""
        print n
    #----------------------------------------------------------------------
    @simple_log
    def onRspError(self, error, n, last):
        """Error notification."""
        print_dict(error)
    #----------------------------------------------------------------------
    # NOTE: deliberately not decorated with @simple_log (keeps credentials
    # handling unchanged); it records broker/user ids for later requests.
    def onRspUserLogin(self, data, error, n, last):
        """Login response."""
        self.brokerID = data['BrokerID']
        self.userID = data['UserID']
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspUserLogout(self, data, error, n, last):
        """Logout response."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspSubL2MarketData(self, data, error, n, last):
        """Response to subscribing L2 market data."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspUnSubL2MarketData(self, data, error, n, last):
        """Response to unsubscribing L2 market data."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspSubL2Index(self, data, error, n, last):
        """Response to subscribing the L2 index feed."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspUnSubL2Index(self, data, error, n, last):
        """Response to unsubscribing the L2 index feed."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRtnL2MarketData(self, data):
        """L2 market data push."""
        print_dict(data)
    #----------------------------------------------------------------------
    @simple_log
    def onRtnL2Index(self, data):
        """L2 index push."""
        print_dict(data)
    #----------------------------------------------------------------------
    @simple_log
    def onRtnL2Order(self, data):
        """L2 order push."""
        print_dict(data)
    #----------------------------------------------------------------------
    @simple_log
    def onRtnL2Trade(self, data):
        """L2 trade push."""
        print_dict(data)
    #----------------------------------------------------------------------
    @simple_log
    def onRspSubL2OrderAndTrade(self, error, n, last):
        """Response to subscribing L2 orders and trades."""
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspUnSubL2OrderAndTrade(self, error, n, last):
        """Response to unsubscribing L2 orders and trades."""
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onNtfCheckOrderList(self, instrumentID, functionID):
        """Notification to purge zero-quantity orders from the SSE
        best bid/ask queue."""
        print 'instrumentID: %s' % instrumentID
        print 'functionID: %s' % functionID
#----------------------------------------------------------------------
def main():
    """Main test routine; add sleep() calls if a request appears to block."""
    reqid = 0
    # Create the Qt application object, used for the event loop
    app = QtGui.QApplication(sys.argv)
    # Create the API object
    api = TestL2MdApi()
    # Create the MdApi object on the C++ side; the argument is the directory
    # where the .con flow files will be saved
    api.createFtdcL2MDUserApi('')
    # Register the front server address
    api.registerFront("tcp://101.231.210.2:8900")  # alternative address: tcp://222.66.55.171:8900
    # Initialize the api and connect to the front server
    api.init()
    sleep(0.5)
    # Login (verified working)
    loginReq = {}  # create an empty dict
    loginReq['UserID'] = ''  # parameters are passed as dict key/values
    loginReq['Password'] = ''  # key names match the C++ struct member names
    loginReq['BrokerID'] = '2011'
    loginReq['DataLevel'] = '1'  # '0' full depth, '1' 10 levels, '2' 5 levels
    reqid = reqid + 1  # request ids must stay unique
    i = api.reqUserLogin(loginReq, 1)
    sleep(0.5)
    ## Logout (test failed)
    #reqid = reqid + 1
    #logoutReq = {}
    #logoutReq['UserID'] = api.userID
    #loginReq['BrokerID'] = api.brokerID
    #i = api.reqUserLogout(logoutReq, 1)
    #sleep(0.5)
    ## Safe exit (verified working)
    #i = api.exit()
    # Get the trading day (verified working)
    #day = api.getTradingDay()
    #print 'Trading Day is:' + str(day)
    #sleep(0.5)
    # Subscribe L2 market data for one instrument (verified working)
    subReq = {}
    subReq['InstrumentID'] = '510050'
    subReq['ExchangeID'] = 'SSE'
    i = api.subscribeL2MarketData(subReq)
    ## Unsubscribe L2 market data (verified working)
    #i = api.unSubscribeL2MarketData(subReq)
    ## Subscribe L2 index (verified working)
    #subReq = {}
    #subReq['InstrumentID'] = '000300'
    #subReq['ExchangeID'] = 'SSE'
    #i = api.subscribeL2Index(subReq)
    ## Unsubscribe L2 index (verified working)
    #i = api.unSubscribeL2Index(subReq)
    # Subscribe L2 orders and trades (test reports missing permission)
    i = api.subscribeL2OrderAndTrade()
    # Unsubscribe L2 orders and trades (verified working)
    i = api.unSubscribeL2OrderAndTrade()
    # Run the event loop so market data keeps printing
    app.exec_()
if __name__ == '__main__':
main()
| mit |
jeffery9/mixprint_addons | base_gengo/res_company.py | 34 | 1601 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
    """Extend ``res.company`` with Gengo translation-service settings.

    Stores per-company API credentials and request options used when
    submitting translation jobs to Gengo.
    """
    _name = "res.company"
    _inherit = "res.company"
    _columns = {
        # Gengo API key pair used to authenticate requests
        "gengo_private_key": fields.text("Gengo Private Key"),
        "gengo_public_key": fields.text("Gengo Public Key"),
        # free-form comment attached to every outgoing Gengo request
        "gengo_comment": fields.text("Comments", help="This comment will be automatically be enclosed in each an every request sent to Gengo"),
        "gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo."),
    }
    _defaults = {
        "gengo_auto_approve": True,
    }
| agpl-3.0 |
vrutkovs/atomic-reactor | atomic_reactor/source.py | 2 | 4737 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Code for getting source code to put inside container.
"""
import logging
import copy
import os
import shutil
import tempfile
import collections
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from atomic_reactor import util
logger = logging.getLogger(__name__)
# Intended for use as vcs-type, vcs-url and vcs-ref docker labels as defined
# in https://github.com/projectatomic/ContainerApplicationGenericLabels
VcsInfo = collections.namedtuple('VcsInfo', ['vcs_type', 'vcs_url', 'vcs_ref'])
class Source(object):
    """Base class for fetching source code to build a container from.

    Subclasses implement :meth:`get`; the fetched tree ends up under
    ``self.source_path`` inside the working directory ``self.tmpdir``.
    """

    def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
        """
        :param provider: str, name of the source provider (e.g. 'git', 'path')
        :param uri: str, location of the source (git URL or file:// path)
        :param dockerfile_path: str, optional path of the Dockerfile within the source
        :param provider_params: dict, optional provider-specific settings
        :param tmpdir: str, working directory; a fresh one is created when None
        """
        self.provider = provider
        self.uri = uri
        self.dockerfile_path = dockerfile_path
        self.provider_params = provider_params or {}
        # TODO: do we want to delete tmpdir when destroying the object?
        self.tmpdir = tmpdir or tempfile.mkdtemp()
        logger.debug("workdir is %r", self.tmpdir)
        parsed_uri = urlparse(uri)
        git_reponame = os.path.basename(parsed_uri.path)
        # strip a trailing '.git' so the checkout directory gets a clean name
        if git_reponame.endswith('.git'):
            git_reponame = git_reponame[:-4]
        self.source_path = os.path.join(self.tmpdir, git_reponame)
        logger.debug("source path is %r", self.source_path)

    @property
    def path(self):
        # convenience alias: triggers the fetch on first access
        return self.get()

    @property
    def workdir(self):
        # the temporary directory everything is fetched into
        return self.tmpdir

    def get(self):
        """Run this to get source and save it to `tmpdir` or a newly created tmpdir."""
        raise NotImplementedError('Must override in subclasses!')

    def get_build_file_path(self):
        # TODO: will we need figure_out_build_file as a separate method?
        return util.figure_out_build_file(self.path, self.dockerfile_path)

    def remove_tmpdir(self):
        # removes the whole working directory, not just the checkout
        shutil.rmtree(self.tmpdir)

    def get_vcs_info(self):
        """Returns VcsInfo namedtuple or None if not applicable."""
        return None
class GitSource(Source):
    """Source obtained by cloning a git repository (lazily, via util.LazyGit)."""

    def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
        super(GitSource, self).__init__(provider, uri, dockerfile_path,
                                        provider_params, tmpdir)
        # optional commit/branch/tag to check out; None means the default branch
        self.git_commit = self.provider_params.get('git_commit', None)
        # LazyGit defers the actual clone until the repository is first needed
        self.lg = util.LazyGit(self.uri, self.git_commit, self.source_path)

    @property
    def commit_id(self):
        """Commit hash checked out by the lazy clone."""
        return self.lg.commit_id

    def get(self):
        """Clone the repository (if not done yet) and return the local path."""
        return self.lg.git_path

    def get_vcs_info(self):
        """Describe this source for the vcs-type/vcs-url/vcs-ref docker labels."""
        return VcsInfo(vcs_type='git',
                       vcs_url=self.lg.git_url,
                       vcs_ref=self.lg.commit_id)
class PathSource(Source):
    """Source taken from a local directory, copied into the working dir."""

    def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
        super(PathSource, self).__init__(provider, uri, dockerfile_path,
                                         provider_params, tmpdir)
        # make sure we have canonical URI representation even if we got path without "file://"
        if not self.uri.startswith('file://'):
            self.uri = 'file://' + self.uri
        # plain filesystem path with the scheme prefix removed
        self.schemeless_path = self.uri[len('file://'):]
        # NOTE(review): raises OSError if source_path already exists --
        # presumably callers always supply a fresh tmpdir; confirm before
        # reusing instances or passing a pre-populated tmpdir
        os.makedirs(self.source_path)

    def get(self):
        """Copy the directory contents into source_path and return it."""
        # work around the weird behaviour of copytree, which requires the top dir
        # to *not* exist
        for f in os.listdir(self.schemeless_path):
            old = os.path.join(self.schemeless_path, f)
            new = os.path.join(self.source_path, f)
            if os.path.exists(new):
                # this is the second invocation of this method; just break the loop
                break
            else:
                # preserve metadata: copytree/copy2 both copy file stats
                if os.path.isdir(old):
                    shutil.copytree(old, new)
                else:
                    shutil.copy2(old, new)
        return self.source_path
def get_source_instance_for(source, tmpdir=None):
    """Instantiate the Source subclass matching ``source['provider']``.

    :param source: dict with at least 'provider' and 'uri' keys
    :param tmpdir: str, working directory (a new one is created when None)
    :return: GitSource or PathSource instance
    :raises ValueError: on a malformed dict or unknown provider
    """
    validate_source_dict_schema(source)
    provider = source['provider'].lower()
    # provider name -> implementing class
    klass_by_provider = {'git': GitSource, 'path': PathSource}
    try:
        klass = klass_by_provider[provider]
    except KeyError:
        raise ValueError('unknown source provider "{0}"'.format(provider))
    # work on a deep copy so the caller's dict is left untouched
    args = copy.deepcopy(source)
    args['tmpdir'] = tmpdir
    return klass(**args)
def validate_source_dict_schema(sd):
    """Raise ValueError unless *sd* is a dict carrying 'provider' and 'uri'."""
    if not isinstance(sd, dict):
        raise ValueError('"source" must be a dict')
    # report the first required key that is absent
    missing = (key for key in ('provider', 'uri') if key not in sd)
    for key in missing:
        raise ValueError('"source" must contain "{0}" key'.format(key))
| bsd-3-clause |
rosmo/ansible | lib/ansible/module_utils/facts/network/iscsi.py | 25 | 4647 | # iSCSI initiator related facts collection for Ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import subprocess
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector
class IscsiInitiatorNetworkCollector(NetworkCollector):
    """Collects the iSCSI initiator IQN on Linux/Solaris, AIX and HP-UX."""
    name = 'iscsi'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        """Return a dict with the 'iscsi_iqn' fact ('' when unavailable).

        Example of contents of /etc/iscsi/initiatorname.iscsi:

        ## DO NOT EDIT OR REMOVE THIS FILE!
        ## If you remove this file, the iSCSI daemon will not start.
        ## If you change the InitiatorName, existing access control lists
        ## may reject this initiator.  The InitiatorName must be unique
        ## for each iSCSI initiator.  Do NOT duplicate iSCSI InitiatorNames.
        InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b

        Example of output from the AIX lsattr command:

        # lsattr -E -l iscsi0
        disc_filename  /etc/iscsi/targets            Configuration file                            False
        initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name                          True

        Example of output from the HP-UX iscsiutil command:

        #iscsiutil -l
        Initiator Name             : iqn.1986-03.com.hp:mcel_VMhost3.1f355cf6-e2db-11e0-a999-b44c0aef5537
        Initiator Alias            :
        """
        iscsi_facts = {}
        iscsi_facts['iscsi_iqn'] = ""
        if sys.platform.startswith('linux') or sys.platform.startswith('sunos'):
            for line in get_file_content('/etc/iscsi/initiatorname.iscsi', '').splitlines():
                # skip comments ('#' or ';') and blank lines
                if line.startswith('#') or line.startswith(';') or line.strip() == '':
                    continue
                if line.startswith('InitiatorName='):
                    iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
                    break
        elif sys.platform.startswith('aix'):
            cmd = get_bin_path('lsattr')
            if cmd:
                cmd += " -E -l iscsi0"
                rc, out, err = module.run_command(cmd)
                if rc == 0 and out:
                    line = self.findstr(out, 'initiator_name')
                    # guard: the attribute may be absent from lsattr output
                    if line:
                        iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
        elif sys.platform.startswith('hp-ux'):
            # try to find it in the default PATH and opt_dirs
            cmd = get_bin_path('iscsiutil', opt_dirs=['/opt/iscsi/bin'])
            if cmd:
                cmd += " -l"
                rc, out, err = module.run_command(cmd)
                if out:
                    line = self.findstr(out, 'Initiator Name')
                    # guard: iscsiutil may not print the field at all
                    if line:
                        iscsi_facts['iscsi_iqn'] = line.split(":", 1)[1].rstrip()
        return iscsi_facts

    def findstr(self, text, match):
        """Return the first line of *text* containing *match*, or None.

        Bug fix: the original never initialized its result, raising
        UnboundLocalError when no line matched, and kept scanning after a
        hit (returning the *last* match instead of the first).
        """
        for line in text.splitlines():
            if match in line:
                return line
        return None
| gpl-3.0 |
microelly2/cadquery-freecad-module | CadQuery/Libs/docutils/parsers/rst/directives/images.py | 100 | 6882 | # $Id: images.py 7753 2014-06-24 14:52:59Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for figures and simple images.
"""
__docformat__ = 'reStructuredText'
import sys
import urllib
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, states
from docutils.nodes import fully_normalize_name, whitespace_normalize_name
from docutils.parsers.rst.roles import set_classes
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
class Image(Directive):
    """reStructuredText ``image`` directive.

    Produces an ``image`` node, optionally wrapped in a ``reference`` node
    when the ``:target:`` option is given.
    """

    align_h_values = ('left', 'center', 'right')
    align_v_values = ('top', 'middle', 'bottom')
    align_values = align_v_values + align_h_values

    def align(argument):
        # This is not callable as self.align.  We cannot make it a
        # staticmethod because we're saving an unbound method in
        # option_spec below.
        return directives.choice(argument, Image.align_values)

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'alt': directives.unchanged,
                   'height': directives.length_or_unitless,
                   'width': directives.length_or_percentage_or_unitless,
                   'scale': directives.percentage,
                   'align': align,
                   'name': directives.unchanged,
                   'target': directives.unchanged_required,
                   'class': directives.class_option}

    def run(self):
        """Build and return the image node list (plus any system messages)."""
        if 'align' in self.options:
            if isinstance(self.state, states.SubstitutionDef):
                # Inside a substitution definition only vertical alignment
                # values are meaningful; check for align_v_values.
                if self.options['align'] not in self.align_v_values:
                    raise self.error(
                        'Error in "%s" directive: "%s" is not a valid value '
                        'for the "align" option within a substitution '
                        'definition. Valid values for "align" are: "%s".'
                        % (self.name, self.options['align'],
                           '", "'.join(self.align_v_values)))
            elif self.options['align'] not in self.align_h_values:
                raise self.error(
                    'Error in "%s" directive: "%s" is not a valid value for '
                    'the "align" option. Valid values for "align" are: "%s".'
                    % (self.name, self.options['align'],
                       '", "'.join(self.align_h_values)))
        messages = []
        reference = directives.uri(self.arguments[0])
        self.options['uri'] = reference
        reference_node = None
        if 'target' in self.options:
            # the target may be an inline URI or a reference name;
            # let the state machine decide which one it is
            block = states.escape2null(
                self.options['target']).splitlines()
            block = [line for line in block]
            target_type, data = self.state.parse_target(
                block, self.block_text, self.lineno)
            if target_type == 'refuri':
                reference_node = nodes.reference(refuri=data)
            elif target_type == 'refname':
                reference_node = nodes.reference(
                    refname=fully_normalize_name(data),
                    name=whitespace_normalize_name(data))
                reference_node.indirect_reference_name = data
                self.state.document.note_refname(reference_node)
            else:                           # malformed target
                messages.append(data)       # data is a system message
            del self.options['target']
        set_classes(self.options)
        image_node = nodes.image(self.block_text, **self.options)
        self.add_name(image_node)
        if reference_node:
            # wrap the image in the link so clicking it follows the target
            reference_node += image_node
            return messages + [reference_node]
        else:
            return messages + [image_node]
class Figure(Image):
    """reStructuredText ``figure`` directive.

    An image wrapped in a ``figure`` node, with an optional caption
    (first paragraph of the content) and legend (remaining content).
    """

    def align(argument):
        # figures only support horizontal alignment
        return directives.choice(argument, Figure.align_h_values)

    def figwidth_value(argument):
        # 'image' means: take the width from the image file itself (needs PIL)
        if argument.lower() == 'image':
            return 'image'
        else:
            return directives.length_or_percentage_or_unitless(argument, 'px')

    option_spec = Image.option_spec.copy()
    option_spec['figwidth'] = figwidth_value
    option_spec['figclass'] = directives.class_option
    option_spec['align'] = align
    has_content = True

    def run(self):
        """Build the figure node around the image produced by Image.run()."""
        # pop figure-only options so Image.run() doesn't see them
        figwidth = self.options.pop('figwidth', None)
        figclasses = self.options.pop('figclass', None)
        align = self.options.pop('align', None)
        (image_node,) = Image.run(self)
        if isinstance(image_node, nodes.system_message):
            return [image_node]
        figure_node = nodes.figure('', image_node)
        if figwidth == 'image':
            if PIL and self.state.document.settings.file_insertion_enabled:
                imagepath = urllib.url2pathname(image_node['uri'])
                try:
                    img = PIL.Image.open(
                            imagepath.encode(sys.getfilesystemencoding()))
                except (IOError, UnicodeEncodeError):
                    pass # TODO: warn?
                else:
                    # reading the image makes it a build dependency
                    self.state.document.settings.record_dependencies.add(
                        imagepath.replace('\\', '/'))
                    figure_node['width'] = '%dpx' % img.size[0]
                    del img
        elif figwidth is not None:
            figure_node['width'] = figwidth
        if figclasses:
            figure_node['classes'] += figclasses
        if align:
            figure_node['align'] = align
        if self.content:
            node = nodes.Element()          # anonymous container for parsing
            self.state.nested_parse(self.content, self.content_offset, node)
            first_node = node[0]
            if isinstance(first_node, nodes.paragraph):
                # first paragraph becomes the caption
                caption = nodes.caption(first_node.rawsource, '',
                                        *first_node.children)
                caption.source = first_node.source
                caption.line = first_node.line
                figure_node += caption
            elif not (isinstance(first_node, nodes.comment)
                      and len(first_node) == 0):
                error = self.state_machine.reporter.error(
                      'Figure caption must be a paragraph or empty comment.',
                      nodes.literal_block(self.block_text, self.block_text),
                      line=self.lineno)
                return [figure_node, error]
            if len(node) > 1:
                # anything after the caption is the legend
                figure_node += nodes.legend('', *node[1:])
        return [figure_node]
| lgpl-3.0 |
kubeflow/kfp-tekton | sdk/python/tests/compiler/testdata/retry.py | 1 | 1496 | # Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl
def random_failure_op(exit_codes):
    """Build a ContainerOp that exits with a random code drawn from *exit_codes*.

    exit_codes: comma-separated string of candidate exit codes, e.g. '0,1,2,3'
    """
    # inline python: pick one of the given codes, print it, exit with it
    script = ('import random; import sys; '
              'exit_code = random.choice([int(i) for i in sys.argv[1].split(",")]); '
              'print(exit_code); sys.exit(exit_code)')
    return dsl.ContainerOp(name='random_failure',
                           image='python:alpine3.6',
                           command=['python', '-c'],
                           arguments=[script, exit_codes])
@dsl.pipeline(
    name='Retry random failures',
    description='The pipeline includes two steps which fail randomly. It shows how to use ContainerOp(...).set_retry(...).'
)
def retry_sample_pipeline():
    # Each step may exit non-zero; set_retry(n) allows up to n retry attempts.
    op1 = random_failure_op('0,1,2,3').set_retry(10)
    op2 = random_failure_op('0,1').set_retry(5)
if __name__ == '__main__':
    # Compile the pipeline to a Tekton YAML file next to this script.
    from kfp_tekton.compiler import TektonCompiler
    TektonCompiler().compile(retry_sample_pipeline, __file__.replace('.py', '.yaml'))
| apache-2.0 |
nirvn/QGIS | python/plugins/processing/algs/grass7/ext/v_extrude.py | 9 | 1546 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_extrude.py
------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Verify that exactly one of fixed height / height column is provided.

    Returns a translated error message string when the combination is
    invalid, or None when the parameters are acceptable.
    """
    height = alg.parameterAsDouble(parameters, 'height', context)
    height_column = alg.parameterAsString(parameters, 'height_column', context)
    has_fixed = bool(height)
    has_column = bool(height_column)
    # invalid when both are set, or neither is
    if has_fixed == has_column:
        return alg.tr("You need to set either a fixed height value or the height column!")
    return None
| gpl-2.0 |
fzr72725/ThinkStats2 | code/linear.py | 68 | 7935 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import numpy as np
import first
import thinkplot
import thinkstats2
def Summarize(estimates, actual=None):
    """Print the mean, standard error and 90% CI of a set of estimates.

    estimates: sequence of estimates
    actual: float actual value (center used when computing the SE)
    """
    ci = thinkstats2.Cdf(estimates).ConfidenceInterval(90)
    print('mean, SE, CI',
          thinkstats2.Mean(estimates),
          thinkstats2.Std(estimates, mu=actual),
          ci)
def SamplingDistributions(live, iters=101):
    """Estimate sampling distributions of the fit parameters by resampling rows.

    live: DataFrame with agepreg and totalwgt_lb columns
    iters: number of resampling iterations

    returns: pair of sequences (inters, slopes)
    """
    estimates = []
    for _ in range(iters):
        resampled = thinkstats2.ResampleRows(live)
        params = thinkstats2.LeastSquares(resampled.agepreg,
                                          resampled.totalwgt_lb)
        estimates.append(params)
    inters, slopes = zip(*estimates)
    return inters, slopes
def PlotConfidenceIntervals(xs, inters, slopes,
                            res=None, percent=90, **options):
    """Fill the confidence band spanned by a family of fitted lines.

    xs: sequence of x values
    inters: estimated intercepts
    slopes: estimated slopes
    res: residuals to permute onto each fitted line (optional)
    percent: width of the percentile band to show
    """
    fitted_ys = []
    fxs = None
    for inter, slope in zip(inters, slopes):
        fxs, fys = thinkstats2.FitLine(xs, inter, slope)
        if res is not None:
            # jitter each fitted line with permuted residuals
            fys += np.random.permutation(res)
        fitted_ys.append(fys)
    tail = (100 - percent) / 2
    low, high = thinkstats2.PercentileRows(fitted_ys, (tail, 100 - tail))
    thinkplot.FillBetween(fxs, low, high, **options)
def PlotSamplingDistributions(live):
    """Plots confidence intervals for the fitted curve and sampling dists.

    Produces figures linear3, linear5 and linear4, and prints summary
    statistics of the fit along the way.

    live: DataFrame
    """
    ages = live.agepreg
    weights = live.totalwgt_lb
    inter, slope = thinkstats2.LeastSquares(ages, weights)
    res = thinkstats2.Residuals(ages, weights, inter, slope)
    r2 = thinkstats2.CoefDetermination(weights, res)

    # summary statistics of the fit
    print('rho', thinkstats2.Corr(ages, weights))
    print('R2', r2)
    print('R', math.sqrt(r2))
    print('Std(ys)', thinkstats2.Std(weights))
    print('Std(res)', thinkstats2.Std(res))

    # plot the 90% and 50% confidence intervals of the fitted line
    inters, slopes = SamplingDistributions(live, iters=1001)
    PlotConfidenceIntervals(ages, inters, slopes, percent=90,
                            alpha=0.3, label='90% CI')
    thinkplot.Text(42, 7.53, '90%')
    PlotConfidenceIntervals(ages, inters, slopes, percent=50,
                            alpha=0.5, label='50% CI')
    thinkplot.Text(42, 7.59, '50%')

    thinkplot.Save(root='linear3',
                   xlabel='age (years)',
                   ylabel='birth weight (lbs)',
                   legend=False)

    # same band over the raw scatter, with and without residual jitter
    thinkplot.PrePlot(2)
    thinkplot.Scatter(ages, weights, color='gray', alpha=0.1)
    PlotConfidenceIntervals(ages, inters, slopes, res=res, alpha=0.2)
    PlotConfidenceIntervals(ages, inters, slopes)
    thinkplot.Save(root='linear5',
                   xlabel='age (years)',
                   ylabel='birth weight (lbs)',
                   title='90% CI',
                   axis=[10, 45, 0, 15],
                   legend=False)

    # plot the sampling distribution of slope under null hypothesis
    # and alternate hypothesis
    sampling_cdf = thinkstats2.Cdf(slopes)
    print('p-value, sampling distribution', sampling_cdf[0])

    ht = SlopeTest((ages, weights))
    pvalue = ht.PValue()
    print('p-value, slope test', pvalue)

    print('inter', inter, thinkstats2.Mean(inters))
    Summarize(inters, inter)
    print('slope', slope, thinkstats2.Mean(slopes))
    Summarize(slopes, slope)

    thinkplot.PrePlot(2)
    thinkplot.Plot([0, 0], [0, 1], color='0.8')
    ht.PlotCdf(label='null hypothesis')
    thinkplot.Cdf(sampling_cdf, label='sampling distribution')

    thinkplot.Save(root='linear4',
                   xlabel='slope (lbs / year)',
                   ylabel='CDF',
                   xlim=[-0.03, 0.03],
                   loc='upper left')
def PlotFit(live):
    """Scatter birth weight against mother's age with the least squares fit.

    live: DataFrame
    """
    ages, weights = live.agepreg, live.totalwgt_lb
    inter, slope = thinkstats2.LeastSquares(ages, weights)
    fit_xs, fit_ys = thinkstats2.FitLine(ages, inter, slope)
    thinkplot.Scatter(ages, weights, color='gray', alpha=0.1)
    # white underlay makes the blue fit line visible over the scatter
    thinkplot.Plot(fit_xs, fit_ys, color='white', linewidth=3)
    thinkplot.Plot(fit_xs, fit_ys, color='blue', linewidth=2)
    thinkplot.Save(root='linear1',
                   xlabel='age (years)',
                   ylabel='birth weight (lbs)',
                   axis=[10, 45, 0, 15],
                   legend=False)
def PlotResiduals(live):
    """Plot percentiles of the fit residuals within age bins.

    live: DataFrame; gains a 'residual' column as a side effect
    """
    ages = live.agepreg
    weights = live.totalwgt_lb
    inter, slope = thinkstats2.LeastSquares(ages, weights)
    live['residual'] = thinkstats2.Residuals(ages, weights, inter, slope)
    # bin mothers' ages in 3-year steps and group the rows by bin
    bins = np.arange(10, 48, 3)
    groups = live.groupby(np.digitize(live.agepreg, bins))
    mean_ages = []
    cdfs = []
    for _, group in groups:
        mean_ages.append(group.agepreg.mean())
        cdfs.append(thinkstats2.Cdf(group.residual))
    # drop the sparsely populated first and last bins
    mean_ages = mean_ages[1:-1]
    cdfs = cdfs[1:-1]
    thinkplot.PrePlot(3)
    for percent in [75, 50, 25]:
        percentiles = [cdf.Percentile(percent) for cdf in cdfs]
        thinkplot.Plot(mean_ages, percentiles, label='%dth' % percent)
    thinkplot.Save(root='linear2',
                   xlabel='age (years)',
                   ylabel='residual (lbs)',
                   xlim=[10, 45])
class SlopeTest(thinkstats2.HypothesisTest):
    """Hypothesis test for the slope of a linear least squares fit."""

    def TestStatistic(self, data):
        """Slope of the least squares fit through the given data.

        data: pair of sequences (ages, weights)
        """
        ages, weights = data
        return thinkstats2.LeastSquares(ages, weights)[1]

    def MakeModel(self):
        """Model the null hypothesis: weights are noise around their mean."""
        _, weights = self.data
        self.ybar = weights.mean()
        self.res = weights - self.ybar

    def RunModel(self):
        """Simulate data under the null by permuting the residuals.

        returns: simulated (ages, weights) pair
        """
        ages, _ = self.data
        return ages, self.ybar + np.random.permutation(self.res)
def ResampleRowsWeighted(df, attr='finalwgt'):
    """Resample rows of df with probability proportional to the attr column.

    df: DataFrame
    attr: string column name to use as weights

    returns: DataFrame of resampled rows
    """
    weights = df[attr]
    # sample row labels from the CDF implied by the weights
    sampled_indices = thinkstats2.Pmf(weights).MakeCdf().Sample(len(weights))
    return df.loc[sampled_indices]
def EstimateBirthWeight(live, iters=1001):
    """Estimate mean birth weight by resampling, unweighted then weighted.

    live: DataFrame
    iters: number of experiments to run
    """
    print('mean', live.totalwgt_lb.mean())
    # naive resampling ignores the survey's sampling weights
    unweighted = [thinkstats2.ResampleRows(live).totalwgt_lb.mean()
                  for _ in range(iters)]
    Summarize(unweighted)
    # weighted resampling corrects for oversampled groups
    weighted = [ResampleRowsWeighted(live).totalwgt_lb.mean()
                for _ in range(iters)]
    Summarize(weighted)
def main():
    # fixed seed so the resampling results are reproducible
    thinkstats2.RandomSeed(17)
    live, _, _ = first.MakeFrames()
    EstimateBirthWeight(live)
    # the regression plots need complete age/weight pairs
    live = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    PlotSamplingDistributions(live)
    PlotFit(live)
    PlotResiduals(live)

if __name__ == '__main__':
    main()
| gpl-3.0 |
marty331/jakesclock | flask/lib/python2.7/site-packages/sqlalchemy/orm/state.py | 35 | 23169 | # orm/state.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from .. import util
from . import exc as orm_exc, interfaces
from .path_registry import PathRegistry
from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \
NO_VALUE, PASSIVE_NO_INITIALIZE, INIT_OK, PASSIVE_OFF
from . import base
class InstanceState(interfaces.InspectionAttr):
"""tracks state information at the instance level.
The :class:`.InstanceState` is a key object used by the
SQLAlchemy ORM in order to track the state of an object;
it is created the moment an object is instantiated, typically
as a result of :term:`instrumentation` which SQLAlchemy applies
to the ``__init__()`` method of the class.
:class:`.InstanceState` is also a semi-public object,
available for runtime inspection as to the state of a
mapped instance, including information such as its current
status within a particular :class:`.Session` and details
about data on individual attributes. The public API
in order to acquire a :class:`.InstanceState` object
is to use the :func:`.inspect` system::
>>> from sqlalchemy import inspect
>>> insp = inspect(some_mapped_object)
.. seealso::
:ref:`core_inspection_toplevel`
"""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = ()
insert_order = None
_strong_obj = None
modified = False
expired = False
deleted = False
_load_pending = False
is_instance = True
callables = ()
"""A namespace where a per-state loader callable can be associated.
In SQLAlchemy 1.0, this is only used for lazy loaders / deferred
loaders that were set up via query option.
Previously, callables was used also to indicate expired attributes
by storing a link to the InstanceState itself in this dictionary.
This role is now handled by the expired_attributes set.
"""
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.committed_state = {}
self.expired_attributes = set()
expired_attributes = None
"""The set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs."""
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
This object allows inspection of the current data
within an attribute as well as attribute history
since the last flush.
"""
return util.ImmutableProperties(
dict(
(key, AttributeState(self, key))
for key in self.manager
)
)
@property
def transient(self):
"""Return true if the object is :term:`transient`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
not self._attached
@property
def pending(self):
"""Return true if the object is :term:`pending`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
self._attached
@property
def persistent(self):
"""Return true if the object is :term:`persistent`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
self._attached
@property
def detached(self):
"""Return true if the object is :term:`detached`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
not self._attached
@property
@util.dependencies("sqlalchemy.orm.session")
def _attached(self, sessionlib):
return self.session_id is not None and \
self.session_id in sessionlib._sessions
@property
@util.dependencies("sqlalchemy.orm.session")
def session(self, sessionlib):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available.
Note that the result here can in some cases be *different*
from that of ``obj in session``; an object that's been deleted
will report as not ``in session``, however if the transaction is
still in progress, this attribute will still refer to that session.
Only when the transaction is completed does the object become
fully detached under normal circumstances.
"""
return sessionlib._state_session(self)
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is transient or pending
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`.Mapper` used for this mapepd object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent or state.detached``.
"""
return bool(self.key)
def _detach(self):
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
"""Weakref callback cleanup.
This callable cleans out the state when it is being garbage
collected.
this _cleanup **assumes** that there are no strong refs to us!
Will not work otherwise!
"""
instance_dict = self._instance_dict()
if instance_dict is not None:
instance_dict._fast_discard(self)
del self._instance_dict
# we can't possibly be in instance_dict._modified
# b.c. this is weakref cleanup only, that set
# is strong referencing!
# assert self not in instance_dict._modified
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
"""Return the instance dict used by the object.
Under normal circumstances, this is always synonymous
with the ``__dict__`` attribute of the mapped object,
unless an alternative instrumentation system has been
configured.
In the case that the actual object has been garbage
collected, this accessor returns a blank dictionary.
"""
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:]
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
with util.safe_reraise():
manager.dispatch.init_failure(self, args, kwargs)
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {'instance': self.obj()}
state_dict.update(
(k, self.__dict__[k]) for k in (
'committed_state', '_pending_mutations', 'modified',
'expired', 'callables', 'key', 'parents', 'load_options',
'class_', 'expired_attributes'
) if k in self.__dict__
)
if self.load_path:
state_dict['load_path'] = self.load_path.serialize()
state_dict['manager'] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict['instance']
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
# None being possible here generally new as of 0.7.4
# due to storage of state in "parents". "class_"
# also new.
self.obj = None
self.class_ = state_dict['class_']
self.committed_state = state_dict.get('committed_state', {})
self._pending_mutations = state_dict.get('_pending_mutations', {})
self.parents = state_dict.get('parents', {})
self.modified = state_dict.get('modified', False)
self.expired = state_dict.get('expired', False)
if 'callables' in state_dict:
self.callables = state_dict['callables']
try:
self.expired_attributes = state_dict['expired_attributes']
except KeyError:
self.expired_attributes = set()
# 0.9 and earlier compat
for k in list(self.callables):
if self.callables[k] is self:
self.expired_attributes.add(k)
del self.callables[k]
self.__dict__.update([
(k, state_dict[k]) for k in (
'key', 'load_options',
) if k in state_dict
])
if 'load_path' in state_dict:
self.load_path = PathRegistry.\
deserialize(state_dict['load_path'])
state_dict['manager'](self, inst, state_dict)
def _initialize(self, key):
"""Set this attribute to an empty value or collection,
based on the AttributeImpl in use."""
self.manager.get_impl(key).initialize(self, self.dict)
    def _reset(self, dict_, key):
        """Remove the given attribute and any
        callables associated with it."""
        old = dict_.pop(key, None)
        # an existing collection must be invalidated so outstanding
        # references to its adapter can no longer mutate this state
        if old is not None and self.manager[key].impl.collection:
            self.manager[key].impl._invalidate_collection(old)
        self.expired_attributes.discard(key)
        if self.callables:
            self.callables.pop(key, None)
    @classmethod
    def _instance_level_callable_processor(cls, manager, fn, key):
        """Return a row-processing function which installs *fn* as an
        instance-level loader callable for attribute *key*."""
        impl = manager[key].impl
        if impl.collection:
            def _set_callable(state, dict_, row):
                # 'callables' is created lazily to keep state objects small
                if 'callables' not in state.__dict__:
                    state.callables = {}
                # any already-present collection must be invalidated before
                # the loader callable takes its place
                old = dict_.pop(key, None)
                if old is not None:
                    impl._invalidate_collection(old)
                state.callables[key] = fn
        else:
            def _set_callable(state, dict_, row):
                if 'callables' not in state.__dict__:
                    state.callables = {}
                state.callables[key] = fn
        return _set_callable
    def _expire(self, dict_, modified_set):
        """Expire all loaded attributes, clearing committed/pending state
        and firing the ``expire`` event with ``None`` (meaning "all")."""
        self.expired = True
        if self.modified:
            modified_set.discard(self)
        self.committed_state.clear()
        self.modified = False
        self._strong_obj = None
        if '_pending_mutations' in self.__dict__:
            del self.__dict__['_pending_mutations']
        if 'parents' in self.__dict__:
            del self.__dict__['parents']
        # only scalar-loader impls are expired; expire_missing controls
        # whether keys absent from dict_ are expired as well
        self.expired_attributes.update(
            [impl.key for impl in self.manager._scalar_loader_impls
             if impl.expire_missing or impl.key in dict_]
        )
        if self.callables:
            # expiration supersedes any instance-level loader callables
            for k in self.expired_attributes.intersection(self.callables):
                del self.callables[k]
        for k in self.manager._collection_impl_keys.intersection(dict_):
            collection = dict_.pop(k)
            collection._sa_adapter.invalidated = True
        for key in self.manager._all_key_set.intersection(dict_):
            del dict_[key]
        self.manager.dispatch.expire(self, None)
    def _expire_attributes(self, dict_, attribute_names):
        """Expire the named attributes only, firing the ``expire`` event
        with the given names."""
        pending = self.__dict__.get('_pending_mutations', None)
        callables = self.callables
        for key in attribute_names:
            impl = self.manager[key].impl
            if impl.accepts_scalar_loader:
                self.expired_attributes.add(key)
                if callables and key in callables:
                    del callables[key]
            old = dict_.pop(key, None)
            if impl.collection and old is not None:
                impl._invalidate_collection(old)
            # drop both the committed snapshot and any pending mutations
            self.committed_state.pop(key, None)
            if pending:
                pending.pop(key, None)
        self.manager.dispatch.expire(self, attribute_names)
    def _load_expired(self, state, passive):
        """__call__ allows the InstanceState to act as a deferred
        callable for loading expired attributes, which is also
        serializable (picklable).
        """
        if not passive & SQL_OK:
            return PASSIVE_NO_RESULT
        # only load attributes that are expired AND unmodified; modified
        # attributes keep their pending in-memory value
        toload = self.expired_attributes.\
            intersection(self.unmodified)
        self.manager.deferred_scalar_loader(self, toload)
        # if the loader failed, or this
        # instance state didn't have an identity,
        # the attributes still might be in the callables
        # dict. ensure they are removed.
        self.expired_attributes.clear()
        return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return set(keys).intersection(self.manager).\
difference(self.committed_state)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return set(self.manager).\
difference(self.committed_state).\
difference(self.dict)
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
    def _instance_dict(self):
        # NOTE(review): base implementation reports no owning identity map;
        # appears to be a hook replaced when the state becomes
        # session-managed — confirm against the session integration code.
        return None
    def _modified_event(
            self, dict_, attr, previous, collection=False, force=False):
        """Record that *attr* is about to be modified: snapshot *previous*
        as the committed value and mark this state as dirty."""
        if not attr.send_modified_events:
            return
        if attr.key not in self.committed_state or force:
            if collection:
                if previous is NEVER_SET:
                    if attr.key in dict_:
                        previous = dict_[attr.key]
                if previous not in (None, NO_VALUE, NEVER_SET):
                    # copy the collection so the committed snapshot is not
                    # affected by further in-place mutation
                    previous = attr.copy(previous)
            self.committed_state[attr.key] = previous
        # assert self._strong_obj is None or self.modified
        if (self.session_id and self._strong_obj is None) \
                or not self.modified:
            self.modified = True
            instance_dict = self._instance_dict()
            if instance_dict:
                instance_dict._modified.add(self)
            # only create _strong_obj link if attached
            # to a session
            inst = self.obj()
            if self.session_id:
                self._strong_obj = inst
            if inst is None:
                raise orm_exc.ObjectDereferencedError(
                    "Can't emit change event for attribute '%s' - "
                    "parent object of type %s has been garbage "
                    "collected."
                    % (
                        self.manager[attr.key],
                        base.state_class_str(self)
                    ))
    def _commit(self, dict_, keys):
        """Commit attributes.

        This is used by a partial-attribute load operation to mark committed
        those attributes which were refreshed from the database.

        Attributes marked as "expired" can potentially remain "expired" after
        this step if a value was not populated in state.dict.
        """
        for key in keys:
            self.committed_state.pop(key, None)
        self.expired = False
        # a key is un-expired only if a value actually arrived in dict_
        self.expired_attributes.difference_update(
            set(keys).intersection(dict_))
        # the per-keys commit removes object-level callables,
        # while that of commit_all does not. it's not clear
        # if this behavior has a clear rationale, however tests do
        # ensure this is what it does.
        if self.callables:
            for key in set(self.callables).\
                    intersection(keys).\
                    intersection(dict_):
                del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers for scalar attributes loaded are removed.
- lazy load callables for objects / collections *stay*
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter, instance_dict=None):
"""Mass / highly inlined version of commit_all()."""
for state, dict_ in iter:
state_dict = state.__dict__
state.committed_state.clear()
if '_pending_mutations' in state_dict:
del state_dict['_pending_mutations']
state.expired_attributes.difference_update(dict_)
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
    """Provide an inspection interface corresponding
    to a particular attribute on a particular mapped object.

    Accessed via the :attr:`.InstanceState.attrs` collection
    of a particular :class:`.InstanceState`::

        from sqlalchemy import inspect

        insp = inspect(some_mapped_object)
        attr_state = insp.attrs.some_attribute

    """

    def __init__(self, state, key):
        self.state = state
        self.key = key

    @property
    def loaded_value(self):
        """The current value of this attribute as loaded from the database.

        Returns NO_VALUE when the value has not been loaded or is
        otherwise absent from the object's dictionary.
        """
        try:
            return self.state.dict[self.key]
        except KeyError:
            return NO_VALUE

    @property
    def value(self):
        """Return the value of this attribute.

        Equivalent to reading the attribute directly or via ``getattr()``;
        fires off any pending loader callables as needed.
        """
        descriptor = self.state.manager[self.key]
        return descriptor.__get__(self.state.obj(), self.state.class_)

    @property
    def history(self):
        """Return the current pre-flush change history for this
        attribute, via the :class:`.History` interface.

        This accessor will **not** emit loader callables when the
        attribute value is unloaded.

        .. seealso::

            :meth:`.AttributeState.load_history` - retrieve history
            using loader callables if the value is not locally present.

            :func:`.attributes.get_history` - underlying function
        """
        return self.state.get_history(
            self.key, PASSIVE_NO_INITIALIZE)

    def load_history(self):
        """Return the current pre-flush change history for this
        attribute, via the :class:`.History` interface.

        This method **will** emit loader callables when the attribute
        value is unloaded.

        .. seealso::

            :attr:`.AttributeState.history`

            :func:`.attributes.get_history` - underlying function

        .. versionadded:: 0.9.0
        """
        return self.state.get_history(
            self.key, PASSIVE_OFF ^ INIT_OK)
class PendingCollection(object):
    """A writable placeholder for an unloaded collection.

    Records items appended to, and removed from, a collection that has
    not yet been loaded; once the collection is loaded, the recorded
    changes are replayed onto it to produce the final result.
    """

    def __init__(self):
        self.deleted_items = util.IdentitySet()
        self.added_items = util.OrderedIdentitySet()

    def append(self, value):
        # a previously recorded removal cancels out instead of re-adding
        if value not in self.deleted_items:
            self.added_items.add(value)
        else:
            self.deleted_items.remove(value)

    def remove(self, value):
        # a previously recorded addition cancels out instead of deleting
        if value not in self.added_items:
            self.deleted_items.add(value)
        else:
            self.added_items.remove(value)
| gpl-2.0 |
froyobin/horizon | openstack_dashboard/dashboards/admin/images/forms.py | 52 | 2166 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images.images \
import forms as images_forms
class AdminCreateImageForm(images_forms.CreateImageForm):
    # Admin-panel variant of the project-level image creation form;
    # subclassed without changes so admin behavior can diverge later.
    pass
class AdminUpdateImageForm(images_forms.UpdateImageForm):
    # Admin-panel variant of the project-level image update form;
    # subclassed without changes so admin behavior can diverge later.
    pass
class UpdateMetadataForm(forms.SelfHandlingForm):
    """Replace a Glance image's custom properties with the submitted set."""
    def handle(self, request, data):
        # ``id`` and ``metadata`` are seeded into ``initial`` by the view
        id = self.initial['id']
        old_metadata = self.initial['metadata']
        try:
            # the widget submits a JSON list of {"key": ..., "value": ...}
            new_metadata = json.loads(self.data['metadata'])
            metadata = dict(
                (item['key'], str(item['value']))
                for item in new_metadata
            )
            # properties missing from the submission are removed server-side
            remove_props = [key for key in old_metadata if key not in metadata]
            api.glance.image_update_properties(request,
                                               id,
                                               remove_props,
                                               **metadata)
            message = _('Metadata successfully updated.')
            messages.success(request, message)
        except Exception:
            # exceptions.handle presents the error to the user; returning
            # False signals the form framework that handling failed
            exceptions.handle(request,
                              _('Unable to update the image metadata.'))
            return False
        return True
| apache-2.0 |
RealTimeWeb/Blockpy-Server | controllers/service_libraries/weather.py | 1 | 50288 | from __future__ import print_function
_USE_CLASSES = False
_START_CONNECTED = True
import sys, zlib, base64
try:
import simplejson as json
except ImportError:
import json
HEADER = {'User-Agent': 'CORGIS Weather library for educational purposes'}
PYTHON_3 = sys.version_info >= (3, 0)
if PYTHON_3:
from urllib.error import HTTPError
import urllib.request as request
from urllib.parse import quote_plus
else:
from urllib2 import HTTPError
import urllib2
from urllib import quote_plus
################################################################################
# Auxilary
################################################################################
def _parse_int(value, default=0):
"""
Attempt to cast *value* into an integer, returning *default* if it fails.
"""
if value is None:
return default
try:
return int(value)
except ValueError:
return default
def _parse_float(value, default=0.0):
"""
Attempt to cast *value* into a float, returning *default* if it fails.
"""
if value is None:
return default
try:
return float(value)
except ValueError:
return default
def _parse_boolean(value, default=False):
"""
Attempt to cast *value* into a bool, returning *default* if it fails.
"""
if value is None:
return default
try:
return bool(value)
except ValueError:
return default
def _iteritems(_dict):
    """
    Internal helper that hides the Py2-to-3 difference in dictionary
    item iteration.

    :param dict _dict: the dictionary to iterate
    :returns: an iterable of (key, value) pairs
    """
    return _dict.items() if PYTHON_3 else _dict.iteritems()
def _urlencode(query, params):
    """
    Combine the base url *query* and the *params* dict into a single
    url string.

    :param str query: the base url to query
    :param dict params: the parameters to send to the url
    :returns: a *str* of the full url
    """
    encoded_pairs = [
        key + '=' + quote_plus(str(value))
        for key, value in _iteritems(params)
    ]
    return query + '?' + '&'.join(encoded_pairs)
def _get(url):
    """
    Internal method to convert a URL into its response (a *str*).

    :param str url: the url to request a response from
    :returns: the *str* response
    """
    if PYTHON_3:
        req = request.Request(url, headers=HEADER)
        response = request.urlopen(req)
        # urlopen returns bytes on Python 3; decode to text
        return response.read().decode('utf-8')
    else:
        req = urllib2.Request(url, headers=HEADER)
        response = urllib2.urlopen(req)
        return response.read()
def _recursively_convert_unicode_to_str(input):
    """
    Force the given input to only use `str` instead of `bytes` or
    `unicode`, recursing through dicts and lists.
    """
    if isinstance(input, list):
        return [_recursively_convert_unicode_to_str(item) for item in input]
    if isinstance(input, dict):
        return {
            _recursively_convert_unicode_to_str(k):
                _recursively_convert_unicode_to_str(v)
            for k, v in input.items()
        }
    if PYTHON_3 and isinstance(input, str):
        # squash any non-ascii characters to '?'
        return str(input.encode('ascii', 'replace').decode('ascii'))
    return input
def _from_json(data):
    """
    Parse the given string as JSON, ensuring `str` is used in place of
    bytes/unicode throughout the result.
    """
    parsed = json.loads(data)
    return _recursively_convert_unicode_to_str(parsed)
################################################################################
# Cache
################################################################################
_CACHE = {}  # maps request URL -> [pattern, response1, response2, ...]
_CACHE_COUNTER = {}  # maps request URL -> index of the next cached response
_EDITABLE = False  # when True, live responses are recorded into _CACHE
_CONNECTED = True  # when False, requests are served from _CACHE only
_PATTERN = "repeat"  # replay mode assigned to newly cached keys
def _start_editing(pattern="repeat"):
    """
    Begin recording every request/response pair into the cache. Call
    :ref:`_save_cache` afterwards to persist the edited cache to disk!
    """
    global _EDITABLE, _PATTERN
    _PATTERN = pattern
    _EDITABLE = True
def _stop_editing():
    """
    Stop adding seen entries to the cache.
    """
    global _EDITABLE
    # already-recorded entries remain in _CACHE; only recording stops
    _EDITABLE = False
def _add_to_cache(key, value):
    """
    Record *value* as the next cached HTTP response for *key*.

    :param str key: The new url to add to the cache
    :param str value: The HTTP response for this key.
    :returns: void
    """
    try:
        _CACHE[key].append(value)
    except KeyError:
        # first response for this key: seed the replay pattern and cursor
        _CACHE[key] = [_PATTERN, value]
        _CACHE_COUNTER[key] = 0
def _clear_key(key):
    """
    Remove *key* (a request URL) from the local cache, if present.

    :param str key: The url to remove from the cache
    """
    _CACHE.pop(key, None)
def _save_cache(filename="cache.json"):
    """
    Persist the in-memory cache to *filename* as JSON so that it can be
    used later.

    :param str filename: the location to store this at.
    """
    payload = {"data": _CACHE, "metadata": ""}
    with open(filename, 'w') as f:
        json.dump(payload, f)
def _lookup(key):
    """
    Internal method that looks up a key in the local cache.

    :param key: Get the value based on the key from the cache.
    :type key: string
    :returns: the cached response string, or "" when nothing applies
    """
    if key not in _CACHE:
        return ""
    # _CACHE[key] is [pattern, response1, response2, ...]; the counter
    # tracks which recorded response should be served next
    if _CACHE_COUNTER[key] >= len(_CACHE[key][1:]):
        # ran past the recorded responses: behavior depends on the pattern
        if _CACHE[key][0] == "empty":
            return ""
        elif _CACHE[key][0] == "repeat" and _CACHE[key][1:]:
            # keep serving the last recorded response forever
            return _CACHE[key][-1]
        elif _CACHE[key][0] == "repeat":
            return ""
        else:
            # any other pattern wraps around to the first response
            _CACHE_COUNTER[key] = 1
    else:
        _CACHE_COUNTER[key] += 1
    if _CACHE[key]:
        return _CACHE[key][_CACHE_COUNTER[key]]
    else:
        return ""
def connect():
    """
    Connect to the online data source in order to get up-to-date information.

    :returns: void
    """
    global _CONNECTED
    # subsequent requests will hit the live service instead of the cache
    _CONNECTED = True
def _load_from_string(data):
    '''Loads the cache from a JSON string (bytes on Python 3).'''
    global _CACHE
    text = data.decode("utf-8") if PYTHON_3 else data
    parsed = json.loads(text)
    _CACHE = _recursively_convert_unicode_to_str(parsed)['data']
def disconnect(filename=None):
    """
    Connect to the local cache, so no internet connection is required.

    :param str filename: optional path of a cache file to load first
    :returns: void
    :raises WeatherException: if *filename* is given but cannot be read
    """
    global _CONNECTED, _CACHE
    if filename is not None:
        try:
            with open(filename, 'r') as f:
                _load_from_string(f.read())
        except (OSError, IOError) as e:
            raise WeatherException("The cache file '{}' was not found, and I cannot disconnect without one.".format(filename))
    # rewind every replay cursor so cached responses start from the top
    for key in _CACHE.keys():
        _CACHE_COUNTER[key] = 0
    _CONNECTED = False
################################################################################
# Domain Objects
################################################################################
class Location(object):
    """
    A detailed description of a location
    """

    def __init__(self, latitude, longitude, elevation, name):
        """
        Creates a new Location

        :param float latitude: The latitude (up-down) of this location.
        :param float longitude: The longitude (left-right) of this location.
        :param int elevation: The height above sea-level (in feet).
        :param str name: The city and state that this location is in.
        :returns: Location
        """
        self.latitude = latitude
        self.longitude = longitude
        self.elevation = elevation
        self.name = name

    def __unicode__(self):
        return "<Location: {}>".format(self.name)

    def __repr__(self):
        text = self.__unicode__()
        return text if PYTHON_3 else text.encode('utf-8')

    def __str__(self):
        text = self.__unicode__()
        return text if PYTHON_3 else text.encode('utf-8')

    def _to_dict(self):
        return {
            'latitude': self.latitude,
            'longitude': self.longitude,
            'elevation': self.elevation,
            'name': self.name,
        }

    @staticmethod
    def _from_json(json_data):
        """
        Creates a Location from json data.

        :param dict json_data: The raw json data to parse
        :returns: Location
        """
        return Location(
            _parse_float(json_data.get('latitude', 0.0)),
            _parse_float(json_data.get('longitude', 0.0)),
            _parse_int(json_data.get('elevation', 0)),
            json_data.get('areaDescription', ''),
        )
class Weather(object):
    """
    A structured representation of the current weather.
    """
    def __init__(self, temp, dewpoint, humidity, wind_speed, wind_direction, description, image_url, visibility, windchill, pressure):
        """
        Creates a new Weather

        :param int temp: The current temperature (in Fahrenheit).
        :param int dewpoint: The current dewpoint temperature (in Fahrenheit).
        :param int humidity: The current relative humidity (as a percentage).
        :param int wind_speed: The current wind speed (in miles-per-hour).
        :param int wind_direction: The current wind direction (in degrees).
        :param str description: A human-readable description of the current weather.
        :param str image_url: A url pointing to a picture that describes the weather.
        :param float visibility: How far you can see (in miles).
        :param int windchill: The perceived temperature (in Fahrenheit).
        :param float pressure: The barometric pressure (in inches).
        :returns: Weather
        """
        self.temp = temp
        self.dewpoint = dewpoint
        self.humidity = humidity
        self.wind_speed = wind_speed
        self.wind_direction = wind_direction
        self.description = description
        self.image_url = image_url
        self.visibility = visibility
        self.windchill = windchill
        self.pressure = pressure
    def __unicode__(self):
        # BUG FIX: previously read ``self.temperature``, which is never set
        # (the constructor stores ``self.temp``), so str()/repr() of any
        # Weather instance raised AttributeError.
        return "<Weather: {}F and {}>".format(self.temp, self.description)
    def __repr__(self):
        string = self.__unicode__()
        if not PYTHON_3:
            return string.encode('utf-8')
        return string
    def __str__(self):
        string = self.__unicode__()
        if not PYTHON_3:
            return string.encode('utf-8')
        return string
    def _to_dict(self):
        # note: the dict key is 'temperature' even though the attribute
        # is 'temp' — callers of the dict API rely on 'temperature'
        return {'temperature': self.temp,
                'dewpoint': self.dewpoint,
                'humidity': self.humidity,
                'wind_speed': self.wind_speed,
                'wind_direction': self.wind_direction,
                'description': self.description,
                'image_url': self.image_url,
                'visibility': self.visibility,
                'windchill': self.windchill,
                'pressure': self.pressure}
    @staticmethod
    def _from_json(json_data):
        """
        Creates a Weather from json data.

        :param dict json_data: The raw json data to parse
        :returns: Weather
        """
        return Weather(_parse_int(json_data.get('Temp', 0)),
                       _parse_int(json_data.get('Dewp', 0)),
                       _parse_int(json_data.get('Relh', 0)),
                       _parse_int(json_data.get('Winds', 0)),
                       _parse_int(json_data.get('Windd', 0)),
                       json_data.get('Weather', ''),
                       json_data.get('Weatherimage', ''),
                       _parse_float(json_data.get('Visibility', 0.0)),
                       _parse_int(json_data.get('WindChill', 0)),
                       _parse_float(json_data.get('SLP', 0.0)))
class Forecast(object):
    """
    A prediction for future weather.
    """
    def __init__(self, period_name, period_time, temperature_label, temperature, probability_of_precipitation, description, image_url, long_description):
        """
        Creates a new Forecast

        :param str period_name: A human-readable name for this time period (e.g. Tonight or Saturday).
        :param str period_time: The time that this period starts, encoded as YYYY-MM-DDTHH:MM:SS (e.g. 2013-07-30T18:00:00).
        :param str temperature_label: Either 'High' or 'Low', depending on whether the predicted temperature is a daily high or low.
        :param int temperature: The predicted temperature for this period (in Fahrenheit).
        :param probability_of_precipitation: The probability of precipitation for this period (as a percentage); ``None`` is normalized to 0.
        :type probability_of_precipitation: int or None
        :param str description: A human-readable description of the predicted weather.
        :param str image_url: A url pointing to a picture describing the predicted weather.
        :param str long_description: A more detailed, human-readable description.
        :returns: Forecast
        """
        self.period_name = period_name
        self.period_time = period_time
        self.temperature_label = temperature_label
        self.temperature = temperature
        # the service reports missing precipitation chances as null
        if probability_of_precipitation is None:
            self.probability_of_precipitation = 0
        else:
            self.probability_of_precipitation = probability_of_precipitation
        self.description = description
        self.image_url = image_url
        self.long_description = long_description
    def __unicode__(self):
        return "<Forecast: {}>".format(self.period_name)
    def __repr__(self):
        string = self.__unicode__()
        if not PYTHON_3:
            return string.encode('utf-8')
        return string
    def __str__(self):
        string = self.__unicode__()
        if not PYTHON_3:
            return string.encode('utf-8')
        return string
    def _to_dict(self):
        return {'period_name': self.period_name,
                'period_time': self.period_time,
                'temperature_label': self.temperature_label,
                'temperature': self.temperature,
                'probability_of_precipitation': self.probability_of_precipitation,
                'description': self.description,
                'image_url': self.image_url,
                'long_description': self.long_description}
    @staticmethod
    def _from_json(json_data):
        """
        Creates a list of Forecasts from json data, one per period.

        :param dict json_data: The raw json data to parse
        :returns: listof Forecast
        """
        times = json_data['time']
        data = json_data['data']
        # BUG FIX: wrap in list(...) — on Python 3 a bare map() is a
        # single-use iterator, so the forecasts could only be consumed once.
        return list(map(Forecast,
                        times['startPeriodName'],
                        times['startValidTime'],
                        times['tempLabel'],
                        [_parse_int(t) for t in data['temperature']],
                        [_parse_int(p) for p in data['pop']],
                        data['weather'],
                        data['iconLink'],
                        data['text']))
class Report(object):
    """
    A container for the weather, forecasts, and location information.
    """
    def __init__(self, weather, forecasts, location):
        """
        Creates a new Report

        :param Weather weather: The current weather for this location.
        :param forecasts: The forecast for the next 7 days and 7 nights.
        :type forecasts: listof Forecast
        :param Location location: More detailed information on this location.
        :returns: Report
        """
        self.weather = weather
        self.forecasts = forecasts
        self.location = location
    def __unicode__(self):
        return "<Report: {}>".format(self.location)
    def __repr__(self):
        string = self.__unicode__()
        if not PYTHON_3:
            return string.encode('utf-8')
        return string
    def __str__(self):
        string = self.__unicode__()
        if not PYTHON_3:
            return string.encode('utf-8')
        return string
    def _to_dict(self):
        # BUG FIX: materialize the forecasts as a list — on Python 3 a bare
        # map() is a single-use iterator and cannot be JSON-serialized.
        return {'weather': self.weather._to_dict(),
                'forecasts': [f._to_dict() for f in self.forecasts],
                'location': self.location._to_dict()}
    @staticmethod
    def _from_json(json_data):
        """
        Creates a Report from json data.

        :param dict json_data: The raw json data to parse
        :returns: Report
        """
        return Report(Weather._from_json(json_data['currentobservation']),
                      Forecast._from_json(json_data),
                      Location._from_json(json_data['location']))
################################################################################
# Exceptions
################################################################################
class GeocodeException(Exception):
    # Raised when the address-to-coordinates (geocoding) step fails.
    pass
class WeatherException(Exception):
    # Raised when the weather service response is missing or unusable.
    pass
# Human-readable messages for the Google geocoder's status codes.
GEOCODE_ERRORS = {"REQUEST_DENIED": "The given address was denied.",
                  "ZERO_RESULTS": "The given address could not be found.",
                  "OVER_QUERY_LIMIT": "The service has been used too many times today.",
                  "INVALID_REQUEST": "The given address was invalid.",
                  "UNKNOWN_ERROR": "A temporary error occurred; please try again.",
                  "UNAVAILABLE": "The given address is not available offline."}
################################################################################
# Service call
################################################################################
def _get_report_request(latitude,longitude):
    """
    Build the forecast.weather.gov request URL used by :func:`get_report_by_latlng`.

    :param float latitude: The latitude (up-down) of the location to get information about.
    :param float longitude: The longitude (left-right) of the location to get information about.
    :returns: str
    """
    arguments = {"lat": latitude, "FcstType": "json", "lon": longitude}
    return _urlencode("http://forecast.weather.gov/MapClick.php", arguments)
def _get_report_string(latitude,longitude):
    """
    Like :func:`get_report_by_latlng` except returns the raw response string.

    :param float latitude: The latitude (up-down) of the location to get information about.
    :param float longitude: The longitude (left-right) of the location to get information about.
    :returns: str
    """
    key = _get_report_request(latitude, longitude)
    if _CONNECTED:
        result = _get(key)
    else:
        result = _lookup(key)
    # while editing, record the live response for later offline replay
    if _CONNECTED and _EDITABLE:
        _add_to_cache(key, result)
    return result
def get_report_by_latlng(latitude,longitude):
    """
    Gets a report on the current weather, forecast, and more detailed information about the location.

    :param float latitude: The latitude (up-down) of the location to get information about.
    :param float longitude: The longitude (left-right) of the location to get information about.
    :returns: a Report instance, or a plain dict when _USE_CLASSES is False
    :raises WeatherException: if the response is empty or not valid JSON
    """
    result = _get_report_string(latitude,longitude)
    if result:
        try:
            json_result = _from_json(result)
        except ValueError:
            # the NWS endpoint returns non-JSON content for points it
            # does not cover
            raise WeatherException("This city was outside of the continental United States.")
        if _USE_CLASSES:
            return Report._from_json(json_result)
        else:
            return Report._from_json(json_result)._to_dict()
    else:
        if _CONNECTED:
            raise WeatherException("No response from the server.")
        else:
            raise WeatherException("No data was in the cache for this location.")
def _geocode_request(address):
    """
    Build the Google geocoding request URL used by :func:`get_report`.

    :param str address: A location (e.g., "Newark, DE") somewhere in the United States
    :returns: str
    """
    normalized = address.lower()
    arguments = {"address": normalized, "sensor": "true"}
    return _urlencode("http://maps.googleapis.com/maps/api/geocode/json", arguments)
def _geocode(address):
    """
    Like :func:`get_report` except returns the raw geocoder response.

    :param str address: A location (e.g., "Newark, DE") somewhere in the United States
    :returns: str
    """
    key = _geocode_request(address)
    if _CONNECTED:
        result = _get(key)
    else:
        result = _lookup(key)
    # while editing, record the live response for later offline replay
    if _CONNECTED and _EDITABLE:
        _add_to_cache(key, result)
    return result
def get_report(address):
    """
    Gets a report on the current weather, forecast, and more detailed information about the location.

    :param str address: A location (e.g., "Newark, DE") somewhere in the United States
    :returns: report
    :raises GeocodeException: if the address cannot be resolved to coordinates
    """
    response = _geocode(address)
    if response == "":
        if _CONNECTED:
            raise GeocodeException("Nothing was returned from the server.")
        else:
            raise GeocodeException("The given city was not in the cache.")
    try:
        geocode_data = _from_json(response)
    except ValueError:
        raise GeocodeException("The response from the Server was invalid. Perhaps the internet is down?")
    status = geocode_data.get('status', 'INVALID_RETURN')
    if status == 'OK':
        try:
            results = geocode_data['results']
            if results:
                # take the geocoder's first (best) match
                location = results[0]['geometry']['location']
                latitude = location['lat']
                longitude = location['lng']
            else:
                raise GeocodeException("The address could not be found; check that it's valid on Google Maps.")
        except KeyError:
            raise GeocodeException("The response from the Geocode server was invalid. Perhaps this wasn't a valid address?")
        return get_report_by_latlng(latitude, longitude)
    else:
        # translate the geocoder's status code to a friendly message
        raise GeocodeException(GEOCODE_ERRORS.get(status, "Unknown error occurred: "+status))
def get_temperature(address):
    """
    Gets the current temperature

    :param str address: A location (e.g., "Newark, DE") somewhere in the
                        United States
    :return: an int temperature
    """
    report = get_report(address)
    if not _USE_CLASSES:
        return report['weather']['temperature']
    return report.weather.temp
def get_forecasts(address):
    """
    Gets the high temperatures for the time period

    :param str address: A location (e.g., "Newark, DE") somewhere in the
                        United States
    :return list: a list of ints
    """
    report = get_report(address)
    if _USE_CLASSES:
        temps = [entry.temperature for entry in report.forecasts]
    else:
        temps = [entry['temperature'] for entry in report['forecasts']]
    # Every other entry is kept -- assumes the forecast list alternates
    # high/low starting with a high (TODO confirm against data source).
    return temps[::2]
_load_from_string(zlib.decompress(base64.b64decode(
'''eJztfQ13oziW9l/RZs87s3vGoZAECNJdPSeVpD6mk1ROJd3V3ZM+fYhNYqYweAFXOj3'''
'''T//29EtgGG4NIDLYTeneqyiCJR5/30b1XV//eG9ixvXfw771hHI8Prl9dvxrZ40i5C4'''
'''I7z7HHbqT0g1Hy8PoV/L5+decE/WDgXL/6VxT4f7cHg9CJotcj1x65PXTr7R38cy90x'''
'''o4d7/X2/n3tI4Su4UE08eLoeg8doH+KZ/Dfv6f/EEnSgn6Dz40D3/EXUy/nmeb0Av/u'''
'''N98eOSLD9d4Zh3K91ytIGg2DMJZMGz+MnRSD+Ejf9tz4AdLCr3EA/3bhCbz/NZ/3z94'''
'''jEe8f2wMHHQUTX3xFGn11vnxN7MHI9d0oDu3Y/er8ZoeO/ZvnfHW830gjlXvrBaE7sC'''
'''Wr9Pb0aXXAjdThB9+NnQG6jO2Yo5CqyQ+XMjXp834LJYZV5uevvdzMuQ3CkR0Dvt/SO'''
'''ZQdHT309rSHfrg8zIO53oNpPHLElyHxQhtc790ArEFU9E689qGuQ8eO4hUpkka0k9dE'''
'''V0xdN1TdWm6OJKF/JxLum6qCNaKpKrGS/5bSL3Ze0vTBJB7eO9JomGqpGpEAQ7HFdMt'''
'''cCaZ8YCWLRuwG/sp2zIIyTcswC5ooD4kQQ6MWlf3yb3y0pePh8OLi08efPpwdXp0sjc'''
'''zrva+ucz+Gbu36vFafZ37+mZ9gteVHUlY6uaGGsNhMpnP54/fXe9c+pNj7tfd0Ue37k'''
'''B4gRD008rdBYM8ASQu+ihwtCu/3DmAZu3490S2Va7OCWzRyFMSyovvsvBPdaxPdmSl6'''
'''dr5hAa7piqpjoletnhZVsKXpIJ8aWMM1TTEtFWuaBApKLGxM5UmTchtAWSaltABUHhE'''
'''x4D+rGNH2Se/n3uEbFdow550+dIRyDyJ36IQgwL9evzqzx0ee2/+ijIfjv0MTvNaYYq'''
'''gGUS39L2/7UXwFwF5zMf4XWOBe72NCFEqJylRzUX5fQ/MFYycUI8j2zjgB2DsAJHzU8'''
'''JdRGJ2LtQ4efn53ibBlarOXfViPecZjWC5FCgJdsK/C/1tXWD/A+EDH+yo7UNXCLKeB'''
'''aB/IZ6HDcQhlH+gUjUfo4vhqlmEcBoNJn2c5AtrghCL9JRQSe04PfT7MljxwY/F61nz'''
'''39/fKfThU/MC2k6aLnPtZjhE07gc/WVbFBM5lzTT4NMN8qh8kbReLEejGk7TZeD/gaf'''
'''I4kS/zt0lHkMx7B4TZ/NsEpsb83f1tkNT15HPmaeyOnD+AQYlXF//5+T9m5iWXj8dO1'''
'''A/d8azQ4+Dej+F/KG2zTJNBjtAe2EmTfrHj3zMvZt/4fPiLrmY/0k85iHh3pFKaeXfr'''
'''hk42p6FrmbcggdKPff/m7Qcx3NOG5bXKN+oDzP7vneQzX/bHmAz3fRhb2caF6RPGF07'''
'''oBoN0iP4zfQUvrwLfvRvG8+T82XASRgP7ofAhOl9K/5arIh4KHhWkvYSpHC6mnj4sSg'''
'''8SdzG1eFSQ9ixYSps8Kkh7NXGWq5g8K0j92Rn4aXrxQLz4Nd/CP8KqNrhyFxs4O9VNm'''
'''OHw//mpnkuF1SvVkEklUxaWKgtLlUWkyiJSZVGpsqhUWZpUWZpUWbpUWbpUWcZSWcvD'''
'''JnZG41P7xvHEiNk7DcSSu/cehp/4RwsPfuVYpqsL11pnVxeOj4u8SZiM6T1NrMl7hqj'''
'''znqYnv3Dyy8j9Ysmv5KFmJb9INqWevkse6rB0QsvMJhdnBmP+VX/ieb2n/iHafCovEo'''
'''GVn6UXMIG9B3TkBZPB4goS8Tew6PiFL1Znecybks84XCbkl0HZpGntmq7D0dD2+w66H'''
'''Ab3ThhxuZW8mY94tx/4p67/Jd/4FSzOHdl3wB9f3ce3169GQF4mo+tXftSPlTFnq72n'''
'''FbSucvybL/5aClpXOWsDtLYWunXu11LQuspZG6Ct67KtAxQNQzspaHlRGNp/2OEgXRJ'''
'''WvP0h9FYliJ3f48LFvC9WqB66d+MhspEX3CM75BocpBEFnU/37vAeHqVvTDQaDxVUtO'''
'''BFfPWclTYE8Yl8WGiRoa4qzOCFoRunH4xc/w4JbQHfqCPXR5Ac2bewT/ODwC/+YAl+P'''
'''f1k2ecEnJmGIvkYGrkDwfelvpipI1bQSb488WUdxQFi+e8KfcS0irB19OFh7QoaS21q'''
'''8k9h9RH9g5Xij8PLwm8zZVnOFpVr1CzXUoqE8krYZEXxJW1Wq091qfT5WZPLcIj6icA'''
'''PblGUyHwFocrPmkq6iYp/Tcr6c75lnoShA7vwm8gJvy5qEDh9GGR2xRkk/lTzMtN3TD'''
'''fxbwKHj8m3ruMNclm4RkFkwUbu+aKaQtd0k+ZTFOgqMM2qMyDRTNOjJiobNtfY5AfvF'''
'''XBskVDP4zh27pPnNP/8k+MNU2z5HaorNNO8oKXnSavh/E7l3SRKlEDnh/kMM3pcRvXS'''
'''VGKtF0kLJcWPbuTeuImyTyBQFrZLhx7XaEy1VViFtjTzk+/0ImkFVSH5Fs4reOaKMPG'''
'''SawynypWl5jgaul6iTmN4Ko/+FPsg/ken7+v0fZ2+r9P3dfq+Tt/X6fs6fV9DurJO39'''
'''fp+7ZVV9Pp+3aty7YOUKfv6/R9dSrY6fs6fV+n7+v0fRvQ9z3d9//Gs/tfoptJeNdDX'''
'''+3Nu/6/meERbbHsMbroml2Voc1Te4Ef3wUjJ3yoeWpPLt9mnf9/dMM7+KCs7/+Ph0+r'''
'''ROf7P/P9f5OZoj8ebtj1nzKFMMMiDC+3CVo6OsVMnWCzAV9wgIFNajFDAoVmqtRkTXr'''
'''980Yhls4oLapYDgum1g6c1nvGvbwLrv6WoupM0yyjyNOfWYqmaTDetJy0/va/jj8eXf'''
'''18cYKG8chDFz+8Of1wBED2+Xc/0yP+1/HVMfrp/dXZKcKKCqtg6PZj/vzkHFAvmtioE'''
'''oBMfXX16frV77xILPJPf+xHIrcyiIGxfXftfyu++vvI86PXK0rC/CRGkh0+B0kPPNu/'''
'''g9QOzLDvvoXBOeAFcYsS4gXsO/83cb/C+yOQj8Ap9q+SydBPfr5Ot9OveIHfcH4fRk7'''
'''8+sPlx33T1C1uVkLXr3iJwJI957u3adujkzAMwm+vXyWPk+/x9Z4XyB/lPpHPlRSZyz'''
'''LI2uYyGa9gR3memmBRSkGB6odfXdiFuBF6CCYhbEShZJgSITyDsYE4f/KAY6J0fKDpg'''
'''EE2bGqmD0MHBgmIMXdu50RBsoe9d27+qwDkF+fhPgiFDJgjTIvrITHUlz45fT7/vZgi'''
'''WsjaQ8L02EPcZDh/2Bc23lnuKG2DCchiKOEfTgwD0bFHBbA5LRBTfA76neM7oZiGi4m'''
'''Pj5Tx5MZzI7EjyGRZ1QvFZfB8oXsziYOnlBJyxUW+ufNzIjv/B27U92zYI4R8BSgu0I'''
'''Zl8/GIwuAmWIADmwvn995t4HncyDLPkZt5P+3/cLh/BIMSPnWzMDM+nLw+GU34SPxww'''
'''tISPNf/wofna74aPngO9IXDl3K+tM7ma18wm2Ho3MIjRZlq32zX/03VsKoyJUnB9XE2'''
'''L6kfOnyJeOoXxqHrx8WfEK/SL4hiC8r77r9Hjj8B/jhChwcCxr/7gReEB+h+CE+/QTz'''
'''t/sDpB4nnxQHyYSv0zZ8Lif9bhf/evl2ZPPsVviflE32W9634j5c5fZUtl1LLkip3GH'''
'''yF4ZLkRLxY9cQwj5aywisgjk4I+EU9lrK95XWpyKaINfW3aGR73m83gQe1uYUxtB+5f'''
'''zgHWCHOCNZuUYVp7ZB4f+/wKXTAc0AhoomRPa3tihZPajpNnMN7sKqSB1ms93bop/iS'''
'''70NdBAI0rfQnZ1BV4QeHz6mk08uKWmz+P5Ub9y7NcmuPXO8BXh6Gru310HvH++pwAtJ'''
'''Dlzb3+wjd27SheENCQkzG8bxsMcpMNdeWUwDwHW/i4Md/SZX/UtL5MM1wvttNmW5HN7'''
'''DxuRMKvf0kLWwU/Whsc8XbN2gM2yQQD/txMD5QNCgw8zmS/RxR1EY+F2fHMtTZ4u0i3'''
'''6h/AgcRi026qqXr1dwqcWt/5UZIBf6AtSld9Pj2tD+JEX+TrlhAgTh3+vYmGHBlph1x'''
'''CubbX02uwfru24H7dfaUJ3TCghdCYAHhvfNhvUiXw+++taeg/jtd94UMSbNEX9wxfAW'''
'''e2P0+bFeBZMBTwvNdv7K/+4t/E42/mRexUv7B4s27jQv3gCvOOY85/3yJ3sOedix0dt'''
'''9lkmfKXvXnt1ykfoWVfAxtPUW7uAxBqTMB+rHv2L7bFyTrMB4F0ZjrC/voMKOeCPy/R'''
'''rzDoExo6VA0fPpXwXf4EMx+YUFEzwpKkfKecAe81wLb9oK7gDeiO7pDUdjPPFX+NeZs'''
'''6N4dxEN4zFQu5cQInv6yPf7P2bhYKPw+Kijbud+HF08tmiMUxvLvVvT4zF0v9/VZEsE'''
'''c87QodAfQ+a/uQns8dPswH6YfUe7c2xmgzJj5eHiYGTSzupi5uohfSRXs4iaCL6QTZE'''
'''VNoLWaqk1+3D+qCrA+zed+bANtAwbueTDe+q7YbvHc/Em6oKVPbmB/4ITT1+lgTkRZB'''
'''ghwp/8HP6PJaGSHD2I8wEdCJFYO+NlP/Tnh4WD20HNu40yhCUCDlTRxruUyWSHfjS2K'''
'''z825KczveNNl5pZonXggwOS/Lt/Br8Z2LQjnzn30dAi5BoDdMyxPf4i1JN0hSKOB8fT'''
'''x9haWnFqgPO7pxfd5fJl37LA/hKeX4h+QU7ydru+uPwZplNLloTsYCKqe7jts+LTnCv'''
'''sG+moD60imWGb6iHFcWsbXg3EY/Mvpx5kyYIMZxYv5Z3JJ4BTP5qX838QR6tEsr5+Xd'''
'''yJ2qEkFQVhzHzLEhTqXZ3w+jOzfPce/E1OAcOtXMtWmTQO779ugP+Efd2//Jx66kZKU'''
'''/Pqvyd43SQgzOHT++r+Z93/96zdJFXKNWVyRFHk0uRm5WezvgrQVuDIAdjSwT+TY7sS'''
'''2yBZe1tnRNW26pPCpA3VaD0FHIDVM7jvX34dNYxyMDoDcpQ8491G/SSQ8/9x0HMGfof'''
'''iTLzfLSxLf2y2uSbn1pmD5CPno5CI2+V9ueANPhjSHaBwAT5nrSNwIeLj91XY98QWuU'''
'''OFyIYTdrBPxzdJUrZqR48n/krrkhuHNBOruZ9r5DbBDLmouQuerG0widJGs0YEvlHdi'''
'''5EZxED4onEf+z/9mW2nGF+T+V9ios9a8DYJ4yuG4dqtgiSdLSzxZXOLnokXNLeiJmzY'''
'''SpR6g5Fvph0Lx5+C7GcuKB8kTTq15g0J2KlR5YSqekgSiIqIA3phJT8fcYzLNnalVDH'''
'''AGePW6+MPl4VQoPFrwTmdAhlElwvctf/Eu+Mrz8cHEB88PyqWC7vhG0ocNdPzXCAV8O'''
'''XWF1usGcZW70EbNBCTJiWruSZ8V1bMWW6wzEa2xii/2BLnhY+Pax5TowsFl/zPXHnLf'''
'''2Hv7IXl36Xp8y3vJCfxdD50dI6JaWE36gH+a/1/R56mkKFpUVX13PHuQVDEZ6fARicK'''
'''S0x1RWtJR8qt+MXdeEEUweMWgeJf+qGxwbUWNsyVDM361+w8pwIvkF7oIYL4/1McpjJ'''
'''1pWYc3fJL98Ija9mE7yr1X0kYTv9DHMR+IXKPqOlG+6tPJN11H4J//tb+PMnzRvkvWk'''
'''vPDqw8fzw9P0eeTw6v3J5/Q5cmnHz8cnRyg/Orr8n1iHzLwBfbSvnXih55YbD8H4Zfk'''
'''X28nszUW7aO3Hz+h0w9vT1K5gPb3E0BCSCR/pag4UwSJ/XWqZkyTzrDy92LLyZ/ylTK'''
'''7hU2Eh1hgFjnMH+64L2w5ywzptFD5jW4e0F/+bxLE3xy5vHqXcfILQeWgMMQ9KWZMaL'''
'''q85wXwOIhyEpjrIVMcaefl0OeE/YIsStmKaIJ5VVJmI1Jya4zQlKesBZOM5JpWoZSoT'''
'''BMV85M8nhkHSSG8C0gRIZn18FQITqVbpuPhL662EEqMeOR911myOktWZ8nqLFmdJauz'''
'''ZHWWrM6S1VmyOktWZ8nqLFmdJet1Z8nqLFmdJauzZHWWrM6S1VmyOktWZ8nqLFmdJau'''
'''zZHWWrO20ZK3jYjZ+YWN3hWp3hWp3hep2nMXurlBdgaa7QrXxQ9kvps83ekT7cZI6mg'''
'''bzud+CGCppQCHJNbU0dYsC+3se/KiWqK7MsVkh/dmOgAXfCd2LVH0+dzFT1iWnM8HEN'''
'''31XKlMY1bBWeXcmD85FqM6Y0cBqDSg0S9d1XBnQg8PQqG6ppt6gcJ5fZlkqnDO3Wm67'''
'''dH7O/dy6QO4EZw3UneDsBGcnOHdqQe0E58vo5x3cyWYCgnbxQDNd2cUD7eKBboeI7uK'''
'''BFsLo4oFmc3TxQHchHujjRLQPk7tnb4FV+Mj2PAedByPZLXNVhnyThzA2nPUs9Pyswq'''
'''EPO0o3kN3dV+XIY40mN41yiQsbfsgK27LELfKfhjC3zXRar0YzXKdeNaToWoMkZ75S9'''
'''FAR8jY5jgjHbDKsVYkdEZqZWpqFGxB+HAWIYLVS+CUBog2TqM0b2WeRqkuJzjxk9WOI'''
'''zruTj2cnV58+HP12dHJ+dfKpcbbDK2XyWpkqsXClXkLUjliGYWCLqabWhLVddL6qMTX'''
'''9hNwg0A1D05NKFIxIaSI0tsPYtb3fYKIK3/oDFIcTp4wsLUnueYlNcRG+VPBAEOIQtd'''
'''Qal+ZomYkcDuxRVE8fUJ1lswLyiB8xtQeyzOro4zaKx51UBWQGfQ8dfdy0LsBSLE3Vs'''
'''Faw7UV5HaqqKaZmGPqiREBr2SZaignLsV4prwUMnWpFm9X1KQOgVTAxGOzzk//KFfcC'''
'''kmrhxykFWpeVz7nPn4FE/OrcObJW1Ir0jUnEGvoDWc1B2xLwyvGckR1+kaxGefLNysH'''
'''zILy3ZZnJuZQolxaAj4BLTaJLgl2dNA+XH3eBuS0Ub2vBeMpjqkiCLElbiDIO7v21UQ'''
'''kx+3uINxQSQHqoaDi0ySl0S6Eq7HcMbaX0RNn13VL4tfawXW9AxgAWYhqMwc5a/FelD'''
'''LcUrDILZJLV/EacY7NUQzcKQOUQ6dQ06QpEW8oxXtYYeAasAx1+dfxJHaHOc7RMO8SZ'''
'''48MvobyXWUWGPFKfH5y+CcJhEAwakXx1oEujbtQecDkZjdy4nu5DIs9mqd/HobRJ6eP'''
'''7bSR8W6D4eARqTaNElURbkrYO83scp0rXwl6ycvTQx/dI4Nm0dyVWVEtn2Ki2pWNFZy'''
'''pmdCndGrzuAAVTDdWsVLILFFilTXIojsakVGcVbhuiQYBe7AZ7qtfThkX0mZVhC3rc0'''
'''lMzTBGajjlthDkJUeGjD5Fn+wNZOV6dp12Hik9ufzgK/EE9OiKVa7OE5Ny5Rz8H0rqo'''
'''8587UrIu1CCfsCaJtiRti6QkNy176PxnJHBtmpyoim6psMmuNnNrCtYx0y3ShKwCGCa'''
'''xdAkXAIBh6KZptqDpEagMZpFSTU/SMhbwNmOXVD0voOefA0s5csO+9DnJaY6WWcqxgy'''
'''6DWHZvXpq6RR2J8PWo4xfyNOlNG6nEse15dk0nF4k8m+VVH4J7We/JD91hl7Wh1lXVs'''
'''CTRlqRtgVMli2IPpUtJD304RALRptkUbKSJqdFqBYClKUAYNN3CzcUN4miE2GZyBhQB'''
'''ilBDK/AaWR+n4k2EGS2nVAIJIab1qDO1m1D8vIx+fw6Mahf0Pud89KELMUAlcZZnaNP'''
'''2ZId2FMR2TeuTTK4uFORT67CbzIRqxDRlfY9Wp21R2zOfjzyEJRKgNh3IkimqplpGdR'''
'''hBwo0zGj9f01xMQwBDNYatSi9SgYafaTFTadYgORGoGLCOikCWAhIIzOKgj1tKUur1P'''
'''9EtLTllUxB7o+3u14lFutM104bejtM1/NTuqWP7g7DOOd/SHG0qfzz496AmR5HJtOHz'''
'''QdBiIJ/kg4UcdfqTtaG2NJ0xSbQlaVtgKdNzTJkZ2UNHh0ig2vTRJqYwlRG1+vQtDw+'''
'''FTaIRowmGInBQi6iV8jLBoZkGbZKbCDy6ig2zlJskYCg1MdsJVlK7u4llqI3xktqdbl'''
'''k0Pbv8InjJceiu0oUU05Ljtm1RH0NuX5f2Ey1L3SId+Rja/l3N6zMk8nT6kqfWYTeZC'''
'''CUmxrL6ktVpW2AiYkHpoXQeJroSDmjTuhJT0TWQAmblWVsT8+gPrGhf+vRdMqCgDHgF'''
'''k0JhWDMPlEZv++BtwzUxBXv3JUimuejNvKU8pG6Pq1hv0He3Zs8zhslLoiG7YMJ5Y3u'''
'''xOwpCWZgV6dsMpmqHDzX8jc+OO8G9LtQEEyx7yLokbYuGjtmw7YmbQzmmTSsQLIVqpi'''
'''lh92aGYliYGkxvztAh0Bi6ZlTKFYFG0yxa4Ga5Pi0Ch8MM0yoQLItYYKetFd8ktaUyX'''
'''FTOUgnBkuHEWut/SjSSKC3WOAo6SZ5L3pgkv7C9EbpywtDuO+idHQ4cX1aiyGdt+Ugz'''
'''t0wjfsTGkVU3VOZoNdxp1A/qaUuqs3TKkqfWYTc5FyzPhrxzycq0LXKuzFRMvUsA1eY'''
'''1JpRgquoF5gmU37ESxTCZYbGldGvZN1PMiEmq980chYktvR2NCcWWCsSkyq0EGgZ4y8'''
'''5oTOr0uKFbTG9UY1Kn54Hc6lrnUZI29HZ4lPzoep5956ATMQClgFZnaTtUzCQMQlkpX'''
'''pa4TV+Y0B7bw6Cm+UkqVxct9zmQqkfSlamXSTLMtyNqLo/VbmqSoUuJhpsIxcJhMOAe'''
'''rPoethSGpjd5JEc0i4qLbpyRaJItZSd1+xqrwAaa8yup2eWUMRO/IIPODrCTN47nuf7'''
'''d0B7JGnQqMrQo4T8PYQQEo3oCXiZTd3vt85Dwj0BtmUQ6HnBJ2ha9XecTkt+5iwSoTR'''
'''8YNhV+2ZtWuWnl7oa62sx9rByDppLpYZaqmK8pFlM31AJ2tDZWImAZFisSmcsNY6kGl'''
'''v30Zo8K1+txg1GMjaZoySO73tKAmNCOnWwROzmzw2hoe56sz0Zp8jYPCzt22K95aa9E'''
'''ns3yksPwi+1HtnRwmeVVp1YlOlYyR8CIocsGqy1J2yIrmU7FHjr8hASkTWtJdMVSLYt'''
'''VCgOLKAZVudK9iR2zQGESrTp6BYehy0QtfYKKJEHDKg4GCyiEGDsSr5bXCmOiqzqV8p'''
'''dpvsNNXSe6IeUoI9/tz4GCfApsWX/MJL1U5Nd1Gm9sD1rTdiNZbXtVhjbdSQCafSfrh'''
'''VSaerPR4Dg0J6zpGFOdZ8PR4PyBa/vSAeHOOzK1tsj/BjVlVTwlaVsgU3x57KH5ktJD'''
'''H86RgLRpBQ9WdEowkQjCzhSsMZBo6so4J0/f7/M4ZZYOe34pNKZadLHP2kiVaBuobkW'''
'''wlQQJMXbjPHPS4fxWJDlOJapnUEPXGzM+pb2uUk2OWQlIlmrQ1EHmZXgin9p+LT/k07'''
'''Yvb3wPi/vAfgCgX6RliESeFnnWm9D+IwjdmlFXpHJt+lLK36W1PVc/dQRlbdoepksfl'''
'''ypJ2wJB4atLD+WmYw9d/YQEqk277vJb/ExWGFkE5ffcOg/equqENRgZjqNhmLHKEBwC'''
'''DcUGKz6khNbqwitQMWZVxK3lkAg1cHHovC3lLKJy3CPWkjs3JWqpMqhlY5YpAcnggOS'''
'''OTSVjQdMt+oLsUjtxeApWfxu9C0Lpsy2VOVpkLEdDO/QCWNTrURa5bN2Bo6fWYTdZC+'''
'''Uho2QPHK1O2+aBo/mUTA4ccVSbZi2GYsG+mWnVl+0RfhGepjdz7kTAoHqlmUqgoCor4'''
'''U7rYytJ2xSBWkSEqa7LfnezJIXXSTMNzORIStrrGLI0d+iIYyLYoKYcS0nGAMbEpC/o'''
'''osXtDxr31nbDG9v/IitDKtK3SFBmSNKI4yAKVzTe6kpUZt2wN41nR1+kj1R93/GUtXn'''
'''4wpomG0WuJG17UeRmI7qHDr9HAtKGSYqhKaZmWkZ1nHOsMQV21KzI4vJkOSVgGCB3DF'''
'''k/T4FGY0St3kk/nqYIWMzUNLYS1hImYlrEkEWwUcLyEvq+oyfy1SloJkmYn4NgcDMJp'''
'''Q/ulCZvM6rtQ8RdS97Y0iFtKzJs1rvm3I4ie1JPEySRp7vD+oXyK4yZJXtfQEna9vjV'''
'''dGFJbq/miDbtXaMqJjbMoki0KCfTGFU03WzGp4Zj4AdiZM/QcCwG0TBu8rYAgUq3zPL'''
'''LAkSzcOTFOqktJVZprwOzkgzz13TnQwNKmqlq9P1zoFY7cHDqH8F4LKtcKEnbcpiZ5A'''
'''pnSdhliVtkg8/yqurO82d1TRr1/NEJNmQ9f1anbfGcVzKUE6cfDmjTp7yIwjDDRXGAU'''
'''U5kWYbCNBC3szPCDYhRAUZjVvX1yRwMPyPUIHUSYAyVVMTDEVAotXYkHI7obqpNg95t'''
'''T68bWhoWUA6TVOc/B+4EMq/G7c88Q9vU6TJW0D/gJ8hnV/7wdlyZp0VOcurU9OypytD'''
'''59Dy1DrvJR0AQ6LJ8pCRtC3xErCs9lJ+8U68efdO0hBiKDptpmSCuGFNmNWEx4RhMA2'''
'''SlFAhNx2y1kFwbKxGgTK3o5NYSInU3jkrV6mtiqlaTFy7V6nNdV42XdN/S9pvI3oTOy'''
'''AnlI8FVpG+RgnzvxpE9rsdCJPJ0kfleLBexTIpl/YtL0rZnV5pNxjQuH0DatGGJKYaq'''
'''6tSQupXYIAQX7Uqfbl5gICGZVaSJKEJBLa3R89ocjUVo0cms5RbRGdsNx+Kkr7nCR04'''
'''1ktQPGyZr8Mh20vGqJulYnGDSqDnV73S0ZCtoyckwdPwbJ7yTRFmRvk3NiI0u7D9qKk'''
'''eq82w6MJ/7RyAdSubwl46TrAu1qVOqSaItSdseJ5nNxB46/AUJSJu22FBY4lXd0Kv9S'''
'''bGm6CKafRPxgjkObFq0+t6fBAfF2GrUXsPxWKZZdFfycqMQunjhz5bSEtHdWLUsXZKW'''
'''iPphqlp6Y6eyk67XVGxI0hKBSVNVpnfHstFqXtL+sexL1/vqhCIQhazhpipHmzdU90+'''
'''dYFCPnEjk2Sw5OXN934mCWJaenHWR7taGWtepKXvUqSRti0eyM9Oxh87OkUC1aa2Jpl'''
'''gqyKDqyC2aQlRsGkaD3gUCDEjD6jDGCZhpEJlGrTgClKZqBZeBLyNiu0FUkl6HSiWqB'''
'''02uwVUekq45/QnHRJhqyLqWcEiUEPaS1Cc74J2bnEw+df1+4Pl1Actmbdl393MQ9h1+'''
'''zkn6OFdp+javkZoiqXmRlFS2DZMvfk6rP5xEThzL0pazzma1NtSwpVRlI/mVpG3Rn3c'''
'''2qoF+HSKBadP0iyiwyzZMWsAuUE7cMawwplJr8Z4CtBbZy2GohJqVjh0CBiHc+LIq5P'''
'''H6mJdAZVm03HiVtIxl7UhQnKTLTQAseRQK89unLH6TWGPUSzQ0xiQ9ElVpu0xGgaGZj'''
'''Hb3caMtIl9H9tiNAw+954xIWipK5WqRtGATNsuOHaMLD2aV7cu2uWS+DV/4EAK/ddA7'''
'''JwjvnL/WPFxVJ/Om2Vn44Nm+7A0oZ8cdMVsXaqIyjUqiLUnbIjFbWICAnh0jgWzT9ju'''
'''TX3BEJDRSzFAsbGCNFUTef7oNh8MADiBxaJ7DYIxpBVbEtfEyAYfphqbKxABKWsbSyW'''
'''6oxpIut7TpbZlSLa4bGFvN3UQuup/o01tFpSCZVLXMTjOGtoic8aM+Dohv2bM+pcnbD'''
'''FiYAqlHVaRybZajXPJZCMInhGKlXYwujzqmsjamAttaWRteSdoWmcp0VMNG4wgJSJum'''
'''KJqCLVMt8qRBealgwZ5dt6jZ5PFgDoZpZqULtgADNEWrvqjoCTxFoGG6XqE/WoFlWwk'''
'''Kr5WFsSp5LDzpd80iRnMEhUMyNEwkTXcCkoktNfWTehnqo13wMTqDRTOKZe8DLU3d5o'''
'''3ksQ2y1rMnNbUpkvk2S1KOoN1ANPmuLEE56mxc6zuXpVPpu8lL0rboZJTOyR46OkQC0'''
'''ab5CVMMU6NFNhyUEwqYYEVVVaMZCcVRUFOvjqQrUBAQmE0SE9Em5bFqEiBY3Y3AyaJG'''
'''uqZJOxSlvU2Z2pxHkQBFmJZ6FMmCojAGO5+itL23Q3NyOfHGw0mILseh69/Jig+pXC1'''
'''7El3ZI+nYhSVpWyRX713Pi6BJgsndsB69ks7ZReB5ah12k11RaqiyJ8xK0rao/hFTMo'''
'''28A3g2HXnHVFTYRVNSeazLJIqm6cwgeoO3gHI0plUdEVCAYVQt0UStjWqlTaSWe28LR'''
'''IZONdnvbjYID9SJqNSgVPpaLeh8bBpmY1RLNDMzDF2TvlULhoDJrxLZBqIl/pGuB1Bz'''
'''WEUn0+n/8fvrvWsfUuz92tsbxvH44PrV9auRPY6UuyC48xx77EYKULHk4fUr+H39CtY'''
'''AsQC9+lcU+H9PF5TXPiwMPWR7ewf/3AudsWPHe729f6dfhRQTb5HKNUX8jmzPcxBf2q'''
'''RdhMozNEf9bB8d+nHgu7JaqcoceazR5KZR/nRhww/pi1RLErfI+RrC3Daza70azZC7e'''
'''tWQ0v5Js7r6fGm+UvRQEfI2CZKlqDozGa72m7FAHlHu9dGAcOQoDIuoMlYxDXiIcDhq'''
'''mhiJptG0Ira2hIgQazeYEa+UyWslbRqDylmGYTQXpVB0vqqxaSBEuUGgG4b2ooxjO6C'''
'''EOhzYo5oGpuosG7YtBV4Q2gNZZnX0cRvF4xboPp6oVTj6uGkjjaVYGr94ufIYElbF3Y'''
'''SGvigR0Fo09pZiWoZMuBwOA/bruMngebxVMDGYRaXuuhSQVAvvyvUCz7jPn4FE/OrcO'''
'''bKRWCvSN3jUW1p/IKs5aP8yIM8Z2dJ3KZYn36wcPA/Ce+lrO8+lRHmjyn+TyJ4eXp20'''
'''jur/ERhPQcTKjvCStIUo4+DeXxuVELO/h3hDIQGkh4qGQ5ucQrcUqsJ+x9DkLlm0eAA'''
'''VTW/kQmXAQkyDMdhZJ6eWK7FglVkgk1qwUHBslmroRqk/CCDSqWnSFYi2lGO8rDHwDF'''
'''jHTjipfoYeR4dfQvlI9RUZWvYBqQNdGnWzDraT0ciNazrXVufZLPX7OJQ2KX18v42Eb'''
'''wsUH49ArWmUyLrUlqRt0aVWzMEe+vgeCTybDhmDFdXSGa6+58DEis5UzOhSujWEDAEU'''
'''TDVkTP0cBVZpozH6AI1Jqc4q3DxEgwC92A32VK+nDYvorMHrlur2uKWnZpgiNB1z2kw'''
'''IYS4qfPQhqhGZQyJPuw4Vn9z+cBT4NWMJS+XaLCE5d+7Rz4G0Lur8546UrC3ikEqxrC'''
'''dqSdo2gwlnp2UPnf+MBK5NkxOV3w8Im+xqM7emYB0z3WoiYAqHYfJgtdUuAADD0E2zD'''
'''V9UgcpgFik/+SNahl+yaOySqucF9PxzYClHbtj3arEUyNEySzl20GUgfQi5NHWLOhLh'''
'''61HHL+Rp0ruZ4HTHtufZNZ1cJPJslld9CO5lvSc/dGen13dBg6oaliTakrRt3K8tFsU'''
'''eSpeSHvpwiASiTbMp2EgTU5O40tLS+PkLTbdwg7czABohtpmcAUWAItTQmjxKLZoIM1'''
'''pOqQQSQszFu8e3lUu9lH5/DoxqF/Q+ycUKF2KASuIsz9Cm7ckObX7FUk3rk0yu7tzxU'''
'''+uwm8yEasQ0ZX2PVqdtUdszn4/J4WMOatOHj5miaqplVAZ2MQk3zmj8fE1zZ48BDNUY'''
'''tiq9SAUafqbFbP4CA4GKAeso9xxOIIHAJLuk8KnX/0S3tOYC0NXsfp1YpDtdM23o7Th'''
'''dw0/tnjq2PwjrnPMtzdGm8seDfw9qchSZTF3suRfLUixNZ0wSbUnaFqOjZGZkEn+Oo9'''
'''r00SamMJXxC4uqRAMmRMEm0YjRSGhcjoNaRJUIhMdxaKZBm+QmAo+uYqMgdswSGEpNz'''
'''HaCldTubmIZapMR6Op1umVRaysiorTDS45Dd5UupJiWHLdti/oYcvu6tJ9oWeoW6cjH'''
'''0Pbvasbrl8jT6UueWofdZCKUmFg2TH9J2haYiFhQeiidh4muhAPatK7EVHQNpIBZedb'''
'''WxCJQW9G+9Om7ZEBBGfCKyrsCBArDYiWXfa9PScLbhmtiCvbuS5BMc9GbeUt5SN0eV7'''
'''HeoO9uzZ5nDJOXREN2wYTzxvZidxSEsjAr0rdIRrqbAEtr0uj9Ophg2UPWJWlbNHTMh'''
'''m1P3AHIMW1agWApVDNNCbs3MxTDwtRgDQZZFWgMXTOqL4zmaDTNoo1escPhMMO0CgTL'''
'''IhbYaWvmLlk4ROUslRAsGU6stf6nRCOSV+3Ij4JOkueSNybJL2xvhK6cMLT5Tb52OHB'''
'''8WYkin7XlI83cMo34ERtH+ibjqhythjuN+kHNi5grs3TKkqfWYTc5FyzPhrxzycq0LX'''
'''KuzFRMvUsA1eY1JpRgquoyoe0NkxkWW0q3ln0zxYyYpHrfzFGY2NLb0ZhQHtOelN9rm'''
'''DQM8Jad0ZjU6XFDt5jeqMakTs8DudW1zqMkbejt8Cj50fU8+85BJ2IASgGtztJ2qJhJ'''
'''GISyUrwscZu+MKE9todBTfOTVK4uWu5zIFWPpCtTL5NkmG9H1Fweq93UJEOXEg03EYq'''
'''Fw2DAPVilMmAKQ2v2dkPeLCpmFfcbrmiSLWUndfsaq8AGmvMrqdnllF+3/IIMOjvATt'''
'''44nuf6d0N7JGvQqcjQooT/PIQREIzqCXiZTJuV75/taAgtHMvHFey8Xdfn7WoS6XjAJ'''
'''Wlb9HadT8ge+nyIBKhNHxg2FWZYRKvctHJ3Q11lxlKyNRwX5Rg0lUwPs0jcusuxmLqh'''
'''FrCjtbESAcuwWJHIXG4YSzWw7Kc3e1S4Xo8bjGJsNEVLHtn1lgbEhHbsZIvYyZkdRkP'''
'''b82R9NkqTt3lY2LHD/kPNo8LVeTbLSw7DL7Yf2dLBZZZXnVqV6FjJHAEjhi4brLYkbY'''
'''usZDoVe+jwExKQNq0l0RVLtSxWKQwsohhU5Ur3JnbMAoVJtOroFRyGLhO19AkqkgQNq'''
'''zgYLKAQYuxIvFpeK4yJrupUyl+m+Q43dZ3ohpSjjHy3PwcK8imwZf0xk/RSkV/Xabyx'''
'''PWhN241kte1VGdp0JwFo9p2sF1Jp6s1Gg+PQnLCmY0x1ng1Hg/MHru1LB4Q778jU2iL'''
'''/G9SUVfGUpG2BTPHlsYfmS0oPfThHAtKmFTxY0SnBRCIIO1OwxkCiqSvjnDx9v8/jlF'''
'''k67Pml0Jhq0cU+ayNVom2guhXBVhIkxNiN88xJh/NbkeQ4laieQQ1db8z4lPa6SjU5Z'''
'''iUgWapBUweZl+GJfGr7tfyQT9u+vPE9LO4D+wGAfpGWIRJ5WuRZb0L7jyB0a0Zdkcq1'''
'''6Uspf5fW9lz91BGUtWl7mC59XKokbQsEha8uPZSbjj109RMSqDbtustv8TNZYWQRlN9'''
'''z6zx4q6oT1mBkOI6GYcYqQ3AINBQbrPiQElqrC69AxZhVEbeWQyLUwMWh87aUs4jKcY'''
'''9YS+7clKilyqCWjVmmBCSDA5I7NpWMBU236AuyS+3E4SlY/W30Lgilz7ZU5miRsRwN7'''
'''dALYFGvR1nksnUHjp5ah91kLZSHjJI9cLQ6bZsHjuZTMjlwxFFtmrUYigX7ZqZVX7ZH'''
'''+EV4mt7MuRMBg+qVZiqBgqqshDutj60kbVMEahERprou+93NkhReJ800MJMjKWmvY8j'''
'''S3KEjjolgg5pyLCUZAxgTk76gixa3P2jcW9sNb2z/i6wMqUjfIkGZIUkjjoMoXNF4qy'''
'''tRmXXD3jSeHX2RPlL1fcdT1ubhC2uabBS5krTtRZGbjegeOvweCUgbJimGppiaaRnVc'''
'''c6xxhTYUbMii8uT5ZSAYYDcMWT9PAUajRG1eif9eJoiYDFT09hKWEuYiGkRQxbBRgnL'''
'''S+j7jp7IV6egmSRhfg6Cwc0klD64U5q8zai2DxF3LXljS4e0rciwWe+aczuK7Ek9TZB'''
'''Enu4O6xfKrzBmlux9ASVp2+NX04Ulub2aI9q0d42qmNgwiyLRopxMY1TRdLMZnxqOgR'''
'''+IkT1Dw7EYRMO4ydsCBCrdMssvCxDNwpEX66S2lFilvQ7MSjLMX9OdDw0oaaaq0ffPg'''
'''VrtwMGpfwTjsaxyoSRty2FmkiucJWGXJW6RDT7Lq6o7z5/VNWnU80cn2JD1/FmdtsVz'''
'''XslQTpx+OKBNn/IiCsMMF8UBRjmRZRkK00Dczs4INyBGBRiNWdXXJ3Mw/IxQg9RJgDF'''
'''UUhEPR0Ch1NqRcDiiu6k2DXq3Pb1uaGlYQDlMUp3/HLgTyLwatz/zDG1Tp8tYQf+Any'''
'''CfXfnD23FlnhY5yalT07OnKkPn0/PUOuwmHwFBoMvykZK0LfARsa70UH7yTr169E3TE'''
'''mIoOmymZYK4YkyZ1YTFhGMwDZCVUiA0HbPVQnJtrESAMrWik1tLiNTdOCpVq6+JqVpN'''
'''XrhUq891XTVe0n1L228iexM6IyeUjwRXkb5FCvK9G0f2uB4LkcjTReZ7sVzEMimW9S8'''
'''uSdueXWk2GdO4fABp04YlphiqqlND6lZigxBctCt9unmBgYRkVpEmoggFtbRGz2tzNB'''
'''ahRSezlltEZ2w3HIuTvuYKHznVSFI/bJiswSPbScermqRjcYJJo+ZUv9PRkq2gJSfD0'''
'''PFvnPBOEmVF+jY1Iza6sP+oqRypzrPpwHzuH4F0KJnDXzpOsi7Upk6pJom2JG17nGQ2'''
'''E3vo8BckIG3aYkNhiVd1Q6/2J8Waooto9k3EC+Y4sGnR6nt/EhwUY6tRew3HY5lm0V3'''
'''Jy41C6OKFP1tKS0R3Y9WydElaIuqHqWrpjZ3KTrpeU7EhSUsEJk1Vmd4dy0areUn7x7'''
'''IvXe+rE4pAFLKGm6ocbd5Q3T91gkE9ciKRZ7Pk5Mz1fScKYll6ctZFulsbal2npuxRp'''
'''5K0LR7JzkzHHjo7RwLVprUmmmKpIIOqI7doClGxaRgNehcIMCANq8MYJ2CmQWQateII'''
'''UJqqFVwGvoyI7QZRSXodKpWoHjS5Bld5SLrm9CccE2GqIetawiFRQthLUp/sgHducjL'''
'''51PX7gefXBSybtWXf3c9B2Hf4OSfp41yl6du8RmqKpOZFUlLZNky++Dmt/nASOXEsS1'''
'''vOOpvV2lDDllKVjeRXkrZFf97ZqAb6dYgEpk3TL6LALtswaQG7QDlxx7DCmEqtxXsK0'''
'''FpkL4ehEmpWOnYIGIRw48uqkMfrY14ClWXRcuNV0jKWtSNBcZIuNwGw5FEozG+fsvhN'''
'''Yo1RL9HQGJP0SFSl7TIZBYZmMtrdx422iHwd2WM3Djz0njMiaakolatF0oJN2Cw7dow'''
'''uPJhVti/b5pL5NnzhQwj81kHvnCC8c/5a83BVncybZmfhg2f7sjegnB13xGxdqInKNC'''
'''qJtiRti8RsYQECenaMBLJN2+9MfsERkdBIMUOxsIE1VhB5/+k2HA4DOIDEoXkOgzGmF'''
'''VgR18bLBBymG5oqEwMoaRlLJ7uhGku63NKmt2VKtbhuYGw1dxO56H6iT28VlYJkUtUy'''
'''O80Y2iJyxo/6OCC+Zc/6lCZvM2BhCqQeVZHKtVmOcslnIQifEIqVdjG6POqYytqYCmx'''
'''rZW14JWlbZCrTUQ0bjSMkIG2aomgKtky1yJMG5aWCBXt23aJmk8eDORimmZUu2AIM0B'''
'''St+qKiJ/AUgYbpeoX+aAWWbSUovFYWxqrksfCk3zWLGM0RFA7J0DCRNN0JSCa21NRP6'''
'''mWoj3bBx+gMFs0olr0PtDR1mzeSxzbIWs+e1NSmSObbLEk5gnYD0eS7sgTlqLNxre9c'''
'''lk6l7yYvSduik1E6J3vo6BAJRJvmJ0wxTI0W2XBQTihgghVVVY1mJBRHQU29OpKuQEF'''
'''AYDZJTESblMeqSYBgdTcCJ4sa6Zom7VCU9jZlanMeRQIUYVrqUSQLisIY7HyK0vbeDs'''
'''3J5cQbDychuhyHrn8nKz6kcrXsSXRlj6RjF5akbZFcvXc9L4ImCSZ3w3r0SjpnF4Hnq'''
'''XXYTXZFqaHKnjArSdui+kdMyTTyDuDZdOQdU1FhF01J5bEukyiapjOD6A3eAsrRmFZ1'''
'''REABhlG1RBO1NqqVNpFa7r0tEBk61WS/u9kgPFAnolKDUulrtaDzsWmYjVEt0czMMHR'''
'''N+lYtGAImv0pkG4iW+Ee6HkDNYRWdTKf/x++v9659SLH3a29vGMfjg+tX169gsXD60F'''
'''3KvWNDx4XKXfD1+tWZPT7y3P4XZTwc/x2a5jXRFaikZZjWX972o/gKYLz+VxT4f4GV8'''
'''PW+qSqEGBq16N7BP/dCZwxl7fX2oKmvoVWDsROKsWZ7Z2ItOwAgfGjxl1EYnYu1ER5+'''
'''fneJsGVqs5d9kDY847EdJyn41fb7Kvy/dUXoATYOCIGfB6pamOU0EAs85LPQ4ThEhBz'''
'''oFI1H6OT4apZhHAaDSZ9nOQICyh2dD/hxKHvkon2U2FWycjX9xsCNRcJZO97f3ytROF'''
'''T8wLaTNhzderMcI2jlD36yMIvJnsuaaflphvmycJC0YiyGqBtP0gbkHWJNk8eJRJq/F'''
'''T2iZV47ILPnn2aZN/e3QVLllA8kT2N35PwR+ElpJ//5+T965iUnAcdO1A/d8axIgs5c'''
'''z4nQ5/PPKGm9XHmhPbCTpv1ij37PvJh95O3pLyrLQu6nPEu8O1JNI/Pu1g2d1TlBaKU'''
'''f+/7sw6EY9Wmz8mrlm/QBevh7J/nMl/0xJsN9H8YYzhQHsyiML5zQDQbpUP1n+gpefv'''
'''zqhD739ZhngH9eAVuPBulNIosP0flS+rd8fD0UPCpIewlzOlxMPX1YlB7E9GJq8agg7'''
'''VmwlDZ5VJD2auIsVzF5VpD6szPw0/TigXjxa76NfwTuP7hyF5t4OumxeqWqB+L/85N+'''
'''KZUhkwqb1amwVFlYqiwiVRaRKotKlUWlytKkytKkytKlytKlyjKWyloeNrEzGp/aN44'''
'''nRszeaXDPy4L92t1Q/KOFB79yLNP1ZWDzE7Tz9YXj48JvEiZjes8gImtyre4eS/5KTn'''
'''TuMVr0K/krWd9mv/Tcr3lKaJnZ5OIbiDH/qj/xvF75H7zZoZDsv2d/0cwvnkC0/lR4J'''
'''KIrP1/PYN/iPaAjz0nvdJy+eANbjz/yC8YFTHuRNpgMHtaauODRpceXJXQ07KPLYXDv'''
'''hNETc1S8Phra3G3kETnTJoR12ucLZvp8Pu7dfuCfuv6XfMNXkDp3ZN8BeXx1H98COwE'''
'''KMxldv/JvnXtl7N/lPv+Ygu5df7CWgvyoH3eIKhCtraSh+1s0vA8joj5vYFCcv87yom'''
'''Fo0zW1GS9rXbCmI3V5zRjaf9jhIL9ivBfPggnw9qRo9HESe0HwZXUBP4RerVUnguUtc'''
'''u/ufxd7yXs79Dlxf52Q9r/Yo/E3/FlC9l8nRF885RT//vfFtEJB/NvYs/sOfk3+JvYc'''
'''f4M9x9/EnuNvb09FqnRPh1/P6ve3tH5/K6lf7PweF0qyPpdkPXTvwl7QRl5wj+yQK7+'''
'''QQZT0/mI+7BFWURwgTNFoPFTQIuP2H2ZFDGHxRz4UimCfhhK5M3vpzwu0RHmMl4dunH'''
'''4wcv07xJUryPURVAfZt7Bj9YPAV9BnyBAhaEdvgO4mkMSOku/A36QIUipN+0KaFtWOA'''
'''bYT/rGkcnhl5dKSolV1xIt19JNKJOXSeSXT9xx+tAb8tAC/XvodteA76SB4ZO1wk7Wr'''
'''bFWj9Lu44LuHvBGAs/YdP0b9hMAEtyhKSIqCUFVzlMPSBCyzFJb2qObQtrU56pVXXVF'''
'''9sUQqVeLaEM4WxZKRWWsC8eTpWpxk+nOuA5qEIeAIbiIn/LqoEOM8eJBR82Q+6U9Vio'''
'''kq6gNX7PmpDhIdumGiws5k4PoxkYHquedLOjcz/3pJ60ZJLsFMc7laAynSXcFGMVHN5'''
'''XfDx8598lyzcs8/Od4wec5yz4UUEC/UpeeDgufvJlFclHy2r+MD4q1zn2y6oqJEgoeI'''
'''lIV7iR/dyL1xE6MupMGqsrDhP/S4Vm6qecUqMKF8nS5PL5KuURWs5d7ktZQLTcqV31M'''
'''F4VJbHA1dL1ENcy1qOuzETp7/0YLuWsUHqtHprlfMok533emur+NOd70qVae77nTXne'''
'''66010/IUenu34ZmuLtQ7StKuKtBdbprjvddae7rqcO7HTXmUHQ6a473XWnu+501/nXn'''
'''e76Wequs97dI3scAZUJ7oAFjN1IATGcPLx+Bb+vX905gThe8or7cv89PS7y2neA1nzp'''
'''oYGz6M4tvMohzcRbPKrX2ME+gUW0wVLa5TN9JYlbPGh2zsccrN9ezUBDkvk2e8js2PF'''
'''saGXZ8A/HJ0+rxLM9ZfaI81vptDw+2fRJeEthWLNI9SF0EPiMUMYaifUMMAyN6NV3eH'''
'''MUJg+D1+T5LI7GpIyQglNjC1g0y9AZScJOPypUz+HFxaePP304O7xanl1rPxD/fDt79'''
'''VGs2sKisYNX/DIRk1KqFZ27sqhCDGN+k43V4vEr9YCwA6Lvq7q0CRunBPKoyoR9BRsf'''
'''dAStzHVEXBM9tKPI8fldQ9kvFRqy+3lD9nhmsG3AkC06Z87PFik1757s67wh29QzXHJ'''
'''uyr74KfM0RxKP/vPzf7KG5CJTtrjHy+ZDNMo0VpwzYmfaJM4Ysc/Of1GNLKSMEfvs/E'''
'''jVaeZdzoi9lDNrxL686IzYz9GIra80fmaN2GWp5obU1amwVFlYqiwiVRaRKotKlUWly'''
'''tKkytKkytKlytKlyjKWyloeNjtnxNaEeNvThUpiT0tM2oae+5X8RVnyMHmXLOTT7CR9'''
'''lzykePar0IidakZSC3Vqk9Zyf+k5ezVdTL7C0F1pzk7ts+jU/eJ4lTbmEpvvBl59sl2'''
'''/SBdXYELP5nh16SfDY1H/WpAvY8leV95l08wa7d3c2MbWY2xbo4F5fdZEXpS2RsPkms'''
'''riRelrhLWm1lqfEffmi78mRJG/NsPymvwWOKZ1OQg0gWl5wcgZu1e8nVuylxIsm4KnU'''
'''sD2BygeAvWEHzFsgyLkCbmAbhwOHhF71OOmWR/Zy9YTSBTfO/AOUomSqD2SsajAHgl9'''
'''hk0+mm33E7NW+hqreQux0E2IVMI+jEbuQLB7+NTRDNIYWtodu7HYTCHY6jD1/9Wy2+j'''
'''WkulafHP+9axRXNhdkevzzW3EQcJDIl6R6as6Fm2qFxrrykxhaWNgySYnaZMvVEfP1G'''
'''bW4KJbMp2z2OwLKLUylOk4wuNRte3O0JXURX1hYBSiFK+Xm7kOuMc1YXasrm45Ya2ta'''
'''Dm9FNzi1FwJMtOCZNqCS4MVcnNjQgjE5Nb2PGSP+F5+PodtFAOOofjs/02gowA4oLF9'''
'''PsqHPeT83nfGsfgWvJnmHgdR5N54TtIRObALdX1KBTO9QFnOC0ItGhmJy0mdxYIuLhY'''
'''r5h5vPYlu0PScI810XSua5JUVJmaRU07xepEHKho4AgpabdfnK3LykZLlrmYTVpr+CU'''
'''s/ujjR1YKJLr0aRQkJf3xTmOkUekwjkNqNQHE6nueLRnU/r62O8+/lZK9MJVOOkdqa5'''
'''xpCWRcHrgTMVCrj4jBTV+5fQvde2BOvjseDSbXci0XtrJn3PCjQz5a7PBRorEW6mcuD'''
'''YeTzz1we8jqVmcsDzQOauzxgsvRikOrcV3g94Lw7R9btYfVOednvoYj71/R7sCxTyYO'''
'''Zuj0QS1lw/shrtFe6PWR11/Gi2wNv9HQoCt0P/6NxewcR9g61s3dkFPOdvSN529k7cq'''
'''86e0dhqs7e0dk72rd3JOYLI7U2tGTv0DZl7+hzx6mQu3bJHZLrTB47YPJYkxq/M3l0J'''
'''o/O5LEFmJYXjPWbPGaCIFVA9tAIlqy5rUNWD1yupV6L8UKrabwwls/ddcaLznjRGS86'''
'''40VnvOiMF53xojNedMaL3TJePP3M5mg+GHsjf/PnNjOTQzTG8umfpTuYq3K0eVWgA1j'''
'''G3ChT65pAmVybPb0pGjkKYtlLAtOh/OhadMc3Z8c3M+ObG/Y2fIYTiJ+qY1J5vxmIGW'''
'''xpukabONQnhJyKNU0CBSUWNkgLF+3NTtwVVS+LKH/0btvPcT73Dt+Fw5zzM8JFpzmXj'''
'''ga3eJoTH1B6QPWaAYmxVEDiC+A+9sDxxkPX7qGLw2zxhS4NTs6lAbLBi2me9Ts18E4p'''
'''iUbMeyXrhZB3asi5Q0x9Gi7ef8g8rR2OODnnj9LICUm6jDvDICh0Zzg++UVVs14EGXe'''
'''G45MjVV3pzrCUM+PO8OH0XefO8BzdGboYxDKpuhjEW+bOQBMPhERzsaenDgypr0L6Lv'''
'''VVSP5KwxTrNPsr2dZPHR+mbhBYLgbxzGtBn3knFIclLnB8kIpBvBx8OI2lK2Wkf5wDQ'''
'''j5or9SHVmZZrcCRc2d4yvOFLyybCtfpmvClvxbT6LqiGK/NVrvu+LBrciRYV/XW107r'''
'''ArQuZ4R1leOv0WFjXc4taytp1kjLi8Pa3RCOVkXfpaaCjmxvJEwakvF2DUNBp8KcsmB'''
'''0zhv3F+z+wuoMuyYfXtcNTKrnjbRTy0tRkNMCs6mMFXjZhJV+xJSz4wlLcf7N1OALWT'''
'''wniqD+9txmPLcTzwzCT6hI1ua+0lBl5G2Efd7r035xvjpJv5TVUy+vp5xZfFV1q3xNW'''
'''E0rocZq+bIwtTB5yZAsDtC6coQtfmDR9CzVt5ZS0yRf2w7P3SHquAg9qho1v1GJGT/d'''
'''uCr0C5lvzoyrn12PT5c48HtoKS5muV01e59SkbKnNAwu05UFq+hyFNxFtZdIN7Oq6gt'''
'''W2alVleTLnVtVV0XBXX4+SC2auRcSYXDf2m5Y9DJjSS3iMrXD32JDoXlRNg9/m9UwLW'''
'''nFVppSs0qweNGUyjdg6cATm0j+R6cpnebpNKWdprTTlHaa0k5T2mlKO01ppyntNKWdp'''
'''rTTlHaa0k5T2mlKO01ppyntNKWdplTLPe80pS9KU/oYB1amEGLpjNIiB1ZTVTRMLfi/'''
'''Vu8hgZ27rtbRx9IDkPky+tg3nt3/Et1Mwrse+rGuNjaErVOj2ljoi8z8LbjPT8vqFvP'''
'''aWEL07MupPvbT+feZp7X1sfP2yjRXnNPJ3vYLdbI/Hv6iZi/Iy+pkfzw8wiSLNqeTXc'''
'''qZ0cm+OXrT6WQ7nWxRqk4n2+lk29fJJrpVI718REs0raluNdHJ5vWuLMlQqJPV02Bcy'''
'''V86k9XJklzoLVKgmp1pYo0FhaycVnamWVxUzp4le2v0NgyixZVnRZZMyKmrbACGIhVn'''
'''kbY2k7/o9Vp1timM1ThLbmMpqUJJPK4E3LJ6ZY2q23WpXNelAl4foH4cR6G9tjhJ69M'''
'''ED93fouF9GG1bCKetU+Ly/ltn7DRjC0O6ranNZ7NmebFYuyp3pg1ZpdGlqzW6F3bcHz'''
'''6gWy4hphGZTBH16COv4b0bOb3VKjhLSu1rCK0vrtb6VlWEx6JZ0MkyXrYlFwplZXShN'''
'''HAMkYlExcgSBnMemqZmDJg6IEtuikngm2O5YFUrFdusILjOE0Mlraze9AacLOYSTex6'''
'''I+toVj1NsJRit0QT/LQQV9INsaQmnfKf5NYh3nmPVSQbZVpeeaOKbtarQHWMJFNZZmh'''
'''FH2ZPVwgL5UaRQvhHN7xzfddGV05/WK4AJpjm1ZBLGqa8LrJcxxQv64ALVG0i3UwHvJ'''
'''h/qgOmK3TAZj79XAesLj0fFDzfGhXw+WF+oMzVvzjfI5Lq3x8Pl+qfiTmEN+Ao2ylmp'''
'''y87xax42ylmF0RDp5gtSNUpZjvFbKeY7RSznWK2U8x2itlOMdspZjvFbKeY7RSznWK2'''
'''U8x2itlOMfv8FbNPDwbvi3PxvYGz+TjwyRF90QTLoYMXY3SXJW4x+vuSF708fJl8m40'''
'''Af+x4NrSyI1mn4+XI1LUq0QWAnwWAT0Z3Dx2fbDj2O7UUhjWL6NZye6BcGG4e0oNQxs'''
'''wmgoHzEyYa0TGTQWFamKpNhn2fx+YuqlgOSy5I97bHfX/Gnd1+yPc/e8KKxfXJQP3+/'''
'''PP/A6N/3us='''
))) | mit |
sankhesh/VTK | ThirdParty/AutobahnPython/autobahn/twisted/resource.py | 16 | 6222 | ###############################################################################
##
## Copyright (C) 2012-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("WebSocketResource",
"HTTPChannelHixie76Aware",
"WSGIRootResource",)
from six.moves.urllib import parse
from zope.interface import implementer
from twisted.protocols.policies import ProtocolWrapper
try:
from twisted.web.error import NoResource
except:
## starting from Twisted 12.2, NoResource has moved
from twisted.web.resource import NoResource
from twisted.web.resource import IResource, Resource
## The following imports reactor at module level
## See: https://twistedmatrix.com/trac/ticket/6849
from twisted.web.http import HTTPChannel
## .. and this also, since it imports t.w.http
##
from twisted.web.server import NOT_DONE_YET
class HTTPChannelHixie76Aware(HTTPChannel):
    """
    Workaround for the broken Hixie-76 WebSocket handshake: such requests
    carry 8 bytes of body but omit the Content-Length header, so Twisted Web
    would silently drop the body. This channel injects the missing header as
    soon as a Hixie-76 handshake is detected.

    To use this, set `protocol = HTTPChannelHixie76Aware` on your
    `twisted.web.server.Site <http://twistedmatrix.com/documents/current/api/twisted.web.server.Site.html>`_ instance.

    See:
      * `Autobahn Twisted Web site example <https://github.com/tavendo/AutobahnPython/tree/master/examples/twisted/websocket/echo_site>`_
    """

    def headerReceived(self, line):
        ## "Sec-WebSocket-Key1" marks a Hixie-76 handshake; inject the
        ## missing Content-Length before forwarding the real header line.
        headerName = line.split(':')[0].lower()
        if headerName == "sec-websocket-key1" and not self._transferDecoder:
            HTTPChannel.headerReceived(self, "Content-Length: 8")
        HTTPChannel.headerReceived(self, line)
class WSGIRootResource(Resource):
    """
    Root resource that serves registered URL subpaths from ordinary Twisted
    Web resources while delegating everything else to a wrapped WSGI
    resource.

    This hack is needed since
    `twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_.
    does not provide a `putChild()` method.

    See also:
      * `Autobahn Twisted Web WSGI example <https://github.com/tavendo/AutobahnPython/tree/master/examples/twisted/websocket/echo_wsgi>`_
      * `Original hack <http://blog.vrplumber.com/index.php?/archives/2426-Making-your-Twisted-resources-a-url-sub-tree-of-your-WSGI-resource....html>`_
    """

    def __init__(self, wsgiResource, children):
        """
        Creates a Twisted Web root resource.

        :param wsgiResource: Instance of `twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_.
        :param children: A dictionary with string keys constituting URL subpaths, and Twisted Web resources as values.
        :type children: dict
        """
        Resource.__init__(self)
        self._wsgiResource = wsgiResource
        self.children = children

    def getChild(self, path, request):
        ## Re-attach the consumed path segment so the WSGI application
        ## sees the full, original URL.
        request.prepath.pop()
        request.postpath.insert(0, path)
        return self._wsgiResource
@implementer(IResource)
class WebSocketResource(object):
    """
    A Twisted Web resource for WebSocket. Instantiate it with a factory
    derived from WebSocketServerFactory; on render it takes over the
    underlying transport and hands all further traffic to a WebSocket
    protocol built by that factory.
    """

    ## leaf resource: no children can exist below this path
    isLeaf = True

    def __init__(self, factory):
        """
        Ctor.

        :param factory: An instance of :class:`autobahn.twisted.websocket.WebSocketServerFactory`.
        :type factory: obj
        """
        self._factory = factory

    # noinspection PyUnusedLocal
    def getChildWithDefault(self, name, request):
        """
        This resource cannot have children, hence this will always fail.
        """
        return NoResource("No such child resource.")

    def putChild(self, path, child):
        """
        This resource cannot have children, hence this is always ignored.
        """

    def render(self, request):
        """
        Render the resource. This will takeover the transport underlying
        the request, create a WebSocketServerProtocol and let that do
        any subsequent communication.
        """
        proto = self._factory.buildProtocol(request.transport.getPeer())
        if not proto:
            ## protocol creation failed -> signal "internal server error"
            request.setResponseCode(500)
            return ""

        ## Detach the transport from Twisted Web ...
        transport, request.transport = request.transport, None

        ## ... and wire it to our protocol. Once #3204 is fixed, there may
        ## be a cleaner way of doing this.
        ## http://twistedmatrix.com/trac/ticket/3204
        if isinstance(transport, ProtocolWrapper):
            ## i.e. TLS is a wrapping protocol
            transport.wrappedProtocol = proto
        else:
            transport.protocol = proto
        proto.makeConnection(transport)

        ## Replay the already-parsed HTTP request as raw bytes, so the
        ## WebSocket protocol can run its own (2nd) handshake parsing.
        ## Somewhat silly, but totally non-invasive to our code.
        lines = ["%s %s HTTP/1.1" % (request.method, request.uri)]
        for name, values in request.requestHeaders.getAllRawHeaders():
            lines.append("%s: %s" % (name, ",".join(values)))
        lines.append("")
        raw = "\x0d\x0a".join(lines) + "\x0d\x0a"
        raw += request.content.read()  ## we need this for Hixie-76
        proto.dataReceived(raw)

        return NOT_DONE_YET
| bsd-3-clause |
netvigator/myPyPacks | Time/Convert.py | 2 | 14744 | #!/usr/bin/pythonTest
# -*- coding: utf-8 -*-
#
# time functions Convert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# The GNU General Public License is available from:
# The Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston MA 02110-1301 USA
#
# http://www.gnu.org/licenses/gpl.html
#
# Copyright 2004-2016 Rick Graves
#
class FormatMismatchError(Exception):
    """Raised when an input date string's delimiters do not match the expected format."""
class Finished(Exception):
    """Internal control-flow exception used to bail out of nested parse attempts."""
# strftime/strptime format constants used throughout this module
sFormatISOdateTime = '%Y-%m-%d %H:%M:%S'
sFormatISOdate = '%Y-%m-%d'
sFormatIsoNoSecs = '%Y-%m-%d %H:%M'
sFormatDateAm = '%m/%d/%Y'
sFormatDateAmShort = '%m/%d/%y'
sFormatDateEu = '%d/%m/%Y'
# file-name safe variant: underscore between date and time, dots in the time
sFormatISONoSpace = '%Y-%m-%d_%H.%M.%S'
# e.g. '17 1 2014' after the month name has been numerified
sFormatNatureUSA = '%d %m %Y'
sFormatUSAdateTime = '%m/%d/%Y %I:%M:%S %p'
from String.Find import getFinder
from Time.Output import getNowIsoDateTimeStr
# compiled splitter/finder for the delimiters used in Apache logs ('/', ':', ' ')
_oApacheDelimiters = getFinder( '[/: ]' )
# three-letter (title-cased) month name -> month number
_dMonthNames = dict(
    Jan = 1,
    Feb = 2,
    Mar = 3,
    Apr = 4,
    May = 5,
    Jun = 6,
    Jul = 7,
    Aug = 8,
    Sep = 9,
    Oct = 10,
    Nov = 11,
    Dec = 12 )
def getMonthNumOffName( sMonth ):
    """Return the zero-padded month number ('01'..'12') for a month name.

    Only the first three letters matter ('jan', 'January', ...); an
    unrecognized name yields ''.
    """
    iMonth = _dMonthNames.get( sMonth[:3].title() )
    return '%02d' % iMonth if iMonth else ''
def getIsoDateTimeFromObj( oDateTime ):
    """Format a datetime object as an ISO 'YYYY-MM-DD HH:MM:SS' string."""
    return oDateTime.strftime( sFormatISOdateTime )
def getIsoDateTimeStrFromSecs(
        fSecsSinceEpoch = None, bWantLocal = True, sFormat = sFormatISOdateTime ):
    #
    '''
    Format seconds-since-the-epoch as a date/time string (default ISO
    'YYYY-MM-DD HH:MM:SS').

    fSecsSinceEpoch defaults to "now"; bWantLocal selects local time
    (True, the default) versus UTC.
    '''
    from time import strftime, gmtime, time, localtime
    #
    # plain conditional expression instead of the Utils.ImIf helper --
    # same result, one less project dependency inside this function
    getTime = localtime if bWantLocal else gmtime
    #
    if fSecsSinceEpoch is None: fSecsSinceEpoch = time()
    #
    return strftime( sFormat, getTime( fSecsSinceEpoch ) )
def getIsoDateTimeStrFromSecsNoSpace(
        fSecsSinceEpoch = None, bWantLocal = True, sFormat = sFormatISONoSpace ):
    """Like getIsoDateTimeStrFromSecs, but file-name safe ('YYYY-MM-DD_HH.MM.SS')."""
    return getIsoDateTimeStrFromSecs(
        fSecsSinceEpoch, bWantLocal = bWantLocal, sFormat = sFormat )
def getNormalDateFromSecs( fSecsSinceEpoch = None, bWantLocal = True ):
    """Return a human readable date such as '17 January 2014'."""
    return getIsoDateTimeStrFromSecs(
        fSecsSinceEpoch, bWantLocal, sFormat = '%d %B %Y' )
def getTimeFromISODateTime( sDateTime = None ):
    #
    '''
    Return the HH:MM:SS portion of an ISO 'YYYY-MM-DD HH:MM:SS' string.

    Bug fix: the default argument used to be getNowIsoDateTimeStr()
    evaluated once in the def line, so no-argument calls always returned
    the module *import* time.  The default is now computed per call.
    (Original comment noted this is not used anywhere.)
    '''
    if sDateTime is None:
        sDateTime = getNowIsoDateTimeStr()
    #
    return sDateTime[ 11 : ]
def getHrMinFromISODateTime( sDateTime = None ):
    #
    '''
    Return the HH:MM portion of an ISO 'YYYY-MM-DD HH:MM:SS' string.

    Bug fix: as with getTimeFromISODateTime, the "now" default was
    frozen at import time; it is now computed per call.
    (Original comment noted this is not used anywhere.)
    '''
    if sDateTime is None:
        sDateTime = getNowIsoDateTimeStr()
    #
    return sDateTime[ 11 : 16 ]
def getDateTimeTupleFromString( sDateTime, sFormat = sFormatISOdateTime ):
    """Parse a date/time string into a time.struct_time.

    Underscores are treated as spaces, and a trailing local time-zone
    abbreviation (e.g. ' PST'), if present, is stripped before parsing.
    """
    from time import strptime, tzname
    #
    sCleaned = sDateTime.replace( '_', ' ' )
    if sCleaned[ -3 : ] in tzname:
        sCleaned = sCleaned[ : -4 ]
    #
    return strptime( sCleaned, sFormat )
def getSecsSinceEpochFromString(
        sDateTime,
        sFormat = sFormatISOdateTime,
        bAdjust2UTC = False ):
    #
    '''
    Convert a date/time string to integer seconds since the epoch.

    The string is interpreted as local time; pass bAdjust2UTC = True to
    additionally add the local UTC offset (time.timezone).
    '''
    from time import mktime, timezone
    #
    tDateTime = getDateTimeTupleFromString( sDateTime, sFormat )
    #
    # conditional expression instead of the Utils.ImIf helper --
    # same result, no project dependency
    iAdjust4TZ = timezone if bAdjust2UTC else 0
    #
    return int( mktime( tDateTime ) ) + iAdjust4TZ
def getDateTimeObjFromString(
        sDateTime,
        sFormat = sFormatISOdateTime,
        bAdjust2UTC = False ):
    #
    '''
    Convert a date/time string to a naive datetime.datetime object.

    Pass bAdjust2UTC = True to add the local UTC offset (time.timezone)
    to the parsed local time.
    '''
    from time import timezone
    from datetime import datetime, timedelta
    #
    tDateTime = getDateTimeTupleFromString( sDateTime, sFormat )
    #
    # year .. second from the parsed tuple; microsecond = 0, tzinfo = None
    oDateTimeObj = datetime( *( tuple( tDateTime[ : 6 ] ) + ( 0, None ) ) )
    #
    # conditional expression instead of the Utils.ImIf helper
    iAdjust4TZ = timezone if bAdjust2UTC else 0
    #
    return oDateTimeObj + timedelta( seconds = iAdjust4TZ )
def getSecsFromDuration( sHrsMinSec ):
    #
    '''
    Convert a duration string 'HH:MM:SS' (or 'HH:MM' or just 'HH')
    into integer seconds.  The first field is always hours.
    '''
    #
    # plain list comprehension instead of the Iter.AllVers.lMap shim --
    # same result, no project dependency
    lParts = [ int( s ) for s in sHrsMinSec.split( ":" ) ]
    #
    iSecs = lParts[0] * 3600
    #
    if len( lParts ) > 1: iSecs += lParts[1] * 60
    #
    if len( lParts ) > 2: iSecs += lParts[2]
    #
    return iSecs
def getDurationFromSecs( iSecs ):
    """Render seconds (int or float, rounded to the nearest second) as 'HH:MM:SS'.

    See also getSayDurationAsDaysHrsMinsSecs in Time.Output.
    """
    iTotalMins, iSecsPart = divmod( round( iSecs ), 60 )
    iHrs, iMins = divmod( iTotalMins, 60 )
    return '%02d:%02d:%02d' % ( iHrs, iMins, iSecsPart )
def getIsoOffApacheDateTime( sDateTime ):
    """Convert an Apache log timestamp, e.g. '23/Sep/2012:06:40:18 +0800',
    to ISO 'YYYY-MM-DD HH:MM:SS'; the numeric zone offset is dropped."""
    sD, sMonthName, sY, sH, sMins, sS, _sOffset = _oApacheDelimiters.split( sDateTime )
    return '%s-%s-%s %s:%s:%s' % (
        sY, getMonthNumOffName( sMonthName ), sD, sH, sMins, sS )
def getIsoOffApacheDate( sDate ):
    """Convert '23-Sep-2012' (Apache directory-listing style) to '2012-09-23'."""
    sD, sMonthName, sY = sDate.split( '-' )
    return '%s-%s-%s' % ( sY, getMonthNumOffName( sMonthName ), sD )
def getIsoDateFromOther( sDate,
        sFormatIn = sFormatNatureUSA, sFormatWant = sFormatISOdateTime ):
    #
    '''
    Handle conversions from formats such as '17 Jan 2014'.

    Returns the date portion (first 10 chars) of the result rendered with
    sFormatWant.  Raises FormatMismatchError when sDate's delimiters do
    not match sFormatIn's, and NotImplementedError for any sFormatIn
    other than sFormatNatureUSA.
    '''
    from time import strptime, strftime
    #
    from Iter.AllVers import getEnumerator
    #
    sNewDate = sDate
    #
    # compare the *set* of delimiter characters in the data vs the format
    setDelimitersGot = frozenset( _oApacheDelimiters.findall( sDate ) )
    setDelimitFormat = frozenset( _oApacheDelimiters.findall( sFormatIn ) )
    #
    if setDelimitersGot != setDelimitFormat:
        #
        raise FormatMismatchError
        #
    elif sFormatIn == sFormatNatureUSA:
        #
        # strip leading / trailing blanks
        #
        lParts = [ s for s in _oApacheDelimiters.split( sDate ) if s ]
        #
        # replace the (first) month-name part with its number so that
        # strptime can use the numeric '%d %m %Y' format
        for i, sPart in getEnumerator( lParts ):
            #
            sPartTitlized = sPart[:3].title()
            #
            if sPartTitlized in _dMonthNames:
                #
                lParts[ i ] = str( _dMonthNames[ sPartTitlized ] )
                #
                break
            #
            #
        #
        sNewDate = ' '.join( lParts )
    else:
        #
        raise NotImplementedError
        #
    #
    tDate = strptime( sNewDate, sFormatIn )
    #
    return strftime( sFormatWant, tDate )[ : 10 ]
def getOtherTimeDatefromISO( sTimeStamp, sFormat = sFormatUSAdateTime ):
    #
    '''
    Re-format an ISO 'YYYY-MM-DD HH:MM:SS' timestamp using sFormat
    (default: USA style '%m/%d/%Y %I:%M:%S %p').
    '''
    # getDateTimeObjFromString is defined above in this module; the old
    # 'from Time.Convert import ...' self-import was redundant
    oDateTime = getDateTimeObjFromString( sTimeStamp )
    #
    return oDateTime.strftime( sFormat )
def getDateTimeUSAfromISO( sTimeStamp, sFormat = sFormatUSAdateTime ):
    """Convenience alias: ISO timestamp -> USA '%m/%d/%Y %I:%M:%S %p' string."""
    return getOtherTimeDatefromISO( sTimeStamp, sFormat = sFormat )
def getOtherDatefromISO( sDate, sFormat = sFormatISOdate ):
    #
    '''
    Parse an ISO 'YYYY-MM-DD' date string and render it with sFormat.
    '''
    # defined above in this module -- the old self-import was redundant
    oDateTime = getDateTimeObjFromString( sDate, sFormatISOdate )
    #
    return oDateTime.strftime( sFormat )
def getDateUSAfromISO( sDate, sFormat = sFormatDateAm ):
    """Convenience alias: ISO 'YYYY-MM-DD' -> USA 'MM/DD/YYYY'."""
    return getOtherDatefromISO( sDate, sFormat = sFormat )
def getIsoDateTimeFromOther( sDateTime, sFormat = sFormatUSAdateTime ):
    #
    '''
    Parse sDateTime with sFormat and return it as ISO
    'YYYY-MM-DD HH:MM:SS'.  Raises ValueError if it does not parse.
    '''
    # defined above in this module -- the old self-import was redundant
    oDateTime = getDateTimeObjFromString( sDateTime, sFormat )
    #
    return getIsoDateTimeFromObj( oDateTime )
def getIsoDateTimeFromOtherStr( sDate, sFormat = sFormatUSAdateTime ):
    #
    '''
    Identical to getIsoDateTimeFromOther; the body was a line-for-line
    duplicate, so it is kept only as a backward-compatible alias and now
    delegates.
    '''
    return getIsoDateTimeFromOther( sDate, sFormat )
def getIsoDateFromOtherStr( sDate, sFormat = sFormatDateAmShort ):
    """Parse sDate with sFormat and return just the ISO date 'YYYY-MM-DD'."""
    return getIsoDateTimeFromOtherStr( sDate, sFormat )[ : 10 ]
def getIsoDateFromUSAdate( sDate ):
    #
    '''
    Convert a USA style date ('MM/DD/YYYY', falling back to 'MM/DD/YY')
    to ISO 'YYYY-MM-DD'.  An empty/falsy input returns ''.

    Rewritten without the raise-Finished control-flow idiom; behavior is
    unchanged: a ValueError from the two-digit-year fallback still
    propagates to the caller.
    '''
    if not sDate:
        return ''
    try:
        return getIsoDateFromOtherStr( sDate, sFormatDateAm )
    except ValueError:
        return getIsoDateFromOtherStr( sDate, sFormatDateAmShort )
def getIsoDateTimeFromUSAdateTime( sDateTime ):
    #
    '''
    Convert a USA style date/time to ISO 'YYYY-MM-DD HH:MM:SS'.

    Tries, in order: '%m/%d/%Y %I:%M:%S %p', then '%m/%d/%y %H:%M'
    (two-digit year, no seconds, no AM/PM), then '%m/%d/%Y %H:%M'.
    An empty/falsy input returns ''; if no format matches, the final
    ValueError propagates (same as the old raise-Finished version).
    '''
    if not sDateTime:
        return ''
    for sTryFormat in ( sFormatUSAdateTime, '%m/%d/%y %H:%M' ):
        try:
            return getIsoDateTimeFromOther( sDateTime, sTryFormat )
        except ValueError:
            pass
    # last attempt: 4 digit year, no seconds, no AM/PM -- may raise
    return getIsoDateTimeFromOther( sDateTime, '%m/%d/%Y %H:%M' )
if __name__ == "__main__":
#
from time import time
from Test import isISOdatetime
from datetime import datetime
#
from six import print_ as print3
#
from Utils.Result import sayTestResult
#
lProblems = []
#
if getMonthNumOffName( 'jan' ) != '01':
#
lProblems.append( 'getMonthNumOffName() short Jan' )
#
#
if getMonthNumOffName( 'december' ) != '12':
#
lProblems.append( 'getMonthNumOffName() long Dec' )
#
#
iNow = int( time() )
sNow = getIsoDateTimeStrFromSecs( iNow )
oNow = datetime.fromtimestamp( iNow )
sNoSpace = getIsoDateTimeStrFromSecsNoSpace( iNow )
#
if getIsoDateTimeFromObj( oNow ) != getIsoDateTimeStrFromSecs( iNow ):
#
lProblems.append( 'getIsoDateTimeFromObj()' )
#
if not isISOdatetime( getIsoDateTimeStrFromSecs( iNow ) ):
#
lProblems.append( 'getIsoDateTimeStrFromSecs()' )
#
if getTimeFromISODateTime( sNow ) != sNow[ -8 : ]:
#
lProblems.append( 'getTimeFromISODateTime()' )
#
if getHrMinFromISODateTime( sNow ) != sNow[ -8 : -3 ]:
#
lProblems.append( 'getHrMinFromISODateTime()' )
#
if getSecsSinceEpochFromString( sNow ) < 1176833194 or \
getSecsSinceEpochFromString( sNow ) > 2000000000:
#
lProblems.append( 'getSecsSinceEpochFromString()' )
#
#
lParts = tuple( sNow.split() )
#
sReconstitute = '%s_%s' % lParts
#
if sNoSpace != sReconstitute.replace( ':', '.' ):
#
print3( sNoSpace, lParts )
lProblems.append( 'getIsoDateTimeStrFromSecsNoSpace()' )
#
#
tNow = getDateTimeTupleFromString( sNow )
#
if len( tNow ) != 9:
#
lProblems.append( 'getDateTimeTupleFromString()' )
#
if repr( getDateTimeObjFromString( sNow ) ) != 'datetime.datetime' + repr( tNow[ : 6 ] ):
#
lProblems.append( 'getDateTimeObjFromString()' )
#
#
if getSecsFromDuration( '01:01:01' ) != 3661:
#
lProblems.append( 'getSecsFromDuration()' )
#
#
if getDurationFromSecs( getSecsFromDuration( '01:01:01' ) ) != '01:01:01':
#
lProblems.append( 'getDurationFromSecs()' )
#
#
sApacheDateTime = '23/Sep/2012:06:40:18 +0800'
#
if getIsoOffApacheDateTime( sApacheDateTime ) != '2012-09-23 06:40:18':
#
lProblems.append( 'getIsoOffApacheDateTime()' )
#
#
sApacheDate = '23-Sep-2012'
#
if getIsoOffApacheDate( sApacheDate ) != '2012-09-23':
#
lProblems.append( 'getIsoOffApacheDate()' )
#
#
if getIsoDateFromOther( '17 Jan 2014' ) != '2014-01-17':
#
lProblems.append( 'getIsoDateFromOther() "17 Jan 2014"' )
#
#
if getIsoDateFromOther( ' 17 Jan 2014 ' ) != '2014-01-17':
#
lProblems.append( 'getIsoDateFromOther() " 17 Jan 2014 "' )
#
#
if getIsoDateFromOther( ' 17 January 2014 ' ) != '2014-01-17':
#
lProblems.append( 'getIsoDateFromOther() " 17 January 2014 "' )
#
#
if getDateTimeUSAfromISO( '2009-04-24 08:02:41' ) != '04/24/2009 08:02:41 AM':
#
lProblems.append( 'getDateTimeUSAfromISO() "2009-04-24 08:02:41"' )
#
#
if getDateTimeUSAfromISO( '2009-01-27 19:34:33' ) != '01/27/2009 07:34:33 PM':
#
lProblems.append( 'getDateTimeUSAfromISO() "2009-01-27 19:34:33"' )
#
#
if getDateUSAfromISO( '2009-01-27' ) != '01/27/2009':
#
print3( getDateUSAfromISO( '2009-01-27' ) )
lProblems.append( 'getDateUSAfromISO() "2009-01-27"' )
#
#
sExpectDateTime = '2008-06-30 10:29:00'
#
if getIsoDateTimeFromOther(
'06/30/2008 10:29:00 AM' ) != sExpectDateTime:
#
lProblems.append( 'getIsoDateTimeFromOther() "06/30/2008 10:29:00 AM"' )
#
#
if getIsoDateTimeFromUSAdateTime(
'06/30/2008 10:29:00 AM' ) != sExpectDateTime:
#
lProblems.append( 'getIsoDateTimeFromUSAdateTime() "06/30/2008 10:29:00 AM"' )
#
#
sNBdateTime = '6/11/12 0:00'
#
if getIsoDateTimeFromUSAdateTime( sNBdateTime ) != '2012-06-11 00:00:00':
#
lProblems.append( 'getIsoDateTimeFromUSAdateTime() "%s"' % sTryDate )
#
#
#
sTryDate = sNBdateTime[ : -5 ]
#
if getIsoDateFromUSAdate( sTryDate ) != '2012-06-11':
#
lProblems.append( 'getIsoDateFromUSAdate() "%s"' % sTryDate )
#
#
#
# sNBdateTime = '06/11/12 0:00'
#
# print3( 'getIsoDateFromUSAdate( "%s" ):' % sTryDate, getIsoDateFromUSAdate( sTryDate ) )
#
#
sayTestResult( lProblems )
| gpl-2.0 |
dreibh/planetlab-lxc-plcapi | PLC/Methods/UpdateIlink.py | 1 | 2016 | #
# Thierry Parmentelat - INRIA
#
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth
from PLC.Ilinks import Ilink, Ilinks
from PLC.Interfaces import Interface, Interfaces
from PLC.TagTypes import TagType, TagTypes
from PLC.Sites import Sites
from PLC.AuthorizeHelpers import AuthorizeHelpers
class UpdateIlink(Method):
    """
    Updates the value of an existing ilink
    Access rights depend on the tag type.
    Returns 1 if successful, faults otherwise.
    """

    roles = ['admin', 'pi', 'tech', 'user']

    accepts = [
        Auth(),
        Ilink.fields['ilink_id'],
        Ilink.fields['value']
        ]

    returns = Parameter(int, '1 if successful')
    object_type = 'Interface'

    def call(self, auth, ilink_id, value):
        # Look up the ilink row; fault if the id is unknown.
        ilinks = Ilinks(self.api, [ilink_id])
        if not ilinks:
            raise PLCInvalidArgument("No such ilink %r"%ilink_id)
        ilink = ilinks[0]
        src_if = Interfaces(self.api, ilink['src_interface_id'])[0]
        dst_if = Interfaces(self.api, ilink['dst_interface_id'])[0]
        tag_type_id = ilink['tag_type_id']
        tag_type = TagTypes(self.api, [tag_type_id])[0]
        # check authorizations: admins always pass; otherwise the caller
        # must be allowed to touch this tag type AND own one of the two
        # interface endpoints.
        if 'admin' in self.caller['roles']:
            pass
        elif not AuthorizeHelpers.caller_may_access_tag_type (self.api, self.caller, tag_type):
            raise PLCPermissionDenied("%s, forbidden tag %s"%(self.name,tag_type['tagname']))
        elif AuthorizeHelpers.interface_belongs_to_person (self.api, src_if, self.caller):
            pass
        # bug fix: the original compared the undefined names
        # src_if_id / dst_if_id (NameError whenever this branch was
        # reached); compare the interface ids carried by the ilink row
        elif ilink['src_interface_id'] != ilink['dst_interface_id'] and \
                AuthorizeHelpers.interface_belongs_to_person (self.api, dst_if, self.caller):
            pass
        else:
            raise PLCPermissionDenied("%s: you must own either the src or dst interface"%self.name)
        ilink['value'] = value
        ilink.sync()
        self.object_ids = [ilink['src_interface_id'],ilink['dst_interface_id']]
        return 1
| bsd-3-clause |
oaron/NVDARemote | addon/globalPlugins/remoteClient/session.py | 2 | 5808 | import threading
import time
import speech
import ui
import tones
class RemoteSession(object):
	"""Base class tying the local NVDA machine to a remote transport."""

	def __init__(self, local_machine, transport):
		self.local_machine = local_machine
		self.transport = transport
class SlaveSession(RemoteSession):
	"""Session that runs on the slave and manages state."""

	def __init__(self, *args, **kwargs):
		super(SlaveSession, self).__init__(*args, **kwargs)
		# Registration order kept as before.
		self.transport.callback_manager.register_callback('msg_client_joined', self.handle_client_connected)
		self.transport.callback_manager.register_callback('msg_client_left', self.handle_client_disconnected)
		self.transport.callback_manager.register_callback('msg_key', self.local_machine.send_key)
		# user_id -> True for every currently connected master
		self.masters = {}
		self.last_client_index = None
		self.transport.callback_manager.register_callback('msg_index', self.update_index)
		self.transport.callback_manager.register_callback('transport_closing', self.handle_transport_closing)
		self.patch_callbacks_added = False
		self.transport.callback_manager.register_callback('msg_channel_joined', self.handle_channel_joined)
		self.transport.callback_manager.register_callback('msg_set_clipboard_text', self.local_machine.set_clipboard_text)
		self.transport.callback_manager.register_callback('msg_send_SAS', self.local_machine.send_SAS)

	def handle_client_connected(self, user_id=None):
		self.local_machine.patcher.patch()
		if not self.patch_callbacks_added:
			self.add_patch_callbacks()
			self.patch_callbacks_added = True
		# orig_beep: local-only beep (the patched tones.beep is forwarded)
		self.local_machine.patcher.orig_beep(1000, 300)
		self.masters[user_id] = True

	def handle_channel_joined(self, channel=None, user_ids=None):
		for user in user_ids:
			self.handle_client_connected(user_id=user)

	def handle_transport_closing(self):
		self.local_machine.patcher.unpatch()
		if self.patch_callbacks_added:
			self.remove_patch_callbacks()
			self.patch_callbacks_added = False

	def handle_transport_disconnected(self):
		self.local_machine.patcher.orig_beep(1000, 300)
		self.local_machine.patcher.unpatch()

	def handle_client_disconnected(self, user_id=None):
		self.local_machine.patcher.orig_beep(108, 300)
		del self.masters[user_id]
		if not self.masters:
			self.local_machine.patcher.unpatch()

	def _patcher_callbacks(self):
		# Single source of truth for the (event, callback) pairs;
		# add_patch_callbacks/remove_patch_callbacks used to duplicate
		# this tuple, which risked them drifting apart.
		return (('speak', self.speak), ('beep', self.beep), ('wave', self.playWaveFile), ('cancel_speech', self.cancel_speech))

	def add_patch_callbacks(self):
		for event, callback in self._patcher_callbacks():
			self.local_machine.patcher.register_callback(event, callback)
		self.local_machine.patcher.set_last_index_callback(self._get_lastIndex)

	def remove_patch_callbacks(self):
		for event, callback in self._patcher_callbacks():
			self.local_machine.patcher.unregister_callback(event, callback)

	def speak(self, speechSequence):
		self.transport.send(type="speak", sequence=speechSequence)

	def cancel_speech(self):
		self.transport.send(type="cancel")

	def _get_lastIndex(self):
		return self.last_client_index

	def beep(self, hz, length, left=50, right=50):
		self.transport.send(type='tone', hz=hz, length=length, left=left, right=right)

	def playWaveFile(self, fileName, *args, **kwargs):
		# Bug fix: the second parameter was named 'async' (default True),
		# which is a reserved word from Python 3.7 and made this file a
		# SyntaxError there.  Accept it positionally or under either
		# keyword spelling, and keep sending it under the original wire
		# protocol key 'async' so remote peers are unaffected.
		if args:
			asynchronous = args[0]
		else:
			asynchronous = kwargs.get('async', kwargs.get('asynchronous', True))
		self.transport.send(type='wave', fileName=fileName, **{'async': asynchronous})

	def update_index(self, index=None):
		self.last_client_index = index
class MasterSession(RemoteSession):
	"""Session running on the controlling (master) copy of NVDA.

	Routes incoming transport messages to local-machine actions and, while
	connected, streams the local synthesizer's lastIndex to the slave.
	"""

	def __init__(self, *args, **kwargs):
		super(MasterSession, self).__init__(*args, **kwargs)
		self.slaves = []
		self.index_thread = None
		callbacks = self.transport.callback_manager
		callbacks.register_callback('msg_speak', self.local_machine.speak)
		callbacks.register_callback('msg_cancel', self.local_machine.cancel_speech)
		callbacks.register_callback('msg_tone', self.local_machine.beep)
		callbacks.register_callback('msg_wave', self.local_machine.play_wave)
		callbacks.register_callback('msg_nvda_not_connected', self.handle_nvda_not_connected)
		callbacks.register_callback('msg_client_joined', self.handle_client_connected)
		callbacks.register_callback('msg_client_left', self.handle_client_disconnected)
		callbacks.register_callback('msg_channel_joined', self.handle_channel_joined)
		callbacks.register_callback('msg_set_clipboard_text', self.local_machine.set_clipboard_text)
		callbacks.register_callback('transport_connected', self.handle_connected)
		callbacks.register_callback('transport_disconnected', self.handle_disconnected)

	def handle_nvda_not_connected(self):
		speech.cancelSpeech()
		ui.message(_("Remote NVDA not connected."))

	def handle_connected(self):
		# Start the index-forwarding thread exactly once per connection.
		if self.index_thread is not None:
			return
		self.index_thread = threading.Thread(target=self.send_indexes)
		self.index_thread.daemon = True
		self.index_thread.start()

	def handle_disconnected(self):
		self.index_thread = None

	def handle_channel_joined(self, channel=None, user_ids=None):
		for user in user_ids:
			self.handle_client_connected(user_id=user)

	def handle_client_connected(self, user_id=None):
		tones.beep(1000, 300)

	def handle_client_disconnected(self, user_id=None):
		tones.beep(108, 300)

	def send_indexes(self):
		"""Poll the local synth and forward lastIndex changes while connected."""
		POLL_TIME = 0.05
		previous = None
		while self.transport.connected:
			synth = speech.getSynth()
			if synth is None:  # synth may be momentarily absent while switching
				time.sleep(POLL_TIME)
				continue
			current = synth.lastIndex
			if current != previous:
				self.transport.send(type="index", index=current)
				previous = current
			time.sleep(POLL_TIME)
| gpl-2.0 |
huangkebo/zulip | zerver/tornadoviews.py | 120 | 4177 | from __future__ import absolute_import
from django.views.decorators.csrf import csrf_exempt
from zerver.models import get_client
from zerver.decorator import asynchronous, \
authenticated_json_post_view, internal_notify_view, RespondAsynchronously, \
has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_bool, check_list, check_string
from zerver.lib.event_queue import allocate_client_descriptor, get_client_descriptor, \
process_notification
from zerver.lib.narrow import check_supported_events_narrow_filter
import ujson
import logging
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
@internal_notify_view
def notify(request):
    # Internal endpoint: a producer POSTs a JSON-encoded notification in
    # 'data'; decode it and hand it to the event-queue machinery.
    process_notification(ujson.loads(request.POST['data']))
    return json_success()
@has_request_variables
def cleanup_event_queue(request, user_profile, queue_id=REQ()):
    """Deallocate an event queue, after verifying the requester owns it."""
    client = get_client_descriptor(queue_id)
    if client is None:
        return json_error("Bad event queue id: %s" % (queue_id,))
    if client.user_profile_id != user_profile.id:
        return json_error("You are not authorized to access this queue")
    request._log_data['extra'] = "[%s]" % (queue_id,)
    client.cleanup()
    return json_success()
@authenticated_json_post_view
def json_get_events(request, user_profile):
    # Legacy JSON endpoint; same as the API route but always renders
    # message content to HTML (apply_markdown=True).
    return get_events_backend(request, user_profile, apply_markdown=True)
@asynchronous
@has_request_variables
def get_events_backend(request, user_profile, handler = None,
                       user_client = REQ(converter=get_client, default=None),
                       last_event_id = REQ(converter=int, default=None),
                       queue_id = REQ(default=None),
                       apply_markdown = REQ(default=False, validator=check_bool),
                       all_public_streams = REQ(default=False, validator=check_bool),
                       event_types = REQ(default=None, validator=check_list(check_string)),
                       dont_block = REQ(default=False, validator=check_bool),
                       narrow = REQ(default=[], validator=check_list(None)),
                       lifespan_secs = REQ(default=0, converter=int)):
    # Long-polling endpoint: returns queued events immediately when any are
    # available (or dont_block is set); otherwise parks the Tornado handler
    # on the queue and returns RespondAsynchronously.
    # NOTE(review): narrow's default=[] is a shared object inside the REQ
    # spec -- presumably REQ treats it as read-only; confirm in zerver.decorator.
    if user_client is None:
        user_client = request.client
    was_connected = False
    orig_queue_id = queue_id
    if queue_id is None:
        # No queue yet: only allocate a fresh one for non-blocking requests.
        if dont_block:
            client = allocate_client_descriptor(user_profile.id, user_profile.realm.id,
                                                event_types, user_client, apply_markdown,
                                                all_public_streams, lifespan_secs,
                                                narrow=narrow)
            queue_id = client.event_queue.id
        else:
            return json_error("Missing 'queue_id' argument")
    else:
        # Existing queue: validate ownership, ack already-seen events, and
        # detach any handler still parked on it.
        if last_event_id is None:
            return json_error("Missing 'last_event_id' argument")
        client = get_client_descriptor(queue_id)
        if client is None:
            return json_error("Bad event queue id: %s" % (queue_id,))
        if user_profile.id != client.user_profile_id:
            return json_error("You are not authorized to get events from this queue")
        client.event_queue.prune(last_event_id)
        was_connected = client.finish_current_handler()
    if not client.event_queue.empty() or dont_block:
        ret = {'events': client.event_queue.contents()}
        # Report the queue id back only when we just allocated it.
        if orig_queue_id is None:
            ret['queue_id'] = queue_id
        request._log_data['extra'] = "[%s/%s]" % (queue_id, len(ret["events"]))
        if was_connected:
            request._log_data['extra'] += " [was connected]"
        return json_success(ret)
    # Nothing to deliver yet: park this handler on the queue.
    handler._request = request
    if was_connected:
        logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile.email,
                                                                    user_client.name))
    client.connect_handler(handler)
    # runtornado recognizes this special return value.
    return RespondAsynchronously
| apache-2.0 |
funkring/fdoo | addons/l10n_nl/__openerp__.py | 260 | 6040 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Veritos - Jan Verlaan - www.veritos.nl
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company like Veritos.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
#
# Deze module werkt in OpenERP 5.0.0 (en waarschijnlijk hoger).
# Deze module werkt niet in OpenERP versie 4 en lager.
#
# Status 1.0 - getest op OpenERP 5.0.3
#
# Versie 5.0.0.1
# account.account.type
# Basis gelegd voor alle account type
#
# account.account.template
# Basis gelegd met alle benodigde grootboekrekeningen welke via een menu-
# structuur gelinkt zijn aan rubrieken 1 t/m 9.
# De grootboekrekeningen gelinkt aan de account.account.type
# Deze links moeten nog eens goed nagelopen worden.
#
# account.chart.template
# Basis gelegd voor het koppelen van rekeningen aan debiteuren, crediteuren,
# bank, inkoop en verkoop boeken en de BTW configuratie.
#
# Versie 5.0.0.2
# account.tax.code.template
# Basis gelegd voor de BTW configuratie (structuur)
# Heb als basis het BTW aangifte formulier gebruikt. Of dit werkt?
#
# account.tax.template
# De BTW rekeningen aangemaakt en deze gekoppeld aan de betreffende
# grootboekrekeningen
#
# Versie 5.0.0.3
# Opschonen van de code en verwijderen van niet gebruikte componenten.
# Versie 5.0.0.4
# Aanpassen a_expense van 3000 -> 7000
# record id='btw_code_5b' op negatieve waarde gezet
# Versie 5.0.0.5
# BTW rekeningen hebben typeaanduiding gekregen t.b.v. purchase of sale
# Versie 5.0.0.6
# Opschonen van module.
# Versie 5.0.0.7
# Opschonen van module.
# Versie 5.0.0.8
# Foutje in l10n_nl_wizard.xml gecorrigeerd waardoor de module niet volledig installeerde.
# Versie 5.0.0.9
# Account Receivable en Payable goed gedefinieerd.
# Versie 5.0.1.0
# Alle user_type_xxx velden goed gedefinieerd.
# Specifieke bouw en garage gerelateerde grootboeken verwijderd om een standaard module te creeeren.
# Deze module kan dan als basis worden gebruikt voor specifieke doelgroep modules te creeeren.
# Versie 5.0.1.1
# Correctie van rekening 7010 (stond dubbel met 7014 waardoor installatie verkeerd ging)
# versie 5.0.1.2
# Correctie op diverse rekening types van user_type_asset -> user_type_liability en user_type_equity
# versie 5.0.1.3
# Kleine correctie op BTW te vorderen hoog, id was hetzelfde voor beide, waardoor hoog werd overschreven door # overig. Verduidelijking van omschrijvingen in belastingcodes t.b.v. aangifte overzicht.
# versie 5.0.1.4
# BTW omschrijvingen aangepast, zodat rapporten er beter uitzien. 2a en 5b e.d. verwijderd en enkele omschrijvingen toegevoegd.
# versie 5.0.1.5 - Switch to English
# Added properties_stock_xxx accounts for correct stock valuation, changed 7000-accounts from type cash to type expense
# Changed naming of 7020 and 7030 to Kostprijs omzet xxxx
{
'name' : 'Netherlands - Accounting',
'version' : '1.5',
'category': 'Localization/Account Charts',
'description': """
This is the module to manage the accounting chart for Netherlands in OpenERP.
=============================================================================
Read changelog in file __openerp__.py for version information.
Dit is een basismodule om een uitgebreid grootboek- en BTW schema voor
Nederlandse bedrijven te installeren in OpenERP versie 7.0.
De BTW rekeningen zijn waar nodig gekoppeld om de juiste rapportage te genereren,
denk b.v. aan intracommunautaire verwervingen waarbij u 21% BTW moet opvoeren,
maar tegelijkertijd ook 21% als voorheffing weer mag aftrekken.
Na installatie van deze module word de configuratie wizard voor 'Accounting' aangeroepen.
* U krijgt een lijst met grootboektemplates aangeboden waarin zich ook het
Nederlandse grootboekschema bevind.
* Als de configuratie wizard start, wordt u gevraagd om de naam van uw bedrijf
in te voeren, welke grootboekschema te installeren, uit hoeveel cijfers een
grootboekrekening mag bestaan, het rekeningnummer van uw bank en de currency
om Journalen te creeren.
Let op!! -> De template van het Nederlandse rekeningschema is opgebouwd uit 4
cijfers. Dit is het minimale aantal welk u moet invullen, u mag het aantal verhogen.
De extra cijfers worden dan achter het rekeningnummer aangevult met 'nullen'.
""",
'author' : 'Veritos - Jan Verlaan',
'website' : 'http://www.veritos.nl',
'depends' : ['account',
'base_vat',
'base_iban',
'account_chart'
],
'data' : ['account_chart_netherlands.xml',
"account_fiscal_position_template.xml",
"account_fiscal_position_tax_template.xml",
"account_fiscal_position_account_template.xml",
'l10n_nl_wizard.xml'
],
'demo' : [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ESOedX/edx-platform | openedx/core/lib/tests/assertions/events.py | 2 | 9874 | """Assertions related to event validation"""
from __future__ import absolute_import
import json
import pprint
import six
def assert_event_matches(expected, actual, tolerate=None):
    """
    Assert that ``actual`` matches ``expected``, raising AssertionError with
    an itemized list of discrepancies otherwise.

    Events are compared recursively; paths in the failure output are
    period-separated, so ``context.foo`` names the ``foo`` key inside the
    ``context`` dictionary.  ``tolerate`` is a set of EventMatchTolerates
    flags controlling which deviations are allowed (string payloads, extra
    fields at the root / in ``context`` / in ``event``); None selects
    EventMatchTolerates.default().  Extra fields in tolerated sections of
    ``actual`` are not treated as errors, which lets tests make partial
    assertions instead of hardcoding complete event structures.

    Failure output lists one ``* <path>: ...`` line per difference followed
    by the expected event, the actual event, and the active tolerance set.
    """
    differences = get_event_differences(expected, actual, tolerate=tolerate)
    if not differences:
        return
    details = [
        '',
        'Expected:',
        block_indent(expected),
        'Actual:',
        block_indent(actual),
        'Tolerating:',
        block_indent(EventMatchTolerates.default_if_not_defined(tolerate)),
    ]
    message_lines = ['* ' + diff for diff in differences] + details
    raise AssertionError('Unexpected differences found in structs:\n\n' + '\n'.join(message_lines))
class EventMatchTolerates(object):
    """
    Named groups of tolerance flags for expected-vs-actual event comparison.

    These cover common, event-specific deviations that we don't want to
    handle with special-case logic throughout the tests.
    """
    # The "event" field may be a JSON string instead of a dict (all browser events).
    STRING_PAYLOAD = 'string_payload'
    # Unexpected fields may appear in the top-level event dictionary.
    ROOT_EXTRA_FIELDS = 'root_extra_fields'
    # Unexpected fields may appear in the "context" dictionary, where new
    # cross-event fields are most commonly added.
    CONTEXT_EXTRA_FIELDS = 'context_extra_fields'
    # Unexpected fields may appear in the "event" (payload) dictionary;
    # usually undesirable in unit tests.
    PAYLOAD_EXTRA_FIELDS = 'payload_extra_fields'

    @classmethod
    def default(cls):
        """A reasonable set of tolerated variations.

        PAYLOAD_EXTRA_FIELDS is deliberately omitted so that erroneously
        added payload fields are detected by default.
        """
        return {cls.STRING_PAYLOAD, cls.ROOT_EXTRA_FIELDS, cls.CONTEXT_EXTRA_FIELDS}

    @classmethod
    def lenient(cls):
        """Every known variation is tolerated."""
        return cls.default() | {cls.PAYLOAD_EXTRA_FIELDS}

    @classmethod
    def strict(cls):
        """No variation is tolerated at all."""
        return frozenset()

    @classmethod
    def default_if_not_defined(cls, tolerates=None):
        """Return ``tolerates`` unchanged, or the default set when None."""
        return cls.default() if tolerates is None else tolerates
def assert_events_equal(expected, actual):
    """
    Strict comparison of two events.

    Asserts that every field of the actual event exactly matches the expected
    event, tolerating no variation whatsoever.
    """
    assert_event_matches(expected, actual, tolerate=EventMatchTolerates.strict())
def get_event_differences(expected, actual, tolerate=None):
    """Given two events, gather a list of differences between them given some set of tolerated variances."""
    tolerate = EventMatchTolerates.default_if_not_defined(tolerate)

    # Some events store their payload as a JSON string rather than a dict.
    # Comparing those strings is fragile since key order may vary, so parse
    # the payload up front when a dict was expected.
    if EventMatchTolerates.STRING_PAYLOAD in tolerate:
        expected = parse_event_payload(expected)
        actual = parse_event_payload(actual)

    # Paths whose extra fields may be tolerated, mapped to the flag that
    # enables that tolerance.
    lenient_paths = {
        (): EventMatchTolerates.ROOT_EXTRA_FIELDS,
        ('event',): EventMatchTolerates.PAYLOAD_EXTRA_FIELDS,
        ('context',): EventMatchTolerates.CONTEXT_EXTRA_FIELDS,
    }

    def should_strict_compare(path):
        """
        Decide whether extra fields at *path* count as differences.

        Tests vary in strictness: some assert that the entire event matches
        exactly, others tolerate variance in the context or root fields but
        not in the payload (for example).
        """
        flag = lenient_paths.get(tuple(path))
        return flag is None or flag not in tolerate

    return compare_structs(expected, actual, should_strict_compare=should_strict_compare)
def block_indent(text, spaces=4):
    """
    Given a multi-line string, indent every line of it by the given number of spaces.

    If `text` is not a string it is formatted using pprint.pformat.
    """
    prefix = ' ' * spaces
    return '\n'.join(prefix + line for line in pprint.pformat(text).splitlines())
def parse_event_payload(event):
    """
    Given an event, parse the "event" field as a JSON string.

    This may simply return the same event unchanged, or return a shallow copy
    of the event with the payload parsed. The input event is never modified
    in place.
    """
    # Nothing to do unless there is a string payload to parse.
    if 'event' not in event or not isinstance(event['event'], six.string_types):
        return event

    event = event.copy()
    try:
        event['event'] = json.loads(event['event'])
    except ValueError:
        # Not valid JSON; leave the string payload untouched.
        pass
    return event
def compare_structs(expected, actual, should_strict_compare=None, path=None):
    """
    Traverse two structures to ensure that the `actual` structure contains all
    of the elements within the `expected` one.

    This is a "deep" comparison: dictionaries are descended into recursively.
    Any pair of values that are not both dictionaries is simply compared with
    the "!=" operator.
    """
    path = [] if path is None else path
    differences = []

    if isinstance(expected, dict) and isinstance(actual, dict):
        expected_keys = frozenset(expected)
        actual_keys = frozenset(actual)

        # Fields the expected event requires but the actual event lacks.
        for key in expected_keys - actual_keys:
            differences.append(u'{0}: not found in actual'.format(_path_to_string(path + [key])))

        # Fields present only in the actual event are differences only when
        # this path is being compared strictly.
        if should_strict_compare is not None and should_strict_compare(path):
            for key in actual_keys - expected_keys:
                differences.append(u'{0}: only defined in actual'.format(_path_to_string(path + [key])))

        # Recurse into the fields both events share.
        for key in expected_keys & actual_keys:
            differences.extend(
                compare_structs(expected[key], actual[key], should_strict_compare, path + [key])
            )
    elif expected != actual:
        differences.append(u'{path}: {a} != {b} (expected != actual)'.format(
            path=_path_to_string(path),
            a=repr(expected),
            b=repr(actual)
        ))

    return differences
def is_matching_event(expected_event, actual_event, tolerate=None):
    """Return True iff the `actual_event` matches the `expected_event` given the tolerances."""
    differences = get_event_differences(expected_event, actual_event, tolerate=tolerate)
    return not differences
def _path_to_string(path):
"""Convert a list of path elements into a single path string."""
return '.'.join(path)
| agpl-3.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/SocketServer.py | 30 | 23321 | """Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneously from applying
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import socket
import select
import sys
import os
try:
import threading
except ImportError:
import dummy_threading as threading
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
"ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
"StreamRequestHandler","DatagramRequestHandler",
"ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer","UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
class BaseServer:
    """Base class for server classes.
    Methods for the caller:
    - __init__(server_address, RequestHandlerClass)
    - serve_forever(poll_interval=0.5)
    - shutdown()
    - handle_request() # if you do not use serve_forever()
    - fileno() -> int # for select()
    Methods that may be overridden:
    - server_bind()
    - server_activate()
    - get_request() -> request, client_address
    - handle_timeout()
    - verify_request(request, client_address)
    - server_close()
    - process_request(request, client_address)
    - shutdown_request(request)
    - close_request(request)
    - handle_error()
    Methods for derived classes:
    - finish_request(request, client_address)
    Class variables that may be overridden by derived classes or
    instances:
    - timeout
    - address_family
    - socket_type
    - allow_reuse_address
    Instance variables:
    - RequestHandlerClass
    - socket
    """
    # Seconds handle_request() waits before calling handle_timeout();
    # None means block indefinitely.
    timeout = None
    def __init__(self, server_address, RequestHandlerClass):
        """Constructor. May be extended, do not override."""
        self.server_address = server_address
        self.RequestHandlerClass = RequestHandlerClass
        # Event set when serve_forever() exits; shutdown() waits on it.
        self.__is_shut_down = threading.Event()
        # Flag polled by serve_forever(); set to True by shutdown().
        self.__shutdown_request = False
    def server_activate(self):
        """Called by constructor to activate the server.
        May be overridden.
        """
        pass
    def serve_forever(self, poll_interval=0.5):
        """Handle one request at a time until shutdown.
        Polls for shutdown every poll_interval seconds. Ignores
        self.timeout. If you need to do periodic tasks, do them in
        another thread.
        """
        self.__is_shut_down.clear()
        try:
            while not self.__shutdown_request:
                # XXX: Consider using another file descriptor or
                # connecting to the socket to wake this up instead of
                # polling. Polling reduces our responsiveness to a
                # shutdown request and wastes cpu at all other times.
                r, w, e = select.select([self], [], [], poll_interval)
                if self in r:
                    self._handle_request_noblock()
        finally:
            self.__shutdown_request = False
            # Unblock any thread waiting inside shutdown().
            self.__is_shut_down.set()
    def shutdown(self):
        """Stops the serve_forever loop.
        Blocks until the loop has finished. This must be called while
        serve_forever() is running in another thread, or it will
        deadlock.
        """
        self.__shutdown_request = True
        self.__is_shut_down.wait()
    # The distinction between handling, getting, processing and
    # finishing a request is fairly arbitrary. Remember:
    #
    # - handle_request() is the top-level call. It calls
    # select, get_request(), verify_request() and process_request()
    # - get_request() is different for stream or datagram sockets
    # - process_request() is the place that may fork a new process
    # or create a new thread to finish the request
    # - finish_request() instantiates the request handler class;
    # this constructor will handle the request all by itself
    def handle_request(self):
        """Handle one request, possibly blocking.
        Respects self.timeout.
        """
        # Support people who used socket.settimeout() to escape
        # handle_request before self.timeout was available.
        timeout = self.socket.gettimeout()
        if timeout is None:
            timeout = self.timeout
        elif self.timeout is not None:
            # Both timeouts are set; honour the shorter of the two.
            timeout = min(timeout, self.timeout)
        fd_sets = select.select([self], [], [], timeout)
        if not fd_sets[0]:
            # Nothing became readable before the deadline expired.
            self.handle_timeout()
            return
        self._handle_request_noblock()
    def _handle_request_noblock(self):
        """Handle one request, without blocking.
        I assume that select.select has returned that the socket is
        readable before this function was called, so there should be
        no risk of blocking in get_request().
        """
        try:
            request, client_address = self.get_request()
        except socket.error:
            # get_request() failed (e.g. the connection was aborted
            # between select() and accept()); nothing to service.
            return
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
            except:
                # Report the failure but keep the server alive.
                self.handle_error(request, client_address)
                self.shutdown_request(request)
    def handle_timeout(self):
        """Called if no new request arrives within self.timeout.
        Overridden by ForkingMixIn.
        """
        pass
    def verify_request(self, request, client_address):
        """Verify the request. May be overridden.
        Return True if we should proceed with this request.
        """
        return True
    def process_request(self, request, client_address):
        """Call finish_request.
        Overridden by ForkingMixIn and ThreadingMixIn.
        """
        self.finish_request(request, client_address)
        self.shutdown_request(request)
    def server_close(self):
        """Called to clean-up the server.
        May be overridden.
        """
        pass
    def finish_request(self, request, client_address):
        """Finish one request by instantiating RequestHandlerClass."""
        self.RequestHandlerClass(request, client_address, self)
    def shutdown_request(self, request):
        """Called to shutdown and close an individual request."""
        self.close_request(request)
    def close_request(self, request):
        """Called to clean up an individual request."""
        pass
    def handle_error(self, request, client_address):
        """Handle an error gracefully. May be overridden.
        The default is to print a traceback and continue.
        """
        print '-'*40
        print 'Exception happened during processing of request from',
        print client_address
        import traceback
        traceback.print_exc() # XXX But this goes to stderr!
        print '-'*40
class TCPServer(BaseServer):
    """Base class for various socket-based server classes.
    Defaults to synchronous IP stream (i.e., TCP).
    Methods for the caller:
    - __init__(server_address, RequestHandlerClass, bind_and_activate=True)
    - serve_forever(poll_interval=0.5)
    - shutdown()
    - handle_request() # if you don't use serve_forever()
    - fileno() -> int # for select()
    Methods that may be overridden:
    - server_bind()
    - server_activate()
    - get_request() -> request, client_address
    - handle_timeout()
    - verify_request(request, client_address)
    - process_request(request, client_address)
    - shutdown_request(request)
    - close_request(request)
    - handle_error()
    Methods for derived classes:
    - finish_request(request, client_address)
    Class variables that may be overridden by derived classes or
    instances:
    - timeout
    - address_family
    - socket_type
    - request_queue_size (only for stream sockets)
    - allow_reuse_address
    Instance variables:
    - server_address
    - RequestHandlerClass
    - socket
    """
    address_family = socket.AF_INET
    socket_type = socket.SOCK_STREAM
    # Backlog passed to socket.listen() in server_activate().
    request_queue_size = 5
    # If True, server_bind() sets SO_REUSEADDR before binding.
    allow_reuse_address = False
    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
        """Constructor. May be extended, do not override."""
        BaseServer.__init__(self, server_address, RequestHandlerClass)
        self.socket = socket.socket(self.address_family,
                                    self.socket_type)
        if bind_and_activate:
            self.server_bind()
            self.server_activate()
    def server_bind(self):
        """Called by constructor to bind the socket.
        May be overridden.
        """
        if self.allow_reuse_address:
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
        # Re-read the bound address: the OS may have filled in an
        # ephemeral port or canonicalized the host.
        self.server_address = self.socket.getsockname()
    def server_activate(self):
        """Called by constructor to activate the server.
        May be overridden.
        """
        self.socket.listen(self.request_queue_size)
    def server_close(self):
        """Called to clean-up the server.
        May be overridden.
        """
        self.socket.close()
    def fileno(self):
        """Return socket file number.
        Interface required by select().
        """
        return self.socket.fileno()
    def get_request(self):
        """Get the request and client address from the socket.
        May be overridden.
        """
        return self.socket.accept()
    def shutdown_request(self, request):
        """Called to shutdown and close an individual request."""
        try:
            #explicitly shutdown. socket.close() merely releases
            #the socket and waits for GC to perform the actual close.
            request.shutdown(socket.SHUT_WR)
        except socket.error:
            pass #some platforms may raise ENOTCONN here
        self.close_request(request)
    def close_request(self, request):
        """Called to clean up an individual request."""
        request.close()
class UDPServer(TCPServer):
    """Synchronous UDP (datagram) server class."""

    socket_type = socket.SOCK_DGRAM
    allow_reuse_address = False
    # Largest datagram read by a single recvfrom() call in get_request().
    max_packet_size = 8192

    def get_request(self):
        """Read one datagram; the "request" is the (data, socket) pair."""
        packet, sender = self.socket.recvfrom(self.max_packet_size)
        return (packet, self.socket), sender

    def server_activate(self):
        # Datagram sockets are connectionless: there is no listen() step.
        pass

    def shutdown_request(self, request):
        # Nothing to shut down for UDP; just clean up the request.
        self.close_request(request)

    def close_request(self, request):
        # The shared datagram socket stays open; nothing per-request to close.
        pass
class ForkingMixIn:
    """Mix-in class to handle each request in a new process."""
    # Inactivity window (seconds) after which handle_timeout() reaps zombies.
    timeout = 300
    # PIDs of live child processes; created lazily in process_request().
    active_children = None
    # collect_children() blocks while at least this many children are alive.
    max_children = 40
    def collect_children(self):
        """Internal routine to wait for children that have exited."""
        if self.active_children is None: return
        # Block until we are below the max_children limit.
        while len(self.active_children) >= self.max_children:
            # XXX: This will wait for any child process, not just ones
            # spawned by this library. This could confuse other
            # libraries that expect to be able to wait for their own
            # children.
            try:
                pid, status = os.waitpid(0, 0)
            except os.error:
                pid = None
            if pid not in self.active_children: continue
            self.active_children.remove(pid)
        # XXX: This loop runs more system calls than it ought
        # to. There should be a way to put the active_children into a
        # process group and then use os.waitpid(-pgid) to wait for any
        # of that set, but I couldn't find a way to allocate pgids
        # that couldn't collide.
        for child in self.active_children:
            try:
                # WNOHANG: poll without blocking; pid is 0 if still running.
                pid, status = os.waitpid(child, os.WNOHANG)
            except os.error:
                pid = None
            if not pid: continue
            try:
                self.active_children.remove(pid)
            except ValueError, e:
                raise ValueError('%s. x=%d and list=%r' % (e.message, pid,
                                                           self.active_children))
    def handle_timeout(self):
        """Wait for zombies after self.timeout seconds of inactivity.
        May be extended, do not override.
        """
        self.collect_children()
    def process_request(self, request, client_address):
        """Fork a new subprocess to process the request."""
        self.collect_children()
        pid = os.fork()
        if pid:
            # Parent process
            if self.active_children is None:
                self.active_children = []
            self.active_children.append(pid)
            self.close_request(request) #close handle in parent process
            return
        else:
            # Child process.
            # This must never return, hence os._exit()!
            try:
                self.finish_request(request, client_address)
                self.shutdown_request(request)
                os._exit(0)
            except:
                try:
                    self.handle_error(request, client_address)
                    self.shutdown_request(request)
                finally:
                    # Exit with a nonzero status even if error handling fails.
                    os._exit(1)
class ThreadingMixIn:
    """Mix-in class to handle each request in a new thread."""

    # Whether worker threads are daemonic, i.e. abandoned when the main
    # process terminates.
    daemon_threads = False

    def process_request_thread(self, request, client_address):
        """Thread body: same as BaseServer.process_request(), plus error handling."""
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except:
            self.handle_error(request, client_address)
            self.shutdown_request(request)

    def process_request(self, request, client_address):
        """Start a new thread to process the request."""
        worker = threading.Thread(
            target=self.process_request_thread,
            args=(request, client_address),
        )
        worker.daemon = self.daemon_threads
        worker.start()
# Ready-made combinations. The mix-in must come first so that its
# process_request() overrides the synchronous one from the server class.
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
    # Unix domain socket variants, defined only on platforms that support
    # AF_UNIX (matching the conditional extension of __all__ above).
    class UnixStreamServer(TCPServer):
        address_family = socket.AF_UNIX
    class UnixDatagramServer(UDPServer):
        address_family = socket.AF_UNIX
    class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
    class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
    """Base class for request handler classes.

    One instance is created per request to be handled. The constructor
    stores the request, client_address and server on the instance and then
    drives the setup() -> handle() -> finish() sequence. To implement a
    specific service, derive a class that overrides handle().

    Inside handle(), the request is available as self.request, the client
    address as self.client_address, and the server (for per-server state)
    as self.server. Because a separate instance is created for each
    request, handle() may freely define additional instance variables.
    """

    def __init__(self, request, client_address, server):
        self.request = request
        self.client_address = client_address
        self.server = server
        self.setup()
        try:
            self.handle()
        finally:
            # Always run finish(), even when handle() raises.
            self.finish()

    def setup(self):
        """Hook run before handle(); override to prepare per-request state."""
        pass

    def handle(self):
        """Service the request; subclasses override this."""
        pass

    def finish(self):
        """Hook run after handle(), even on error; override to clean up."""
        pass
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
    """Define self.rfile and self.wfile for stream sockets."""
    # Default buffer sizes for rfile, wfile.
    # We default rfile to buffered because otherwise it could be
    # really slow for large data (a getc() call per byte); we make
    # wfile unbuffered because (a) often after a write() we want to
    # read and we need to flush the line; (b) big writes to unbuffered
    # files are typically optimized by stdio even when big reads
    # aren't.
    rbufsize = -1
    wbufsize = 0
    # A timeout to apply to the request socket, if not None.
    timeout = None
    # Disable nagle algorithm for this socket, if True.
    # Use only when wbufsize != 0, to avoid small packets.
    disable_nagle_algorithm = False
    def setup(self):
        # The raw socket; BaseRequestHandler stored it as self.request.
        self.connection = self.request
        if self.timeout is not None:
            self.connection.settimeout(self.timeout)
        if self.disable_nagle_algorithm:
            self.connection.setsockopt(socket.IPPROTO_TCP,
                                       socket.TCP_NODELAY, True)
        # Wrap the socket in file-like objects for reading and writing.
        self.rfile = self.connection.makefile('rb', self.rbufsize)
        self.wfile = self.connection.makefile('wb', self.wbufsize)
    def finish(self):
        if not self.wfile.closed:
            # Push any buffered reply bytes out before closing.
            self.wfile.flush()
        self.wfile.close()
        self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
    # XXX Regrettably, I cannot get this working on Linux;
    # s.recvfrom() doesn't return a meaningful client address.
    """Define self.rfile and self.wfile for datagram sockets."""
    def setup(self):
        # Prefer the faster C implementation; fall back to the pure
        # Python StringIO where cStringIO is unavailable.
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        # For datagram servers the request is the (data, socket) pair
        # produced by UDPServer.get_request().
        self.packet, self.socket = self.request
        # rfile exposes the received datagram; wfile buffers the reply.
        self.rfile = StringIO(self.packet)
        self.wfile = StringIO()
    def finish(self):
        # Send everything written to wfile back to the client in one datagram.
        self.socket.sendto(self.wfile.getvalue(), self.client_address)
| gpl-2.0 |
followloda/PornGuys | FlaskServer/venv/Lib/encodings/cp862.py | 593 | 33626 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP862 codec delegating to the charmap codec with this module's tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so `final` needs no special handling;
        # return only the encoded bytes, dropping the consumed-length count.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is stateless, so `final` needs no special handling;
        # return only the decoded text, dropping the consumed-length count.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Codec.encode() supplies the mapping; StreamWriter adds the stream API.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Codec.decode() supplies the mapping; StreamReader adds the stream API.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registered for the 'cp862' encoding."""
    return codecs.CodecInfo(
        name='cp862',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x05d0, # HEBREW LETTER ALEF
0x0081: 0x05d1, # HEBREW LETTER BET
0x0082: 0x05d2, # HEBREW LETTER GIMEL
0x0083: 0x05d3, # HEBREW LETTER DALET
0x0084: 0x05d4, # HEBREW LETTER HE
0x0085: 0x05d5, # HEBREW LETTER VAV
0x0086: 0x05d6, # HEBREW LETTER ZAYIN
0x0087: 0x05d7, # HEBREW LETTER HET
0x0088: 0x05d8, # HEBREW LETTER TET
0x0089: 0x05d9, # HEBREW LETTER YOD
0x008a: 0x05da, # HEBREW LETTER FINAL KAF
0x008b: 0x05db, # HEBREW LETTER KAF
0x008c: 0x05dc, # HEBREW LETTER LAMED
0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
0x008e: 0x05de, # HEBREW LETTER MEM
0x008f: 0x05df, # HEBREW LETTER FINAL NUN
0x0090: 0x05e0, # HEBREW LETTER NUN
0x0091: 0x05e1, # HEBREW LETTER SAMEKH
0x0092: 0x05e2, # HEBREW LETTER AYIN
0x0093: 0x05e3, # HEBREW LETTER FINAL PE
0x0094: 0x05e4, # HEBREW LETTER PE
0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0096: 0x05e6, # HEBREW LETTER TSADI
0x0097: 0x05e7, # HEBREW LETTER QOF
0x0098: 0x05e8, # HEBREW LETTER RESH
0x0099: 0x05e9, # HEBREW LETTER SHIN
0x009a: 0x05ea, # HEBREW LETTER TAV
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u05d0' # 0x0080 -> HEBREW LETTER ALEF
u'\u05d1' # 0x0081 -> HEBREW LETTER BET
u'\u05d2' # 0x0082 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x0083 -> HEBREW LETTER DALET
u'\u05d4' # 0x0084 -> HEBREW LETTER HE
u'\u05d5' # 0x0085 -> HEBREW LETTER VAV
u'\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x0087 -> HEBREW LETTER HET
u'\u05d8' # 0x0088 -> HEBREW LETTER TET
u'\u05d9' # 0x0089 -> HEBREW LETTER YOD
u'\u05da' # 0x008a -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x008b -> HEBREW LETTER KAF
u'\u05dc' # 0x008c -> HEBREW LETTER LAMED
u'\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x008e -> HEBREW LETTER MEM
u'\u05df' # 0x008f -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x0090 -> HEBREW LETTER NUN
u'\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0x0092 -> HEBREW LETTER AYIN
u'\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x0094 -> HEBREW LETTER PE
u'\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x0096 -> HEBREW LETTER TSADI
u'\u05e7' # 0x0097 -> HEBREW LETTER QOF
u'\u05e8' # 0x0098 -> HEBREW LETTER RESH
u'\u05e9' # 0x0099 -> HEBREW LETTER SHIN
u'\u05ea' # 0x009a -> HEBREW LETTER TAV
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xa5' # 0x009d -> YEN SIGN
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x05d0: 0x0080, # HEBREW LETTER ALEF
0x05d1: 0x0081, # HEBREW LETTER BET
0x05d2: 0x0082, # HEBREW LETTER GIMEL
0x05d3: 0x0083, # HEBREW LETTER DALET
0x05d4: 0x0084, # HEBREW LETTER HE
0x05d5: 0x0085, # HEBREW LETTER VAV
0x05d6: 0x0086, # HEBREW LETTER ZAYIN
0x05d7: 0x0087, # HEBREW LETTER HET
0x05d8: 0x0088, # HEBREW LETTER TET
0x05d9: 0x0089, # HEBREW LETTER YOD
0x05da: 0x008a, # HEBREW LETTER FINAL KAF
0x05db: 0x008b, # HEBREW LETTER KAF
0x05dc: 0x008c, # HEBREW LETTER LAMED
0x05dd: 0x008d, # HEBREW LETTER FINAL MEM
0x05de: 0x008e, # HEBREW LETTER MEM
0x05df: 0x008f, # HEBREW LETTER FINAL NUN
0x05e0: 0x0090, # HEBREW LETTER NUN
0x05e1: 0x0091, # HEBREW LETTER SAMEKH
0x05e2: 0x0092, # HEBREW LETTER AYIN
0x05e3: 0x0093, # HEBREW LETTER FINAL PE
0x05e4: 0x0094, # HEBREW LETTER PE
0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI
0x05e6: 0x0096, # HEBREW LETTER TSADI
0x05e7: 0x0097, # HEBREW LETTER QOF
0x05e8: 0x0098, # HEBREW LETTER RESH
0x05e9: 0x0099, # HEBREW LETTER SHIN
0x05ea: 0x009a, # HEBREW LETTER TAV
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-3.0 |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Lib/pdb.py | 47 | 56534 | #! /usr/bin/env python3
"""
The Python Debugger Pdb
=======================
To use the debugger in its simplest form:
>>> import pdb
>>> pdb.run('<a statement>')
The debugger's prompt is '(Pdb) '. This will stop in the first
function call in <a statement>.
Alternatively, if a statement terminated with an unhandled exception,
you can use pdb's post-mortem facility to inspect the contents of the
traceback:
>>> <a statement>
<exception traceback>
>>> import pdb
>>> pdb.pm()
The commands recognized by the debugger are listed in the next
section. Most can be abbreviated as indicated; e.g., h(elp) means
that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel',
nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in
square brackets. Alternatives in the command syntax are separated
by a vertical bar (|).
A blank line repeats the previous command literally, except for
'list', where it lists the next 11 lines.
Commands that the debugger doesn't recognize are assumed to be Python
statements and are executed in the context of the program being
debugged. Python statements can also be prefixed with an exclamation
point ('!'). This is a powerful way to inspect the program being
debugged; it is even possible to change variables or call functions.
When an exception occurs in such a statement, the exception name is
printed but the debugger's state is not changed.
The debugger supports aliases, which can save typing. And aliases can
have parameters (see the alias help entry) which allows one a certain
level of adaptability to the context under examination.
Multiple commands may be entered on a single line, separated by the
pair ';;'. No intelligence is applied to separating the commands; the
input is split at the first ';;', even if it is in the middle of a
quoted string.
If a file ".pdbrc" exists in your home directory or in the current
directory, it is read in and executed as if it had been typed at the
debugger prompt. This is particularly useful for aliases. If both
files exist, the one in the home directory is read first and aliases
defined there can be overridden by the local file.
Aside from aliases, the debugger is not directly programmable; but it
is implemented as a class from which you can derive your own debugger
class, which you can make as fancy as you like.
Debugger commands
=================
"""
# NOTE: the actual command documentation is collected from docstrings of the
# commands and is appended to __doc__ after the class has been defined.
import os
import re
import sys
import cmd
import bdb
import dis
import code
import pprint
import signal
import inspect
import traceback
import linecache
class Restart(Exception):
    """Raised to signal that the debugged program should be restarted."""
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
    """Return (funcname, filename, lineno) for the first definition of
    *funcname* in *filename*, or None if the file cannot be read or no
    matching 'def funcname(' line is found.
    """
    cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
    try:
        fp = open(filename)
    except IOError:
        return None
    # Use 'with' so the file is closed even if reading raises;
    # the original closed it only on the normal path.
    with fp:
        # consumer of this info expects the first line to be 1
        for lineno, line in enumerate(fp, start=1):
            if cre.match(line):
                return funcname, filename, lineno
    return None
def getsourcelines(obj):
    """Return (lines, first_lineno) for the source of *obj*.

    For modules, and for module-level frames (where f_globals is
    f_locals), the whole file is returned starting at line 1; otherwise
    the enclosing block is cut out with inspect.getblock().
    """
    lines, lnum = inspect.findsource(obj)
    whole_module = inspect.ismodule(obj) or (
        inspect.isframe(obj) and obj.f_globals is obj.f_locals)
    if whole_module:
        return lines, 1
    return inspect.getblock(lines[lnum:]), lnum + 1
def lasti2lineno(code, lasti):
    """Map the bytecode offset *lasti* in *code* to its source line.

    Returns 0 when the offset precedes every recorded line start.
    """
    for offset, lineno in reversed(list(dis.findlinestarts(code))):
        if lasti >= offset:
            return lineno
    return 0
class _rstr(str):
    """A string whose repr is the string itself, without quotes."""
    def __repr__(self):
        return self
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': '    # Use this to get the old situation back
line_prefix = '\n-> '   # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
    def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None,
                 nosigint=False):
        """Initialize the debugger.

        completekey, stdin and stdout are forwarded to cmd.Cmd; skip is
        forwarded to bdb.Bdb (patterns of module names not to step into);
        nosigint, when true, disables installing a SIGINT handler.
        """
        bdb.Bdb.__init__(self, skip=skip)
        cmd.Cmd.__init__(self, completekey, stdin, stdout)
        if stdout:
            # An explicit output stream means we may not be on a tty;
            # disable input()-based prompting in cmd.Cmd.
            self.use_rawinput = 0
        self.prompt = '(Pdb) '
        self.aliases = {}        # alias name -> replacement command text
        self.displaying = {}     # frame -> {expr: last displayed value}
        self.mainpyfile = ''
        self._wait_for_mainpyfile = False
        self.tb_lineno = {}      # frame -> original traceback line number
        # Try to load readline if it exists (imported for its side effect
        # of enabling line editing / history on the prompt)
        try:
            import readline
        except ImportError:
            pass
        self.allow_kbdint = False
        self.nosigint = nosigint
        # Read $HOME/.pdbrc and ./.pdbrc; lines are queued in rcLines and
        # executed later by execRcLines().
        self.rcLines = []
        if 'HOME' in os.environ:
            envHome = os.environ['HOME']
            try:
                with open(os.path.join(envHome, ".pdbrc")) as rcFile:
                    self.rcLines.extend(rcFile)
            except IOError:
                pass
        try:
            with open(".pdbrc") as rcFile:
                self.rcLines.extend(rcFile)
        except IOError:
            pass
        self.commands = {}  # associates a command list to breakpoint numbers
        self.commands_doprompt = {}  # for each bp num, tells if the prompt
                                     # must be disp. after execing the cmd list
        self.commands_silent = {}  # for each bp num, tells if the stack trace
                                   # must be disp. after execing the cmd list
        self.commands_defining = False  # True while in the process of defining
                                        # a command list
        self.commands_bnum = None  # The breakpoint number for which we are
                                   # defining a list
    def sigint_handler(self, signum, frame):
        """SIGINT handler: break back into the debugger at *frame*.

        If interactive input is in progress (allow_kbdint), raise
        KeyboardInterrupt instead, so only the current command is
        cancelled rather than re-entering the debugger.
        """
        if self.allow_kbdint:
            raise KeyboardInterrupt
        self.message("\nProgram interrupted. (Use 'cont' to resume).")
        self.set_step()
        self.set_trace(frame)
        # restore previous signal handler
        # (NOTE(review): _previous_sigint_handler is presumably saved where
        # this handler gets installed, outside this chunk -- confirm)
        signal.signal(signal.SIGINT, self._previous_sigint_handler)
    def reset(self):
        """Reset bdb state plus our own per-interaction bookkeeping."""
        bdb.Bdb.reset(self)
        self.forget()
    def forget(self):
        """Drop all state tied to the current stack/traceback."""
        self.lineno = None      # last line shown by the 'list' command
        self.stack = []         # (frame, lineno) pairs from get_stack()
        self.curindex = 0       # index of the current frame in self.stack
        self.curframe = None
        self.tb_lineno.clear()
    def setup(self, f, tb):
        """Prepare interaction state for frame *f* and traceback *tb*.

        Returns a true value if a queued .pdbrc command asked to leave
        the interaction immediately (see execRcLines).
        """
        self.forget()
        self.stack, self.curindex = self.get_stack(f, tb)
        while tb:
            # when setting up post-mortem debugging with a traceback, save all
            # the original line numbers to be displayed along the current line
            # numbers (which can be different, e.g. due to finally clauses)
            lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti)
            self.tb_lineno[tb.tb_frame] = lineno
            tb = tb.tb_next
        self.curframe = self.stack[self.curindex][0]
        # The f_locals dictionary is updated from the actual frame
        # locals whenever the .f_locals accessor is called, so we
        # cache it here to ensure that modifications are not overwritten.
        self.curframe_locals = self.curframe.f_locals
        return self.execRcLines()
    # Can be executed earlier than 'setup' if desired
    def execRcLines(self):
        """Execute queued .pdbrc lines, each at most once.

        Returns True when one of the commands requests to leave the
        interaction (e.g. 'continue'); the remaining lines are re-queued
        for the next interaction.
        """
        if not self.rcLines:
            return
        # local copy because of recursion
        rcLines = self.rcLines
        rcLines.reverse()
        # execute every line only once
        self.rcLines = []
        while rcLines:
            line = rcLines.pop().strip()
            # skip blanks and comment lines
            if line and line[0] != '#':
                if self.onecmd(line):
                    # if onecmd returns True, the command wants to exit
                    # from the interaction, save leftover rc lines
                    # to execute before next interaction
                    self.rcLines += reversed(rcLines)
                    return True
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
self.message('--Call--')
self.interaction(frame, None)
    def user_line(self, frame):
        """This function is called when we stop or break at this line."""
        if self._wait_for_mainpyfile:
            # Ignore every stop until execution actually reaches the main
            # script's file at a real source line.
            if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                or frame.f_lineno <= 0):
                return
            self._wait_for_mainpyfile = False
        # bp_commands() replays any stored breakpoint command list; it
        # returns a true value if the normal interaction should follow.
        if self.bp_commands(frame):
            self.interaction(frame, None)
    def bp_commands(self, frame):
        """Call every command that was set for the current active breakpoint
        (if there is one).

        Returns True if the normal interaction function must be called,
        False otherwise."""
        # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit
        if getattr(self, "currentbp", False) and \
               self.currentbp in self.commands:
            currentbp = self.currentbp
            self.currentbp = 0
            # Replay the stored command lines as if typed at the prompt,
            # preserving lastcmd across the replay.
            lastcmd_back = self.lastcmd
            self.setup(frame, None)
            for line in self.commands[currentbp]:
                self.onecmd(line)
            self.lastcmd = lastcmd_back
            if not self.commands_silent[currentbp]:
                self.print_stack_entry(self.stack[self.curindex])
            if self.commands_doprompt[currentbp]:
                self._cmdloop()
            self.forget()
            # Falsy: interaction (if any) already happened above.
            return
        return 1
    def user_return(self, frame, return_value):
        """This function is called when a return trap is set here."""
        if self._wait_for_mainpyfile:
            return
        # Expose the value being returned to the user as __return__.
        frame.f_locals['__return__'] = return_value
        self.message('--Return--')
        self.interaction(frame, None)
    def user_exception(self, frame, exc_info):
        """This function is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        if self._wait_for_mainpyfile:
            return
        exc_type, exc_value, exc_traceback = exc_info
        # Expose the pending exception to the user as __exception__.
        frame.f_locals['__exception__'] = exc_type, exc_value
        # Show just the final "ExcType: message" line of the report.
        self.message(traceback.format_exception_only(exc_type,
                                                     exc_value)[-1].strip())
        self.interaction(frame, exc_traceback)
# General interaction function
    def _cmdloop(self):
        """Run cmd.Cmd.cmdloop(), absorbing KeyboardInterrupt.

        Ctrl-C during interactive input cancels the current command and
        redisplays the prompt instead of killing the debugger.
        """
        while True:
            try:
                # keyboard interrupts allow for an easy way to cancel
                # the current command, so allow them during interactive input
                self.allow_kbdint = True
                self.cmdloop()
                self.allow_kbdint = False
                break
            except KeyboardInterrupt:
                self.message('--KeyboardInterrupt--')
# Called before loop, handles display expressions
    def preloop(self):
        """Before each prompt, report 'display' expressions that changed."""
        displaying = self.displaying.get(self.curframe)
        if displaying:
            for expr, oldvalue in displaying.items():
                newvalue = self._getval_except(expr)
                # check for identity first; this prevents custom __eq__ to
                # be called at every loop, and also prevents instances whose
                # fields are changed to be displayed
                if newvalue is not oldvalue and newvalue != oldvalue:
                    displaying[expr] = newvalue
                    self.message('display %s: %r [old: %r]' %
                                 (expr, newvalue, oldvalue))
    def interaction(self, frame, traceback):
        """Enter the command loop for *frame* (with optional *traceback*)."""
        if self.setup(frame, traceback):
            # no interaction desired at this time (happens if .pdbrc contains
            # a command like "continue")
            self.forget()
            return
        self.print_stack_entry(self.stack[self.curindex])
        self._cmdloop()
        self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
# reproduce the behavior of the standard displayhook, not printing None
if obj is not None:
self.message(repr(obj))
    def default(self, line):
        """Execute *line* as a Python statement in the current frame.

        Called by cmd.Cmd for any input that is not a recognized debugger
        command.  A leading '!' is stripped.  Exceptions are reported but
        the debugger's state is not changed.
        """
        if line[:1] == '!': line = line[1:]
        locals = self.curframe_locals
        globals = self.curframe.f_globals
        try:
            code = compile(line + '\n', '<stdin>', 'single')
            save_stdout = sys.stdout
            save_stdin = sys.stdin
            save_displayhook = sys.displayhook
            try:
                # Run the statement against the debugger's own streams and
                # displayhook, then always restore the saved ones.
                sys.stdin = self.stdin
                sys.stdout = self.stdout
                sys.displayhook = self.displayhook
                exec(code, globals, locals)
            finally:
                sys.stdout = save_stdout
                sys.stdin = save_stdin
                sys.displayhook = save_displayhook
        except:
            # Report like the interactive interpreter: just the last line
            # of the formatted exception.
            exc_info = sys.exc_info()[:2]
            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
    def precmd(self, line):
        """Handle alias expansion and ';;' separator."""
        if not line.strip():
            return line
        args = line.split()
        while args[0] in self.aliases:
            # Expand aliases repeatedly; %1, %2, ... are positional
            # parameters and %* stands for all arguments.
            line = self.aliases[args[0]]
            ii = 1
            for tmpArg in args[1:]:
                line = line.replace("%" + str(ii),
                                    tmpArg)
                ii += 1
            line = line.replace("%*", ' '.join(args[1:]))
            args = line.split()
        # split into ';;' separated commands
        # unless it's an alias command
        if args[0] != 'alias':
            marker = line.find(';;')
            if marker >= 0:
                # queue up everything after marker
                next = line[marker+2:].lstrip()
                self.cmdqueue.append(next)
                line = line[:marker].rstrip()
        return line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
    def handle_command_def(self, line):
        """Handles one command line during command list definition."""
        # NOTE: the local name ``cmd`` shadows the imported cmd module here.
        cmd, arg, line = self.parseline(line)
        if not cmd:
            return
        if cmd == 'silent':
            self.commands_silent[self.commands_bnum] = True
            return # continue to handle other cmd def in the cmd list
        elif cmd == 'end':
            self.cmdqueue = []
            return 1 # end of cmd list
        cmdlist = self.commands[self.commands_bnum]
        if arg:
            cmdlist.append(cmd+' '+arg)
        else:
            cmdlist.append(cmd)
        # Determine if we must stop
        try:
            func = getattr(self, 'do_' + cmd)
        except AttributeError:
            func = self.default
        # one of the resuming commands
        if func.__name__ in self.commands_resuming:
            # Resuming commands implicitly terminate the command list.
            self.commands_doprompt[self.commands_bnum] = False
            self.cmdqueue = []
            return 1
        return
    # interface abstraction functions
    def message(self, msg):
        """Write *msg* (plus newline) to the debugger's output stream."""
        print(msg, file=self.stdout)
    def error(self, msg):
        """Write an error message, prefixed with '***', to the output stream."""
        print('***', msg, file=self.stdout)
    # Command definitions, called by cmdloop()
    # The argument is the remaining string on the command line
    # Return true to exit from the command loop
    def do_commands(self, arg):
        """commands [bpnumber]
        (com) ...
        (com) end
        (Pdb)
        Specify a list of commands for breakpoint number bpnumber.
        The commands themselves are entered on the following lines.
        Type a line containing just 'end' to terminate the commands.
        The commands are executed when the breakpoint is hit.
        To remove all commands from a breakpoint, type commands and
        follow it immediately with end; that is, give no commands.
        With no bpnumber argument, commands refers to the last
        breakpoint set.
        You can use breakpoint commands to start your program up
        again. Simply use the continue command, or step, or any other
        command that resumes execution.
        Specifying any command resuming execution (currently continue,
        step, next, return, jump, quit and their abbreviations)
        terminates the command list (as if that command was
        immediately followed by end). This is because any time you
        resume execution (even with a simple next or step), you may
        encounter another breakpoint -- which could have its own
        command list, leading to ambiguities about which list to
        execute.
        If you use the 'silent' command in the command list, the usual
        message about stopping at a breakpoint is not printed. This
        may be desirable for breakpoints that are to print a specific
        message and then continue. If none of the other commands
        print anything, you will see no sign that the breakpoint was
        reached.
        """
        if not arg:
            # Default to the most recently created breakpoint.
            bnum = len(bdb.Breakpoint.bpbynumber) - 1
        else:
            try:
                bnum = int(arg)
            except:
                self.error("Usage: commands [bnum]\n ...\n end")
                return
        self.commands_bnum = bnum
        # Save old definitions for the case of a keyboard interrupt.
        if bnum in self.commands:
            old_command_defs = (self.commands[bnum],
                                self.commands_doprompt[bnum],
                                self.commands_silent[bnum])
        else:
            old_command_defs = None
        self.commands[bnum] = []
        self.commands_doprompt[bnum] = True
        self.commands_silent[bnum] = False
        prompt_back = self.prompt
        self.prompt = '(com) '
        # While this flag is set, onecmd() routes lines to
        # handle_command_def() instead of executing them.
        self.commands_defining = True
        try:
            self.cmdloop()
        except KeyboardInterrupt:
            # Restore old definitions.
            if old_command_defs:
                self.commands[bnum] = old_command_defs[0]
                self.commands_doprompt[bnum] = old_command_defs[1]
                self.commands_silent[bnum] = old_command_defs[2]
            else:
                del self.commands[bnum]
                del self.commands_doprompt[bnum]
                del self.commands_silent[bnum]
            self.error('command definition aborted, old commands restored')
        finally:
            self.commands_defining = False
            self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
"""b(reak) [ ([filename:]lineno | function) [, condition] ]
Without argument, list all breaks.
With a line number argument, set a break at this line in the
current file. With a function name, set a break at the first
executable line of that function. If a second argument is
present, it is a string specifying an expression which must
evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on
sys.path; the .py suffix may be omitted.
"""
if not arg:
if self.breaks: # There's at least one
self.message("Num Type Disp Enb Where")
for bp in bdb.Breakpoint.bpbynumber:
if bp:
self.message(bp.bpformat())
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
self.error('%r not found from sys.path' % filename)
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError:
self.error('Bad lineno: %s' % arg)
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe_locals)
except:
func = arg
try:
if hasattr(func, '__func__'):
func = func.__func__
code = func.__code__
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
self.error('The specified object %r is not a function '
'or was not found along sys.path.' % arg)
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err:
self.error(err, file=self.stdout)
else:
bp = self.get_breaks(filename, line)[-1]
self.message("Breakpoint %d at %s:%d" %
(bp.number, bp.file, bp.line))
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
    def do_tbreak(self, arg):
        """tbreak [ ([filename:]lineno | function) [, condition] ]
        Same arguments as break, but sets a temporary breakpoint: it
        is automatically deleted when first hit.
        """
        # Delegate to do_break with the temporary flag set.
        self.do_break(arg, 1)
    def lineinfo(self, identifier):
        """Resolve a (possibly quoted, dotted) identifier to a function.

        Returns (funcname, filename, lineno) on success, or a triple of
        Nones on failure.
        """
        failed = (None, None, None)
        # Input is identifier, may be in single quotes
        idstring = identifier.split("'")
        if len(idstring) == 1:
            # not in single quotes
            id = idstring[0].strip()
        elif len(idstring) == 3:
            # quoted
            id = idstring[1].strip()
        else:
            return failed
        if id == '': return failed
        parts = id.split('.')
        # Protection for derived debuggers
        if parts[0] == 'self':
            del parts[0]
            if len(parts) == 0:
                return failed
        # Best first guess at file to look at
        fname = self.defaultFile()
        if len(parts) == 1:
            item = parts[0]
        else:
            # More than one part.
            # First is module, second is method/class
            f = self.lookupmodule(parts[0])
            if f:
                fname = f
            item = parts[1]
        answer = find_function(item, fname)
        return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
# this method should be callable before starting debugging, so default
# to "no globals" if there is no current frame
globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
line = linecache.getline(filename, lineno, globs)
if not line:
self.message('End of file')
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
self.error('Blank or comment')
return 0
return lineno
def do_enable(self, arg):
"""enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
breakpoint numbers.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.enable()
self.message('Enabled %s' % bp)
def do_disable(self, arg):
"""disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
breakpoint numbers. Disabling a breakpoint means it cannot
cause the program to stop execution, but unlike clearing a
breakpoint, it remains in the list of breakpoints and can be
(re-)enabled.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.disable()
self.message('Disabled %s' % bp)
def do_condition(self, arg):
"""condition bpnumber [condition]
Set a new condition for the breakpoint, an expression which
must evaluate to true before the breakpoint is honored. If
condition is absent, any existing condition is removed; i.e.,
the breakpoint is made unconditional.
"""
args = arg.split(' ', 1)
try:
cond = args[1]
except IndexError:
cond = None
try:
bp = self.get_bpbynumber(args[0].strip())
except ValueError as err:
self.error(err)
else:
bp.cond = cond
if not cond:
self.message('Breakpoint %d is now unconditional.' % bp.number)
else:
self.message('New condition set for breakpoint %d.' % bp.number)
    def do_ignore(self, arg):
        """ignore bpnumber [count]
        Set the ignore count for the given breakpoint number. If
        count is omitted, the ignore count is set to 0. A breakpoint
        becomes active when the ignore count is zero. When non-zero,
        the count is decremented each time the breakpoint is reached
        and the breakpoint is not disabled and any associated
        condition evaluates to true.
        """
        args = arg.split()
        try:
            # Bare except: covers both a missing count (IndexError) and a
            # non-integer count (ValueError); both mean "reset to 0".
            count = int(args[1].strip())
        except:
            count = 0
        try:
            bp = self.get_bpbynumber(args[0].strip())
        except ValueError as err:
            self.error(err)
        else:
            bp.ignore = count
            if count > 0:
                if count > 1:
                    countstr = '%d crossings' % count
                else:
                    countstr = '1 crossing'
                self.message('Will ignore next %s of breakpoint %d.' %
                             (countstr, bp.number))
            else:
                self.message('Will stop next time breakpoint %d is reached.'
                             % bp.number)
    def do_clear(self, arg):
        """cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]]
        With a space separated list of breakpoint numbers, clear
        those breakpoints. Without argument, clear all breaks (but
        first ask confirmation). With a filename:lineno argument,
        clear all breaks at that line in that file.
        """
        if not arg:
            try:
                reply = input('Clear all breaks? ')
            except EOFError:
                reply = 'no'
            reply = reply.strip().lower()
            if reply in ('y', 'yes'):
                # Snapshot the list first so the deleted breakpoints can
                # still be reported after clear_all_breaks().
                bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp]
                self.clear_all_breaks()
                for bp in bplist:
                    self.message('Deleted %s' % bp)
            return
        if ':' in arg:
            # Make sure it works for "clear C:\foo\bar.py:12"
            i = arg.rfind(':')
            filename = arg[:i]
            arg = arg[i+1:]
            try:
                lineno = int(arg)
            except ValueError:
                err = "Invalid line number (%s)" % arg
            else:
                bplist = self.get_breaks(filename, lineno)
                err = self.clear_break(filename, lineno)
            if err:
                self.error(err)
            else:
                for bp in bplist:
                    self.message('Deleted %s' % bp)
            return
        numberlist = arg.split()
        for i in numberlist:
            try:
                bp = self.get_bpbynumber(i)
            except ValueError as err:
                self.error(err)
            else:
                self.clear_bpbynumber(i)
                self.message('Deleted %s' % bp)
    do_cl = do_clear # 'c' is already an abbreviation for 'continue'
    def do_where(self, arg):
        """w(here)
        Print a stack trace, with the most recent frame at the bottom.
        An arrow indicates the "current frame", which determines the
        context of most commands. 'bt' is an alias for this command.
        """
        # Formatting and KeyboardInterrupt handling live in
        # print_stack_trace().
        self.print_stack_trace()
    do_w = do_where
    do_bt = do_where
    def _select_frame(self, number):
        """Make stack entry *number* current and print its location."""
        assert 0 <= number < len(self.stack)
        self.curindex = number
        self.curframe = self.stack[self.curindex][0]
        # Keep a direct reference to the locals mapping; commands such as
        # default()/_getval() evaluate code against it.
        self.curframe_locals = self.curframe.f_locals
        self.print_stack_entry(self.stack[self.curindex])
        # Reset the 'list' command position for the new frame.
        self.lineno = None
def do_up(self, arg):
"""u(p) [count]
Move the current frame count (default one) levels up in the
stack trace (to an older frame).
"""
if self.curindex == 0:
self.error('Oldest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = 0
else:
newframe = max(0, self.curindex - count)
self._select_frame(newframe)
do_u = do_up
def do_down(self, arg):
"""d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
"""
if self.curindex + 1 == len(self.stack):
self.error('Newest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = len(self.stack) - 1
else:
newframe = min(len(self.stack) - 1, self.curindex + count)
self._select_frame(newframe)
do_d = do_down
    def do_until(self, arg):
        """unt(il) [lineno]
        Without argument, continue execution until the line with a
        number greater than the current one is reached. With a line
        number, continue execution until a line with a number greater
        or equal to that is reached. In both cases, also stop when
        the current frame returns.
        """
        if arg:
            try:
                lineno = int(arg)
            except ValueError:
                self.error('Error in argument: %r' % arg)
                return
            if lineno <= self.curframe.f_lineno:
                self.error('"until" line number is smaller than current '
                           'line number')
                return
        else:
            lineno = None
        self.set_until(self.curframe, lineno)
        # Returning true exits the command loop and resumes execution.
        return 1
    do_unt = do_until
    def do_step(self, arg):
        """s(tep)
        Execute the current line, stop at the first possible occasion
        (either in a function that is called or in the current
        function).
        """
        self.set_step()
        # Returning true exits the command loop and resumes execution.
        return 1
    do_s = do_step
    def do_next(self, arg):
        """n(ext)
        Continue execution until the next line in the current function
        is reached or it returns.
        """
        self.set_next(self.curframe)
        # Returning true exits the command loop and resumes execution.
        return 1
    do_n = do_next
    def do_run(self, arg):
        """run [args...]
        Restart the debugged python program. If a string is supplied
        it is split with "shlex", and the result is used as the new
        sys.argv. History, breakpoints, actions and debugger options
        are preserved. "restart" is an alias for "run".
        """
        if arg:
            import shlex
            argv0 = sys.argv[0:1]
            sys.argv = shlex.split(arg)
            sys.argv[:0] = argv0
        # this is caught in the main debugger loop
        raise Restart
    do_restart = do_run
    def do_return(self, arg):
        """r(eturn)
        Continue execution until the current function returns.
        """
        self.set_return(self.curframe)
        # Returning true exits the command loop and resumes execution.
        return 1
    do_r = do_return
    def do_continue(self, arg):
        """c(ont(inue))
        Continue execution, only stop when a breakpoint is encountered.
        """
        if not self.nosigint:
            # Install a SIGINT handler so Ctrl-C drops back into the
            # debugger; remember the old handler for later restoration.
            self._previous_sigint_handler = \
                signal.signal(signal.SIGINT, self.sigint_handler)
        self.set_continue()
        return 1
    do_c = do_cont = do_continue
    def do_jump(self, arg):
        """j(ump) lineno
        Set the next line that will be executed. Only available in
        the bottom-most frame. This lets you jump back and execute
        code again, or jump forward to skip code that you don't want
        to run.
        It should be noted that not all jumps are allowed -- for
        instance it is not possible to jump into the middle of a
        for loop or out of a finally clause.
        """
        if self.curindex + 1 != len(self.stack):
            self.error('You can only jump within the bottom frame')
            return
        try:
            arg = int(arg)
        except ValueError:
            self.error("The 'jump' command requires a line number")
        else:
            try:
                # Do the jump, fix up our copy of the stack, and display the
                # new position
                self.curframe.f_lineno = arg
                self.stack[self.curindex] = self.stack[self.curindex][0], arg
                self.print_stack_entry(self.stack[self.curindex])
            except ValueError as e:
                # Assigning f_lineno raises ValueError for illegal jumps.
                self.error('Jump failed: %s' % e)
    do_j = do_jump
    def do_debug(self, arg):
        """debug code
        Enter a recursive debugger that steps through the code
        argument (which is an arbitrary expression or statement to be
        executed in the current environment).
        """
        # Suspend our own tracing while the nested debugger runs, then
        # reinstall it afterwards.
        sys.settrace(None)
        globals = self.curframe.f_globals
        locals = self.curframe_locals
        p = Pdb(self.completekey, self.stdin, self.stdout)
        p.prompt = "(%s) " % self.prompt.strip()
        self.message("ENTERING RECURSIVE DEBUGGER")
        sys.call_tracing(p.run, (arg, globals, locals))
        self.message("LEAVING RECURSIVE DEBUGGER")
        sys.settrace(self.trace_dispatch)
        self.lastcmd = p.lastcmd
    def do_quit(self, arg):
        """q(uit)\nexit
        Quit from the debugger. The program being executed is aborted.
        """
        self._user_requested_quit = True
        self.set_quit()
        return 1
    do_q = do_quit
    do_exit = do_quit
    def do_EOF(self, arg):
        """EOF
        Handles the receipt of EOF as a command.
        """
        # Emit a newline so the shell prompt starts on a fresh line,
        # then quit exactly like do_quit().
        self.message('')
        self._user_requested_quit = True
        self.set_quit()
        return 1
    def do_args(self, arg):
        """a(rgs)
        Print the argument list of the current function.
        """
        co = self.curframe.f_code
        dict = self.curframe_locals
        n = co.co_argcount
        # Count the *args / **kwargs slots too (CO_VARARGS = 0x04,
        # CO_VARKEYWORDS = 0x08); they follow the positional arguments
        # in co_varnames.
        if co.co_flags & 4: n = n+1
        if co.co_flags & 8: n = n+1
        for i in range(n):
            name = co.co_varnames[i]
            if name in dict:
                self.message('%s = %r' % (name, dict[name]))
            else:
                self.message('%s = *** undefined ***' % (name,))
    do_a = do_args
def do_retval(self, arg):
"""retval
Print the return value for the last return of a function.
"""
if '__return__' in self.curframe_locals:
self.message(repr(self.curframe_locals['__return__']))
else:
self.error('Not yet returned!')
do_rv = do_retval
    def _getval(self, arg):
        """Evaluate *arg* in the current frame; report and re-raise errors."""
        try:
            return eval(arg, self.curframe.f_globals, self.curframe_locals)
        except:
            # Print only the exception type/message, then let the caller
            # decide what to do with the raised exception.
            exc_info = sys.exc_info()[:2]
            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
            raise
    def _getval_except(self, arg, frame=None):
        """Like _getval(), but never raises: errors come back as a _rstr."""
        try:
            if frame is None:
                return eval(arg, self.curframe.f_globals, self.curframe_locals)
            else:
                return eval(arg, frame.f_globals, frame.f_locals)
        except:
            exc_info = sys.exc_info()[:2]
            err = traceback.format_exception_only(*exc_info)[-1].strip()
            return _rstr('** raised %s **' % err)
    def do_p(self, arg):
        """p(rint) expression
        Print the value of the expression.
        """
        try:
            self.message(repr(self._getval(arg)))
        except:
            # _getval() already reported the error.
            pass
    # make "print" an alias of "p" since print isn't a Python statement anymore
    do_print = do_p
    def do_pp(self, arg):
        """pp expression
        Pretty-print the value of the expression.
        """
        try:
            self.message(pprint.pformat(self._getval(arg)))
        except:
            # _getval() already reported the error.
            pass
    def do_list(self, arg):
        """l(ist) [first [,last] | .]
        List source code for the current file. Without arguments,
        list 11 lines around the current line or continue the previous
        listing. With . as argument, list 11 lines around the current
        line. With one argument, list 11 lines starting at that line.
        With two arguments, list the given range; if the second
        argument is less than the first, it is a count.
        The current line in the current frame is indicated by "->".
        If an exception is being debugged, the line where the
        exception was originally raised or propagated is indicated by
        ">>", if it differs from the current line.
        """
        # Remember the command so that a bare Enter continues the listing.
        self.lastcmd = 'list'
        last = None
        if arg and arg != '.':
            try:
                if ',' in arg:
                    first, last = arg.split(',')
                    first = int(first.strip())
                    last = int(last.strip())
                    if last < first:
                        # assume it's a count
                        last = first + last
                else:
                    first = int(arg.strip())
                    first = max(1, first - 5)
            except ValueError:
                self.error('Error in argument: %r' % arg)
                return
        elif self.lineno is None or arg == '.':
            first = max(1, self.curframe.f_lineno - 5)
        else:
            # Continue where the previous listing stopped.
            first = self.lineno + 1
        if last is None:
            last = first + 10
        filename = self.curframe.f_code.co_filename
        breaklist = self.get_file_breaks(filename)
        try:
            lines = linecache.getlines(filename, self.curframe.f_globals)
            self._print_lines(lines[first-1:last], first, breaklist,
                              self.curframe)
            self.lineno = min(last, len(lines))
            if len(lines) < last:
                self.message('[EOF]')
        except KeyboardInterrupt:
            pass
    do_l = do_list
def do_longlist(self, arg):
"""longlist | ll
List the whole source code for the current function or frame.
"""
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines, lineno = getsourcelines(self.curframe)
except IOError as err:
self.error(err)
return
self._print_lines(lines, lineno, breaklist, self.curframe)
do_ll = do_longlist
def do_source(self, arg):
"""source expression
Try to get source code for the given object and display it.
"""
try:
obj = self._getval(arg)
except:
return
try:
lines, lineno = getsourcelines(obj)
except (IOError, TypeError) as err:
self.error(err)
return
self._print_lines(lines, lineno)
    def _print_lines(self, lines, start, breaks=(), frame=None):
        """Print a range of lines."""
        if frame:
            current_lineno = frame.f_lineno
            exc_lineno = self.tb_lineno.get(frame, -1)
        else:
            current_lineno = exc_lineno = -1
        for lineno, line in enumerate(lines, start):
            s = str(lineno).rjust(3)
            if len(s) < 4:
                s += ' '
            # 'B' marks a line with a breakpoint set on it.
            if lineno in breaks:
                s += 'B'
            else:
                s += ' '
            # '->' marks the current line, '>>' the exception line.
            if lineno == current_lineno:
                s += '->'
            elif lineno == exc_lineno:
                s += '>>'
            self.message(s + '\t' + line.rstrip())
    def do_whatis(self, arg):
        """whatis arg
        Print the type of the argument.
        """
        try:
            value = self._getval(arg)
        except:
            # _getval() already printed the error
            return
        code = None
        # Is it a function?
        try:
            code = value.__code__
        except Exception:
            pass
        if code:
            self.message('Function %s' % code.co_name)
            return
        # Is it an instance method?
        try:
            code = value.__func__.__code__
        except Exception:
            pass
        if code:
            self.message('Method %s' % code.co_name)
            return
        # Is it a class?
        if value.__class__ is type:
            self.message('Class %s.%s' % (value.__module__, value.__name__))
            return
        # None of the above...
        self.message(type(value))
    def do_display(self, arg):
        """display [expression]
        Display the value of the expression if it changed, each time execution
        stops in the current frame.
        Without expression, list all display expressions for the current frame.
        """
        if not arg:
            self.message('Currently displaying:')
            for item in self.displaying.get(self.curframe, {}).items():
                self.message('%s: %r' % item)
        else:
            # Record the current value so future stops can detect changes.
            val = self._getval_except(arg)
            self.displaying.setdefault(self.curframe, {})[arg] = val
            self.message('display %s: %r' % (arg, val))
def do_undisplay(self, arg):
"""undisplay [expression]
Do not display the expression any more in the current frame.
Without expression, clear all display expressions for the current frame.
"""
if arg:
try:
del self.displaying.get(self.curframe, {})[arg]
except KeyError:
self.error('not displaying %s' % arg)
else:
self.displaying.pop(self.curframe, None)
    def do_interact(self, arg):
        """interact
        Start an interactive interpreter whose global namespace
        contains all the (global and local) names found in the current scope.
        """
        # Merge globals and locals (locals win) into a fresh namespace so
        # the interactive session cannot mutate the frame's real globals.
        ns = self.curframe.f_globals.copy()
        ns.update(self.curframe_locals)
        code.interact("*interactive*", local=ns)
    def do_alias(self, arg):
        """alias [name [command [parameter parameter ...] ]]
        Create an alias called 'name' that executes 'command'. The
        command must *not* be enclosed in quotes. Replaceable
        parameters can be indicated by %1, %2, and so on, while %* is
        replaced by all the parameters. If no command is given, the
        current alias for name is shown. If no name is given, all
        aliases are listed.
        Aliases may be nested and can contain anything that can be
        legally typed at the pdb prompt. Note! You *can* override
        internal pdb commands with aliases! Those internal commands
        are then hidden until the alias is removed. Aliasing is
        recursively applied to the first word of the command line; all
        other words in the line are left alone.
        As an example, here are two useful aliases (especially when
        placed in the .pdbrc file):
        # Print instance variables (usage "pi classInst")
        alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
        # Print instance variables in self
        alias ps pi self
        """
        args = arg.split()
        if len(args) == 0:
            # No arguments: list every alias, sorted by name.
            keys = sorted(self.aliases.keys())
            for alias in keys:
                self.message("%s = %s" % (alias, self.aliases[alias]))
            return
        if args[0] in self.aliases and len(args) == 1:
            # One existing name: show its expansion.
            self.message("%s = %s" % (args[0], self.aliases[args[0]]))
        else:
            # Define (or redefine) the alias.
            self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
"""unalias name
Delete the specified alias.
"""
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
# List of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
    # Print a traceback starting at the top stack frame.
    # The most recently entered frame is printed last;
    # this is different from dbx and gdb, but consistent with
    # the Python interpreter's stack trace.
    # It is also consistent with the up/down commands (which are
    # compatible with dbx and gdb: up moves towards 'main()'
    # and down moves towards the most recent stack frame).
    def print_stack_trace(self):
        """Print every entry of the current stack, oldest first."""
        try:
            for frame_lineno in self.stack:
                self.print_stack_entry(frame_lineno)
        except KeyboardInterrupt:
            # Allow Ctrl-C to abort a long listing without leaving pdb.
            pass
    def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
        """Print one (frame, lineno) stack entry, flagging the current frame."""
        frame, lineno = frame_lineno
        # '>' marks the frame that commands currently operate on.
        if frame is self.curframe:
            prefix = '> '
        else:
            prefix = ' '
        self.message(prefix +
                     self.format_stack_entry(frame_lineno, prompt_prefix))
    # Provide help
    def do_help(self, arg):
        """h(elp)
        Without argument, print the list of available commands.
        With a command name as argument, print help about that command.
        "help pdb" shows the full pdb documentation.
        "help exec" gives help on the ! command.
        """
        if not arg:
            return cmd.Cmd.do_help(self, arg)
        try:
            try:
                # Prefer a dedicated help_<topic>() method if one exists.
                topic = getattr(self, 'help_' + arg)
                return topic()
            except AttributeError:
                command = getattr(self, 'do_' + arg)
        except AttributeError:
            self.error('No help for %r' % arg)
        else:
            if sys.flags.optimize >= 2:
                # -OO strips docstrings, so there is nothing to show.
                self.error('No help for %r; please do not run Python with -OO '
                           'if you need command help' % arg)
                return
            self.message(command.__doc__.rstrip())
    do_h = do_help
    def help_exec(self):
        """(!) statement
        Execute the (one-line) statement in the context of the current
        stack frame. The exclamation point can be omitted unless the
        first word of the statement resembles a debugger command. To
        assign to a global variable you must always prefix the command
        with a 'global' command, e.g.:
        (Pdb) global list_options; list_options = ['-l']
        (Pdb)
        """
        # The docstring doubles as the help text; guard against -OO
        # stripping it to None.
        self.message((self.help_exec.__doc__ or '').strip())
    def help_pdb(self):
        # "help pdb" pages the full module documentation (module-level help()).
        help()
    # other helper functions
    def lookupmodule(self, filename):
        """Helper function for break/clear parsing -- may be overridden.
        lookupmodule() translates (possibly incomplete) file or module name
        into an absolute file name.
        """
        if os.path.isabs(filename) and os.path.exists(filename):
            return filename
        # Prefer the script's own directory when it matches the main file.
        f = os.path.join(sys.path[0], filename)
        if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
            return f
        root, ext = os.path.splitext(filename)
        if ext == '':
            # Allow the .py suffix to be omitted.
            filename = filename + '.py'
        if os.path.isabs(filename):
            return filename
        for dirname in sys.path:
            # Resolve symlinked sys.path entries to their real location.
            while os.path.islink(dirname):
                dirname = os.readlink(dirname)
            fullname = os.path.join(dirname, filename)
            if os.path.exists(fullname):
                return fullname
        return None
    def _runscript(self, filename):
        """Run *filename* as __main__ under debugger control."""
        # The script has to run in __main__ namespace (or imports from
        # __main__ will break).
        #
        # So we clear up the __main__ and set several special variables
        # (this gets rid of pdb's globals and cleans old variables on restarts).
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({"__name__" : "__main__",
                                  "__file__" : filename,
                                  "__builtins__": __builtins__,
                                 })
        # When bdb sets tracing, a number of call and line events happens
        # BEFORE debugger even reaches user's code (and the exact sequence of
        # events depends on python version). So we take special measures to
        # avoid stopping before we reach the main script (see user_line and
        # user_call for details).
        self._wait_for_mainpyfile = True
        self.mainpyfile = self.canonic(filename)
        self._user_requested_quit = False
        # Read the script as bytes so its own encoding declaration applies
        # when compile() processes it.
        with open(filename, "rb") as fp:
            statement = "exec(compile(%r, %r, 'exec'))" % \
                        (fp.read(), self.mainpyfile)
        self.run(statement)
# Collect all command help into docstring, if not run with -OO
# (under -OO, __doc__ is None and docstrings are unavailable anyway).
if __doc__ is not None:
    # unfortunately we can't guess this order from the class definition
    _help_order = [
        'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable',
        'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until',
        'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist',
        'args', 'print', 'pp', 'whatis', 'source', 'display', 'undisplay',
        'interact', 'alias', 'unalias', 'debug', 'quit',
    ]
    for _command in _help_order:
        __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n'
    __doc__ += Pdb.help_exec.__doc__
    # Clean up the loop temporaries from the module namespace.
    del _help_order, _command
# Simplified interface
def run(statement, globals=None, locals=None):
    """Execute *statement* (a string) under debugger control."""
    Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
    """Evaluate *expression* under debugger control and return its value."""
    return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
    # B/W compatibility
    run(statement, globals, locals)
def runcall(*args, **kwds):
    """Call args[0](*args[1:], **kwds) under debugger control."""
    return Pdb().runcall(*args, **kwds)
def set_trace():
    """Enter the debugger at the caller's frame."""
    Pdb().set_trace(sys._getframe().f_back)
# Post-Mortem interface
def post_mortem(t=None):
    """Enter post-mortem debugging of the given traceback object.

    Without an argument, use the traceback of the exception currently
    being handled; raise ValueError if there is none.
    """
    # handling the default
    if t is None:
        # sys.exc_info() returns (type, value, traceback) if an exception is
        # being handled, otherwise it returns None
        t = sys.exc_info()[2]
    if t is None:
        raise ValueError("A valid traceback must be passed if no "
                         "exception is being handled")
    p = Pdb()
    p.reset()
    p.interaction(None, t)
def pm():
    """Post-mortem debug the traceback stored in sys.last_traceback."""
    post_mortem(sys.last_traceback)
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
    """Debug the canned TESTCMD statement (developer smoke test)."""
    run(TESTCMD)
# print help
def help():
    """Page the full module documentation through pydoc's pager."""
    import pydoc
    pydoc.pager(__doc__)
# Usage text printed by main() for -h/--help or when no script is given.
_usage = """\
usage: pdb.py [-c command] ... pyfile [arg] ...
Debug the Python program given by pyfile.
Initial commands are read from .pdbrc files in your home directory
and in the current directory, if they exist. Commands supplied with
-c are executed after commands from .pdbrc files.
To let the script run until an exception occurs, use "-c continue".
To let the script run up to a given line X in the debugged file, use
"-c 'until X'"."""
def main():
    """Command-line entry point: debug the script named on the command line."""
    import getopt
    # Bug fix: getopt long option names must be given WITHOUT the leading
    # '--' (passing '--help'/'--command=' made the --help and --command
    # options unmatchable and raised GetoptError instead).
    opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['help', 'command='])
    if not args:
        print(_usage)
        sys.exit(2)
    commands = []
    for opt, optarg in opts:
        if opt in ['-h', '--help']:
            print(_usage)
            sys.exit()
        elif opt in ['-c', '--command']:
            commands.append(optarg)
    mainpyfile = args[0] # Get script filename
    if not os.path.exists(mainpyfile):
        print('Error:', mainpyfile, 'does not exist')
        sys.exit(1)
    sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list
    # Replace pdb's dir with script's dir in front of module search path.
    sys.path[0] = os.path.dirname(mainpyfile)
    # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
    # modified by the script being debugged. It's a bad idea when it was
    # changed by the user from the command line. There is a "restart" command
    # which allows explicit specification of command line arguments.
    pdb = Pdb()
    # -c commands behave as if they came from a .pdbrc file.
    pdb.rcLines.extend(commands)
    while True:
        try:
            pdb._runscript(mainpyfile)
            if pdb._user_requested_quit:
                break
            print("The program finished and will be restarted")
        except Restart:
            print("Restarting", mainpyfile, "with arguments:")
            print("\t" + " ".join(args))
        except SystemExit:
            # In most cases SystemExit does not warrant a post-mortem session.
            print("The program exited via sys.exit(). Exit status:", end=' ')
            print(sys.exc_info()[1])
        except:
            traceback.print_exc()
            print("Uncaught exception. Entering post mortem debugging")
            print("Running 'cont' or 'step' will restart the program")
            t = sys.exc_info()[2]
            pdb.interaction(None, t)
            print("Post mortem debugger finished. The " + mainpyfile +
                  " will be restarted")
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
    # Import the module under its canonical name so Restart and
    # post-mortem debugging operate on the same module object.
    import pdb
    pdb.main()
| apache-2.0 |
theheros/kbengine | kbe/src/lib/python/Tools/freeze/makeconfig.py | 49 | 1664 | import re
import sys
# Write the config.c file
never = ['marshal', 'imp', '_ast', '__main__', 'builtins',
         'sys', 'gc', '_warnings']
def makeconfig(infp, outfp, modules, with_ifdef=0):
    """Copy *infp* to *outfp*, expanding the two ADDMODULE markers.

    Marker 1 receives extern declarations and marker 2 the inittab
    entries for *modules*; built-in names in ``never`` are skipped.
    """
    marker1 = re.compile('-- ADDMODULE MARKER 1 --')
    marker2 = re.compile('-- ADDMODULE MARKER 2 --')
    wanted = [mod for mod in modules if mod not in never]
    for line in infp:
        outfp.write(line)
        # Each marker is expanded at most once; the compiled pattern is
        # dropped (set to None) after its first match.
        if marker1 and marker1.search(line):
            marker1 = None
            for mod in wanted:
                if with_ifdef:
                    outfp.write("#ifndef PyInit_%s\n" % mod)
                outfp.write('extern PyObject* PyInit_%s(void);\n' % mod)
                if with_ifdef:
                    outfp.write("#endif\n")
        elif marker2 and marker2.search(line):
            marker2 = None
            for mod in wanted:
                outfp.write('\t{"%s", PyInit_%s},\n' % (mod, mod))
    if marker1:
        sys.stderr.write('MARKER 1 never found\n')
    elif marker2:
        sys.stderr.write('MARKER 2 never found\n')
# Test program.
def test():
    """Command-line driver: makeconfig.py infile outfile modulename ...

    '-' selects stdin/stdout for the input/output file respectively.
    """
    if not sys.argv[3:]:
        print('usage: python makeconfig.py config.c.in outputfile', end=' ')
        print('modulename ...')
        sys.exit(2)
    if sys.argv[1] == '-':
        infp = sys.stdin
    else:
        infp = open(sys.argv[1])
    if sys.argv[2] == '-':
        outfp = sys.stdout
    else:
        outfp = open(sys.argv[2], 'w')
    makeconfig(infp, outfp, sys.argv[3:])
    # Only close the streams we opened ourselves.
    if outfp != sys.stdout:
        outfp.close()
    if infp != sys.stdin:
        infp.close()
if __name__ == '__main__':
    test()
| lgpl-3.0 |
codecollision/DropboxToFlickr | django/contrib/syndication/feeds.py | 245 | 1367 | from django.contrib.syndication import views
from django.core.exceptions import ObjectDoesNotExist
import warnings
# This is part of the deprecated API
from django.contrib.syndication.views import FeedDoesNotExist, add_domain
class Feed(views.Feed):
    """Provided for backwards compatibility.

    Deprecated shim over the class-based ``views.Feed``: it eagerly binds the
    URL slug and request at construction time and derives default template
    names from the slug.
    """

    def __init__(self, slug, request):
        warnings.warn(
            'The syndication feeds.Feed class is deprecated. Please '
            'use the new class based view API.',
            category=DeprecationWarning)
        self.slug = slug
        self.request = request
        # Fall back to the request path when no feed_url was declared.
        self.feed_url = getattr(self, 'feed_url', None) or request.path
        self.title_template = (
            self.title_template or 'feeds/%s_title.html' % slug)
        self.description_template = (
            self.description_template or 'feeds/%s_description.html' % slug)

    def get_object(self, bits):
        # Old-API hook: subclasses map the URL path components to an object.
        return None

    def get_feed(self, url=None):
        """
        Returns a feedgenerator.DefaultFeed object, fully populated, for
        this feed. Raises FeedDoesNotExist for invalid parameters.
        """
        bits = url.split('/') if url else []
        try:
            obj = self.get_object(bits)
        except ObjectDoesNotExist:
            raise FeedDoesNotExist
        return super(Feed, self).get_feed(obj, self.request)
| bsd-3-clause |
chanceraine/nupic | nupic/swarming/ModelRunner.py | 31 | 37223 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import time
import logging
import os
import sys
import shutil
import StringIO
import threading
import traceback
from collections import deque
from nupic.database.ClientJobsDAO import ClientJobsDAO
from nupic.frameworks.opf import opfhelpers
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.opfbasicenvironment import BasicPredictionLogger
from nupic.frameworks.opf.opfutils import matchPatterns
from nupic.frameworks.opf.periodic import (PeriodicActivityMgr,
PeriodicActivityRequest)
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
from nupic.support.configuration import Configuration
from nupic.swarming.hypersearch.errorcodes import ErrorCodes
from nupic.swarming.hypersearch.experimentutils import InferenceElement
from nupic.swarming import regression
from nupic.swarming.hypersearch import utils
class OPFModelRunner(object):
  """Runs a single OPF model on behalf of a hypersearch worker: feeds it
  records, tracks its metrics, and coordinates best-model bookkeeping and
  checkpoints through the jobs/models database."""

  # The minimum number of records that need to have been read for this model
  # to be a candidate for 'best model'
  _MIN_RECORDS_TO_BE_BEST = None

  # The number of points we look at when trying to figure out whether or not a
  # model has matured
  _MATURITY_NUM_POINTS = None

  # The maximum rate of change in the model's metric for it to be considered 'mature'
  # (values come from nupic.hypersearch.* configuration keys in __init__)
  _MATURITY_MAX_CHANGE = None
def __init__(self,
modelID,
jobID,
predictedField,
experimentDir,
reportKeyPatterns,
optimizeKeyPattern,
jobsDAO,
modelCheckpointGUID,
logLevel=None,
predictionCacheMaxRecords=None):
"""
Parameters:
-------------------------------------------------------------------------
modelID: ID for this model in the models table
jobID: ID for this hypersearch job in the jobs table
predictedField: Name of the input field for which this model is being
optimized
experimentDir: Directory path containing the experiment's
description.py script
reportKeyPatterns: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKeyPattern: Which report item, if any, we will be optimizing for.
This can also be a regular expression, but is an error
if it matches more than one key from the experiment's
results.
jobsDAO: Jobs data access object - the interface to the
jobs database which has the model's table.
modelCheckpointGUID:
A persistent, globally-unique identifier for
constructing the model checkpoint key. If None, then
don't bother creating a model checkpoint.
logLevel: override logging level to this value, if not None
predictionCacheMaxRecords:
Maximum number of records for the prediction output cache.
Pass None for default value.
"""
# -----------------------------------------------------------------------
# Initialize class constants
# -----------------------------------------------------------------------
self._MIN_RECORDS_TO_BE_BEST = int(Configuration.get('nupic.hypersearch.bestModelMinRecords'))
self._MATURITY_MAX_CHANGE = float(Configuration.get('nupic.hypersearch.maturityPctChange'))
self._MATURITY_NUM_POINTS = int(Configuration.get('nupic.hypersearch.maturityNumPoints'))
# -----------------------------------------------------------------------
# Initialize instance variables
# -----------------------------------------------------------------------
self._modelID = modelID
self._jobID = jobID
self._predictedField = predictedField
self._experimentDir = experimentDir
self._reportKeyPatterns = reportKeyPatterns
self._optimizeKeyPattern = optimizeKeyPattern
self._jobsDAO = jobsDAO
self._modelCheckpointGUID = modelCheckpointGUID
self._predictionCacheMaxRecords = predictionCacheMaxRecords
self._isMaturityEnabled = bool(int(Configuration.get('nupic.hypersearch.enableModelMaturity')))
self._logger = logging.getLogger(".".join( ['com.numenta',
self.__class__.__module__, self.__class__.__name__]))
self._optimizedMetricLabel = None
self._reportMetricLabels = []
# Our default completion reason
self._cmpReason = ClientJobsDAO.CMPL_REASON_EOF
if logLevel is not None:
self._logger.setLevel(logLevel)
# The manager object to compute the metrics for this model
self.__metricMgr = None
# Will be set to a new instance of OPFTaskDriver by __runTask()
#self.__taskDriver = None
# Current task control parameters. Will be set by __runTask()
self.__task = None
# Will be set to a new instance of PeriodicActivityManager by __runTask()
self._periodic = None
# Will be set to streamDef string by _runTask()
self._streamDef = None
# Will be set to new OpfExperiment instance by run()
self._model = None
# Will be set to new InputSource by __runTask()
self._inputSource = None
# 0-based index of the record being processed;
# Initialized and updated by __runTask()
self._currentRecordIndex = None
# Interface to write predictions to a persistent storage
self._predictionLogger = None
# In-memory cache for predictions. Predictions are written here for speed
# when they don't need to be written to a persistent store
self.__predictionCache = deque()
# Flag to see if this is the best model in the job (as determined by the
# model chooser logic). This is essentially a cache of the value in the
# ClientJobsDB
self._isBestModel = False
# Flag to see if there is a best model (not necessarily this one)
# stored in the DB
self._isBestModelStored = False
# -----------------------------------------------------------------------
# Flags for model cancelation/checkpointing
# -----------------------------------------------------------------------
# Flag to see if the job that this model is part of
self._isCanceled = False
# Flag to see if model was killed, either by the model terminator or by the
# hypsersearch implementation (ex. the a swarm is killed/matured)
self._isKilled = False
# Flag to see if the model is matured. In most cases, this means that we
# should stop running the model. The only execption is if this model is the
# best model for the job, in which case it should continue running.
self._isMature = False
# Event to see if interrupt signal has been sent
self._isInterrupted = threading.Event()
# -----------------------------------------------------------------------
# Facilities for measuring model maturity
# -----------------------------------------------------------------------
# List of tuples, (iteration, metric), used to see if the model has 'matured'
self._metricRegression = regression.AveragePctChange(windowSize=self._MATURITY_NUM_POINTS)
self.__loggedMetricPatterns = []
def run(self):
""" Runs the OPF Model
Parameters:
-------------------------------------------------------------------------
retval: (completionReason, completionMsg)
where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX
equates.
"""
# -----------------------------------------------------------------------
# Load the experiment's description.py module
descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
self._experimentDir)
expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
descriptionPyModule)
expIface.normalizeStreamSources()
modelDescription = expIface.getModelDescription()
self._modelControl = expIface.getModelControl()
# -----------------------------------------------------------------------
# Create the input data stream for this task
streamDef = self._modelControl['dataset']
from nupic.data.stream_reader import StreamReader
readTimeout = 0
self._inputSource = StreamReader(streamDef, isBlocking=False,
maxTimeout=readTimeout)
# -----------------------------------------------------------------------
#Get field statistics from the input source
fieldStats = self._getFieldStats()
# -----------------------------------------------------------------------
# Construct the model instance
self._model = ModelFactory.create(modelDescription)
self._model.setFieldStatistics(fieldStats)
self._model.enableLearning()
self._model.enableInference(self._modelControl.get("inferenceArgs", None))
# -----------------------------------------------------------------------
# Instantiate the metrics
self.__metricMgr = MetricsManager(self._modelControl.get('metrics',None),
self._model.getFieldInfo(),
self._model.getInferenceType())
self.__loggedMetricPatterns = self._modelControl.get("loggedMetrics", [])
self._optimizedMetricLabel = self.__getOptimizedMetricLabel()
self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,
self._getMetricLabels())
# -----------------------------------------------------------------------
# Initialize periodic activities (e.g., for model result updates)
self._periodic = self._initPeriodicActivities()
# -----------------------------------------------------------------------
# Create our top-level loop-control iterator
numIters = self._modelControl.get('iterationCount', -1)
# Are we asked to turn off learning for a certain # of iterations near the
# end?
learningOffAt = None
iterationCountInferOnly = self._modelControl.get('iterationCountInferOnly', 0)
if iterationCountInferOnly == -1:
self._model.disableLearning()
elif iterationCountInferOnly > 0:
assert numIters > iterationCountInferOnly, "when iterationCountInferOnly " \
"is specified, iterationCount must be greater than " \
"iterationCountInferOnly."
learningOffAt = numIters - iterationCountInferOnly
self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)
# -----------------------------------------------------------------------
# Perform final operations for model
self._finalize()
return (self._cmpReason, None)
def __runTaskMainLoop(self, numIters, learningOffAt=None):
""" Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number
"""
## Reset sequence states in the model, so it starts looking for a new
## sequence
self._model.resetSequenceStates()
self._currentRecordIndex = -1
while True:
# If killed by a terminator, stop running
if self._isKilled:
break
# If job stops or hypersearch ends, stop running
if self._isCanceled:
break
# If the process is about to be killed, set as orphaned
if self._isInterrupted.isSet():
self.__setAsOrphaned()
break
# If model is mature, stop running ONLY IF we are not the best model
# for the job. Otherwise, keep running so we can keep returning
# predictions to the user
if self._isMature:
if not self._isBestModel:
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
# Turn off learning?
if learningOffAt is not None \
and self._currentRecordIndex == learningOffAt:
self._model.disableLearning()
# Read input record. Note that any failure here is a critical JOB failure
# and results in the job being immediately canceled and marked as
# failed. The runModelXXX code in hypesearch.utils, if it sees an
# exception of type utils.JobFailException, will cancel the job and
# copy the error message into the job record.
try:
inputRecord = self._inputSource.getNextRecordDict()
if self._currentRecordIndex < 0:
self._inputSource.setTimeout(10)
except Exception, e:
raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
traceback.format_exc())
if inputRecord is None:
# EOF
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
break
if inputRecord:
# Process input record
self._currentRecordIndex += 1
result = self._model.run(inputRecord=inputRecord)
# Compute metrics.
result.metrics = self.__metricMgr.update(result)
# If there are None, use defaults. see MetricsManager.getMetrics()
# TODO remove this when JAVA API server is gone
if not result.metrics:
result.metrics = self.__metricMgr.getMetrics()
# Write the result to the output cache. Don't write encodings, if they
# were computed
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
result.sensorInput.dataEncodings = None
self._writePrediction(result)
# Run periodic activities
self._periodic.tick()
if numIters >= 0 and self._currentRecordIndex >= numIters-1:
break
else:
# Input source returned an empty record.
#
# NOTE: This is okay with Stream-based Source (when it times out
# waiting for next record), but not okay with FileSource, which should
# always return either with a valid record or None for EOF.
raise ValueError("Got an empty record from FileSource: %r" %
inputRecord)
def _finalize(self):
"""Run final activities after a model has run. These include recording and
logging the final score"""
self._logger.info(
"Finished: modelID=%r; %r records processed. Performing final activities",
self._modelID, self._currentRecordIndex + 1)
# =========================================================================
# Dump the experiment metrics at the end of the task
# =========================================================================
self._updateModelDBResults()
# =========================================================================
# Check if the current model is the best. Create a milestone if necessary
# If the model has been killed, it is not a candidate for "best model",
# and its output cache should be destroyed
# =========================================================================
if not self._isKilled:
self.__updateJobResults()
else:
self.__deleteOutputCache(self._modelID)
# =========================================================================
# Close output stream, if necessary
# =========================================================================
if self._predictionLogger:
self._predictionLogger.close()
def __createModelCheckpoint(self):
    """ Create a checkpoint from the current model, and store it in a dir named
    after checkpoint GUID, and finally store the GUID in the Models DB.

    No-op when there is no model yet, or when checkpointing was disabled by
    passing modelCheckpointGUID=None to the constructor.
    """
    if self._model is None or self._modelCheckpointGUID is None:
      return

    # Create an output store, if one doesn't exist already
    if self._predictionLogger is None:
      self._createPredictionLogger()

    # Snapshot the most recent predictions alongside the model state
    predictions = StringIO.StringIO()
    self._predictionLogger.checkpoint(
      checkpointSink=predictions,
      maxRows=int(Configuration.get('nupic.model.checkpoint.maxPredictionRows')))

    # Serialize the model under a directory named after the checkpoint GUID
    self._model.save(os.path.join(self._experimentDir,
                                  str(self._modelCheckpointGUID)))

    # BUGFIX: the original passed the undefined name `modelID` here (and
    # logged the undefined `checkpointID` below), raising NameError whenever
    # a checkpoint was actually written.
    self._jobsDAO.modelSetFields(self._modelID,
                                 {'modelCheckpointId':str(self._modelCheckpointGUID)},
                                 ignoreUnchanged=True)

    self._logger.info("Checkpointed Hypersearch Model: modelID: %r, "
                      "checkpointID: %r",
                      self._modelID, self._modelCheckpointGUID)
def __deleteModelCheckpoint(self, modelID):
    """
    Delete the stored checkpoint for the specified modelID. This function is
    called if the current model is now the best model, making the old model's
    checkpoint obsolete.

    Parameters:
    -----------------------------------------------------------------------
    modelID:      The modelID for the checkpoint to delete. This is NOT the
                  unique checkpointID
    """
    checkpointID = \
        self._jobsDAO.modelsGetFields(modelID, ['modelCheckpointId'])[0]

    if checkpointID is None:
      return

    try:
      # BUGFIX: remove the directory belonging to the checkpoint actually
      # being deleted (checkpointID, fetched above for `modelID`). The
      # original removed self._modelCheckpointGUID -- i.e. the *current*
      # model's checkpoint dir -- whenever called for a different model.
      shutil.rmtree(os.path.join(self._experimentDir, str(checkpointID)))
    except Exception:
      # Best-effort: another worker may have deleted it already. Narrowed
      # from a bare `except:` so Ctrl-C / SystemExit still propagate.
      self._logger.warn("Failed to delete model checkpoint %s. "\
                        "Assuming that another worker has already deleted it",
                        checkpointID)
      return

    # Clear the DB reference only after the on-disk data is gone
    self._jobsDAO.modelSetFields(modelID,
                                 {'modelCheckpointId':None},
                                 ignoreUnchanged=True)
    return
def _createPredictionLogger(self):
"""
Creates the model's PredictionLogger object, which is an interface to write
model results to a permanent storage location
"""
# Write results to a file
self._predictionLogger = BasicPredictionLogger(
fields=self._model.getFieldInfo(),
experimentDir=self._experimentDir,
label = "hypersearch-worker",
inferenceType=self._model.getInferenceType())
if self.__loggedMetricPatterns:
metricLabels = self.__metricMgr.getMetricLabels()
loggedMetrics = matchPatterns(self.__loggedMetricPatterns, metricLabels)
self._predictionLogger.setLoggedMetrics(loggedMetrics)
def __getOptimizedMetricLabel(self):
""" Get the label for the metric being optimized. This function also caches
the label in the instance variable self._optimizedMetricLabel
Parameters:
-----------------------------------------------------------------------
metricLabels: A sequence of all the labels being computed for this model
Returns: The label for the metric being optmized over
"""
matchingKeys = matchPatterns([self._optimizeKeyPattern],
self._getMetricLabels())
if len(matchingKeys) == 0:
raise Exception("None of the generated metrics match the specified "
"optimization pattern: %s. Available metrics are %s" % \
(self._optimizeKeyPattern, self._getMetricLabels()))
elif len(matchingKeys) > 1:
raise Exception("The specified optimization pattern '%s' matches more "
"than one metric: %s" % (self._optimizeKeyPattern, matchingKeys))
return matchingKeys[0]
def _getMetricLabels(self):
"""
Returns: A list of labels that correspond to metrics being computed
"""
return self.__metricMgr.getMetricLabels()
def _getFieldStats(self):
    """
    Method which returns a dictionary of field statistics received from the
    input source.

    Returns:
      fieldStats:  dict of dicts where the first level is the field name and
        the second level is the statistic. ie. fieldStats['pounds']['min']
    """
    source = self._inputSource
    return dict((name, {'min': source.getFieldMin(name),
                        'max': source.getFieldMax(name)})
                for name in source.getFieldNames())
def _getMetrics(self):
""" Protected function that can be overriden by subclasses. Its main purpose
is to allow the the OPFDummyModelRunner to override this with deterministic
values
Returns: All the metrics being computed for this model
"""
return self.__metricMgr.getMetrics()
def _updateModelDBResults(self):
""" Retrieves the current results and updates the model's record in
the Model database.
"""
# -----------------------------------------------------------------------
# Get metrics
metrics = self._getMetrics()
# -----------------------------------------------------------------------
# Extract report metrics that match the requested report REs
reportDict = dict([(k,metrics[k]) for k in self._reportMetricLabels])
# -----------------------------------------------------------------------
# Extract the report item that matches the optimize key RE
# TODO cache optimizedMetricLabel sooner
metrics = self._getMetrics()
optimizeDict = dict()
if self._optimizeKeyPattern is not None:
optimizeDict[self._optimizedMetricLabel] = \
metrics[self._optimizedMetricLabel]
# -----------------------------------------------------------------------
# Update model results
results = json.dumps((metrics , optimizeDict))
self._jobsDAO.modelUpdateResults(self._modelID, results=results,
metricValue=optimizeDict.values()[0],
numRecords=(self._currentRecordIndex + 1))
self._logger.debug(
"Model Results: modelID=%s; numRecords=%s; results=%s" % \
(self._modelID, self._currentRecordIndex + 1, results))
return
def __updateJobResultsPeriodic(self):
"""
Periodic check to see if this is the best model. This should only have an
effect if this is the *first* model to report its progress
"""
if self._isBestModelStored and not self._isBestModel:
return
while True:
jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is None:
jobResults = {}
else:
self._isBestModelStored = True
if not self._isBestModel:
return
jobResults = json.loads(jobResultsStr)
bestModel = jobResults.get('bestModel', None)
bestMetric = jobResults.get('bestValue', None)
isSaved = jobResults.get('saved', False)
# If there is a best model, and it is not the same as the current model
# we should wait till we have processed all of our records to see if
# we are the the best
if (bestModel is not None) and (self._modelID != bestModel):
self._isBestModel = False
return
# Make sure prediction output stream is ready before we present our model
# as "bestModel"; sometimes this takes a long time, so update the model's
# timestamp to help avoid getting orphaned
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = False
newResults = json.dumps(jobResults)
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=newResults)
if isUpdated or (not isUpdated and newResults==jobResultsStr):
self._isBestModel = True
break
def __checkIfBestCompletedModel(self):
"""
Reads the current "best model" for the job and returns whether or not the
current model is better than the "best model" stored for the job
Returns: (isBetter, storedBest, origResultsStr)
isBetter:
True if the current model is better than the stored "best model"
storedResults:
A dict of the currently stored results in the jobs table record
origResultsStr:
The json-encoded string that currently resides in the "results" field
of the jobs record (used to create atomicity)
"""
jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is None:
jobResults = {}
else:
jobResults = json.loads(jobResultsStr)
isSaved = jobResults.get('saved', False)
bestMetric = jobResults.get('bestValue', None)
currentMetric = self._getMetrics()[self._optimizedMetricLabel]
self._isBestModel = (not isSaved) \
or (currentMetric < bestMetric)
return self._isBestModel, jobResults, jobResultsStr
def __updateJobResults(self):
""""
Check if this is the best model
If so:
1) Write it's checkpoint
2) Record this model as the best
3) Delete the previous best's output cache
Otherwise:
1) Delete our output cache
"""
isSaved = False
while True:
self._isBestModel, jobResults, jobResultsStr = \
self.__checkIfBestCompletedModel()
# -----------------------------------------------------------------------
# If the current model is the best:
# 1) Save the model's predictions
# 2) Checkpoint the model state
# 3) Update the results for the job
if self._isBestModel:
# Save the current model and its results
if not isSaved:
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__createModelCheckpoint()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
isSaved = True
# Now record the model as the best for the job
prevBest = jobResults.get('bestModel', None)
prevWasSaved = jobResults.get('saved', False)
# If the current model is the best, it shouldn't already be checkpointed
if prevBest == self._modelID:
assert not prevWasSaved
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = True
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=json.dumps(jobResults))
if isUpdated:
if prevWasSaved:
self.__deleteOutputCache(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self._logger.info("Model %d chosen as best model", self._modelID)
break
# -----------------------------------------------------------------------
# If the current model is not the best, delete its outputs
else:
# NOTE: we update model timestamp around these occasionally-lengthy
# operations to help prevent the model from becoming orphaned
self.__deleteOutputCache(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
break
def _writePrediction(self, result):
    """
    Writes the results of one iteration of a model. The results are written to
    this ModelRunner's in-memory cache unless this model is the "best model" for
    the job. If this model is the "best model", the predictions are written out
    to a permanent store via a prediction output stream instance

    Parameters:
    -----------------------------------------------------------------------
    result:      A opfutils.ModelResult object, which contains the input and
                 output for this iteration
    """
    self.__predictionCache.append(result)

    # Only the job's current best model streams its predictions out
    # immediately; other models just accumulate them in memory (and may drop
    # them later via __deleteOutputCache if they never become best).
    if self._isBestModel:
      self.__flushPredictionCache()
def __writeRecordsCallback(self):
""" This callback is called by self.__predictionLogger.writeRecords()
between each batch of records it writes. It gives us a chance to say that
the model is 'still alive' during long write operations.
"""
# This updates the engLastUpdateTime of the model record so that other
# worker's don't think that this model is orphaned.
self._jobsDAO.modelUpdateResults(self._modelID)
def __flushPredictionCache(self):
"""
Writes the contents of this model's in-memory prediction cache to a permanent
store via the prediction output stream instance
"""
if not self.__predictionCache:
return
# Create an output store, if one doesn't exist already
if self._predictionLogger is None:
self._createPredictionLogger()
startTime = time.time()
self._predictionLogger.writeRecords(self.__predictionCache,
progressCB=self.__writeRecordsCallback)
self._logger.info("Flushed prediction cache; numrows=%s; elapsed=%s sec.",
len(self.__predictionCache), time.time() - startTime)
self.__predictionCache.clear()
def __deleteOutputCache(self, modelID):
"""
Delete's the output cache associated with the given modelID. This actually
clears up the resources associated with the cache, rather than deleting al
the records in the cache
Parameters:
-----------------------------------------------------------------------
modelID: The id of the model whose output cache is being deleted
"""
# If this is our output, we should close the connection
if modelID == self._modelID and self._predictionLogger is not None:
self._predictionLogger.close()
del self.__predictionCache
self._predictionLogger = None
self.__predictionCache = None
def _initPeriodicActivities(self):
""" Creates and returns a PeriodicActivityMgr instance initialized with
our periodic activities
Parameters:
-------------------------------------------------------------------------
retval: a PeriodicActivityMgr instance
"""
# Activity to update the metrics for this model
# in the models table
updateModelDBResults = PeriodicActivityRequest(repeating=True,
period=100,
cb=self._updateModelDBResults)
updateJobResults = PeriodicActivityRequest(repeating=True,
period=100,
cb=self.__updateJobResultsPeriodic)
checkCancelation = PeriodicActivityRequest(repeating=True,
period=50,
cb=self.__checkCancelation)
checkMaturity = PeriodicActivityRequest(repeating=True,
period=10,
cb=self.__checkMaturity)
# Do an initial update of the job record after 2 iterations to make
# sure that it is populated with something without having to wait too long
updateJobResultsFirst = PeriodicActivityRequest(repeating=False,
period=2,
cb=self.__updateJobResultsPeriodic)
periodicActivities = [updateModelDBResults,
updateJobResultsFirst,
updateJobResults,
checkCancelation]
if self._isMaturityEnabled:
periodicActivities.append(checkMaturity)
return PeriodicActivityMgr(requestedActivities=periodicActivities)
def __checkCancelation(self):
""" Check if the cancelation flag has been set for this model
in the Model DB"""
# Update a hadoop job counter at least once every 600 seconds so it doesn't
# think our map task is dead
print >>sys.stderr, "reporter:counter:HypersearchWorker,numRecords,50"
# See if the job got cancelled
jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
self._isCanceled = True
self._logger.info("Model %s canceled because Job %s was stopped.",
self._modelID, self._jobID)
else:
stopReason = self._jobsDAO.modelsGetFields(self._modelID, ['engStop'])[0]
if stopReason is None:
pass
elif stopReason == ClientJobsDAO.STOP_REASON_KILLED:
self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
self._isKilled = True
self._logger.info("Model %s canceled because it was killed by hypersearch",
self._modelID)
elif stopReason == ClientJobsDAO.STOP_REASON_STOPPED:
self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
self._isCanceled = True
self._logger.info("Model %s stopped because hypersearch ended", self._modelID)
else:
raise RuntimeError ("Unexpected stop reason encountered: %s" % (stopReason))
def __checkMaturity(self):
""" Save the current metric value and see if the model's performance has
'leveled off.' We do this by looking at some number of previous number of
recordings """
if self._currentRecordIndex+1 < self._MIN_RECORDS_TO_BE_BEST:
return
# If we are already mature, don't need to check anything
if self._isMature:
return
metric = self._getMetrics()[self._optimizedMetricLabel]
self._metricRegression.addPoint(x=self._currentRecordIndex, y=metric)
# Perform a linear regression to see if the error is leveled off
#pctChange = self._metricRegression.getPctChange()
#if pctChange is not None and abs(pctChange ) <= self._MATURITY_MAX_CHANGE:
pctChange, absPctChange = self._metricRegression.getPctChanges()
if pctChange is not None and absPctChange <= self._MATURITY_MAX_CHANGE:
self._jobsDAO.modelSetFields(self._modelID,
{'engMatured':True})
# TODO: Don't stop if we are currently the best model. Also, if we
# are still running after maturity, we have to periodically check to
# see if we are still the best model. As soon we lose to some other
# model, then we should stop at that point.
self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
self._isMature = True
self._logger.info("Model %d has matured (pctChange=%s, n=%d). \n"\
"Scores = %s\n"\
"Stopping execution",self._modelID, pctChange,
self._MATURITY_NUM_POINTS,
self._metricRegression._window)
def handleWarningSignal(self, signum, frame):
  """Signal handler invoked when the scheduler warns that this process is
  about to be killed so the worker can be allocated to another job.

  Only flags the interruption event here; the worker's main loop observes
  the flag and moves the current model to the "Orphaned" state in the
  models table so that another worker can eventually re-run it.

  Parameters:
  -----------------------------------------------------------------------
  signum: signal number delivered by the scheduler
  frame:  current stack frame at delivery time (unused)
  """
  self._isInterrupted.set()
def __setAsOrphaned(self):
  """Mark this model as orphaned in the models table.

  Called when the scheduler is about to kill the process so the worker can
  be reallocated; an orphaned model is eligible to be picked up and re-run
  by a different worker later.
  """
  self._jobsDAO.modelSetCompleted(self._modelID,
                                  ClientJobsDAO.CMPL_REASON_ORPHAN,
                                  "Killed by Scheduler")
| agpl-3.0 |
SecComm/volatilitux | core/mm/arch/arm.py | 8 | 1128 | from ...raw_dump import *
from ..addr import *
# TODO: Handle big pages
# Cf ARM reference manual, page 1282
def va_to_pa(va, pgd):
    """Translate an ARM virtual address to a physical address.

    Walks the two-level ARM short-descriptor page tables found in the raw
    memory dump (cf. ARM reference manual, page 1282).

    Parameters:
      va  -- virtual address to translate
      pgd -- physical address of the first-level page directory

    Returns the physical address corresponding to ``va``.
    Raises Exception on an invalid PDE/PTE or an unsupported supersection.
    """
    r = RawDump.getInstance()

    # Split the virtual address into page-directory index (bits 31:20),
    # page-table index (bits 19:12) and in-page offset (bits 11:0).
    (pd_i, pt_i, page_i) = ((va >> 20), ((va >> 12) & (1 << 8)-1), (va & (1 << 12) - 1))

    # Read the first-level descriptor (PDE).
    pde = r.read_dword(pgd + pd_i*4)

    # Coarse page table descriptor
    if(pde & 0b11 == 0b01):
        # Read the second-level descriptor (PTE).
        pte = r.read_dword(((pde >> 10) << 10) + pt_i*4)
        if(pte & 0b10 != 0b10):
            # BUG FIX: the message previously printed the PDE value here,
            # which made debugging bad PTEs misleading.
            raise Exception("Error translating VA "+hex(va)+", invalid pte: "+hex(pte))
        return ((pte >> 12) << 12) + page_i

    # Section / supersection descriptor
    elif(pde & 0b11 == 0b10):
        supersection = int(pde & 1 << 18)

        # 1 MB section, cf page 1291: physical base is bits 31:20 of the PDE.
        if(not supersection):
            return ((pde >> 20) << 20) + (va & (1 << 20) - 1)

        # Supersection: not supported.
        else:
            raise Exception("Error translating VA "+hex(va)+", pde: "+hex(pde) + " is a supersection, not handled yet... Sorry.")
    else:
        raise Exception("Error translating VA "+hex(va)+", invalid pde: "+hex(pde))
| gpl-2.0 |
rruebner/odoo | addons/crm/validate_email.py | 462 | 5978 | # RFC 2822 - style email validation for Python
# (c) 2012 Syrus Akbary <me@syrusakbary.com>
# Extended from (c) 2011 Noel Bush <noel@aitools.org>
# for support of mx and user check
# This code is made available to you under the GNU LGPL v3.
#
# This module provides a single method, valid_email_address(),
# which returns True or False to indicate whether a given address
# is valid according to the 'addr-spec' part of the specification
# given in RFC 2822. Ideally, we would like to find this
# in some other library, already thoroughly tested and well-
# maintained. The standard Python library email.utils
# contains a parse_addr() function, but it is not sufficient
# to detect many malformed addresses.
#
# This implementation aims to be faithful to the RFC, with the
# exception of a circular definition (see comments below), and
# with the omission of the pattern components marked as "obsolete".
import re
import smtplib
import socket
# pyDNS is optional: it is only needed for the MX-record / mailbox checks.
try:
    import DNS
    ServerError = DNS.ServerError
except (ImportError, AttributeError):
    # Previously a bare `except:`, which silently swallowed *every* error
    # (including KeyboardInterrupt). Narrowed to the two failures that can
    # actually occur here: the package is missing, or it lacks ServerError.
    DNS = None

    class ServerError(Exception):
        # Local stand-in so callers can still `except ServerError`.
        pass
# All we are really doing is comparing the input string to one
# gigantic regular expression. But building that regexp, and
# ensuring its correctness, is made much easier by assembling it
# from the "tokens" defined by the RFC. Each of these tokens is
# tested in the accompanying unit test file.
#
# The section of RFC 2822 from which each pattern component is
# derived is given in an accompanying comment.
#
# (To make things simple, every string below is given as 'raw',
# even when it's not strictly necessary. This way we don't forget
# when it is necessary.)
#
WSP = r'[ \t]'                                       # see 2.2.2. Structured Header Field Bodies
CRLF = r'(?:\r\n)'                                   # see 2.2.3. Long Header Fields
NO_WS_CTL = r'\x01-\x08\x0b\x0c\x0f-\x1f\x7f'        # see 3.2.1. Primitive Tokens
QUOTED_PAIR = r'(?:\\.)'                             # see 3.2.2. Quoted characters
FWS = r'(?:(?:' + WSP + r'*' + CRLF + r')?' + \
      WSP + r'+)'                                    # see 3.2.3. Folding white space and comments
CTEXT = r'[' + NO_WS_CTL + \
        r'\x21-\x27\x2a-\x5b\x5d-\x7e]'              # see 3.2.3
CCONTENT = r'(?:' + CTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.2.3 (NB: The RFC includes COMMENT here
                                                     # as well, but that would be circular.)
COMMENT = r'\((?:' + FWS + r'?' + CCONTENT + \
          r')*' + FWS + r'?\)'                       # see 3.2.3
CFWS = r'(?:' + FWS + r'?' + COMMENT + ')*(?:' + \
       FWS + '?' + COMMENT + '|' + FWS + ')'         # see 3.2.3
ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]'           # see 3.2.4. Atom
ATOM = CFWS + r'?' + ATEXT + r'+' + CFWS + r'?'      # see 3.2.4
DOT_ATOM_TEXT = ATEXT + r'+(?:\.' + ATEXT + r'+)*'   # see 3.2.4
DOT_ATOM = CFWS + r'?' + DOT_ATOM_TEXT + CFWS + r'?' # see 3.2.4
QTEXT = r'[' + NO_WS_CTL + \
        r'\x21\x23-\x5b\x5d-\x7e]'                   # see 3.2.5. Quoted strings
QCONTENT = r'(?:' + QTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.2.5
QUOTED_STRING = CFWS + r'?' + r'"(?:' + FWS + \
                r'?' + QCONTENT + r')*' + FWS + \
                r'?' + r'"' + CFWS + r'?'
LOCAL_PART = r'(?:' + DOT_ATOM + r'|' + \
             QUOTED_STRING + r')'                    # see 3.4.1. Addr-spec specification
DTEXT = r'[' + NO_WS_CTL + r'\x21-\x5a\x5e-\x7e]'    # see 3.4.1
DCONTENT = r'(?:' + DTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.4.1
DOMAIN_LITERAL = CFWS + r'?' + r'\[' + \
                 r'(?:' + FWS + r'?' + DCONTENT + \
                 r')*' + FWS + r'?\]' + CFWS + r'?'  # see 3.4.1
DOMAIN = r'(?:' + DOT_ATOM + r'|' + \
         DOMAIN_LITERAL + r')'                       # see 3.4.1
ADDR_SPEC = LOCAL_PART + r'@' + DOMAIN               # see 3.4.1

# A valid address will match exactly the 3.4.1 addr-spec.
VALID_ADDRESS_REGEXP = '^' + ADDR_SPEC + '$'
def validate_email(email, check_mx=False, verify=False):
    """Indicate whether the given string is a valid email address
    according to the 'addr-spec' portion of RFC 2822 (see section
    3.4.1). Parts of the spec that are marked obsolete are *not*
    included in this test, and certain arcane constructions that
    depend on circular definitions in the spec may not pass, but in
    general this should correctly identify any email address likely
    to be in use as of 2011.

    Parameters:
      email    -- the address to validate
      check_mx -- also check that the domain has MX records (needs pyDNS)
      verify   -- additionally ask an MX host whether the mailbox exists
                  (implies check_mx)

    Returns True when valid, False when invalid or when a DNS ServerError
    occurs during the MX lookup.
    """
    # Syntax check first. This was previously done with `assert`, which is
    # silently stripped under `python -O`, disabling validation entirely;
    # an explicit test keeps the check alive in optimized runs.
    if re.match(VALID_ADDRESS_REGEXP, email) is None:
        return False
    try:
        check_mx |= verify
        if check_mx:
            if not DNS:
                raise Exception('For check the mx records or check if the email exists you must have installed pyDNS python package')
            DNS.DiscoverNameServers()
            hostname = email[email.find('@') + 1:]
            mx_hosts = DNS.mxlookup(hostname)
            for mx in mx_hosts:
                try:
                    smtp = smtplib.SMTP()
                    smtp.connect(mx[1])
                    # MX connected: without mailbox verification that is
                    # enough to call the address deliverable.
                    if not verify:
                        return True
                    status, _ = smtp.helo()
                    if status != 250:
                        continue
                    smtp.mail('')
                    status, _ = smtp.rcpt(email)
                    if status != 250:
                        return False
                    break
                except smtplib.SMTPServerDisconnected:
                    # Server does not permit verifying users.
                    break
                except smtplib.SMTPConnectError:
                    continue
    except (AssertionError, ServerError):
        # DNS lookup failures map to "not valid".
        return False
    return True
# import sys
# sys.modules[__name__],sys.modules['validate_email_module'] = validate_email,sys.modules[__name__]
# from validate_email_module import *
| agpl-3.0 |
shizhai/wprobe | staging_dir/host/lib/scons-2.1.0/SCons/Scanner/RC.py | 21 | 2075 | """SCons.Scanner.RC
This module implements the depenency scanner for RC (Interface
Definition Language) files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/RC.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Node.FS
import SCons.Scanner
import re
def RCScan():
    """Return a prototype Scanner instance for scanning RC source files."""
    # All fragments are raw strings now: sequences like '\s' inside a normal
    # string literal are invalid escape sequences (DeprecationWarning since
    # Python 3.6, scheduled to become an error), even though they happen to
    # produce the same text today. The resulting pattern is unchanged.
    res_re = r'^(?:\s*#\s*(?:include)|' \
             r'.*?\s+(?:ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)' \
             r'\s*.*?)' \
             r'\s*(<|"| )([^>"\s]+)(?:[>"\s])*$'
    resScanner = SCons.Scanner.ClassicCPP("ResourceScanner",
                                          "$RCSUFFIXES",
                                          "CPPPATH",
                                          res_re)
    return resScanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
davidzchen/tensorflow | tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py | 3 | 21315 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import random
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import integer_lookup
from tensorflow.python.keras.layers.preprocessing import integer_lookup_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.saving import save
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def get_layer_class():
  """Return the IntegerLookup class matching the current execution mode."""
  eager = context.executing_eagerly()
  return integer_lookup.IntegerLookup if eager else integer_lookup_v1.IntegerLookup
def _get_end_to_end_test_cases():
  """Build parameterized cases for the end-to-end adapt tests.

  Each base case is crossed with use_dataset in (True, False); the dataset
  variants get a "_with_dataset" suffix on their testcase name.
  """
  base_cases = (
      {
          "testcase_name":
              "test_ints_soft_vocab_cap",
          # Create an array where 1138 is the most frequent term, followed by
          # 1729, then 725, then 42. This ensures that the vocab accumulator
          # is sorting by frequency.
          "vocab_data":
              np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],
                        [1729], [725], [725]],
                       dtype=np.int64),
          "input_data":
              np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],
                       dtype=np.int64),
          "kwargs": {
              "max_values": None,
              "dtype": dtypes.int64,
          },
          "expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
          "input_dtype":
              dtypes.int64
      },)

  crossed = []
  for with_dataset in (True, False):
    for base in base_cases:
      case = base.copy()
      if with_dataset:
        case["testcase_name"] = case["testcase_name"] + "_with_dataset"
      case["use_dataset"] = with_dataset
      crossed.append(case)
  return crossed
@keras_parameterized.run_all_keras_modes
class IntegerLookupLayerTest(keras_parameterized.TestCase,
                             preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end adapt() + lookup tests for IntegerLookup across keras modes."""

  @parameterized.named_parameters(*_get_end_to_end_test_cases())
  def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
                                       use_dataset, expected_output,
                                       input_dtype):
    """Adapt the layer on vocab_data, then check lookups on input_data."""
    cls = get_layer_class()
    expected_output_dtype = dtypes.int64
    input_shape = input_data.shape

    if use_dataset:
      # Keras APIs expect batched datasets.
      # TODO(rachelim): `model.predict` predicts the result on each
      # dataset batch separately, then tries to concatenate the results
      # together. When the results have different shapes on the non-concat
      # axis (which can happen in the output_mode = INT case for
      # IntegerLookup), the concatenation fails. In real use cases, this may
      # not be an issue because users are likely to pipe the preprocessing layer
      # into other keras layers instead of predicting it directly. A workaround
      # for these unit tests is to have the dataset only contain one batch, so
      # no concatenation needs to happen with the result. For consistency with
      # numpy input, we should make `predict` join differently shaped results
      # together sensibly, with 0 padding.
      input_data = dataset_ops.Dataset.from_tensor_slices(input_data).batch(
          input_shape[0])
      vocab_data = dataset_ops.Dataset.from_tensor_slices(vocab_data).batch(
          input_shape[0])

    with CustomObjectScope({"IntegerLookup": cls}):
      output_data = testing_utils.layer_test(
          cls,
          kwargs=kwargs,
          input_shape=input_shape,
          input_data=input_data,
          input_dtype=input_dtype,
          expected_output_dtype=expected_output_dtype,
          validate_training=False,
          adapt_data=vocab_data)
    self.assertAllClose(expected_output, output_data)
@keras_parameterized.run_all_keras_modes
class CategoricalEncodingInputTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Lookup tests for sparse and ragged integer inputs."""

  def test_sparse_int_input(self):
    """Sparse input: OOV value 32 maps to index 1, vocab value 13 to 5."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 32], dtype=np.int64),
        dense_shape=[3, 4])

    expected_indices = [[0, 0], [1, 2]]
    expected_values = [5, 1]
    expected_dense_shape = [3, 4]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    layer = get_layer_class()(max_values=None)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)

  def test_ragged_int_input(self):
    """Ragged input: vocab terms map to indices 2..5, OOV 42 maps to 1."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = ragged_factory_ops.constant([[10, 11, 13], [13, 12, 10, 42]],
                                              dtype=np.int64)
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, ragged=True)
    layer = get_layer_class()(max_values=None)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes
class CategoricalEncodingMultiOOVTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Lookup tests with num_oov_indices=2 (multiple OOV buckets)."""

  def test_sparse_int_input_multi_bucket(self):
    """With two OOV buckets, OOV value 133 lands in bucket index 2."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 133], dtype=np.int64),
        dense_shape=[3, 4])

    expected_indices = [[0, 0], [1, 2]]
    expected_values = [6, 2]
    expected_dense_shape = [3, 4]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    layer = get_layer_class()(
        max_values=None,
        dtype=dtypes.int64,
        num_oov_indices=2,
        mask_value=0,
        oov_value=-1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)

  def test_ragged_int_input_multi_bucket(self):
    """Ragged input with two OOV buckets shifts vocab indices up by one."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = ragged_factory_ops.constant([[10, 11, 13], [13, 12, 10, 133]],
                                              dtype=np.int64)
    expected_output = [[3, 4, 6], [6, 5, 3, 2]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, ragged=True)
    layer = get_layer_class()(max_values=None, num_oov_indices=2)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes
class CategoricalEncodingAdaptTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """adapt() tests over sparse, ragged and generator-backed datasets."""

  def test_sparse_adapt(self):
    """Adapting on a sparse tensor yields [mask, oov, 203, 1729]."""
    vocab_data = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 2]],
        values=[203, 1729, 203],
        dense_shape=[3, 4])
    vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)

    layer = get_layer_class()()
    layer.adapt(vocab_dataset)
    expected_vocabulary = [0, -1, 203, 1729]
    self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())

  def test_ragged_adapt(self):
    """Adapting on a ragged tensor yields [mask, oov, 203, 1729]."""
    vocab_data = ragged_factory_ops.constant([[203], [1729, 203]])
    vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)

    layer = get_layer_class()()
    layer.adapt(vocab_dataset)
    expected_vocabulary = [0, -1, 203, 1729]
    self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())

  def test_sparse_int_input(self):
    """Sparse lookup after set_vocabulary: 13 -> 5, OOV 32 -> 1."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 2]],
        values=np.array([13, 32], dtype=np.int64),
        dense_shape=[3, 4])

    expected_indices = [[0, 0], [1, 2]]
    expected_values = [5, 1]
    expected_dense_shape = [3, 4]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    layer = get_layer_class()(max_values=None)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_data = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_indices, output_data.indices)
    self.assertAllEqual(expected_values, output_data.values)
    self.assertAllEqual(expected_dense_shape, output_data.dense_shape)

  def test_ragged_int_input(self):
    """Ragged lookup after set_vocabulary: vocab -> 2..5, OOV 42 -> 1."""
    vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
    input_array = ragged_factory_ops.constant([[10, 11, 13], [13, 12, 10, 42]],
                                              dtype=np.int64)
    expected_output = [[2, 3, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, ragged=True)
    layer = get_layer_class()(max_values=None)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_single_int_generator_dataset(self):
    """adapt() must accept a scalar-int generator-backed dataset."""

    def word_gen():
      for _ in itertools.count(1):
        yield random.randint(0, 100)

    ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.int64,
                                            tensor_shape.TensorShape([]))
    batched_ds = ds.take(2)
    input_t = keras.Input(shape=(), dtype=dtypes.int64)
    layer = get_layer_class()(
        max_values=10, num_oov_indices=0, mask_value=None, oov_value=None)
    _ = layer(input_t)
    layer.adapt(batched_ds)
@keras_parameterized.run_all_keras_modes
class IntegerLookupOutputTest(keras_parameterized.TestCase,
                              preprocessing_test_utils.PreprocessingLayerTest):
  """Forward, inverse and round-trip output tests for IntegerLookup."""

  def test_int_output(self):
    """Default layer: mask=0, OOV=1, vocab terms start at index 2."""
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()()
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_output_shape(self):
    """The lookup preserves the input's trailing shape."""
    input_data = keras.Input(shape=(4,), dtype=dtypes.int64)
    layer = get_layer_class()(max_values=None, num_oov_indices=1)
    int_data = layer(input_data)
    self.assertAllEqual(int_data.shape[1:], input_data.shape[1:])

  def test_int_output_no_reserved_zero(self):
    """With mask_value=None there is no reserved 0 index; OOV becomes 0."""
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(max_values=None, mask_value=None)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_int_output_explicit_vocab(self):
    """Passing the vocabulary at construction time behaves the same."""
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(
        vocabulary=vocab_data,
        max_values=None,
    )
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_inverse_output(self):
    """invert=True maps indices back to values (index 1 -> oov value -1)."""
    vocab_data = [0, -1, 42, 1138, 725, 1729]
    input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])
    expected_output = np.array([[42, 1138, 725, 1729], [1729, 725, 42, -1]])

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(invert=True)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_forward_backward_output(self):
    """Forward lookup then inverse lookup round-trips (OOV 203 -> -1)."""
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = np.array([[42, 1138, 725, 1729], [1729, 725, 42, -1]])

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()()
    inverse_layer = get_layer_class()()
    layer.set_vocabulary(vocab_data)
    # Rebuilt with the forward layer's vocabulary so both share one mapping.
    inverse_layer = get_layer_class()(
        vocabulary=layer.get_vocabulary(), invert=True)
    int_data = layer(input_data)
    inverse_data = inverse_layer(int_data)
    model = keras.Model(inputs=input_data, outputs=inverse_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes
class IntegerLookupVocabularyTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Vocabulary construction tests: from lists, from files, error cases."""

  def _write_to_temp_file(self, file_name, vocab_list):
    # Helper: write one vocab entry per line and return the file path.
    vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
    with gfile.GFile(vocab_path, "w") as writer:
      for vocab in vocab_list:
        writer.write(str(vocab) + "\n")
      writer.flush()
      writer.close()
    return vocab_path

  def test_int_output_explicit_vocab(self):
    """Vocabulary passed as a list at construction time."""
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(vocabulary=vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_get_vocab_returns_int(self):
    """get_vocabulary() returns numpy int64 entries, not strings."""
    vocab_data = [42, 1138, 725, 1729]
    expected_vocab = [0, -1, 42, 1138, 725, 1729]
    layer = get_layer_class()(vocabulary=vocab_data)
    layer_vocab = layer.get_vocabulary()
    self.assertAllEqual(expected_vocab, layer_vocab)
    self.assertIsInstance(layer_vocab[0], np.int64)

  def test_int_output_explicit_vocab_from_file(self):
    """Vocabulary loaded from a one-entry-per-line text file."""
    vocab_list = [42, 1138, 725, 1729]
    vocab_path = self._write_to_temp_file("vocab_file", vocab_list)

    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(vocabulary=vocab_path)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_non_unique_vocab_fails(self):
    """Duplicate entries in an explicit vocabulary raise ValueError."""
    vocab_data = [42, 1138, 725, 1729, 1729]
    with self.assertRaisesRegex(ValueError, ".*repeated term.*1729.*"):
      _ = get_layer_class()(vocabulary=vocab_data)

  def test_non_unique_vocab_from_file_fails(self):
    """Duplicate entries in a vocabulary file raise ValueError."""
    vocab_list = [42, 1138, 725, 1729, 42]
    vocab_path = self._write_to_temp_file("repeat_vocab_file", vocab_list)
    with self.assertRaisesRegex(ValueError, ".*repeated term.*42.*"):
      _ = get_layer_class()(vocabulary=vocab_path)
@keras_parameterized.run_all_keras_modes(always_skip_eager=True)
class IntegerLookupSaveableTest(keras_parameterized.TestCase,
                                preprocessing_test_utils.PreprocessingLayerTest
                               ):
  """Graph-mode weight get/set and saving behavior (eager skipped)."""

  def test_ops_are_not_added_with_multiple_get_set_weights(self):
    """Repeated get/set_weights must not add ops to a finalized graph."""
    vocab_data = [42, 1138, 725, 1729]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(max_values=10)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    weights = model.get_weights()
    model.set_weights(weights)
    # Finalizing the graph makes any further op creation raise.
    keras.backend.get_session().graph.finalize()
    weights = model.get_weights()
    model.set_weights(weights)

  def test_layer_saving_with_h5(self):
    """Saving a model containing this layer to h5 is not supported."""
    vocab_data = [42, 1138, 725, 1729]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(max_values=10)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    path = os.path.join(self.get_temp_dir(), "model")
    with self.assertRaisesRegex(NotImplementedError,
                                "Save or restore weights that is not.*"):
      save.save_model(model, path, save_format="h5")
@keras_parameterized.run_all_keras_modes
class IntegerLookupErrorTest(keras_parameterized.TestCase,
                             preprocessing_test_utils.PreprocessingLayerTest):
  """Constructor / set_vocabulary argument-validation error tests."""

  def test_too_long_vocab_fails_in_single_setting(self):
    """A vocabulary larger than max_values must be rejected."""
    vocab_data = [42, 1138, 725, 1729]

    layer = get_layer_class()(max_values=4, num_oov_indices=1)
    with self.assertRaisesRegex(ValueError,
                                "vocabulary larger than the maximum vocab.*"):
      layer.set_vocabulary(vocab_data)

  def test_zero_max_values_fails(self):
    """max_values=0 is invalid and must raise at construction."""
    with self.assertRaisesRegex(ValueError, ".*max_values.*"):
      _ = get_layer_class()(max_values=0, num_oov_indices=1)
@keras_parameterized.run_all_keras_modes
class IntegerLookupSavingTest(keras_parameterized.TestCase,
                              preprocessing_test_utils.PreprocessingLayerTest):
  """SavedModel round-trip: the vocabulary must survive save/load."""

  def test_vocabulary_persistence_across_saving(self):
    vocab_data = [42, 1138, 725, 1729]
    input_array = np.array([[42, 1138, 725, 1729], [1729, 725, 42, 203]])
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    # Build and validate a golden model.
    input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
    layer = get_layer_class()(max_values=None, num_oov_indices=1)
    layer.set_vocabulary(vocab_data)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(output_dataset, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")

    # Delete the session and graph to ensure that the loaded model is generated
    # from scratch.
    # TODO(b/149526183): Can't clear session when TF2 is disabled.
    if tf2.enabled():
      keras.backend.clear_session()

    loaded_model = keras.models.load_model(
        output_path, custom_objects={"IntegerLookup": get_layer_class()})

    # Ensure that the loaded model is unique (so that the save/load is real)
    self.assertIsNot(model, loaded_model)

    # Validate correctness of the new model.
    new_output_dataset = loaded_model.predict(input_array)
    self.assertAllEqual(new_output_dataset, expected_output)
# Run the absl/TF test runner when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
mariotristan/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/_common_error.py | 13 | 1505 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureHttpError,
)
# Error-message templates; the {0} slot is filled via str.format by callers.
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_ASYNC_OP_FAILURE = 'Asynchronous operation did not succeed.'
_ERROR_ASYNC_OP_TIMEOUT = 'Timed out waiting for async operation to complete.'
def _general_error_handler(http_error):
    """Simple error handler for azure: re-raise *http_error* as an
    AzureHttpError, appending the decoded response body when present."""
    message = str(http_error)
    body = http_error.respbody
    if body is not None:
        message = message + '\n' + body.decode('utf-8-sig')
    raise AzureHttpError(message, http_error.status)
def _validate_not_none(param_name, param):
if param is None:
raise ValueError(_ERROR_VALUE_NONE.format(param_name))
| apache-2.0 |
zoofIO/flexx | flexxamples/howtos/flask_server.py | 2 | 2976 | """
Example showing an implementation of a flask server serving a flexx application.
If assets are needed (jpg, files, etc.) they can be placed a folder called static
and accessed through each flexx blueprints (e.g. http://my_flexx/picture.jpg). The
name of that folder can be changed when registering the blueprint.
All functions needed for the implementation are found in flx_flask. More help can
be found in flexx/app/_flaskhelpers.py.
"""
from flask import Flask, current_app, url_for
from flask_sockets import Sockets
app = Flask(__name__)
from flexx import ui, flx, flx_flask
######################## The flexx application #########################
class Example(flx.Widget):
    """Minimal flexx widget: renders a Markdown welcome banner."""

    def init(self):
        content = ("# Welcome\n\n"
                   "This flexx app is served within flask! ")
        ui.Markdown(content=content, style='background:#EAECFF;')


# Register the widget so flexx serves it through the flask blueprints.
flx_flask.serve(Example)
@app.route("/")
def site_map(): # list available applications and URLs
"""
This function lists all the URLs server by the flask application
including the flexx application that have been registered.
"""
def has_no_empty_params(rule):
defaults = rule.defaults if rule.defaults is not None else ()
arguments = rule.arguments if rule.arguments is not None else ()
return len(defaults) >= len(arguments)
links = []
for rule in current_app.url_map.iter_rules():
# Filter out rules we can't navigate to in a browser
# and rules that require parameters
if "GET" in rule.methods and has_no_empty_params(rule):
url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((url, rule.endpoint))
# links is now a list of url, endpoint tuples
html = ["<h> URLs served by this server </h>", "<ul>"]
for link in links:
html.append(f'<li><a href="{link[0]}">{link[1]}</a></li>')
html.append("</ul>")
return '\n'.join(html)
@app.route('/favicon.ico')
def ico_file():
    """Placeholder favicon endpoint; returns a dummy body."""
    return "None"
####################### Registration of blueprints #####################
# Attach websocket support and register the flexx blueprints on this app.
sockets = Sockets(app)  # keep at the end
flx_flask.register_blueprints(app, sockets, static_folder='static')

####################### Start flexx in thread #####################
flx_flask.start_thread()
######### Start flask server (using gevent that supports web sockets) #########
if __name__ == "__main__":
    @app.errorhandler(Exception)
    def internal_error(e):
        # Development-time error handler: dump the stack trace to the
        # console and also return it to the browser as HTML.
        import traceback; traceback.print_exc()  # get the trace stack
        err_str = str(traceback.format_exc())  # to get the string
        err_str = err_str.replace("\n", "<br>")
        return "<h3>" + str(e) + "</h3><br>" + err_str

    from gevent import pywsgi
    from geventwebsocket.handler import WebSocketHandler

    def RunServer():
        # gevent's WSGI server with the websocket handler is used because
        # flexx talks to the browser over a websocket connection.
        server = pywsgi.WSGIServer(('127.0.0.1', 5000), app, handler_class=WebSocketHandler)
        print("Server Started!")
        server.serve_forever()

    RunServer()
| bsd-2-clause |
rhololkeolke/apo-website | src/werkzeug/debug/tbtools.py | 74 | 16019 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
# Shared page shell for all debugger pages.  Interpolated keys: title,
# traceback_id, console, evalex and secret.
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
SECRET = "%(secret)s";
</script>
</head>
<body>
<div class="debugger">
'''

# Closing shell matching HEADER above.
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
</body>
</html>
'''

# Full traceback page.  Interpolated keys (besides HEADER's): exception,
# exception_type, summary, lodgeit_url, plaintext and plaintext_cs.
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="%(lodgeit_url)s" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into LodgeIt:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''

# Standalone interactive console page (no traceback attached).
CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER

# Summary box wrapping the rendered frames of one traceback.
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''

# One frame entry inside the summary list.
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''

# Source-code listing: a table of SOURCE_LINE_HTML rows.
SOURCE_TABLE_HTML = u'<table class=source>%s</table>'

SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret):
    """Render the standalone interactive console page.

    The console always has evalex and console mode enabled and is not
    associated with any traceback (id -1).
    """
    values = {'title': 'Console',
              'evalex': 'true',
              'console': 'true',
              'secret': secret,
              'traceback_id': -1}
    return CONSOLE_HTML % values
def get_current_traceback(ignore_system_exceptions=False,
                          show_hidden_frames=False, skip=0):
    """Get the current exception info as `Traceback` object.

    If `ignore_system_exceptions` is true, system exceptions such as
    generator exit, system exit or keyboard interrupt are re-raised
    instead of being wrapped.  `skip` drops up to that many outermost
    traceback frames before wrapping.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if ignore_system_exceptions and exc_type in system_exceptions:
        raise
    # Walk past the requested number of outer frames (xrange: Python 2).
    for x in xrange(skip):
        if tb.tb_next is None:
            break
        tb = tb.tb_next
    tb = Traceback(exc_type, exc_value, tb)
    if not show_hidden_frames:
        # Honor the paste-style __traceback_hide__ markers by default.
        tb.filter_hidden_frames()
    return tb
class Line(object):
    """One rendered source line in the traceback source view."""
    __slots__ = ('lineno', 'code', 'in_frame', 'current')

    def __init__(self, lineno, code):
        self.lineno = lineno
        self.code = code
        # Both flags are toggled later by Frame.get_annotated_lines().
        self.in_frame = False
        self.current = False

    @property
    def classes(self):
        """CSS class list describing the state of this line."""
        flags = (('in-frame', self.in_frame), ('current', self.current))
        return ['line'] + [name for name, active in flags if active]

    def render(self):
        """Render this line as an HTML table row."""
        return SOURCE_LINE_HTML % {
            'classes': u' '.join(self.classes),
            'lineno': self.lineno,
            'code': escape(self.code)
        }
class Traceback(object):
    """Wraps a traceback and renders it as plain text or HTML."""

    def __init__(self, exc_type, exc_value, tb):
        self.exc_type = exc_type
        self.exc_value = exc_value
        # Build a dotted exception name; builtins are left unqualified.
        if not isinstance(exc_type, str):
            exception_type = exc_type.__name__
            if exc_type.__module__ not in ('__builtin__', 'exceptions'):
                exception_type = exc_type.__module__ + '.' + exception_type
        else:
            exception_type = exc_type
        self.exception_type = exception_type

        # we only add frames to the list that are not hidden. This follows
        # the magic variables as defined by paste.exceptions.collector
        self.frames = []
        while tb:
            self.frames.append(Frame(exc_type, exc_value, tb))
            tb = tb.tb_next

    def filter_hidden_frames(self):
        """Remove the frames according to the paste spec.

        Each frame's ``__traceback_hide__`` value controls visibility:
        'before'/'before_and_this' discards everything collected so far,
        'reset'/'reset_and_this' stops hiding, 'after'/'after_and_this'
        starts hiding, and any other truthy value hides just that frame.
        """
        if not self.frames:
            return
        new_frames = []
        hidden = False
        for frame in self.frames:
            hide = frame.hide
            if hide in ('before', 'before_and_this'):
                new_frames = []
                hidden = False
                if hide == 'before_and_this':
                    continue
            elif hide in ('reset', 'reset_and_this'):
                hidden = False
                if hide == 'reset_and_this':
                    continue
            elif hide in ('after', 'after_and_this'):
                hidden = True
                if hide == 'after_and_this':
                    continue
            elif hide or hidden:
                continue
            new_frames.append(frame)
        # if we only have one frame and that frame is from the codeop
        # module, remove it.
        if len(new_frames) == 1 and self.frames[0].module == 'codeop':
            del self.frames[:]
        # if the last frame is missing something went terrible wrong :(
        elif self.frames[-1] in new_frames:
            self.frames[:] = new_frames

    def is_syntax_error(self):
        """Is it a syntax error?"""
        return isinstance(self.exc_value, SyntaxError)
    is_syntax_error = property(is_syntax_error)

    def exception(self):
        """String representation of the exception."""
        # Python 2: format_exception_only() returns byte strings here.
        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
        return ''.join(buf).strip().decode('utf-8', 'replace')
    exception = property(exception)

    def log(self, logfile=None):
        """Log the ASCII traceback into a file object (stderr default)."""
        if logfile is None:
            logfile = sys.stderr
        tb = self.plaintext.encode('utf-8', 'replace').rstrip() + '\n'
        logfile.write(tb)

    def paste(self, lodgeit_url):
        """Create a paste on a LodgeIt server and return the paste id."""
        from xmlrpclib import ServerProxy
        srv = ServerProxy('%sxmlrpc/' % lodgeit_url)
        return srv.pastes.newPaste('pytb', self.plaintext, '', '', '', True)

    def render_summary(self, include_title=True):
        """Render the traceback for the interactive console."""
        title = ''
        description = ''
        frames = []
        classes = ['traceback']
        if not self.frames:
            classes.append('noframe-traceback')
        if include_title:
            if self.is_syntax_error:
                title = u'Syntax Error'
            else:
                title = u'Traceback <em>(most recent call last)</em>:'
        for frame in self.frames:
            # Attach the optional __traceback_info__ text as a tooltip.
            frames.append(u'<li%s>%s' % (
                frame.info and u' title="%s"' % escape(frame.info) or u'',
                frame.render()
            ))
        if self.is_syntax_error:
            description_wrapper = u'<pre class=syntaxerror>%s</pre>'
        else:
            description_wrapper = u'<blockquote>%s</blockquote>'
        return SUMMARY_HTML % {
            'classes': u' '.join(classes),
            'title': title and u'<h3>%s</h3>' % title or u'',
            'frames': u'\n'.join(frames),
            'description': description_wrapper % escape(self.exception)
        }

    def render_full(self, evalex=False, lodgeit_url=None,
                    secret=None):
        """Render the Full HTML page with the traceback info."""
        exc = escape(self.exception)
        return PAGE_HTML % {
            'evalex': evalex and 'true' or 'false',
            'console': 'false',
            'lodgeit_url': escape(lodgeit_url),
            'title': exc,
            'exception': exc,
            'exception_type': escape(self.exception_type),
            'summary': self.render_summary(include_title=False),
            'plaintext': self.plaintext,
            # Collapse dash runs so the text cannot close the HTML comment
            # it is embedded in.
            'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
            'traceback_id': self.id,
            'secret': secret
        }

    def generate_plaintext_traceback(self):
        """Like the plaintext attribute but returns a generator"""
        yield u'Traceback (most recent call last):'
        for frame in self.frames:
            yield u'  File "%s", line %s, in %s' % (
                frame.filename,
                frame.lineno,
                frame.function_name
            )
            yield u'    ' + frame.current_line.strip()
        yield self.exception

    def plaintext(self):
        """The whole traceback as a plain unicode string (cached)."""
        return u'\n'.join(self.generate_plaintext_traceback())
    plaintext = cached_property(plaintext)

    # Stable identifier used to address this traceback from JavaScript.
    id = property(lambda x: id(x))
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in ('.pyo', '.pyc'):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = fn
self.module = self.globals.get('__name__')
self.loader = self.globals.get('__loader__')
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get('__traceback_hide__', False)
info = self.locals.get('__traceback_info__')
if info is not None:
try:
info = unicode(info)
except UnicodeError:
info = str(info).decode('utf-8', 'replace')
self.info = info
def render(self):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
'id': self.id,
'filename': escape(self.filename),
'lineno': self.lineno,
'function_name': escape(self.function_name),
'current_line': escape(self.current_line.strip())
}
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + '\n' for x
in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno:lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def render_source(self):
"""Render the sourcecode."""
return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in
self.get_annotated_lines())
def eval(self, code, mode='single'):
"""Evaluate code in the context of the frame."""
if isinstance(code, basestring):
if isinstance(code, unicode):
code = UTF8_COOKIE + code.encode('utf-8')
code = compile(code, '<interactive>', mode)
if mode != 'exec':
return eval(code, self.globals, self.locals)
exec code in self.globals, self.locals
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, 'get_source'):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, 'get_source_by_code'):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
f = file(self.filename)
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, unicode):
return source.splitlines()
# yes. it should be ascii, but we don't want to reject too many
# characters in the debugger if something breaks
charset = 'utf-8'
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
match = _line_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
try:
codecs.lookup(charset)
except LookupError:
charset = 'utf-8'
return source.decode(charset, 'replace').splitlines()
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u''
@cached_property
def console(self):
return Console(self.globals, self.locals)
id = property(lambda x: id(x))
| bsd-3-clause |
lhupfeldt/jenkinsflow | cli/set_build_description.py | 1 | 1855 | # Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import click
from ..utils import set_build_description as usbd
@click.command()
@click.option('--description', help="The description to set on the build")
@click.option('--replace/--no-replace', default=False, help="Replace existing description, if any, instead of appending.")
@click.option('--separator', default='\n', help="A separator to insert between any existing description and the new 'description' if 'replace' is not specified.")
@click.option('--username', help="User Name for Jenkin authentication with secured Jenkins")
@click.option('--password', help="Password of Jenkins User")
@click.option('--build-url', help='Build URL', envvar='BUILD_URL')
@click.option('--job-name', help='Job Name', envvar='JOB_NAME')
@click.option('--build-number', help="Build Number", type=click.INT, envvar='BUILD_NUMBER')
@click.option(
    '--direct-url',
    default=None,
    help="Jenkins URL - preferably non-proxied. If not specified, the value of JENKINS_URL or HUDSON_URL environment variables will be used.")
def set_build_description(description, replace, separator, username, password, build_url, job_name, build_number, direct_url):
    """Utility to set/append build description on a job build.

    When called from a Jenkins job you can leave out the '--build-url', '--job-name' and '--build-number' arguments, the BUILD_URL env variable will be used.
    """
    # Thin CLI wrapper: all of the work is delegated to the library helper.
    # %(file)s --job-name <job_name> --build-number <build_number> --description <description> [--direct-url <direct_url>] [--replace | --separator <separator>] [(--username <user_name> --password <password>)]
    usbd.set_build_description(description, replace, separator, username, password, build_url, job_name, build_number, direct_url)
| bsd-3-clause |
stefanfoulis/django-filer | filer/tests/dump.py | 2 | 5446 | #-*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import os
import tempfile
from django.conf import settings
from django.core.files import File as DjangoFile
from django.core.management import call_command
from django.test import TestCase
from django.utils.six import StringIO
from .. import settings as filer_settings
from ..models import Folder
from ..models.filemodels import File
from ..settings import FILER_IMAGE_MODEL
from ..tests.helpers import (
SettingsOverride,
create_folder_structure,
create_image,
create_superuser,
)
from ..utils.loader import load_model
Image = load_model(FILER_IMAGE_MODEL)
class DumpDataTests(TestCase):
    """Round-trip tests for ``dumpdata``/``loaddata`` on the filer models."""

    def setUp(self):
        # Create a superuser and a temporary JPEG on disk that the tests
        # upload as filer File/Image objects.
        self.superuser = create_superuser()
        self.img = create_image()
        self.image_name = 'test_file.jpg'
        self.filename = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, self.image_name)
        self.img.save(self.filename, 'JPEG')

    def tearDown(self):
        # Deleting each File model also removes its storage payload.
        os.remove(self.filename)
        for f in File.objects.all():
            f.delete()
        pass

    def create_filer_image(self, folder=None):
        """Upload the temp JPEG as a filer ``Image`` owned by the superuser."""
        # NOTE(review): the file handle opened here is never explicitly
        # closed -- relies on garbage collection.
        file_obj = DjangoFile(open(self.filename, 'rb'), name=self.image_name)
        image = Image.objects.create(owner=self.superuser,
                                     original_filename=self.image_name,
                                     file=file_obj, folder=folder)
        return image

    def create_filer_file(self, folder=None):
        """Upload the temp JPEG as a plain filer ``File``."""
        file_obj = DjangoFile(open(self.filename, 'rb'), name=self.image_name)
        fileobj = File.objects.create(owner=self.superuser,
                                      original_filename=self.image_name,
                                      file=file_obj, folder=folder)
        return fileobj

    def test_dump_data_base(self):
        """
        Testing the case of dump full and empty dataset
        """
        fileobj = self.create_filer_file()
        jdata, jdata2 = StringIO(), StringIO()
        call_command("dumpdata", "filer", stdout=jdata)
        fileobj.delete()
        call_command("dumpdata", "filer", stdout=jdata2)
        data = json.loads(jdata.getvalue())
        data2 = json.loads(jdata2.getvalue())
        self.assertEqual(len(data), 1)
        self.assertEqual(len(data2), 0)

    def test_dump_load_data(self):
        """
        Testing the dump / load with no dump of file content data
        """
        # Initialize the test data
        create_folder_structure(1,1)
        fileobj = self.create_filer_file(Folder.objects.all()[0])
        self.assertEqual(Image.objects.count(), 0)
        image = self.create_filer_image()
        image.save()
        image_size = image._width, image._height
        self.assertEqual(Image.objects.count(), 1)
        jdata = StringIO()
        # Dump the current data
        fobj = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
        call_command("dumpdata", "filer", stdout=jdata, indent=3)
        # Delete database and filesystem data
        complete = os.path.join(fileobj.file.storage.location, fileobj.path)
        os.unlink(complete)
        fileobj.delete()
        # Dump data to json file
        fobj.write(jdata.getvalue().encode('utf-8'))
        fobj.seek(0)
        # Load data back
        call_command("loaddata", fobj.name, stdout=jdata)
        # Database data is restored
        self.assertEqual(Folder.objects.all().count(), 1)
        self.assertEqual(File.objects.all().count(), 2)
        self.assertEqual(File.objects.all()[0].original_filename, self.image_name)
        self.assertEqual(Image.objects.count(), 1)
        fileobj = File.objects.all()[0]
        image = Image.objects.all()[0]
        self.assertEqual(image._width, image_size[0])
        self.assertEqual(image._height, image_size[1])
        complete = os.path.join(fileobj.file.storage.location, fileobj.path)
        # Filesystem data is not
        self.assertFalse(os.path.exists(complete))

    def test_dump_load_data_content(self):
        """
        Testing the dump / load with full dump of file content data
        """
        with SettingsOverride(filer_settings, FILER_DUMP_PAYLOAD=True):
            # Initialize the test data
            create_folder_structure(1,1)
            fileobj = self.create_filer_file(Folder.objects.all()[0])
            jdata = StringIO()
            # Dump the current data
            fobj = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
            call_command("dumpdata", "filer", stdout=jdata, indent=3)
            # Delete database and filesystem data and
            complete = os.path.join(fileobj.file.storage.location, fileobj.path)
            os.unlink(complete)
            fileobj.delete()
            # Dump data to json file
            fobj.write(jdata.getvalue().encode('utf-8'))
            fobj.seek(0)
            # Load data back
            call_command("loaddata", fobj.name, stdout=jdata)
            # Database data is restored
            self.assertEqual(Folder.objects.all().count(), 1)
            self.assertEqual(File.objects.all().count(), 1)
            self.assertEqual(File.objects.all()[0].original_filename, self.image_name)
            fileobj = File.objects.all()[0]
            complete = os.path.join(fileobj.file.storage.location, fileobj.path)
            # Filesystem data too!
            self.assertTrue(os.path.exists(complete))
| bsd-3-clause |
godbyk/vrjuggler-upstream-old | build_windows.py | 5 | 114492 | #python
# ************** <auto-copyright.pl BEGIN do not edit this line> **************
#
# VR Juggler is (C) Copyright 1998-2011 by Iowa State University
#
# Original Authors:
# Allen Bierbaum, Christopher Just,
# Patrick Hartling, Kevin Meinert,
# Carolina Cruz-Neira, Albert Baker
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# *************** <auto-copyright.pl END do not edit this line> ***************
import glob
import os, os.path
import fnmatch
import re
import shutil
import sys
import time
import traceback
import getopt
import subprocess
# Shorthand used throughout this script.
pj = os.path.join

# Exit status codes returned by this script.
EXIT_STATUS_SUCCESS = 0
EXIT_STATUS_NO_MSVS = 1
EXIT_STATUS_MISSING_DATA_FILE = 2
EXIT_STATUS_MSVS_START_ERROR = 3
EXIT_STATUS_INVALID_PATH = 4
EXIT_STATUS_MISSING_REQ_VALUE = 5
EXIT_STATUS_UNSUPPORTED_COMPILER = 6
EXIT_STATUS_INVALID_ARGUMENT = 7

# Absolute path of the VR Juggler source tree (directory of this script).
gJugglerDir = os.path.dirname(os.path.abspath(sys.argv[0]))
# Per-tree cache file for previously entered build settings.
gOptionsFileName = "options.cache"
gBuild64 = False
gUnattended = False
gJobLimit = None
gInstallDebug = False
gValidBuildConfigs = ("ReleaseDLL", "DebugDLL", "DebugRtDll")

# JAR files shipped with the JDOM dependency.
# NOTE(review): the bare 'jar' entry below looks truncated compared to its
# '*.jar' siblings (possibly 'jaxen.jar') -- confirm against the JDOM
# distribution before relying on this list.
gJdomJars = [
    'jdom.jar',
    'jaxen-core.jar',
    'jar',
    'jaxen-jdom.jar',
    'xerces.jar',
    'xml-apis.jar',
    'saxpath.jar'
]

# JAR files produced by the Tweek build.
gTweekJars = [
    'Tweek.jar',
    'TweekBeanDelivery.jar',
    'TweekBeans.jar',
    'TweekEvents.jar',
    'TweekNet.jar',
    'TweekServices.jar',
    'kunststoff-mod.jar'
]

# Optional Tweek extension JARs.
gTweekExtJars = [
    'ui.jar',
    'wizard.jar'
]

# JAR files produced by the JCCL build.
gJcclJars = [
    'jccl_config.jar',
    'jccl_editors.jar'
]

gJcclRtrcJars = [
    'jccl_rtrc.jar'
]

# Probe for Tkinter so the optional GUI front end can be offered.
gHaveTk = False
try:
    import Tkinter
    import tkMessageBox
    import tkFileDialog
    import threading
    gHaveTk = True
except ImportError, ex:
    print ex
class BuildOption:
    """One configurable build setting.

    Maps an environment variable name to a human-readable description and
    a default value, and records whether the value names a directory and
    whether the user is required to supply it.
    """
    def __init__(self, envVar, desc, defaultValue, isDirectory = True,
                 required = True):
        self.envVar = envVar
        self.isDirectory = isDirectory
        self.required = required
        self.desc = desc
        self.default = defaultValue
def detectVisualStudioVersion(reattempt = False):
    """Determine the Visual C++ compiler version by running CL.EXE.

    Returns a ``(major, minor)`` compiler version tuple.  If CL.EXE is not
    on the path, the common Visual Studio install directories are probed,
    the path is extended, and detection is retried once before aborting.
    """
    # Run CL.EXE; it prints its version banner on stderr.
    (cl_stdin, cl_stdout, cl_stderr) = os.popen3('cl')
    cl_version_line = cl_stderr.readline()
    cl_ver_match = re.compile(r'Compiler Version ((\d+)\.(\d+)\.(\d+))')
    ver_string_match = cl_ver_match.search(cl_version_line)
    cl_major = 0
    cl_minor = 0
    if ver_string_match is not None:
        cl_major = int(ver_string_match.group(2))
        cl_minor = int(ver_string_match.group(3))
        # Compiler 15.x is VS2008; anything newer is treated as VS2010.
        if cl_major <= 14:
            printStatus("Visual Studio 2005 and older are not supported")
            sys.exit(EXIT_STATUS_UNSUPPORTED_COMPILER)
        elif cl_major == 15:
            vs_ver = '2008'
        else:
            printStatus("Warning: unrecognized compiler version %s.%s, will treat it like VS2010"
                        % (cl_major, cl_minor))
            vs_ver = '2010'
        printStatus("It appears that we will be using Visual Studio %s"%vs_ver)
        #printStatus(" compiler version: %s.%s"%(cl_major,cl_minor))
    in_status = cl_stdin.close()
    out_status = cl_stdout.close()
    err_status = cl_stderr.close()
    # If there was an error closing any of the streams returned by
    # os.popen3(), then the command was not opened correctly. That means
    # that CL.EXE is not in the user's path.
    if in_status is not None or out_status is not None or err_status is not None:
        # If this is not a reattempt to guess the Visual Studio version, then
        # be nice and extend the user's path to include their Visual Studio
        # installation.
        if not reattempt:
            printStatus("Visual studio not in path, attempting to find...")
            # Common installation directories for Visual Studio.
            vs_dirs = [r'C:\Program Files (x86)\Microsoft Visual Studio 10.0',
                       r'C:\Program Files (x86)\Microsoft Visual Studio 9.0',
                       r'C:\Program Files\Microsoft Visual Studio 9.0',
                       r'C:\Program Files\Microsoft Visual Studio 8'
                      ]
            for d in vs_dirs:
                printStatus("Trying path: %s"%d)
                if not os.path.exists(d):
                    printStatus(" does not exist.")
                else:
                    printStatus(" Using: %s"%d)
                    printStatus(" " + d)
                    # Prepend the IDE and tool directories for this install.
                    vs_path = [os.path.join(d, r'Common7\IDE'),
                               os.path.join(d, r'VC7\BIN'),
                               os.path.join(d, r'VC\BIN'),
                               os.path.join(d, r'Common7\Tools'),
                               os.path.join(d, r'Common7\Tools\bin\prerelease'),
                               os.path.join(d, r'Common7\Tools\bin')]
                    path_add = ';'.join(vs_path)
                    os.environ['PATH'] = path_add + os.pathsep + os.getenv('PATH', '')
                    # Try again to guess the Visual Studio version.
                    return detectVisualStudioVersion(True)
            # If execution reaches this point, our attempts to guess the
            # location of a Visual Studio 7.x installation failed.
            noVisualStudioError()
        # If this is a reattempt to guess the Visual Studio version, then
        # something is wrong with the user's Visual Studio installation.
        else:
            noVisualStudioError()
    return (cl_major, cl_minor)
def chooseVisualStudioDir():
    """Pick the solution subtree matching the detected compiler.

    Returns ``(major, minor, vc_dir, needs_upgrade)`` where ``vc_dir`` is
    'vc9' or 'vc10' and ``needs_upgrade`` is True when the compiler is
    newer than the newest shipped solution.
    """
    (cl_ver_major, cl_ver_minor) = detectVisualStudioVersion()
    needs_upgrade = False
    # We do not support Visual Studio 2005 (version 8.0) or older.
    if cl_ver_major <= 14:
        print "ERROR: Visual C++ 8.0 and older are not supported!"
        sys.exit(EXIT_STATUS_UNSUPPORTED_COMPILER)
    # Otherwise, we use the solution in the vc9 subtree.
    elif cl_ver_major == 15:
        vc_dir = 'vc9'
    else:
        vc_dir = 'vc10'
        if cl_ver_major > 16:
            # Will need to upgrade the solution
            needs_upgrade = True
    return (cl_ver_major, cl_ver_minor, vc_dir, needs_upgrade)
def printStatus(msg):
    '''
    This is a simple wrapper around the standard Python print function.
    We will use a wrapper function for key status messages so that they
    can be redirected either to the console or to a GUI easily.
    '''
    print msg
def noVisualStudioError():
    """Report that no usable Visual Studio install was found and abort."""
    print "ERROR: Visual Studio commands are not in your path!"
    print "Run vsvars32.bat in this shell or update the %PATH% environment"
    print "variable on your system."
    sys.exit(EXIT_STATUS_NO_MSVS)
def getCacheFileName():
    """Return the absolute path of the per-tree options cache file."""
    return os.path.join(gJugglerDir, gOptionsFileName)
def processInput(optionDict, envVar, inputDesc, required = False):
    """Prompt for one setting, defaulting to its current value.

    In unattended mode the default is echoed and kept.  The accepted value
    is stored in both ``optionDict`` and ``os.environ`` and returned.
    Exits with EXIT_STATUS_MISSING_REQ_VALUE if a required value is empty.
    """
    default_value = optionDict[envVar]
    if gUnattended:
        print ' %s = "%s" (%s)' % (envVar, default_value, inputDesc)
        input_str = ''
    else:
        print " %s [%s]: " % (inputDesc, default_value),
        input_str = sys.stdin.readline().strip(" \n")
    # An empty answer keeps the default (or aborts for required settings).
    if input_str == '':
        if required and (default_value is None or default_value == ''):
            print "ERROR: %s value required" % inputDesc
            sys.exit(EXIT_STATUS_MISSING_REQ_VALUE)
        else:
            value_str = default_value
    else:
        value_str = input_str
    optionDict[envVar] = value_str
    os.environ[envVar] = value_str
    return value_str
def getDefaultVars(clVerMajor, clVerMinor):
    """Build the lists of required/optional build settings.

    Returns ``(required, optional, options)`` where the first two are
    lists of BuildOption instances and ``options`` maps each environment
    variable (plus 'prefix' and 'deps-prefix') to its current value.
    Values cached by a previous run are loaded from the options cache.
    """
    required = []
    required.append(BuildOption('BOOST_ROOT',
                                'Boost C++ installation directory', ''))
    required.append(BuildOption('BOOST_VERSION', 'Boost C++ version',
                                '1_31', False))
    required.append(BuildOption('BOOST_INCLUDES',
                                'Directory containing the Boost C++ header tree',
                                ''))
    required.append(BuildOption('CPPDOM_ROOT', 'CppDOM installation directory',
                                ''))
    required.append(BuildOption('CPPDOM_INCLUDES',
                                'Directory containing the CppDOM header tree',
                                ''))
    required.append(BuildOption('GMTL_ROOT', 'GMTL installation directory', ''))
    required.append(BuildOption('GMTL_INCLUDES',
                                'Directory containing the GMTL header tree',
                                ''))
    optional = []
    optional.append(BuildOption('JAVA_HOME', 'Java installation directory',
                                r'C:\java', required = False))
    optional.append(BuildOption('JOGL_HOME', 'Jogl installation directory',
                                os.getenv('JAVA_HOME', ''), required = False))
    optional.append(BuildOption('JAVA3D_HOME', 'Java 3D installation directory',
                                os.getenv('JAVA_HOME', ''), required = False))
    optional.append(BuildOption('OMNIORB_ROOT',
                                'omniORB installation directory', '',
                                required = False))
    optional.append(BuildOption('PFROOT',
                                'OpenGL Performer installation directory',
                                r'C:\Program Files\Silicon Graphics\OpenGL Performer',
                                required = False))
    optional.append(BuildOption('MS_SPEECH_SDK_ROOT',
                                'Microsoft Speech SDK directory', '',
                                required = False))
    optional.append(BuildOption('VRPN_ROOT', 'VRPN installation directory', '',
                                required = False))
    optional.append(BuildOption('AUDIERE_ROOT',
                                'Audiere installation directory', '',
                                required = False))
    optional.append(BuildOption('OPENAL_ROOT',
                                'OpenAL SDK installation directory', '',
                                required = False))
    optional.append(BuildOption('ALUT_ROOT', 'ALUT SDK installation directory',
                                '', required = False))
    optional.append(BuildOption('TRACKD_API_ROOT',
                                'TrackdAPI installation directory', '',
                                required = False))
    optional.append(BuildOption('FTD2XX_ROOT',
                                'FTD2XX SDK installation directory', '',
                                required = False))
    optional.append(BuildOption('MOTION_NODE_SDK_ROOT',
                                'MotionNode SDK installation directory', '',
                                required = False))
    optional.append(BuildOption('DOOZER_ROOT',
                                'Doozer installation directory', '',
                                required = False))
    optional.append(BuildOption('SDL_ROOT',
                                'Simple DirectMedia Layer installation directory', '',
                                required = False))
    options = {
        'prefix' : r'C:\vrjuggler',
        'deps-prefix' : r'C:\vrjuggler-deps'
    }
    # Seed each option from the environment, falling back to its default.
    for opt in required + optional:
        options[opt.envVar] = os.getenv(opt.envVar, opt.default)
    # If there are cached options, read them in.
    # NOTE(review): execfile() runs the cache in this local scope; the
    # cache file is expected to assign into ``options``.
    cache_file = getCacheFileName()
    if os.path.exists(cache_file):
        execfile(cache_file)
    return required, optional, options
def setVars(clVerMajor, clVerMinor):
    """Interactively collect all build settings.

    Prompts for every required and optional setting (updating os.environ
    via processInput()), derives dependent defaults, post-processes the
    result, writes the options cache, and returns the options dict.
    """
    required, optional, options = getDefaultVars(clVerMajor, clVerMinor)
    print "+++ Required Settings"
    processInput(options, 'prefix', 'Installation prefix', True)
    boost_dir = ''
    boost_ver = ''
    for opt in required:
        result = processInput(options, opt.envVar, opt.desc, opt.required)
        # The following is a little hack to get a reasonable default set for
        # the BOOST_INCLUDES variable before the user has to enter it manually.
        if opt.envVar == 'BOOST_ROOT':
            boost_dir = result
        elif opt.envVar == 'BOOST_VERSION' and \
             options.get('BOOST_INCLUDES', '') == '':
            boost_ver = result
            options['BOOST_INCLUDES'] = boost_dir + r'\include\boost-' + boost_ver
        # The following is a little hack to get a reasonable default set for
        # the CPPDOM_INCLUDES variable before the user has to enter it manually.
        if opt.envVar == 'CPPDOM_ROOT' and \
           options.get('CPPDOM_INCLUDES', '') == '':
            options['CPPDOM_INCLUDES'] = os.path.join(result, 'include')
        # The following is a little hack to get a reasonable default set for
        # the GMTL_INCLUDES variable before the user has to enter it manually.
        if opt.envVar == 'GMTL_ROOT' and \
           options.get('GMTL_INCLUDES', '') == '':
            options['GMTL_INCLUDES'] = os.path.join(result, 'include')
    print "+++ Optional Settings"
    processInput(options, 'deps-prefix', 'Dependency installation prefix',
                 False)
    for opt in optional:
        processInput(options, opt.envVar, opt.desc, opt.required)
    postProcessOptions(options)
    writeCacheFile(options)
    return options
def postProcessOptions(options):
    """Derive environment variables the build needs from the chosen options.

    Mutates os.environ based on the option values: extends %PATH% for the
    JDK and omniORB, detects omniORB/omnithread library versions, locates
    OpenAL/ALUT headers and libraries, picks the Trackd API header, and
    detects SDL2.  Assumes the referenced environment variables (JAVA_HOME,
    OMNIORB_ROOT, OPENAL_ROOT, ...) already exist, possibly empty.
    """
    os.environ['instprefix'] = options['prefix'].replace('\\', '\\\\')

    # If the %JAVA_HOME% setting is a valid directory, add its bin subdirectory
    # to the path.
    if os.environ['JAVA_HOME'] != '' and os.path.exists(os.environ['JAVA_HOME']):
        jdk_path = os.path.join(os.environ['JAVA_HOME'], 'bin')
        os.environ['PATH'] = jdk_path + os.pathsep + os.environ['PATH']
        # JacORB (Java ORB) is only useful with a working JDK.
        os.environ['JACORB_PATH'] = os.path.join(gJugglerDir, r'external\JacORB')

    if (os.environ['OMNIORB_ROOT'] != '' and
        os.path.exists(os.environ['OMNIORB_ROOT'])):
        # A 64-bit build of omniORB has to have been compiled against a 64-bit
        # build of Python. Unfortunately, when omniidl.exe acts as the Python
        # interpreter, it doesn't take care of setting PYTHONHOME, and this
        # prevents it from being able to find core modules (such as sys.py or
        # os.py). We'll assume that a 64-bit Python interpreter is being used
        # to run this script and use its installation prefix as PYTHONHOME to
        # help out omniidl.exe.
        if gBuild64 and not os.environ.has_key('PYTHONHOME'):
            os.environ['PYTHONHOME'] = sys.prefix

        omni_bin = os.path.join(os.environ['OMNIORB_ROOT'], 'bin')

        # Newer omniORB layouts put omniidl.exe directly in bin; older ones
        # use bin\x86_win32.
        if os.path.exists(os.path.join(omni_bin, 'omniidl.exe')):
            os.environ['OMNIORB_BIN'] = omni_bin
        else:
            os.environ['OMNIORB_BIN'] = os.path.join(omni_bin, 'x86_win32')

        # Extend the path to include omniORB's bin directory.
        os.environ['PATH'] = os.environ['OMNIORB_BIN'] + os.pathsep + os.environ['PATH']

        omni_lib = os.path.join(os.environ['OMNIORB_ROOT'], 'lib')

        # Prepend omniORB's Python support to PYTHONPATH (for omniidl).
        if os.getenv('PYTHONPATH', '') != '':
            os.environ['PYTHONPATH'] = os.path.join(omni_lib, 'python') + os.pathsep + os.environ['PYTHONPATH']
        else:
            os.environ['PYTHONPATH'] = os.path.join(omni_lib, 'python')

        # Same old/new layout split for the import libraries.
        if os.path.exists(os.path.join(omni_lib, 'omnithread.lib')):
            os.environ['OMNIORB_LIB'] = omni_lib
        else:
            os.environ['OMNIORB_LIB'] = os.path.join(omni_lib, 'x86_win32')

        # Extract the omniORB version number (e.g. '407') from the library
        # file name so link lines can reference the right .lib files.
        omni_glob = os.path.join(os.environ['OMNIORB_LIB'], 'omniORB*_rt.lib')
        libs = glob.glob(omni_glob)
        omni_ver_re = re.compile(r'omniORB(\d\d\d)_rt.lib')

        for l in libs:
            match = omni_ver_re.search(l)
            if match is not None:
                os.environ['OMNIORB_VERSION'] = match.group(1)
                break

        # Same for the omnithread library version (two digits).
        omnithread_glob = os.path.join(os.environ['OMNIORB_LIB'],
                                       'omnithread*_rt.lib')
        libs = glob.glob(omnithread_glob)
        omnithread_ver_re = re.compile(r'omnithread(\d\d)_rt.lib')

        for l in libs:
            match = omnithread_ver_re.search(l)
            if match is not None:
                os.environ['OMNITHREAD_VERSION'] = match.group(1)
                break

    if os.environ['OPENAL_ROOT'] != '':
        if gBuild64:
            subdir = 'Win64'
        else:
            subdir = 'Win32'

        # Probe the handful of directory layouts the OpenAL SDK has used.
        lib_dirs = [os.path.join(os.environ['OPENAL_ROOT'], 'lib'),
                    os.path.join(os.environ['OPENAL_ROOT'], 'libs'),
                    os.path.join(os.environ['OPENAL_ROOT'], 'libs', subdir)]
        for l in lib_dirs:
            openal_lib = os.path.join(l, 'OpenAL32.lib')
            if os.path.exists(openal_lib):
                os.environ['OPENAL_LIB_DIR'] = l
                break

        # Determine if al.h is in the base include directory or in include\AL.
        header_file = os.path.join(os.environ['OPENAL_ROOT'], 'include', 'AL',
                                   'al.h')

        # If the file is include\AL\al.h, then set the environment variable
        # %HAVE_AL_AL_H% to the preprocessor symbol 'HAVE_AL_AL_H'.
        # See modules/sonix/plugins/OpenAL/OpenALSoundImplementation.cpp.
        if os.path.exists(header_file):
            os.environ['HAVE_AL_AL_H'] = 'HAVE_AL_AL_H'

    # Determine if alut.h is in the base include directory or in include\AL.
    openal_envs = ['ALUT_ROOT', 'OPENAL_ROOT']
    for env in openal_envs:
        if os.environ[env] != '':
            header_file = os.path.join(os.environ[env], 'include', 'AL', 'alut.h')

            # If the file is include\AL\alut.h, then set the environment variable
            # %HAVE_AL_ALUT_H% to the preprocessor symbol 'HAVE_AL_ALUT_H'.
            # See modules/sonix/plugins/OpenAL/OpenALSoundImplementation.cpp.
            if os.path.exists(header_file):
                os.environ['HAVE_AL_ALUT_H'] = 'HAVE_AL_ALUT_H'

    # If the ALUT installation directory is not set, then assume that it is the
    # same as the OpenAL installation directory.
    if os.environ['ALUT_ROOT'] == '' and os.environ['OPENAL_ROOT'] != '':
        os.environ['ALUT_ROOT'] = os.environ['OPENAL_ROOT']

    if os.environ['TRACKD_API_ROOT'] != '' and os.path.exists(os.environ['TRACKD_API_ROOT']):
        trackdapi_dir = os.environ['TRACKD_API_ROOT']
        trackdapi_incdir = os.path.join(trackdapi_dir, 'include')

        # Figure out which Trackd API header to include.
        headers = ['trackdAPI.h', 'trackdAPI_CC.h']
        for h in headers:
            if os.path.exists(os.path.join(trackdapi_incdir, h)):
                os.environ['GADGET_TRACKD_API_H'] = '<%s>' % h
                break

    # Detect SDL2 (include\SDL2 present) versus classic SDL and pick the
    # matching import library name.
    if os.environ['SDL_ROOT'] != '':
        sdl2_path = os.path.join(os.environ['SDL_ROOT'], 'include', 'SDL2')
        if os.path.exists(sdl2_path):
            os.environ['HAVE_SDL2'] = 'HAVE_SDL2'
            os.environ['SDL_LIB'] = 'SDL2.lib'
        else:
            os.environ['SDL_LIB'] = 'SDL.lib'
def writeCacheFile(optionDict):
    """Persist the chosen options to the cache file.

    Each entry is written as an executable assignment of the form
    options['KEY'] = r'value' so that getDefaultVars() can re-load the cache
    with execfile().  Fix: the file handle is now closed even when a write
    raises (the original leaked it on error).
    """
    cache_file = open(getCacheFileName(), 'w')
    try:
        for k, v in optionDict.iteritems():
            cache_file.write("options['%s'] = r'%s'\n" % (k, v))
    finally:
        cache_file.close()
def getBoostVersion():
    """Return the Boost version as a tuple of strings, e.g. ('1', '34', '1').

    The version is parsed from the BOOST_LIB_VERSION define in
    <BOOST_INCLUDES>/boost/version.hpp.  When %BOOST_INCLUDES% is not set
    (or the define is not found), the default ('0', '0', '0') is returned.
    Fixes: the header file is now closed even if reading raises, and the
    Python-2-only dict.has_key() test is replaced with the equivalent
    'in' operator.
    """
    boost_ver_re = re.compile(r'#define\s+BOOST_LIB_VERSION\s+"(.+)"')
    info = ('0', '0', '0')

    if 'BOOST_INCLUDES' in os.environ:
        ver_file = open(os.path.join(os.environ['BOOST_INCLUDES'], 'boost',
                                     'version.hpp'))
        try:
            lines = ver_file.readlines()
        finally:
            ver_file.close()

        for l in lines:
            match = boost_ver_re.search(l)
            if match is not None:
                # BOOST_LIB_VERSION looks like "1_34_1".
                info = tuple(match.group(1).split('_'))
                break

    return info
def buildVersion(inputFile, exps):
    """Extract version components from a header-like file.

    exps is an ordered list of compiled regular expressions, each with one
    capture group.  For every line, the first expression that matches claims
    the line and its captured string is stored at that expression's index
    (a later matching line overwrites an earlier one).  Components that
    never match remain the integer 0.  Returns the components as a tuple.
    """
    fh = open(inputFile)
    content = fh.readlines()
    fh.close()

    results = [0] * len(exps)
    for text_line in content:
        for idx, exp in enumerate(exps):
            found = exp.search(text_line)
            if found is not None:
                results[idx] = found.group(1)
                break

    return tuple(results)
def getCppDOMVersion():
    """Return the CppDOM version as a (major, minor, patch) tuple of strings.

    Parsed from cppdom/version.h under %CPPDOM_INCLUDES%.  Fixes: the
    fallback is now ('0', '0', '0') (strings) instead of integer (0, 0, 0) --
    callers join the result with '.'.join(getCppDOMVersion()), which raises
    TypeError on integers -- and the Python-2-only has_key() test is
    replaced with the equivalent 'in' operator.
    """
    exps = [
        re.compile(r'#define\s+CPPDOM_VERSION_MAJOR\s+(\d+)'),
        re.compile(r'#define\s+CPPDOM_VERSION_MINOR\s+(\d+)'),
        re.compile(r'#define\s+CPPDOM_VERSION_PATCH\s+(\d+)')
    ]

    # String components so '.'.join() always works on the result.
    info = ('0', '0', '0')
    if 'CPPDOM_INCLUDES' in os.environ:
        info = buildVersion(os.path.join(os.environ['CPPDOM_INCLUDES'],
                                         'cppdom', 'version.h'),
                            exps)
    return info
def getGMTLVersion():
    """Return the GMTL version as a (major, minor, patch) tuple of strings.

    Parsed from gmtl/Version.h under %GMTL_INCLUDES%.  Bug fix: the original
    initialized an unused 'nums' list but returned 'info', raising NameError
    whenever %GMTL_INCLUDES% was not set.  The fallback is now defined as
    ('0', '0', '0') (strings, since callers do '.'.join(getGMTLVersion())),
    and the Python-2-only has_key() test is replaced with 'in'.
    """
    exps = [
        re.compile(r'#define\s+GMTL_VERSION_MAJOR\s+(\d+)'),
        re.compile(r'#define\s+GMTL_VERSION_MINOR\s+(\d+)'),
        re.compile(r'#define\s+GMTL_VERSION_PATCH\s+(\d+)')
    ]

    info = ('0', '0', '0')
    if 'GMTL_INCLUDES' in os.environ:
        info = buildVersion(os.path.join(os.environ['GMTL_INCLUDES'], 'gmtl',
                                         'Version.h'),
                            exps)
    return info
def updateVersions(vcDir, options):
    """Regenerate per-module version information for the Visual C++ build.

    For each juggler module (VPR, Tweek, JCCL, Sonix, Gadgeteer, VRJuggler)
    this exports the module's version environment variables, regenerates its
    parameter headers and .fpc files from templates when the VERSION file or
    template is newer than the output, and deletes stale build artifacts
    that predate the current VERSION file.

    vcDir   -- subdirectory (relative to gJugglerDir) of the Visual C++ tree.
    options -- options dictionary from setVars()/getDefaultVars().
    """
    class JugglerModule:
        # Describes one module: source/VERSION locations, the environment
        # variable carrying its version, substitution variables, and the
        # (output, template) pairs of generated files.
        def __init__(self, srcDir, vcDir, projDir, versionEnvVar, substVars,
                     genFiles = None):
            self.source_dir = os.path.join(gJugglerDir, srcDir)
            self.version_params = os.path.join(self.source_dir,
                                               'Makefile.inc.in')
            self.version_file = os.path.join(self.source_dir, 'VERSION')
            self.version_env_var = versionEnvVar
            self.version_env_var_dot = versionEnvVar + '_DOT'
            self.subst_vars = substVars
            self.param_files = []
            self.proj_dir = os.path.join(gJugglerDir, vcDir, projDir)

            if genFiles is not None:
                for f in genFiles:
                    output = os.path.join(self.proj_dir, f[0])
                    # A 1-tuple (or explicit None template) means the template
                    # is '<name>.in' next to the module sources.
                    if len(f) == 1 or f[1] is None:
                        template = os.path.join(self.source_dir, f[0] + '.in')
                    else:
                        template = f[1]

                    self.param_files.append((output, template))

        def updateParamFiles(self):
            # Regenerate each output that is missing or out of date.
            for (output, template) in self.param_files:
                if os.path.exists(output):
                    mtime = os.path.getmtime
                    # This test to determine if the module's param header needs to
                    # be regenerated is equivalent to that used by the UNIX build
                    # system.
                    if mtime(self.version_file) > mtime(output) or \
                       mtime(template) > mtime(output):
                        self.__genParamFile(output, template)
                else:
                    self.__genParamFile(output, template)

        def getVersion(self, joinStr, versionInfo = None):
            # Return 'major<joinStr>minor<joinStr>patch'.
            if versionInfo is None:
                (version, major, minor, patch, build) = self.__getVersionInfo()
            else:
                (version, major, minor, patch, build) = versionInfo

            return '%d%s%d%s%d' % (major, joinStr, minor, joinStr, patch)

        def setVersionEnvVar(self):
            # Export both the underscore- and dot-separated version forms.
            vi = self.__getVersionInfo()
            os.environ[self.version_env_var] = self.getVersion('_', vi)
            os.environ[self.version_env_var_dot] = self.getVersion('.', vi)

        def removeOldVersions(self):
            # Delete build outputs older than the module's VERSION file so
            # they get rebuilt with the new version stamp.
            output_files = []
            for ext in ['lib', 'dll', 'exp', 'ilk', 'pdb']:
                output_files += glob.glob(os.path.join(self.proj_dir, '*', '*',
                                                       '*.' + ext))

            mtime = os.path.getmtime
            for f in output_files:
                if mtime(self.version_file) > mtime(f):
                    try:
                        os.remove(f)
                    except:
                        print "Failed to remove", f

        # First line of the VERSION file: 'major.minor.patch-build ...'.
        version_re = re.compile(r'((\d+)\.(\d+)\.(\d+)-(\d+))\s')
        branch_re = re.compile(r'BRANCH\s*=\s*(.+)')
        canon_name_re = re.compile(r'CANON_NAME\s*=\s*(\S.+)')
        subst_re = re.compile(r'@(\w+)@')
        zero_strip_re = re.compile(r'^0*([^0]\d+)')

        def __getVersionInfo(self):
            # Parse (version, major, minor, patch, build) from VERSION.
            ver_file = open(self.version_file)
            cur_ver = ver_file.readline()
            ver_file.close()
            ver_match = self.version_re.match(cur_ver)
            version = ver_match.group(1)
            major = int(ver_match.group(2))
            minor = int(ver_match.group(3))
            patch = int(ver_match.group(4))
            build = int(ver_match.group(5))

            return (version, major, minor, patch, build)

        def __genParamFile(self, output, template):
            # Expand @VAR@ placeholders in 'template' and write 'output'.
            (version, major, minor, patch, build) = self.__getVersionInfo()

            # NOTE: This will not always be identical to the UNIX version because
            # Python does not have %e as a time formatting directive.
            date = time.strftime('%b %d, %Y %H:%M:%S')
            canon_name = ''
            branch = ''

            param_file = open(self.version_params, 'r')
            params = param_file.readlines()
            param_file.close()

            # This is basically a poor man's grep. Can this be done better?
            for line in params:
                match = self.branch_re.match(line)
                if match is not None:
                    branch = match.group(1)
                    continue

                match = self.canon_name_re.match(line)
                if match is not None:
                    canon_name = match.group(1)
                    continue

            version_number = '%03d%03d%03d' % (major, minor, patch)
            version_string = "\"v%s '%s' (BOOST) %s %s\"" % \
                             (version, canon_name, branch, date)

            # Strip leading zeroes from version_number. Is there an easier way
            # to do this?
            version_number = self.zero_strip_re.match(version_number).group(1)

            subst_vars = self.subst_vars
            subst_vars['VER_NUMBER'] = version_number
            subst_vars['MAJOR_VER_NUMBER'] = str(major)
            subst_vars['MAJOR_VERSION'] = str(major)
            subst_vars['MINOR_VER_NUMBER'] = str(minor)
            subst_vars['MINOR_VERSION'] = str(minor)
            subst_vars['PATCH_VER_NUMBER'] = str(patch)
            subst_vars['MICRO_VERSION'] = str(patch)
            subst_vars['BUILD_VER_NUMBER'] = str(build)
            subst_vars['VER_STRING'] = version_string
            subst_vars['VERSION_DOT'] = '%d.%d.%d' % (major, minor, patch)
            subst_vars['SUBSYSTEM'] = 'BOOST'
            subst_vars['PLATFORM'] = 'Windows'
            subst_vars['data_subdir'] = 'share'
            subst_vars['USE_GCC'] = 'no'
            subst_vars['includedir'] = r'"${prefix}\include"'
            subst_vars['libdir'] = r'${exec_prefix}\lib'

            try:
                input_file = open(template, 'r')
                input_lines = input_file.readlines()
                input_file.close()

                # Unknown @VAR@ tokens are replaced with the empty string.
                for i in xrange(len(input_lines)):
                    input_lines[i] = \
                        self.subst_re.sub(lambda m: subst_vars.get(m.group(1), ''),
                                          input_lines[i])

                printStatus("Generating updated " + output)
                param_header = open(output, 'w')
                param_header.writelines(input_lines)
                param_header.close()
            except IOError, ex:
                printStatus("ERROR: Could not read from %s" % template)
                printStatus(ex)
                printStatus("Cannot continue; exiting with error status.")
                sys.exit(EXIT_STATUS_MISSING_DATA_FILE)

    mods = []
    rt_part = ""    # NOTE(review): appears unused in this function.

    # --- VPR -------------------------------------------------------------
    vpr_subst_vars = {}
    vpr_subst_vars['vpr_cxxflags'] = '/DBOOST_ALL_DYN_LINK /DCPPDOM_DYN_LINK /EHsc /GR'
    vpr_subst_vars['vpr_ldflags'] = r'/libpath:"$libdir"'
    vpr_subst_vars['vpr_libs'] = ''
    #This is commented out to change windows platforms to use Boost.ASIO
    #vpr_subst_vars['subsystem_libs'] = 'libnspr4.lib libplc4.lib'
    vpr_subst_vars['subsystem_libs'] = ''
    vpr_subst_vars['BOOST_ROOT'] = r'${fp_file_cwd}\..\..'
    vpr_subst_vars['BOOST_VERSION_DOT'] = '.'.join(getBoostVersion())
    vpr_subst_vars['BOOST_INCLUDES'] = r'/I"${prefix}\include"'
    vpr_subst_vars['BOOST_LDFLAGS'] = r'/libpath:"${prefix}\lib"'
    vpr_subst_vars['CPPDOM_VERSION'] = '.'.join(getCppDOMVersion())
    vpr_subst_vars['LIBDIR_NAME'] = 'lib'
    vpr_module = JugglerModule(r'modules\vapor', vcDir, 'VPR', 'VPR_VERSION',
                               vpr_subst_vars,
                               [(r'vpr\vprParam.h',), ('vpr.fpc',),
                                ('boost.fpc',), ('boost_system.fpc',),
                                ('boost_filesystem.fpc',),
                                ('boost_signals.fpc',),
                                (r'vpr\version.rc',
                                 os.path.join(gJugglerDir, 'version.rc.in'))])
    mods.append(vpr_module)

    # --- Tweek -----------------------------------------------------------
    # XXX: These are pretty weak assumptions.
    tweek_have_cxx = options.get('OMNIORB_ROOT', '') != ''
    tweek_have_java = options.get('JAVA_HOME', '') != ''

    tweek_jars = []
    hack_jars = ['looks.jar', 'liquidlnf.jar', 'metouia.jar']
    for j in gTweekJars + hack_jars + gJdomJars:
        tweek_jars.append(os.path.join('${prefix}', 'share', 'tweek', 'java', j))

    tweek_ext_jars = []
    for j in gTweekExtJars:
        tweek_ext_jars.append(os.path.join('${prefix}', 'share', 'tweek',
                                           'java', j))

    tweek_subst_vars = {}
    tweek_subst_vars['tweek_cxxflags'] = '/EHsc /GR'

    if tweek_have_cxx:
        # omniORB-specific defines and libraries; OMNITHREAD_VERSION and
        # OMNIORB_VERSION were exported by postProcessOptions().
        tweek_subst_vars['tweek_cxxflags'] += ' /DTWEEK_HAVE_CXX /D__WIN32__=1 /D__x86__=1 /D__NT__=1 /D__OSVERSION__=5 /DUSE_core_stub_in_nt_dll /DUSE_core_stub_in_nt_dll_NOT_DEFINED_Subject /I"$prefix\\include\\tweek\\idl"'
        tweek_subst_vars['tweek_extra_libs'] = \
            'omnithread%s_rt.lib omniORB%s_rt.lib omniDynamic%s_rt.lib' % \
            (os.environ['OMNITHREAD_VERSION'], os.environ['OMNIORB_VERSION'],
             os.environ['OMNIORB_VERSION'])

    tweek_subst_vars['tweek_ldflags'] = r'/libpath:"$libdir"'
    tweek_subst_vars['tweek_libs'] = ''
    tweek_subst_vars['tweek_idlflags_java'] = r'-I"$prefix\include"'
    tweek_subst_vars['tweek_idlflags_cxx'] = r'-bcxx -Wbh=.h,s=.cpp -I"$prefix\include"'
    tweek_subst_vars['tweek_idl_inc_flag_java'] = '-I'
    tweek_subst_vars['tweek_idl_inc_flag_cxx'] = '-I'
    tweek_subst_vars['tweek_idl_inc_flag_python'] = '-I'
    tweek_subst_vars['tweek_idlgendir_java'] = '-d '
    tweek_subst_vars['tweek_idlgendir_cxx'] = '-C'
    tweek_subst_vars['tweek_idlgendir_python'] = '-C'
    tweek_subst_vars['tweek_java_api_jars'] = ';'.join(tweek_jars)
    tweek_subst_vars['tweek_ext_jars'] = ';'.join(tweek_ext_jars)
    #tweek_subst_vars['CXX_ORB_DEPS'] = 'omniORB4 omnithread3 omniDynamic4'
    tweek_subst_vars['VPR_VERSION'] = vpr_module.getVersion('.')

    if tweek_have_cxx:
        tweek_subst_vars['BUILD_CXX'] = 'Y'
    else:
        tweek_subst_vars['BUILD_CXX'] = 'N'

    if tweek_have_java:
        tweek_subst_vars['BUILD_JAVA'] = 'Y'
    else:
        tweek_subst_vars['BUILD_JAVA'] = 'N'

    tweek_subst_vars['BUILD_PYTHON_IDL'] = 'N'
    tweek_subst_vars['LIBDIR_NAME'] = 'lib'

    tweek_module = JugglerModule(r'modules\tweek', vcDir, 'Tweek_CXX',
                                 'TWEEK_VERSION', tweek_subst_vars,
                                 [(r'tweek\tweekParam.h',), ('tweek.fpc',),
                                  ('tweek-java.fpc',), ('tweek-python.fpc',),
                                  (r'tweek\version.rc',
                                   os.path.join(gJugglerDir, 'version.rc.in'))])
    mods.append(tweek_module)

    # --- JCCL ------------------------------------------------------------
    jccl_jars = []
    for j in gJcclJars + gJcclRtrcJars:
        jccl_jars.append(os.path.join('${prefix}', 'share', 'jccl', 'java', j))

    jccl_subst_vars = {}
    jccl_subst_vars['jccl_cxxflags'] = '/EHsc /GR'
    jccl_subst_vars['jccl_ldflags'] = r'/libpath:"$libdir"'
    jccl_subst_vars['jccl_libs'] = ''
    jccl_subst_vars['BUILD_CXX'] = tweek_subst_vars['BUILD_CXX']
    jccl_subst_vars['BUILD_JAVA'] = tweek_subst_vars['BUILD_JAVA']
    jccl_subst_vars['jccl_java_api_jars'] = ';'.join(jccl_jars)
    jccl_subst_vars['VPR_VERSION'] = vpr_module.getVersion('.')
    jccl_subst_vars['LIBDIR_NAME'] = 'lib'
    jccl_module = JugglerModule(r'modules\jackal', vcDir, 'JCCL', 'JCCL_VERSION',
                                jccl_subst_vars,
                                [(r'jccl\jcclParam.h',
                                  os.path.join(gJugglerDir,
                                               r'modules\jackal\common\jccl\jcclParam.h.in')),
                                 ('jccl.fpc',),
                                 (r'jccl\version.rc',
                                  os.path.join(gJugglerDir, 'version.rc.in'))
                                ])
    mods.append(jccl_module)

    # --- Sonix -----------------------------------------------------------
    snx_subst_vars = {}
    snx_subst_vars['snx_cxxflags'] = '/EHsc /GR'
    snx_subst_vars['snx_ldflags'] = r'/libpath:"$libdir"'
    snx_subst_vars['snx_libs'] = ''
    snx_subst_vars['VPR_VERSION'] = vpr_module.getVersion('.')
    snx_subst_vars['MIN_GMTL_VERSION'] = '.'.join(getGMTLVersion())
    snx_subst_vars['LIBDIR_NAME'] = 'lib'
    snx_module = JugglerModule(r'modules\sonix', vcDir, 'Sonix', 'SNX_VERSION',
                               snx_subst_vars,
                               [(r'snx\snxParam.h',), ('sonix.fpc',),
                                (r'snx\version.rc',
                                 os.path.join(gJugglerDir, 'version.rc.in'))])
    mods.append(snx_module)

    # --- Gadgeteer -------------------------------------------------------
    gadget_subst_vars = {}
    gadget_subst_vars['gadget_cxxflags'] = '/EHsc /GR'
    gadget_subst_vars['gadget_ldflags'] = r'/libpath:"$libdir"'
    gadget_subst_vars['gadget_libs'] = ''
    gadget_subst_vars['gadget_extra_libs'] = \
        'comctl32.lib ws2_32.lib user32.lib'
    gadget_subst_vars['VPR_VERSION'] = jccl_subst_vars['VPR_VERSION']
    gadget_subst_vars['JCCL_VERSION'] = jccl_module.getVersion('.')
    gadget_subst_vars['MIN_GMTL_VERSION'] = snx_subst_vars['MIN_GMTL_VERSION']
    gadget_subst_vars['LIBDIR_NAME'] = 'lib'
    gadget_module = JugglerModule(r'modules\gadgeteer', vcDir, 'Gadgeteer',
                                  'GADGET_VERSION', gadget_subst_vars,
                                  [(r'gadget\gadgetParam.h',),
                                   ('gadgeteer.fpc',),
                                   (r'gadget\version.rc',
                                    os.path.join(gJugglerDir, 'version.rc.in'))
                                  ])
    mods.append(gadget_module)

    # --- VR Juggler ------------------------------------------------------
    vrj_subst_vars = {}
    vrj_subst_vars['vrj_cxxflags'] = '/EHsc /GR'
    vrj_subst_vars['vrj_ldflags'] = r'/libpath:"$libdir"'
    vrj_subst_vars['vrj_libs'] = ''
    vrj_subst_vars['vrj_d3d_lib'] = ''
    vrj_subst_vars['vrj_ogl_lib'] = ''
    vrj_subst_vars['vrj_pf_lib'] = ''
    vrj_subst_vars['vrj_d3d_extra_libs'] = \
        'd3dxof.lib dxguid.lib d3dx9d.lib d3d9.lib winmm.lib'
    vrj_subst_vars['vrj_ogl_extra_libs'] = 'opengl32.lib glu32.lib'
    vrj_subst_vars['vrj_pf_extra_libs'] = \
        '/libpath:"${PFROOT}\lib" libpf.lib libpfdu-util.lib libpfui.lib opengl32.lib glu32.lib'
    vrj_subst_vars['VPR_VERSION'] = jccl_subst_vars['VPR_VERSION']
    vrj_subst_vars['JCCL_VERSION'] = gadget_subst_vars['JCCL_VERSION']
    vrj_subst_vars['SNX_VERSION'] = snx_module.getVersion('.')
    vrj_subst_vars['GADGET_VERSION'] = gadget_module.getVersion('.')
    vrj_subst_vars['BOOST_ROOT'] = r'${fp_file_cwd}\..\..'
    vrj_subst_vars['BOOST_VERSION_DOT'] = '.'.join(getBoostVersion())
    vrj_subst_vars['BOOST_INCLUDES'] = r'/I"${prefix}\include"'
    vrj_subst_vars['BOOST_LDFLAGS'] = r'/libpath:"${prefix}\lib"'
    vrj_subst_vars['LIBDIR_NAME'] = 'lib'
    vrj_module = JugglerModule(r'modules\vrjuggler', vcDir, 'VRJuggler',
                               'VRJ_VERSION', vrj_subst_vars,
                               [(r'vrj\vrjParam.h',), ('vrjuggler.fpc',),
                                ('vrjuggler-direct3d.fpc',),
                                ('vrjuggler-opengl.fpc',),
                                ('vrjuggler-performer.fpc',),
                                ('boost_program_options.fpc',),
                                (r'vrj\version.rc',
                                 os.path.join(gJugglerDir, 'version.rc.in'))])
    mods.append(vrj_module)

    # Apply the three update steps to every module.
    for m in mods:
        m.setVersionEnvVar()
        m.updateParamFiles()
        m.removeOldVersions()
def generateAntBuildFiles(vcDir):
    """Generate Ant build.xml files for every Java module.

    Each module's build.xml.in template has its @TOKEN@ placeholders
    (@srcdir@, @topdir@, JAR class paths, ...) replaced with concrete paths
    and the result is written into the corresponding project directory
    under vcDir.
    """
    class AntTarget:
        def __init__(self, srcdir, vcDir, moduleName, outputFile = 'build.xml',
                     topSubDir = None):
            '''
            __init__(srcdir, vcDir, moduleName, outputFile, topSubDir)
            Arguments:
            srcdir     -- The location of the Java source to be compiled.
            vcDir      -- The root of the directory tree containing the Visual
                          C++ build system.
            moduleName -- The name of the Visual C++ project (under the vcDir
                          subdirectory) associated with this Ant build. When
                          concatenated to vcDir, this is where the the .class
                          file(s) and the .jar file(s) will be created.
            outputFile -- The name of the Ant build file to generate. If not
                          specified, this defaults to 'build.xml'.
            topSubDir  -- The root of the directory where all the work will be
                          done. This is needed for Ant builds that depend on
                          previously constructed JAR files that will most likely
                          exist somewhere in the vcDir directory tree. This
                          string is used as part of the replacment for the
                          string @topdir@ in the source build.xml.in file. If
                          not specified, this defaults to whatever value is
                          passed in for vcDir.
            '''
            if topSubDir is None:
                topSubDir = vcDir

            self.srcdir = os.path.join(gJugglerDir, srcdir)
            self.topdir = os.path.join(gJugglerDir, topSubDir)
            self.module_name = os.path.join(gJugglerDir, vcDir, moduleName)
            self.output_file = os.path.join(self.module_name, outputFile)

            if not os.path.exists(self.module_name):
                os.mkdir(self.module_name)
            elif not os.path.isdir(self.module_name):
                printStatus("ERROR: %s exists, but it is not a directory!" % self.module_name)
                sys.exit(EXIT_STATUS_INVALID_PATH)

            self.tweek_jars = []
            for j in gTweekJars:
                self.tweek_jars.append(os.path.join(gJugglerDir, vcDir,
                                                    'Tweek_Java', j))

            self.tweek_ext_jars = []
            for j in gTweekExtJars:
                # NOTE(review): this appends to self.tweek_jars, leaving
                # self.tweek_ext_jars empty -- looks like a copy/paste slip.
                # Confirm before changing; the generated @TWEEK_EXT_JARS@
                # substitution currently ends up empty because of it.
                self.tweek_jars.append(os.path.join(gJugglerDir, vcDir,
                                                    'Tweek_Java', j))

            self.jccl_jars = []
            for j in gJcclJars:
                self.jccl_jars.append(os.path.join(gJugglerDir, vcDir,
                                                   'JCCL_Java', j))

            self.jccl_rtrc_jars = []
            for j in gJcclRtrcJars:
                # NOTE(review): appends to self.jccl_jars, leaving
                # self.jccl_rtrc_jars empty -- same suspected copy/paste slip.
                self.jccl_jars.append(os.path.join(gJugglerDir, vcDir,
                                                   'JCCL_Java',
                                                   'RTRC_Plugin_Java', j))

        # This form of regular expressions appears to be necessary because
        # the sub() method does not handle backslashes in the replacement string
        # the way I would like.
        srcdir_re = re.compile(r'^(.*)@srcdir@(.*)$')
        topdir_re = re.compile(r'^(.*)@topdir@(.*)$')
        juggler_root_re = re.compile(r'^(.*)@JUGGLERROOT_ABS@(.*)$')
        jdom_jar_re = re.compile(r'^(.*)@JDOM_JAR@(.*)$')
        tweek_jars_re = re.compile(r'^(.*)@TWEEK_JARS@(.*)$')
        tweek_ext_jars_re = re.compile(r'^(.*)@TWEEK_EXT_JARS@(.*)$')
        jccl_jars_re = re.compile(r'^(.*)@JCCL_JARS@(.*)$')
        java_orb_jar_re = re.compile(r'^(.*)@JAVA_ORB_JAR@(.*)$')
        jogl_jars_re = re.compile(r'^(.*)@JOGL_JARS@(.*)$')
        java3d_jars_re = re.compile(r'^(.*)@JAVA3D_JAR@(.*)$')

        # Shared JAR search paths, evaluated once at class definition time.
        jdom_jars = []
        jdom_root = os.path.join(gJugglerDir, 'external', 'jdom')
        for j in gJdomJars:
            if j == 'jdom.jar':
                jdom_jars.append(os.path.join(jdom_root, 'build', j))
            else:
                jdom_jars.append(os.path.join(jdom_root, 'lib', j))

        jogl_jars = [
            os.path.join(os.environ['JOGL_HOME'], 'jogl.jar'),
            os.path.join(os.environ['JOGL_HOME'], 'jogl-demos-util.jar')
        ]
        java3d_jars = [
            os.path.join(os.environ['JAVA3D_HOME'], r'jre\lib\ext\j3daudio.jar'),
            os.path.join(os.environ['JAVA3D_HOME'], r'jre\lib\ext\j3dcore.jar'),
            os.path.join(os.environ['JAVA3D_HOME'], r'jre\lib\ext\j3dutils.jar'),
            os.path.join(os.environ['JAVA3D_HOME'], r'jre\lib\ext\vecmath.jar')
        ]

        def generateBuildFile(self):
            # Read the template and substitute each @TOKEN@ line in place,
            # then write the finished build file.
            input_file = open(os.path.join(self.srcdir, 'build.xml.in'), 'r')
            input = input_file.readlines()
            input_file.close()

            for i in xrange(len(input)):
                line = input[i]
                if self.srcdir_re.search(line):
                    match = self.srcdir_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], self.srcdir,
                                             match.groups()[1])
                elif self.topdir_re.search(line):
                    match = self.topdir_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], self.topdir,
                                             match.groups()[1])
                elif self.juggler_root_re.search(line):
                    match = self.juggler_root_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], gJugglerDir,
                                             match.groups()[1])
                elif self.java_orb_jar_re.search(line):
                    # @JAVA_ORB_JAR@ is deliberately replaced with nothing.
                    match = self.java_orb_jar_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], "",
                                             match.groups()[1])
                elif self.jdom_jar_re.search(line):
                    jars = os.pathsep.join(self.jdom_jars)
                    match = self.jdom_jar_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], jars,
                                             match.groups()[1])
                elif self.tweek_jars_re.search(line):
                    jars = os.pathsep.join(self.tweek_jars + self.jdom_jars)
                    match = self.tweek_jars_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], jars,
                                             match.groups()[1])
                elif self.tweek_ext_jars_re.search(line):
                    jars = os.pathsep.join(self.tweek_ext_jars)
                    match = self.tweek_ext_jars_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], jars,
                                             match.groups()[1])
                elif self.jccl_jars_re.search(line):
                    jars = os.pathsep.join(self.jccl_jars + self.jccl_rtrc_jars)
                    match = self.jccl_jars_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], jars,
                                             match.groups()[1])
                elif self.jogl_jars_re.search(line):
                    jars = os.pathsep.join(self.jogl_jars)
                    match = self.jogl_jars_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], jars,
                                             match.groups()[1])
                elif self.java3d_jars_re.search(line):
                    jars = os.pathsep.join(self.java3d_jars)
                    match = self.java3d_jars_re.search(line)
                    input[i] = '%s%s%s\n' % (match.groups()[0], jars,
                                             match.groups()[1])

            build_file = open(self.output_file, 'w')
            build_file.writelines(input)
            build_file.close()

    mods = []
    mods.append(AntTarget(r'modules\tweek\java', vcDir, 'Tweek_Java'))
    mods.append(AntTarget(r'modules\tweek\extensions\java', vcDir,
                          'Tweek_Java', 'build-ext.xml'))
    mods.append(AntTarget(r'modules\jackal\config', vcDir, 'JCCL_Java',
                          'build-config.xml'))
    mods.append(AntTarget(r'modules\jackal\editors', vcDir, 'JCCL_Java',
                          'build-editors.xml'))
    mods.append(AntTarget(r'modules\jackal\plugins\corba_rtrc', vcDir,
                          r'JCCL_Java\RTRC_Plugin_Java', 'build.xml'))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig', vcDir, 'VRJConfig',
                          'build.xml', os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\commoneditors',
                          vcDir, r'VRJConfig\commoneditors',
                          'build-commoneditors.xml'))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\cave',
                          vcDir, 'VRJConfig', 'build-cave.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\display_window',
                          vcDir, 'VRJConfig', 'build-display_window.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\flock',
                          vcDir, 'VRJConfig', 'build-flock.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\intersense',
                          vcDir, 'VRJConfig', 'build-intersense.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\motionstar',
                          vcDir, 'VRJConfig', 'build-motionstar.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\pinchglove',
                          vcDir, 'VRJConfig', 'build-pinchglove.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\proxyeditor',
                          vcDir, 'VRJConfig', 'build-proxyeditor.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\customeditors\surfacedisplayeditor',
                          vcDir, 'VRJConfig', 'build-surfacedisplayeditor.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\wizards\cluster',
                          vcDir, 'VRJConfig', 'build-wizard-cluster.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\wizards\newdevice',
                          vcDir, 'VRJConfig', 'build-wizard-newdevice.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\vrjconfig\wizards\vrsystem',
                          vcDir, 'VRJConfig', 'build-wizard-vrsystem.xml',
                          os.path.join(vcDir, 'VRJConfig')))
    mods.append(AntTarget(r'modules\vrjuggler\plugins\corba_perf_mon',
                          vcDir, r'VRJugglerPlugins\Perf_Plugin_Java',
                          'build.xml'))

    for m in mods:
        m.generateBuildFile()
def doInstall(prefix, buildDir):
    """Run the complete installation of all modules into 'prefix'.

    Creates the target directory skeleton, then runs every module install
    step in dependency order, and finally installs the MSVC runtime.
    """
    makeTree(prefix)

    # Each step takes (prefix, buildDir); order matters.
    install_steps = [
        installExternal,
        installVPR,
        installTweek,
        installTweekJava,
        installJCCL,
        installJCCLJava,
        installJCCLPlugins,
        installJCCLPluginsJava,
        installSonix,
        installSonixPlugins,
        installGadgeteer,
        installGadgeteerDrivers,
        installGadgeteerPlugins,
        installVRJuggler,
        installVRJConfig,
        installVRJugglerPlugins,
        installVRJugglerPluginsJava,
    ]
    for step in install_steps:
        step(prefix, buildDir)

    installMsvcRT(prefix)
def mkinstalldirs(dir):
    """Create directory 'dir', making any missing parents first (mkdir -p).

    Does nothing when the directory already exists.
    """
    if os.path.exists(dir):
        return

    (parent, leaf) = os.path.split(dir)
    if len(parent) > 0:
        mkinstalldirs(parent)

    os.mkdir(dir)
def makeTree(prefix):
    """Create the top-level installation directory skeleton under 'prefix'."""
    for parts in (('bin',), ('include',), ('lib',), ('lib', 'flagpoll'),
                  ('share',)):
        mkinstalldirs(os.path.join(prefix, *parts))
def smartCopy(srcfile, dst):
    """Copy srcfile to dst only when needed; drop-in for shutil.copy2.

    srcfile -- full path of the source file.
    dst     -- destination file name or directory.

    An existing destination with the same size and modification time is
    left untouched.  Otherwise the stale destination is removed first so
    the metadata-preserving copy replaces it cleanly.
    """
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(srcfile))

    if os.path.isfile(dst):
        src_info = os.stat(srcfile)
        dst_info = os.stat(dst)
        unchanged = (src_info.st_size == dst_info.st_size and
                     src_info.st_mtime == dst_info.st_mtime)
        if unchanged:
            return

        os.remove(dst)

    shutil.copy2(srcfile, dst)
def installDir(startDir, destDir, allowedExts = None, disallowedExts = None,
disallowedFiles = None):
#print " %s ==> %s"%(startDir, destDir)
cwd = os.getcwd()
# Make sure that destDir specifies an absolute path.
if not os.path.isabs(destDir):
destDir = os.path.abspath(destDir)
mkinstalldirs(destDir)
os.chdir(startDir)
contents = os.listdir(startDir)
if disallowedExts is None:
disallowedExts = []
if disallowedFiles is None:
disallowedFiles = []
# Add some extensions that should always be disallowed. This relieves the
# caller from having to add these repeatedly.
disallowedExts.append('.ilk')
disallowedExts.append('.ncb')
if not gInstallDebug:
disallowedExts.append('.pdb')
disallowedExts.append('.suo')
skip_dirs = ['.svn', 'CVS', 'autom4te.cache']
for f in contents:
if os.path.isdir(f):
if f in skip_dirs:
continue
start_dir = os.path.join(startDir, f)
dest_dir = os.path.join(destDir, f)
installDir(start_dir, dest_dir, allowedExts, disallowedExts,
disallowedFiles)
else:
try:
(root, f_ext) = os.path.splitext(f)
if allowedExts is None:
if f_ext not in disallowedExts:
smartCopy(f, pj(destDir,f))
elif f_ext in allowedExts:
if f not in disallowedFiles:
smartCopy(f, pj(destDir,f))
except (IOError, os.error), why:
print "Can't copy %s to %s: %s" % (f, destDir, str(why))
os.chdir(cwd)
def installLibs(srcRoot, destdir,
                buildTypes = [('ReleaseDLL',), ('DebugDLL', 'debug'),
                              ('DebugRtDLL',)],
                extensions = ['.dll', '.lib', '.exp','.pdb']):
    """Install built library files for each build variant.

    srcRoot    -- root of the per-platform build output tree.
    destdir    -- base installation directory for the libraries.
    buildTypes -- tuples of (build subdirectory[, destination subdirectory]);
                  a second element installs that variant under a subdirectory
                  of destdir (e.g. debug libraries under destdir\debug).
    extensions -- file extensions to copy.
    """
    if gBuild64:
        platform_name = 'x64'
    else:
        platform_name = 'Win32'

    for variant in buildTypes:
        variant_dest = destdir
        if len(variant) == 2:
            variant_dest = os.path.join(destdir, variant[1])

        variant_src = os.path.join(srcRoot, platform_name, variant[0])
        if os.path.exists(variant_src):
            installDir(variant_src, variant_dest, extensions)
def installExternal(prefix, buildDir):
    """Install bundled external dependencies into the tree rooted at prefix.

    Currently a no-op placeholder, kept so doInstall() can invoke every
    install step with the same (prefix, buildDir) signature.
    """
    pass
def installVPR(prefix, buildDir):
    """Install VPR headers, libraries, Flagpoll metadata, tests, and docs."""
    printStatus("Installing VPR headers and libraries ...")

    # Headers: hand-written sources first, then generated headers.
    header_dest = os.path.join(prefix, 'include', 'vpr')
    for header_src in [os.path.join(gJugglerDir, 'modules', 'vapor', 'vpr'),
                       os.path.join(buildDir, 'VPR', 'vpr')]:
        installDir(header_src, header_dest, ['.h'])

    # Libraries for every build variant.
    installLibs(os.path.join(buildDir, 'VPR'), os.path.join(prefix, 'lib'))

    # Flagpoll metadata.
    smartCopy(os.path.join(buildDir, 'VPR', 'vpr.fpc'),
              os.path.join(prefix, 'lib', 'flagpoll'))

    # Test suite (template inputs excluded).
    installDir(os.path.join(gJugglerDir, 'modules', 'vapor', 'test'),
               os.path.join(prefix, 'share', 'vpr', 'test'), None, ['.in'])

    # Install additional files into <prefix>\share\vpr
    share_dest = os.path.join(prefix, 'share', 'vpr')
    vapor_root = os.path.join(gJugglerDir, 'modules', 'vapor')
    smartCopy(os.path.join(gJugglerDir, 'COPYING.txt'), share_dest)
    for doc_name in ['ChangeLog', 'README.txt', 'RELEASE_NOTES.txt']:
        smartCopy(os.path.join(vapor_root, doc_name), share_dest)
def installTweek(prefix, buildDir):
    """Install Tweek C++ headers, libraries, Flagpoll files, tests, and data."""
    printStatus("Installing Tweek C++ headers, libraries, and data files ...")

    # Headers and IDL interfaces, then generated headers.
    header_dest = os.path.join(prefix, 'include', 'tweek')
    installDir(os.path.join(gJugglerDir, 'modules', 'tweek', 'tweek'),
               header_dest, ['.h', '.idl'])
    installDir(os.path.join(buildDir, 'Tweek_CXX', 'tweek'), header_dest,
               ['.h'])

    # Libraries for every build variant.
    installLibs(os.path.join(buildDir, 'Tweek_CXX'),
                os.path.join(prefix, 'lib'))

    # Flagpoll metadata for the C++, Java, and Python bindings.
    fpc_dest = os.path.join(prefix, 'lib', 'flagpoll')
    for fpc_name in ['tweek.fpc', 'tweek-java.fpc', 'tweek-python.fpc']:
        smartCopy(os.path.join(buildDir, 'Tweek_CXX', fpc_name), fpc_dest)

    # Test suite (template inputs excluded) and data files.
    installDir(os.path.join(gJugglerDir, 'modules', 'tweek', 'test'),
               os.path.join(prefix, 'share', 'tweek', 'test'), None, ['.in'])
    installDir(os.path.join(gJugglerDir, 'modules', 'tweek', 'data'),
               os.path.join(prefix, 'share', 'tweek', 'data'))

    # Install additional files into <prefix>\share\tweek
    share_dest = os.path.join(prefix, 'share', 'tweek')
    tweek_root = os.path.join(gJugglerDir, 'modules', 'tweek')
    smartCopy(os.path.join(gJugglerDir, 'COPYING.txt'), share_dest)
    for doc_name in ['ChangeLog', 'RELEASE_NOTES.txt']:
        smartCopy(os.path.join(tweek_root, doc_name), share_dest)
def installTweekJava(prefix, buildDir):
   """
   Install the Tweek Java API JARs, the tweek_jni DLL, the Tweek Beans,
   helper batch scripts, the JacORB IDL compiler, and third-party JAR
   dependencies. Everything is skipped if the Tweek Java API was not built.
   """
   srcdir = os.path.join(buildDir, 'Tweek_Java')

   if os.path.exists(os.path.join(srcdir, gTweekJars[0])):
      printStatus("Installing Tweek Java libraries and data files ...")

      beans = ['Viewers']
      ext_beans = []

      java_dir = os.path.join(prefix, 'share', 'tweek', 'java')
      mkinstalldirs(java_dir)

      # Install the base JAR files that make up the Tweek Java API.
      for j in gTweekJars + gTweekExtJars:
         smartCopy(os.path.join(srcdir, j), java_dir)

      # Install the tweek_jni DLL.
      # BUG FIX: the original accumulated the architecture subdirectory
      # onto destdir inside this loop, so if both the Win32 and the x64
      # DLL existed, the second one was copied into .../<arch>/<arch>.
      # A per-iteration destination variable avoids that.
      for p in ['Win32', 'x64']:
         dll = os.path.join(srcdir, 'tweek_jni', p, 'ReleaseDLL',
                            'tweek_jni.dll')
         if os.path.exists(dll):
            # NOTE(review): the destination is keyed off the host's
            # PROCESSOR_ARCHITECTURE rather than p -- presumably only the
            # DLL matching the host is expected to exist. TODO confirm.
            arch = os.environ['PROCESSOR_ARCHITECTURE']
            dll_destdir = os.path.join(java_dir, arch)
            mkinstalldirs(dll_destdir)
            smartCopy(dll, dll_destdir)

      destdir = os.path.join(prefix, 'share', 'tweek', 'beans')
      mkinstalldirs(destdir)
      bean_srcdir = srcdir
      xml_srcdir = os.path.join(gJugglerDir, 'modules', 'tweek', 'java')

      # Install the standard Tweek Beans (JAR plus XML Bean descriptor).
      for b in beans:
         smartCopy(os.path.join(bean_srcdir, b + '.jar'), destdir)
         smartCopy(os.path.join(xml_srcdir, b + '.xml'), destdir)

      xml_srcdir = os.path.join(gJugglerDir, 'modules', 'tweek', 'extensions',
                                'java')

      # Install the extension Tweek Beans (currently none).
      for b in ext_beans:
         smartCopy(os.path.join(bean_srcdir, b + '.jar'), destdir)
         smartCopy(os.path.join(xml_srcdir, b + '.xml'), destdir)

      # Install tweek.bat.
      srcdir = os.path.join(gJugglerDir, 'modules', 'tweek', 'java')
      destdir = os.path.join(prefix, 'bin')
      smartCopy(os.path.join(srcdir, 'tweek.bat'), destdir)

      # Install JacORB IDL compiler.
      srcdir = os.path.join(gJugglerDir, 'external', 'JacORB')
      installDir(srcdir, destdir, ['.jar'])
      smartCopy(os.path.join(srcdir, 'idl.bat'), destdir)

      # Destination for all remaining .jar files.
      destdir = os.path.join(prefix, 'share', 'tweek', 'java')

      # Install JDOM.
      srcdir = os.path.join(gJugglerDir, 'external', 'jdom', 'lib')
      installDir(srcdir, destdir, ['.jar'])
      srcdir = os.path.join(gJugglerDir, 'external', 'jdom', 'build')
      installDir(srcdir, destdir, ['.jar'])

      # Install various look and feel implementations.
      laf_jars = [
         r'jgoodies-looks\looks.jar',
         r'liquid\liquidlnf.jar',
         r'metouia\metouia.jar'
      ]
      srcroot = os.path.join(gJugglerDir, 'external', 'swing-laf')
      for j in laf_jars:
         smartCopy(os.path.join(srcroot, j), destdir)
   else:
      printStatus("Tweek Java API not built. Skipping.")
def installJCCL(prefix, buildDir):
   """
   Install the JCCL C++ headers, libraries, Flagpoll metadata, test
   programs, tools, and XML schemas under the installation prefix.
   """
   printStatus("Installing JCCL C++ headers, libraries, and tools ...")

   jackal_mod = os.path.join(gJugglerDir, 'modules', 'jackal')
   include_dir = os.path.join(prefix, 'include', 'jccl')

   # Headers come from three source subtrees plus the build tree.
   for subtree in ['common', 'config', 'rtrc']:
      installDir(os.path.join(jackal_mod, subtree, 'jccl'), include_dir,
                 ['.h'])
   installDir(os.path.join(buildDir, 'JCCL', 'jccl'), include_dir, ['.h'])

   # Compiled libraries and Flagpoll metadata.
   installLibs(os.path.join(buildDir, 'JCCL'), os.path.join(prefix, 'lib'))
   smartCopy(os.path.join(buildDir, 'JCCL', 'jccl.fpc'),
             os.path.join(prefix, 'lib', 'flagpoll'))

   # Test programs (skipping autoconf .in inputs) and tools.
   installDir(os.path.join(jackal_mod, 'test'),
              os.path.join(prefix, 'share', 'jccl', 'test'), None, ['.in'])
   installDir(os.path.join(jackal_mod, 'tools'),
              os.path.join(prefix, 'share', 'jccl', 'tools'))

   # XML schemas, laid out on disk to mirror their canonical URLs.
   schema_root = os.path.join(prefix, 'share', 'jccl', 'data', 'schema')
   data_dir = os.path.join(jackal_mod, 'data')

   xsd_3_0 = os.path.join(schema_root, 'www.vrjuggler.org', 'jccl', 'xsd',
                          '3.0')
   mkinstalldirs(xsd_3_0)
   smartCopy(os.path.join(data_dir, 'configuration.xsd'), xsd_3_0)

   xsd_3_1 = os.path.join(schema_root, 'www.vrjuggler.org', 'jccl', 'xsd',
                          '3.1')
   mkinstalldirs(xsd_3_1)
   smartCopy(os.path.join(data_dir, 'definition.xsd'), xsd_3_1)

   installDir(os.path.join(data_dir, 'stdschemas'), schema_root)

   # Install additional files into <prefix>\share\jccl
   share_dir = os.path.join(prefix, 'share', 'jccl')
   smartCopy(os.path.join(gJugglerDir, 'COPYING.txt'), share_dir)
   for doc in ['ChangeLog', 'RELEASE_NOTES.txt']:
      smartCopy(os.path.join(jackal_mod, doc), share_dir)
def installJCCLPlugins(prefix, buildDir):
   """Install the JCCL C++ run-time reconfiguration plug-in DLLs."""
   printStatus("Installing JCCL C++ plug-ins ...")
   plugin_dir = os.path.join(prefix, 'lib', 'jccl', 'plugins')
   rtrc_build = os.path.join(buildDir, 'JCCL', 'RTRC_Plugin_CXX')
   installLibs(rtrc_build, plugin_dir, extensions = ['.dll', '.exp'])
def installJCCLJava(prefix, buildDir):
   """
   Install the JCCL Java config/editor Beans and their third-party JAR
   dependencies, if the JCCL Java API was built.
   """
   srcdir = os.path.join(buildDir, 'JCCL_Java')

   if os.path.exists(os.path.join(srcdir, 'jccl_config.jar')):
      printStatus("Installing JCCL Java libraries and data files ...")

      bean_dir = os.path.join(prefix, 'share', 'jccl', 'beans')
      mkinstalldirs(bean_dir)

      # The Bean JARs, then the XML Bean descriptor from the source tree.
      for jar in ['jccl_config.jar', 'jccl_editors.jar']:
         smartCopy(os.path.join(srcdir, jar), bean_dir)
      config_src = os.path.join(gJugglerDir, 'modules', 'jackal', 'config')
      smartCopy(os.path.join(config_src, 'jccl_config.xml'), bean_dir)

      # Install dependencies.
      java_dir = os.path.join(prefix, 'share', 'jccl', 'java')
      ext_root = os.path.join(gJugglerDir, 'external')
      mkinstalldirs(java_dir)
      for jar in [r'TableLayout\TableLayout.jar']:
         smartCopy(os.path.join(ext_root, jar), java_dir)
   else:
      printStatus("JCCL Java API not built. Skipping.")
def installJCCLPluginsJava(prefix, buildDir):
   """
   Install the JCCL Java run-time reconfiguration plug-in Beans, if they
   were built.
   """
   jar_dir = os.path.join(buildDir, 'JCCL_Java', 'RTRC_Plugin_Java')

   if os.path.exists(os.path.join(jar_dir, gJcclRtrcJars[0])):
      printStatus("Installing JCCL Java plug-ins ...")
      bean_dir = os.path.join(prefix, 'share', 'jccl', 'beans')
      for jar in gJcclRtrcJars:
         smartCopy(os.path.join(jar_dir, jar), bean_dir)
      # The XML Bean descriptor lives in the source tree.
      xml_dir = os.path.join(gJugglerDir, 'modules', 'jackal', 'plugins',
                             'corba_rtrc')
      smartCopy(os.path.join(xml_dir, 'jccl_rtrc.xml'), bean_dir)
   else:
      printStatus("JCCL Java plug-ins not built. Skipping.")
def installSonix(prefix, buildDir):
   """
   Install the Sonix headers, libraries, Flagpoll metadata, sample code,
   and data files under the installation prefix.
   """
   printStatus("Installing Sonix headers, libraries, and samples ...")

   sonix_mod = os.path.join(gJugglerDir, 'modules', 'sonix')
   include_dir = os.path.join(prefix, 'include', 'snx')

   # Headers from the source tree and the build tree.
   installDir(os.path.join(sonix_mod, 'snx'), include_dir, ['.h'])
   installDir(os.path.join(buildDir, 'Sonix', 'snx'), include_dir, ['.h'])

   # Compiled libraries and Flagpoll metadata.
   installLibs(os.path.join(buildDir, 'Sonix'), os.path.join(prefix, 'lib'))
   smartCopy(os.path.join(buildDir, 'Sonix', 'sonix.fpc'),
             os.path.join(prefix, 'lib', 'flagpoll'))

   # Sample code (skipping autoconf .in inputs) and shared data.
   installDir(os.path.join(sonix_mod, 'samples'),
              os.path.join(prefix, 'share', 'sonix', 'samples'), None,
              ['.in'])
   installDir(os.path.join(sonix_mod, 'data'),
              os.path.join(prefix, 'share', 'sonix', 'data'))

   # Install additional files into <prefix>\share\sonix
   share_dir = os.path.join(prefix, 'share', 'sonix')
   smartCopy(os.path.join(gJugglerDir, 'COPYING.txt'), share_dir)
   for doc in ['ChangeLog', 'README.txt']:
      smartCopy(os.path.join(sonix_mod, doc), share_dir)
def installSonixPlugins(prefix, buildDir):
   """Install the Sonix audio back-end plug-in DLLs (OpenAL and Audiere)."""
   printStatus("Installing Sonix plug-ins ...")
   plugin_dir = os.path.join(prefix, 'lib', 'sonix', 'plugins')
   build_types = [('ReleaseDLL', 'opt'), ('DebugDLL', 'dbg'),
                  ('DebugRtDll', 'dbgrt')]
   for backend in ['OpenAL', 'Audiere']:
      installLibs(os.path.join(buildDir, 'Sonix', backend), plugin_dir,
                  buildTypes = build_types,
                  extensions = ['.dll', '.exp'])
def installGadgeteer(prefix, buildDir):
   """
   Install the Gadgeteer headers, libraries, Flagpoll metadata, data files,
   samples, tests, and tools under the installation prefix.
   """
   printStatus("Installing Gadgeteer headers, libraries, and samples ...")

   gadget_mod = os.path.join(gJugglerDir, 'modules', 'gadgeteer')

   # gadget headers (source tree plus generated build-tree headers),
   # followed by the cluster headers.
   gadget_inc = os.path.join(prefix, 'include', 'gadget')
   installDir(os.path.join(gadget_mod, 'gadget'), gadget_inc, ['.h'])
   installDir(os.path.join(buildDir, 'Gadgeteer', 'gadget'), gadget_inc,
              ['.h'])
   installDir(os.path.join(gadget_mod, 'cluster'),
              os.path.join(prefix, 'include', 'cluster'), ['.h'])

   # Compiled libraries and Flagpoll metadata.
   installLibs(os.path.join(buildDir, 'Gadgeteer'),
               os.path.join(prefix, 'lib'))
   smartCopy(os.path.join(buildDir, 'Gadgeteer', 'gadgeteer.fpc'),
             os.path.join(prefix, 'lib', 'flagpoll'))

   # Shared data, then samples/tests/tools (skipping autoconf .in inputs).
   share_dir = os.path.join(prefix, 'share', 'gadgeteer')
   installDir(os.path.join(gadget_mod, 'data'),
              os.path.join(share_dir, 'data'))
   for subdir in ['samples', 'test', 'tools']:
      installDir(os.path.join(gadget_mod, subdir),
                 os.path.join(share_dir, subdir), None, ['.in'])

   # Install additional files into <prefix>\share\gadgeteer
   smartCopy(os.path.join(gJugglerDir, 'COPYING.txt'), share_dir)
   for doc in ['ChangeLog', 'RELEASE_NOTES.txt']:
      smartCopy(os.path.join(gadget_mod, doc), share_dir)
def installGadgeteerDrivers(prefix, buildDir):
   """
   Install the Gadgeteer device driver DLLs and, when FTD2XX_ROOT is set,
   the FTD2XX DLLs that some drivers depend on.
   """
   printStatus("Installing Gadgeteer device drivers ...")

   destdir = os.path.join(prefix, 'lib', 'gadgeteer', 'drivers')
   srcroot = os.path.join(buildDir, 'Gadgeteer')

   drivers = ['DTrack', 'DataGlove', 'DirectXJoystick', 'Ether24', 'Fastrak',
              'Flock', 'IBox', 'IntersenseAPI', 'IS900', 'MotionNode',
              'MotionStar', 'MSFTSpeechRecognition', 'PinchGlove',
              'SerialEncoder', 'SdlJoystick', 'SpaceBall', 'TUIO','TrackdAPI',
              'VRPN', 'Wanda', 'X-IST', 'OptiTrack']

   for d in drivers:
      installLibs(os.path.join(srcroot, d), destdir,
                  extensions = ['.dll', '.exp'])

   # BUG FIX: os.environ['FTD2XX_ROOT'] raised KeyError when the variable
   # was not set even though the code compares against "". This optional
   # dependency is now read with os.getenv(), matching how the other
   # optional packages in this script are handled.
   ftd2xx_root = os.getenv('FTD2XX_ROOT', '')
   if ftd2xx_root != "":
      if gBuild64:
         srcdir = os.path.join(ftd2xx_root, 'amd64')
      else:
         srcdir = os.path.join(ftd2xx_root, 'i386')

      printStatus("Installing FTD2XX DLLs")
      destdir = os.path.join(prefix, 'bin')
      for dll in glob.glob(os.path.join(srcdir, '*.dll')):
         smartCopy(dll, destdir)
def installGadgeteerPlugins(prefix, buildDir):
   """Install the Gadgeteer cluster plug-in headers and DLLs."""
   printStatus("Installing Gadgeteer cluster plug-ins ...")

   plugin_src = os.path.join(gJugglerDir, 'modules', 'gadgeteer', 'plugins')

   # Plug-in headers.
   installDir(os.path.join(plugin_src, 'ApplicationDataManager'),
              os.path.join(prefix, 'include', 'plugins',
                           'ApplicationDataManager'),
              ['.h'])
   installDir(os.path.join(plugin_src, 'ApplicationBarrierManager'),
              os.path.join(prefix, 'include', 'plugins',
                           'ApplicationBarrierManager'),
              ['.h'])

   # Plug-in DLLs from the build tree.
   dll_dir = os.path.join(prefix, 'lib', 'gadgeteer', 'plugins')
   build_root = os.path.join(buildDir, 'Gadgeteer')
   for plugin in ['ApplicationDataManager', 'ApplicationBarrierManager',
                  'RemoteInputManager']:
      installLibs(os.path.join(build_root, plugin), dll_dir,
                  extensions = ['.dll', '.exp'])
def installVRJuggler(prefix, buildDir):
   """
   Install the VR Juggler headers, the core and draw manager libraries,
   Flagpoll metadata, data files, samples, tests, and tools.
   """
   printStatus("Installing VR Juggler headers, libraries, and samples ...")

   vrj_mod = os.path.join(gJugglerDir, 'modules', 'vrjuggler')
   vrj_build = os.path.join(buildDir, 'VRJuggler')

   # Headers from the source tree and the build tree.
   include_dir = os.path.join(prefix, 'include', 'vrj')
   installDir(os.path.join(vrj_mod, 'vrj'), include_dir, ['.h'])
   installDir(os.path.join(vrj_build, 'vrj'), include_dir, ['.h'])

   # The core library plus each draw manager library.
   lib_dir = os.path.join(prefix, 'lib')
   installLibs(vrj_build, lib_dir)
   for mgr in ['OpenGL_Draw_Manager', 'Performer_Draw_Manager',
               'Direct3D_Draw_Manager']:
      installLibs(os.path.join(vrj_build, mgr), lib_dir)

   # Flagpoll metadata. The Performer variant is only copied when OpenGL
   # Performer is available (PFROOT set).
   fpc_dir = os.path.join(prefix, 'lib', 'flagpoll')
   for fpc in ['vrjuggler.fpc', 'vrjuggler-direct3d.fpc',
               'vrjuggler-opengl.fpc']:
      smartCopy(os.path.join(vrj_build, fpc), fpc_dir)
   if os.getenv('PFROOT', '') != '':
      smartCopy(os.path.join(vrj_build, 'vrjuggler-performer.fpc'), fpc_dir)

   # Shared data, then samples/tests/tools (skipping autoconf .in inputs).
   share_dir = os.path.join(prefix, 'share', 'vrjuggler')
   installDir(os.path.join(vrj_mod, 'data'), os.path.join(share_dir, 'data'))
   for subdir in ['samples', 'test', 'tools']:
      installDir(os.path.join(vrj_mod, subdir),
                 os.path.join(share_dir, subdir), None, ['.in'])

   # Install additional files into <prefix>\share\vrjuggler
   smartCopy(os.path.join(gJugglerDir, 'COPYING.txt'), share_dir)
   for doc in ['ChangeLog', 'RELEASE_NOTES.txt']:
      smartCopy(os.path.join(vrj_mod, doc), share_dir)
def installVRJConfig(prefix, buildDir):
   """
   Install the VRJConfig JavaBean, its common editor libraries, any custom
   editors and wizards that were compiled, the vrjconfig.bat launcher, and
   third-party JAR dependencies. Skips everything if VRJConfig.jar was not
   built.
   """
   jardir = os.path.join(buildDir, 'VRJConfig')

   if os.path.exists(os.path.join(jardir, 'VRJConfig.jar')):
      printStatus("Installing VRJConfig ...")

      vrjconfig_src = os.path.join(gJugglerDir, 'modules', 'vrjuggler',
                                   'vrjconfig')

      bean_jars = [
         'VRJConfig.jar'
      ]

      # NOTE: glob() results already include the jardir prefix.
      common_editors = glob.glob(os.path.join(jardir, 'commoneditors',
                                              '*.jar'))

      custom_editor_src = os.path.join(vrjconfig_src, 'customeditors')
      custom_editors = [
         ('cave', 'CaveEditor'),
         ('display_window', 'DisplayWindowEditor'),
         ('flock', 'FlockEditor'),
         ('intersense', 'IntersenseEditor'),
         ('motionstar', 'MotionStarEditor'),
         ('pinchglove', 'PinchGloveEditor'),
         ('proxyeditor', 'ProxyEditor'),
         ('surfacedisplayeditor', 'SurfaceDisplayEditor')
      ]

      wizard_src = os.path.join(vrjconfig_src, 'wizards')
      wizards = [
         ('cluster', 'ClusterWizard'),
         ('newdevice', 'NewDeviceWizard'),
         ('vrsystem', 'VRSystemWizard'),
      ]

      # Install JAR files that act as reusable Java class libraries.
      # BUG FIX: the entries of common_editors are complete paths produced
      # by glob.glob() above, so the original's os.path.join(jardir, j)
      # duplicated the directory prefix whenever jardir was relative.
      destdir = os.path.join(prefix, 'share', 'vrjuggler', 'java')
      mkinstalldirs(destdir)
      for jar_file in common_editors:
         if os.path.exists(jar_file):
            smartCopy(jar_file, destdir)

      # Install the base set of VRJConfig JavaBeans.
      destdir = os.path.join(prefix, 'share', 'vrjuggler', 'beans')
      mkinstalldirs(destdir)
      for j in bean_jars:
         jar_file = os.path.join(jardir, j)
         if os.path.exists(jar_file):
            smartCopy(jar_file, destdir)
      smartCopy(os.path.join(vrjconfig_src, 'VRJConfig.xml'), destdir)

      # Install any custom editors that were compiled (JAR from the build
      # tree, XML descriptor from the source tree).
      destdir = os.path.join(prefix, 'share', 'vrjuggler', 'beans',
                             'customeditors')
      mkinstalldirs(destdir)
      for e in custom_editors:
         jar_file = os.path.join(jardir, e[1] + '.jar')
         xml_file = os.path.join(custom_editor_src, e[0], e[1] + '.xml')
         if os.path.exists(jar_file):
            smartCopy(xml_file, destdir)
            smartCopy(jar_file, destdir)

      # Install any wizards that were compiled.
      destdir = os.path.join(prefix, 'share', 'vrjuggler', 'beans', 'wizards')
      mkinstalldirs(destdir)
      for e in wizards:
         jar_file = os.path.join(jardir, e[1] + '.jar')
         if os.path.exists(jar_file):
            smartCopy(jar_file, destdir)

      # Install vrjconfig.bat.
      destdir = os.path.join(prefix, 'bin')
      smartCopy(os.path.join(vrjconfig_src, 'vrjconfig.bat'), destdir)

      # Install dependencies.
      dep_jars = [
         r'jgraph\lib\jgraph.jar',
         r'jgraph\lib\jgraphaddons.jar',
      ]
      srcroot = os.path.join(gJugglerDir, 'external')
      destdir = os.path.join(prefix, 'share', 'vrjuggler', 'java')
      for j in dep_jars:
         smartCopy(os.path.join(srcroot, j), destdir)
   else:
      printStatus("VRJConfig not built. Skipping.")
def installVRJugglerPlugins(prefix, buildDir):
   """Install the VR Juggler C++ performance-monitoring plug-in DLLs."""
   printStatus("Installing VR Juggler C++ plug-ins ...")
   plugin_dir = os.path.join(prefix, 'lib', 'vrjuggler', 'plugins')
   perf_build = os.path.join(buildDir, 'VRJugglerPlugins', 'Perf_Plugin_CXX')
   installLibs(perf_build, plugin_dir, extensions = ['.dll', '.exp'])
def installVRJugglerPluginsJava(prefix, buildDir):
   """
   Install the VR Juggler Java plug-in Beans that were built, along with
   the JFreeChart JARs they depend on.
   """
   jar_dir = os.path.join(buildDir, 'VRJugglerPlugins', 'Perf_Plugin_Java')

   # (Bean name, source subdirectory under modules/vrjuggler/plugins)
   plugins = [('PerformanceMonitor', 'corba_perf_mon')]
   for name, src_subdir in plugins:
      if os.path.exists(os.path.join(jar_dir, name + '.jar')):
         printStatus("Installing VR Juggler Java plug-ins ...")
         destdir = os.path.join(prefix, 'share', 'vrjuggler', 'beans')
         mkinstalldirs(destdir)
         smartCopy(os.path.join(jar_dir, name + '.jar'), destdir)
         # BUG FIX: the original reassigned srcdir inside this loop, so a
         # second plug-in's existence check would have looked in the wrong
         # directory. A separate variable keeps the JAR directory intact.
         xml_dir = os.path.join(gJugglerDir, 'modules', 'vrjuggler',
                                'plugins', src_subdir)
         smartCopy(os.path.join(xml_dir, name + '.xml'), destdir)
      else:
         printStatus("VR Juggler %s Java plug-ins not built. Skipping." % name)

   # Install JFreeChart.
   destdir = os.path.join(prefix, 'share', 'vrjuggler', 'java')
   srcdir = os.path.join(gJugglerDir, 'external', 'jfreechart')
   installDir(srcdir, destdir, ['.jar'])
def installMsvcRT(prefix):
printStatus("Installing MSVC runtime DLLs")
try:
srcroot = os.environ['SystemRoot']
destdir = os.path.join(prefix, 'lib')
# Get *every* MSVC runtime DLL. This list could be shortened at some
# point if anyone cares to try.
sys_dir = os.path.join(srcroot, 'System32')
dlls = glob.glob(os.path.join(sys_dir, 'msvc*.dll'))
for d in dlls:
smartCopy(d, pj(destdir, d))
#smartCopy(d, pj(destdir,d))
smartCopy(os.path.join(sys_dir, 'dbghelp.dll'), destdir)
except KeyError, ex:
printStatus("WARNING: Could not install MSVC runtime DLLs")
print ex
def doDependencyInstall(prefix, buildDir):
   """Install every VR Juggler dependency package under the given prefix."""
   makeTree(prefix)

   installCppDOM(prefix)
   installBoost(prefix, buildDir)   # only Boost needs the build directory

   # The remaining installers take just the prefix; the optional ones skip
   # themselves when their environment variable is unset.
   for installer in [installGMTL, installAudiere, installOpenAL,
                     installOmniORB, installDoozer, installSDL, installVRPN]:
      installer(prefix)
def simpleInstall(name, root, prefix, includeDir = None, libDir = 'lib',
                  optional = False):
   """
   Generic package installer: copy a package's include/, lib/, bin/, and
   share/ trees from root into the corresponding directories under prefix.
   An optional package with an empty root is silently skipped.
   """
   if optional and root == '':
      return

   printStatus("Installing " + name)

   # Headers may live somewhere other than <root>/include.
   if includeDir is None:
      includeDir = os.path.join(root, 'include')

   # (source directory, destination subdirectory, extension filter)
   transfers = [
      (includeDir, 'include', ['.h', '.hpp', '.ipp']),
      (os.path.join(root, libDir), 'lib', None),
      (os.path.join(root, 'bin'), 'bin', None),
      (os.path.join(root, 'share'), 'share', None)
   ]

   for srcdir, dest_name, exts in transfers:
      if os.path.exists(srcdir):
         destdir = os.path.join(prefix, dest_name)
         if exts is None:
            installDir(srcdir, destdir)
         else:
            installDir(srcdir, destdir, exts)
def installCppDOM(prefix):
   """Install the CppDOM headers and libraries (64-bit builds use lib64)."""
   if gBuild64:
      lib_subdir = 'lib64'
   else:
      lib_subdir = 'lib'
   simpleInstall('CppDOM headers and libraries', os.environ['CPPDOM_ROOT'],
                 prefix, os.environ['CPPDOM_INCLUDES'], lib_subdir)
def installDoozer(prefix):
   """Install the Doozer makefile bits if DOOZER_ROOT is set (optional)."""
   doozer_root = os.getenv('DOOZER_ROOT', '')
   simpleInstall('Installing Doozer makefile bits', doozer_root, prefix,
                 optional = True)
def installBoost(prefix, buildDir):
   """
   Install the Boost headers, the Boost libraries, and the generated
   boost*.fpc Flagpoll metadata files.
   """
   printStatus("Installing Boost headers and libraries")

   # Flagpoll metadata generated by the VPR and VRJuggler builds.
   fpc_dir = os.path.join(prefix, 'lib', 'flagpoll')
   fpc_files = glob.glob(os.path.join(buildDir, 'VPR', 'boost*.fpc'))
   fpc_files += glob.glob(os.path.join(buildDir, 'VRJuggler', 'boost*.fpc'))
   for fpc in fpc_files:
      smartCopy(fpc, fpc_dir)

   # Headers.
   srcroot = os.environ['BOOST_ROOT']
   installDir(os.environ['BOOST_INCLUDES'], os.path.join(prefix, 'include'))

   # Libraries: copy everything matching *boost_*.
   lib_dir = os.path.join(prefix, 'lib')
   mkinstalldirs(lib_dir)
   for lib in glob.glob(os.path.join(srcroot, 'lib', '*boost_*')):
      smartCopy(lib, lib_dir)
def installGMTL(prefix):
   """Install the GMTL headers and GMTL's Flagpoll metadata."""
   simpleInstall('GMTL headers', os.environ['GMTL_ROOT'], prefix,
                 os.environ['GMTL_INCLUDES'])

   # GMTL ships its .fpc metadata under share/flagpoll.
   fpc_src = os.path.join(os.environ['GMTL_ROOT'], 'share', 'flagpoll')
   if os.path.exists(fpc_src):
      installDir(fpc_src, os.path.join(prefix, 'lib', 'flagpoll'))
def installAudiere(prefix):
   """Install Audiere if AUDIERE_ROOT is set (optional dependency)."""
   audiere_root = os.getenv('AUDIERE_ROOT', '')
   simpleInstall('Audiere headers, libraries, and executables', audiere_root,
                 prefix, optional = True)
def installOpenAL(prefix):
   """
   Install the OpenAL and ALUT redistributable DLLs into <prefix>/bin.
   Both packages are optional; each is skipped when its environment
   variable (OPENAL_ROOT / ALUT_ROOT) is not set.
   """
   # BUG FIX: os.environ['OPENAL_ROOT'] (and ALUT_ROOT below) raised
   # KeyError when the variable was unset, even though the code compares
   # against "". os.getenv() with a default matches the handling of the
   # other optional dependencies in this script.
   srcdir = os.getenv('OPENAL_ROOT', '')
   if srcdir != "":
      printStatus("Installing OpenAL DLL")
      destdir = os.path.join(prefix, 'bin')

      # OpenAL 1.0 and 1.1 put the redistributable DLL in different places.
      dll_dirs = [os.path.join(srcdir, 'bin'), os.path.join(srcdir, 'lib'),
                  os.path.join(srcdir, 'dll')]

      sysroot = os.environ['SystemRoot']

      # For a 64-bit build, we know that we only have to look in one place
      # for the DLL.
      if gBuild64:
         dll_dirs.append(os.path.join(sysroot, 'system32'))
      else:
         dll_dirs += [os.path.join(sysroot, 'SysWOW64'),
                      os.path.join(sysroot, 'system32')]

      # Copy the first OpenAL32.dll found.
      for d in dll_dirs:
         dll = os.path.join(d, 'OpenAL32.dll')
         if os.path.exists(dll):
            smartCopy(dll, destdir)
            break

   srcdir = os.getenv('ALUT_ROOT', '')
   if srcdir != "":
      printStatus("Installing ALUT DLL")
      destdir = os.path.join(prefix, 'bin')
      alut_dll_dirs = [os.path.join(srcdir, 'lib'),
                       os.path.join(srcdir, 'bin')]

      # Copy the first alut.dll found.
      for d in alut_dll_dirs:
         alut_dll = os.path.join(d, 'alut.dll')
         if os.path.exists(alut_dll):
            smartCopy(alut_dll, destdir)
            break
def installOmniORB(prefix):
   """
   Install the omniORB headers, libraries, Python support, executables,
   and scripts. Optional: a no-op when OMNIORB_ROOT is not set.
   """
   root = os.getenv('OMNIORB_ROOT', '')
   if root == '':
      return

   printStatus('Installing omniORB headers, libraries, and executables')

   # Install all header files.
   srcdir = os.path.join(root, 'include')
   if os.path.exists(srcdir):
      destdir = os.path.join(prefix, 'include')
      installDir(srcdir, destdir, ['.h', '.hh'])

   # Install all libraries.
   # NOTE: When we install the omniORB .lib files, we get rid of the
   # x86_win32 subdirectory.
   # ROBUSTNESS FIX: OMNIORB_LIB (and OMNIORB_BIN below) were read with
   # os.environ[], which raises KeyError when unset; .get() lets the
   # existence check skip them gracefully instead.
   srcdir = os.environ.get('OMNIORB_LIB', '')
   if os.path.exists(srcdir):
      destdir = os.path.join(prefix, 'lib')
      installDir(srcdir, destdir)

   srcdir = os.path.join(root, 'lib', 'python')
   if os.path.exists(srcdir):
      # Install the omniidl Python bits into the bin directory so that users
      # do not have to set %PYTHONPATH%.
      destdir = os.path.join(prefix, 'bin')
      installDir(srcdir, destdir)

   # If omnipython is installed along with omniORB, we need to install it,
   # too.
   srcdir = os.path.join(root, 'lib', 'python1.5')
   if os.path.exists(srcdir):
      destdir = os.path.join(prefix, 'lib', 'python1.5')
      installDir(srcdir, destdir)

   # Install all executables and DLLs.
   # NOTE: When we install the omniORB .dll files, we get rid of the
   # x86_win32 subdirectory.
   srcdir = os.environ.get('OMNIORB_BIN', '')
   if os.path.exists(srcdir):
      destdir = os.path.join(prefix, 'bin')
      installDir(srcdir, destdir)

   srcdir = os.path.join(root, 'bin', 'scripts')
   if os.path.exists(srcdir):
      destdir = os.path.join(prefix, 'bin', 'scripts')
      installDir(srcdir, destdir)
def installSDL(prefix):
   """
   Install the SDL headers and libraries if SDL_ROOT is set (optional).

   BUG FIX: os.environ['SDL_ROOT'] raised KeyError when the variable was
   unset, defeating optional = True; os.getenv() with a default lets
   simpleInstall() skip the package, as installDoozer() already does.
   """
   simpleInstall('SDL headers and libraries', os.getenv('SDL_ROOT', ''),
                 prefix, optional = True)
def installVRPN(prefix):
   """
   Install VRPN if VRPN_ROOT is set (optional).

   BUG FIX: os.environ['VRPN_ROOT'] raised KeyError when the variable was
   unset, defeating optional = True; os.getenv() with a default lets
   simpleInstall() skip the package, as installDoozer() already does.
   """
   simpleInstall('VRPN headers, libraries, and executables',
                 os.getenv('VRPN_ROOT', ''), prefix, optional = True)
class GuiFrontEnd:
   def __init__(self, master):
      """
      Build the GUI on the given Tk root: create the widget frames,
      redirect printStatus() output into the GUI, create one entry row per
      build option, and run the initial validation pass.
      """
      self.mRoot = master
      self.mRoot.title("VR Juggler Win32 Build")
      # Persist the settings cache when the user closes the window.
      self.mRoot.protocol("WM_DELETE_WINDOW", self.cleanup)
      # NOTE(review): this lambda evaluates to the bound method without
      # calling it, so the <Destroy> binding is effectively a no-op; the
      # WM_DELETE_WINDOW protocol above already routes through cleanup().
      self.mRoot.bind("<Destroy>", lambda e: self.cleanup)
      self.createUI()
      # Replace the console version of printStatus() with our own version.
      global printStatus
      printStatus = self.printMessage
      (cl_ver_major, cl_ver_minor, vc_dir, self.mNeedsUpgrade) = chooseVisualStudioDir()
      required, optional, options = getDefaultVars(cl_ver_major, cl_ver_minor)
      self.mOptions = options      # option name -> plain string value
      self.mTkOptions = {}         # option name -> Tkinter.StringVar
      self.mVcDir = vc_dir
      # Make a StringVar dictionary.
      for k in options:
         self.mTkOptions[k] = self.__str2TkinterStrVar(options[k])
      self.makeOptionsInterface(required, optional)
      self.update()
def __str2TkinterStrVar(self, inputStr):
temp = Tkinter.StringVar()
temp.set(inputStr)
return temp
def __writeCacheFile(self):
cache_file = open(getCacheFileName(), 'w')
for k, v in self.mTkOptions.iteritems():
output = "options['%s'] = r'%s'\n" % (k, v.get())
cache_file.write(output)
cache_file.close()
def printMessage(self, msg):
self.mRoot.OutputFrame.MessageText['state'] = 'normal'
self.mRoot.OutputFrame.MessageText.insert(Tkinter.END, msg + "\n", "a")
self.mRoot.OutputFrame.MessageText['state'] = 'disabled'
def cleanup(self):
self.__writeCacheFile()
self.mRoot.destroy()
   def createUI(self):
      """
      Create the five main frames of the window (header, settings, command,
      output, status) and populate the header, output, and status frames.
      The settings and command frames are filled in later by
      makeOptionsInterface().
      """
      # Set up the frames.
      pad_amount = 10
      # Settings.
      self.Juggler = "#0EAE06"
      self.JugglerYellow = "#EECE26"
      self.JugglerPurple = "#8E76AA"
      self.HeaderFont = (16)
      # Top-left: logo and title. Top-right: settings. Bottom-left:
      # commands. Bottom-right: output. Bottom row: status bar.
      self.mRoot.HeaderFrame = Tkinter.Frame(self.mRoot, borderwidth = 1,
                                             relief = "sunken",
                                             bg = self.JugglerYellow)
      self.mRoot.HeaderFrame.grid(row = 0, column = 0,
         sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W,
         ipadx = pad_amount, ipady = pad_amount)
      self.mRoot.SettingsFrame = Tkinter.Frame(self.mRoot, borderwidth = 1,
                                               relief = "sunken",
                                               bg = self.JugglerYellow)
      self.mRoot.SettingsFrame.grid(row = 0, column = 1,
         sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W,
         ipadx = pad_amount, ipady = pad_amount)
      self.mRoot.CommandFrame = Tkinter.Frame(self.mRoot, borderwidth = 1,
                                              relief = "sunken",
                                              bg = self.JugglerYellow)
      self.mRoot.CommandFrame.grid(row = 1, column = 0,
         sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W,
         ipadx = pad_amount, ipady = pad_amount)
      self.mRoot.OutputFrame = Tkinter.Frame(self.mRoot, borderwidth = 1,
                                             relief = "sunken",
                                             bg = self.JugglerYellow)
      self.mRoot.OutputFrame.grid(row = 1, column = 1,
         sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W,
         ipadx = pad_amount, ipady = pad_amount)
      self.mRoot.StatusFrame = Tkinter.Frame(self.mRoot, borderwidth = 1,
                                             relief = "sunken",
                                             bg = self.JugglerYellow)
      self.mRoot.StatusFrame.grid(row = 2, column = 0, columnspan = 2,
                                  sticky = Tkinter.W + Tkinter.E)
      # HeaderFrame Innards
      # NOTE(review): the logo is loaded from a path relative to the
      # current working directory -- presumably the script's directory.
      self.mRoot.HeaderFrame.vjImage = \
         Tkinter.PhotoImage(file = r"juggler-logo.gif", format = "gif")
      self.mRoot.HeaderFrame.Image = Tkinter.Label(self.mRoot.HeaderFrame,
                                                   image = self.mRoot.HeaderFrame.vjImage,
                                                   bg = self.JugglerYellow)
      self.mRoot.HeaderFrame.Image.grid(row = 0, column = 0, sticky = Tkinter.N)
      self.mRoot.HeaderFrame.ImageLabel = \
         Tkinter.Label(self.mRoot.HeaderFrame,
                       text = "VR Juggler Win32 Build Configuration",
                       bg = self.JugglerYellow, font = self.HeaderFont)
      self.mRoot.HeaderFrame.ImageLabel.grid(row = 1, column = 0,
                                             sticky = Tkinter.N)
      self.mRoot.HeaderFrame.rowconfigure(1, weight = 1)
      # SettingsFrame Innards
      # Two sub-frames hold the per-option entry rows; the SettingsRows
      # dicts are filled in later by makeEntryRow().
      self.mRoot.SettingsFrame.RequiredSettingsFrame = \
         Tkinter.Frame(self.mRoot.SettingsFrame, borderwidth = 1,
                       relief = "sunken", bg = self.JugglerYellow)
      self.mRoot.SettingsFrame.RequiredSettingsFrame.grid(row = 1, column = 0,
         sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W)
      self.mRoot.SettingsFrame.RequiredSettingsFrame.SettingsRows = {}
      self.mRoot.SettingsFrame.OptionalSettingsFrame = \
         Tkinter.Frame(self.mRoot.SettingsFrame, borderwidth = 1,
                       relief = "sunken", bg = self.JugglerYellow)
      self.mRoot.SettingsFrame.OptionalSettingsFrame.grid(row = 2, column = 0,
         sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W)
      self.mRoot.SettingsFrame.OptionalSettingsFrame.SettingsRows = {}
      #OutputFrame Innards
      # Read-only scrolling text widget; printMessage() appends to it.
      self.mRoot.OutputFrame.MessageText = Tkinter.Text(self.mRoot.OutputFrame,
                                                        height = 20,
                                                        width = 100,
                                                        state = 'disabled')
      self.mRoot.OutputFrame.MessageText.grid(row = 0, column = 0)
      self.mRoot.OutputFrame.MessageText.tag_config("a", foreground = "blue")
      self.mRoot.OutputFrame.MessageText.ScrollBar = \
         Tkinter.Scrollbar(self.mRoot.OutputFrame)
      self.mRoot.OutputFrame.MessageText.ScrollBar.grid(row = 0, column = 1,
                                                        sticky = Tkinter.W + Tkinter.N + Tkinter.S)
      self.mRoot.OutputFrame.MessageText.config(yscrollcommand = self.mRoot.OutputFrame.MessageText.ScrollBar.set)
      self.mRoot.OutputFrame.MessageText.ScrollBar.config(command = self.mRoot.OutputFrame.MessageText.yview)
      #StatusFrame Innards
      self.mRoot.StatusFrame.Label = Tkinter.Label(self.mRoot.StatusFrame,
                                                   text = "Status: ")
      self.mRoot.StatusFrame.Label.grid(row=0, column=0, rowspan=2)#, sticky="W")
      self.mRoot.StatusFrame.StatusLabel = Tkinter.Label(self.mRoot.StatusFrame,
                                                         text = "Test",
                                                         anchor = Tkinter.W)
      self.mRoot.StatusFrame.StatusLabel.grid(row = 0, column = 1,
                                              sticky = Tkinter.EW)
      self.mRoot.StatusFrame.columnconfigure(1, weight = 1)
   def makeOptionsInterface(self, required, optional):
      """
      Populate the settings frames with one entry row per build option
      (required and optional) and fill the command frame with the build
      checkbuttons and the Build and Install button. The button starts
      disabled; updateCommandFrame() enables it once all required settings
      are filled in.
      """
      # RequiredSettingsFrame
      next_row = 0
      self.mRoot.SettingsFrame.RequiredSettingsFrame.Label = \
         Tkinter.Label(self.mRoot.SettingsFrame.RequiredSettingsFrame,
                       font = self.HeaderFont, text = "Required Settings",
                       bg = self.JugglerPurple)
      self.mRoot.SettingsFrame.RequiredSettingsFrame.Label.grid(row = next_row,
                                                                column = 0,
                                                                columnspan = 3,
                                                                sticky = Tkinter.EW)
      self.mRoot.SettingsFrame.RequiredSettingsFrame.columnconfigure(0, weight = 1)
      next_row = next_row + 1
      # The installation prefix row, then one row per required option.
      self.makeEntryRow(self.mRoot.SettingsFrame.RequiredSettingsFrame,
                        "Installation Prefix:", 'prefix', next_row)
      next_row = next_row + 1
      for opt in required:
         self.makeEntryRow(self.mRoot.SettingsFrame.RequiredSettingsFrame,
                           opt.desc, opt.envVar, next_row, opt.required,
                           opt.isDirectory)
         next_row += 1
      # OptionalSettingsFrame
      next_row = 0
      self.mRoot.SettingsFrame.OptionalSettingsFrame.Label = \
         Tkinter.Label(self.mRoot.SettingsFrame.OptionalSettingsFrame,
                       font = self.HeaderFont, text = "Optional Settings",
                       bg = self.JugglerPurple)
      self.mRoot.SettingsFrame.OptionalSettingsFrame.Label.grid(row = next_row,
                                                                column = 0,
                                                                columnspan = 3,
                                                                sticky = Tkinter.EW)
      self.mRoot.SettingsFrame.OptionalSettingsFrame.columnconfigure(0, weight = 1)
      next_row = next_row + 1
      # The dependency prefix row, then one row per optional option.
      self.makeEntryRow(self.mRoot.SettingsFrame.OptionalSettingsFrame,
                        "Dependency installation prefix:", 'deps-prefix',
                        next_row, False)
      next_row = next_row + 1
      for opt in optional:
         self.makeEntryRow(self.mRoot.SettingsFrame.OptionalSettingsFrame,
                           opt.desc, opt.envVar, next_row, opt.required,
                           opt.isDirectory)
         next_row += 1
      # CommandFrame Innards.
      # Each checkbutton keeps its state in a "Yes"/"No" StringVar hung off
      # the widget itself.
      next_row = 0
      self.mRoot.CommandFrame.Build64Check = \
         Tkinter.Checkbutton(self.mRoot.CommandFrame,
                             text="64-bit Build",
                             bg = self.JugglerYellow,
                             activebackground = self.JugglerYellow,
                             onvalue = "Yes", offvalue = "No")
      self.mRoot.CommandFrame.Build64Check.Variable = Tkinter.StringVar()
      # Default to the gBuild64 setting detected earlier.
      build64 = "No"
      if gBuild64:
         build64 = "Yes"
      self.mRoot.CommandFrame.Build64Check.Variable.set(build64)
      self.mRoot.CommandFrame.Build64Check["variable"] = \
         self.mRoot.CommandFrame.Build64Check.Variable
      self.mRoot.CommandFrame.Build64Check.grid(row = next_row, column = 0,
                                                sticky = Tkinter.EW, pady = 4)
      next_row = next_row + 1
      self.mRoot.CommandFrame.OpenVSCheck = \
         Tkinter.Checkbutton(self.mRoot.CommandFrame,
                             text="Open Visual Studio IDE",
                             bg = self.JugglerYellow,
                             activebackground = self.JugglerYellow,
                             onvalue = "Yes", offvalue = "No")
      self.mRoot.CommandFrame.OpenVSCheck.Variable = Tkinter.StringVar()
      self.mRoot.CommandFrame.OpenVSCheck.Variable.set("No")
      self.mRoot.CommandFrame.OpenVSCheck["variable"] = \
         self.mRoot.CommandFrame.OpenVSCheck.Variable
      self.mRoot.CommandFrame.OpenVSCheck.grid(row = next_row, column = 0,
                                               sticky = Tkinter.EW, pady = 4)
      next_row = next_row + 1
      self.mRoot.CommandFrame.InstallJugglerCheck = \
         Tkinter.Checkbutton(self.mRoot.CommandFrame, text = "Install Juggler",
                             bg = self.JugglerYellow,
                             activebackground = self.JugglerYellow,
                             onvalue ="Yes", offvalue = "No")
      self.mRoot.CommandFrame.InstallJugglerCheck.Variable = Tkinter.StringVar()
      self.mRoot.CommandFrame.InstallJugglerCheck.Variable.set("Yes")
      self.mRoot.CommandFrame.InstallJugglerCheck["variable"] = \
         self.mRoot.CommandFrame.InstallJugglerCheck.Variable
      self.mRoot.CommandFrame.InstallJugglerCheck.grid(row = next_row,
                                                       column = 0,
                                                       sticky = Tkinter.EW,
                                                       pady = 4)
      next_row = next_row + 1
      self.mRoot.CommandFrame.InstallJugglerDepsCheck = \
         Tkinter.Checkbutton(self.mRoot.CommandFrame,
                             text = "Install Juggler Dependencies",
                             bg = self.JugglerYellow,
                             activebackground = self.JugglerYellow,
                             onvalue = "Yes", offvalue = "No")
      #,command=self.installDeps, state="disabled")
      self.mRoot.CommandFrame.InstallJugglerDepsCheck.Variable = Tkinter.StringVar()
      self.mRoot.CommandFrame.InstallJugglerDepsCheck.Variable.set("No")
      self.mRoot.CommandFrame.InstallJugglerDepsCheck["variable"] = \
         self.mRoot.CommandFrame.InstallJugglerDepsCheck.Variable
      self.mRoot.CommandFrame.InstallJugglerDepsCheck.grid(row = next_row,
                                                           column = 0,
                                                           sticky = Tkinter.EW,
                                                           pady = 4)
      next_row = next_row + 1
      # The build button; enabled later by updateCommandFrame() when all
      # required settings have values.
      self.mRoot.CommandFrame.BuildInstallButton = \
         Tkinter.Button(self.mRoot.CommandFrame, text = "Build and Install",
                        command = self.doBuild, state = "disabled")
      self.mRoot.CommandFrame.BuildInstallButton.grid(row = next_row,
                                                      column = 0,
                                                      sticky = Tkinter.EW,
                                                      pady = 4)
      next_row = next_row + 1
def update(self):
self.updateRequiredOptions()
self.updateOptionsValidation()
self.updateCommandFrame()
def entryValidation(self, entry, update = False):
if os.path.isdir(entry.get()):
entry.config(fg = "blue")
else:
entry.config(fg = "red")
if update:
self.updateRequiredOptions()
self.updateCommandFrame()
def updateOptionValidation(self, setting):
if setting["isDirectory"]:
self.entryValidation(setting["Entry"])
def updateOptionsValidation(self):
for k in self.mRoot.SettingsFrame.RequiredSettingsFrame.SettingsRows:
self.updateOptionValidation(self.mRoot.SettingsFrame.RequiredSettingsFrame.SettingsRows[k])
for k in self.mRoot.SettingsFrame.OptionalSettingsFrame.SettingsRows:
self.updateOptionValidation(self.mRoot.SettingsFrame.OptionalSettingsFrame.SettingsRows[k])
def updateRequiredOptions(self):
self.mReqSettingsSet = True
for k in self.mRoot.SettingsFrame.RequiredSettingsFrame.SettingsRows:
if self.mTkOptions[k].get() == "":
self.mReqSettingsSet = False
def updateCommandFrame(self):
if self.mReqSettingsSet:
#self.mRoot.CommandFrame.BuildButton.config(state = "normal")
self.mRoot.CommandFrame.BuildInstallButton.config(state = "normal")
else:
#self.mRoot.CommandFrame.BuildButton.config(state = "disabled")
self.mRoot.CommandFrame.BuildInstallButton.config(state = "disabled")
def validateOptions(self):
status = True
# Make sure that all options that are directories reference valid
# directories.
inv_dir_list = []
for k in self.mTkOptions:
if self.mOptionWidgetsDict[k][4] and \
not os.path.isdir(self.mTkOptions[k].get() and \
self.mTkOptions[k].get() != ""):
status = False
inv_dir_list.append(self.mOptionWidgetsDict[k][0]['text'])
if not status:
tkMessageBox.showwarning(
"Invalid Directory",
"The following settings reference non-existent directories and must be set before proceeding.\n%s" % inv_dir_list)
return status
return status
    def doBuild(self):
        """Kick off a build: export the option values into the process
        environment, honor the 64-bit checkbox, persist the options cache
        and launch the Visual Studio build on a background thread."""
        # Prevent re-entry while a build is already running; re-enabled by
        # updateCommandFrame() once the build finishes.
        self.mRoot.CommandFrame.BuildInstallButton.config(state = "disabled")
        # Set the environment vars.
        for k in self.mTkOptions.iterkeys():
            os.environ[k] = self.mTkOptions[k].get()
        # This has to be done before calling postProcessOptions().
        if self.mRoot.CommandFrame.Build64Check.Variable.get() == "Yes":
            global gBuild64
            gBuild64 = True
        # NOTE(review): option validation is currently bypassed (the call to
        # self.validateOptions() is commented out).
        if True:#self.validateOptions():
            postProcessOptions(self.mOptions)
            self.__writeCacheFile()
            # Run the build asynchronously so the GUI stays responsive.
            self.BuildThread = threading.Thread(None, self.runVisualStudio,
                                                "BuildThread")
            self.BuildThread.start()
def installJuggler(self):
doInstall(self.mTkOptions['prefix'].get(),
os.path.join(gJugglerDir, self.mVcDir))
def installDeps(self):
doDependencyInstall(self.mTkOptions['deps-prefix'].get(),
os.path.join(gJugglerDir, self.mVcDir))
def getFile(self, optionIndex, initialDir, toEntry):
def clearAndGet(self, optionIndex, initialDir):
result_dir = tkFileDialog.askdirectory(title = optionIndex,
initialdir = initialDir)
if result_dir != '':
# Normalize directory (on windows it will also set slashes right).
result_dir = os.path.normpath(result_dir)
self.mTkOptions[optionIndex].set(result_dir)
return lambda: clearAndGet(self, optionIndex, initialDir)
    def makeEntryRow(self, master, label, optionIndex, row, required = True,
                     isDirectory = True):
        """Create one settings row (label + entry and, for directory-valued
        options, a Browse button) in *master* at grid row *row*, and
        register the widgets in master.SettingsRows[optionIndex]."""
        # Label.
        label_ref = Tkinter.Label(master, text = label)
        label_ref.grid(row = row, column = 0,
                       sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W,
                       pady = 2)
        # Entry.
        entry_ref = Tkinter.Entry(master, width = 75,
                                  textvariable = self.mTkOptions[optionIndex])
        entry_ref.grid(row = row, column = 1,
                       sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W,
                       pady = 2)
        if isDirectory:
            # Re-validate the entry whenever the variable changes; for
            # required entries also refresh the command frame button state.
            if required:
                self.mTkOptions[optionIndex].trace_variable('w',
                    lambda n, i, m: self.entryValidation(entry_ref, True))
            else:
                self.mTkOptions[optionIndex].trace_variable('w',
                    lambda n, i, m: self.entryValidation(entry_ref, False))
        # Button, if there is one.
        button_ref = None
        if isDirectory:
            button_ref = Tkinter.Button(master, text = 'Browse',
                                        command = self.getFile(optionIndex,
                                                               self.mTkOptions[optionIndex].get(),
                                                               entry_ref))
            button_ref.grid(row = row, column = 2,
                            sticky = Tkinter.N + Tkinter.E + Tkinter.S + Tkinter.W,
                            pady = 2)
        # Keep references to the widgets so validation code can reach them.
        master.SettingsRows[optionIndex] = {
            "Label" : label_ref,
            "Entry" : entry_ref,
            "Button" : button_ref,
            "isRequired" : required,
            "isDirectory" : isDirectory
        }
    def buildFinished(self):
        """Post-build hook: perform the requested installations (Juggler
        itself and/or its dependencies, per the checkboxes) and re-enable
        the command buttons."""
        # Debug output showing the state of the install checkboxes.
        print self.mRoot.CommandFrame.InstallJugglerCheck.Variable.get()
        print self.mRoot.CommandFrame.InstallJugglerDepsCheck['state']
        if self.mRoot.CommandFrame.InstallJugglerCheck.Variable.get() == "Yes":
            self.printMessage("Installing Juggler...")
            doInstall(self.mTkOptions['prefix'].get(),
                      os.path.join(gJugglerDir, self.mVcDir))
        if self.mRoot.CommandFrame.InstallJugglerDepsCheck.Variable.get() == "Yes":
            self.printMessage("Installing Juggler Dependencies...")
            doDependencyInstall(self.mTkOptions['deps-prefix'].get(),
                                os.path.join(gJugglerDir, self.mVcDir))
        self.printMessage("Build and Installation Finished.")
        # Re-enable the Build and Install button if requirements still hold.
        self.updateCommandFrame()
    def runVisualStudio(self):
        """Worker-thread entry point: regenerate version headers and Ant
        build files, then either run the build headless (streaming devenv
        output into the GUI message area) or open Visual Studio
        interactively, and finally trigger the install step."""
        #print "updateVersions()"
        self.printMessage("Generating Version Headers.")
        updateVersions(self.mVcDir, self.mOptions)
        self.printMessage("Generating Ant Build Files.")
        generateAntBuildFiles(self.mVcDir)
        devenv_cmd = getVSCmd()
        # Quote the paths so they survive spaces when joined into a shell
        # command line below.
        (devenv_cmd_no_exe, ext) = os.path.splitext(devenv_cmd)
        devenv_cmd_no_exe = '"%s"' % (devenv_cmd_no_exe)
        solution_file = r'"%s"' % os.path.join(gJugglerDir, self.mVcDir,
                                               'Juggler.sln')
        build_args = r'/build DebugDLL'
        if self.mRoot.CommandFrame.OpenVSCheck.Variable.get() == "No":
            # Headless build: run devenv via a pipe and mirror its output
            # into the (normally read-only) message text widget.
            cmd = devenv_cmd_no_exe + ' ' + solution_file + ' ' + build_args
            print cmd
            try:
                stdin = os.popen(cmd)
                while True:
                    line = stdin.readline()
                    if not line:
                        break
                    self.mRoot.OutputFrame.MessageText['state'] = 'normal'
                    self.mRoot.OutputFrame.MessageText.insert(Tkinter.END, line)
                    self.mRoot.OutputFrame.MessageText.yview("moveto", 1.0)
                    self.mRoot.OutputFrame.MessageText['state'] = 'disabled'
            except OSError, osEx:
                print "Could not execute %s: %s" % (cmd, osEx)
                sys.exit(EXIT_STATUS_MSVS_START_ERROR)
        else:
            # Interactive build: open the IDE and wait for the user to
            # build and quit.
            cmd = devenv_cmd_no_exe + ' ' + solution_file
            try:
                self.printMessage("Visual Studio has been opened. Build the Solution and then exit Visual Studio to continue the Installation.")
                status = os.spawnl(os.P_WAIT, devenv_cmd, 'devenv', solution_file)
            except OSError, osEx:
                print "Could not execute %s: %s" % (cmd, osEx)
                sys.exit(EXIT_STATUS_MSVS_START_ERROR)
        self.buildFinished()
def getVSCmd( interactive=True ):
devenv_cmd = None
# devenv is used by the full version of Visual Studio. VCExpress is the
# launch command used by Visual C++ Express Edition.
if( interactive ):
cmds = ['devenv.exe', 'VCExpress.exe']
else:
cmds = ['devenv.com', 'VCExpress.exe']
for p in os.getenv('PATH', '').split(os.pathsep):
# print "Searching in", p
for c in cmds:
cmd = os.path.join(p, c)
if os.path.exists(cmd):
devenv_cmd = cmd
break
if devenv_cmd is not None:
break
if devenv_cmd is None:
# The environment variable %VSINSTALLDIR% is set by vsvars32.bat.
print "WARNING: Falling back on the use of %VSINSTALLDIR%"
devenv_cmd = r'%s' % os.path.join(os.getenv('VSINSTALLDIR', ''),
'devenv.exe')
return devenv_cmd
def getMSBuild():
msbuild_cmd = None
# devenv is used by the full version of Visual Studio. VCExpress is the
# launch command used by Visual C++ Express Edition.
cmds = ['msbuild.exe']
for p in os.getenv('PATH', '').split(os.pathsep):
# print "Searching in", p
for c in cmds:
cmd = os.path.join(p, c)
if os.path.exists(cmd):
msbuild_cmd = cmd
break
if msbuild_cmd is not None:
break
if msbuild_cmd is None:
# The environment variable %VSINSTALLDIR% is set by vsvars32.bat.
print "WARNING: Falling back on the use of %VSINSTALLDIR%"
msbuild_cmd = r'%s' % os.path.join(os.getenv('VSINSTALLDIR', ''),
'msbuild.exe')
return msbuild_cmd
def doMSVCUpgrade(devenvCmd, vcDir, solutionFile):
    """Upgrade legacy .vcproj project files under the project tree to the
    newer .vcxproj format (deleting stale conversions first), fix up the
    converted files via msvcconv, then upgrade the solution file."""
    import msvcconv
    print "Upgrading solution and project files..."
    proj_dir = os.path.join(gJugglerDir, vcDir)
    for root, dirnames, filenames in os.walk(proj_dir):
        for filename in fnmatch.filter(filenames, '*.vcproj'):
            orig_name = os.path.join(root, filename)
            converted_name = os.path.join(root, filename).replace(".vcproj", ".vcxproj")
            converted_short_name = filename[:].replace(".vcproj", ".vcxproj")
            if os.path.exists(converted_name):
                mtime = os.path.getmtime
                # Test to see if we should regenerate
                if mtime(orig_name) > mtime(converted_name):
                    print "\nDeleting outdated %s" % converted_short_name
                    try:
                        os.remove(os.path.join(root, filename).replace(".vcproj", ".vcxproj"))
                    except OSError, ex:
                        print ex
            if not os.path.exists(converted_name):
                print "\nCreating %s by conversion..." % converted_short_name
                # Get rid of .vcxproj.filters file if it exists
                try:
                    os.remove(os.path.join(root, filename).replace(".vcproj", ".vcxproj.filters"))
                except OSError, ex:
                    pass
                # Let Visual Studio perform the actual conversion, then
                # patch up the result.
                subprocess.call([devenvCmd, orig_name, "/upgrade"])
                project = msvcconv.ProjectFile(converted_name)
                project.parseAndFix()
                if project.getChangesMade():
                    print "%s - Fixed target names following conversion" % converted_short_name
                    project.write()
    # Finally upgrade solution if needed
    subprocess.call([devenvCmd, solutionFile, "/upgrade"])
def getBuildCommand(devenvCmd, solutionFile, config):
    """Return the devenv command line (as an argv list) for an unattended
    build of *config*, targeting x64 when a 64-bit build was requested
    via the gBuild64 global."""
    arch = 'x64' if gBuild64 else 'Win32'
    # devenv offers no command-line control over parallel builds, so the
    # job-limit option cannot be applied here.
    return [devenvCmd, solutionFile, "/build", "%s|%s" % (config, arch)]
def getIDECommand(devenvCmd, solutionFile):
    """Return the argv list that opens *solutionFile* in the IDE."""
    return [devenvCmd, solutionFile]
def main():
    """Command-line entry point.

    Parses the options, then either runs the text-mode flow (choose the
    Visual Studio directory, generate files, build, and prompt/perform the
    VR Juggler and dependency installations) or starts the Tkinter GUI.
    """
    disable_tk = False
    configs = []
    try:
        cmd_opts, cmd_args = getopt.getopt(sys.argv[1:], "cano:h",
                                           ["64", "nogui", "nobuild", "a", "auto",
                                            "b", "build=", "install",
                                            "install-deps", "install-debug",
                                            "options-file=", "jobs=",
                                            "help"])
    except getopt.GetoptError:
        usage()
        sys.exit(EXIT_STATUS_INVALID_ARGUMENT)
    skip_vs = False
    install = None
    installDeps = None
    numJobs = 1
    global gOptionsFileName
    global gBuild64
    global gUnattended
    global gJobLimit
    global gInstallDebug
    for o, a in cmd_opts:
        if o in ("-c","--nogui"):
            disable_tk = True
        # NOTE(review): ("--jobs=") is a plain string, not a tuple, so this
        # is a substring test; getopt yields "--jobs", which happens to
        # match — consider ("--jobs",).
        elif o in ("--jobs="):
            gJobLimit = a
        elif o in ("-b","--build"):
            if a in gValidBuildConfigs:
                print "Will build in %s mode" % a
                configs.append(a)
            else:
                print "Unrecognized build configuration %s!" % a
                print "Valid build configurations: %s" % ", ".join(gValidBuildConfigs)
                sys.exit(EXIT_STATUS_INVALID_ARGUMENT)
        elif o == "--64":
            gBuild64 = True
        elif o == "--install":
            install = True
        elif o == "--install-deps":
            installDeps = True
        elif o == "--install-debug":
            gInstallDebug = True
        elif o in ("-a", "--auto"):
            # Fully unattended mode implies the text interface.
            disable_tk = True
            gUnattended = True;
        elif o in ("-o", "--options-file"):
            gOptionsFileName = a
            # Make sure file exists.
            if not os.path.isfile(gOptionsFileName):
                print "No file %s exists. Will use default options." % \
                    gOptionsFileName
        elif o in ("-n", "--nobuild"):
            skip_vs = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit(0)
    # If Tkinter is not available or the user disabled the Tk frontend, use
    # the text-based interface.
    if not gHaveTk or disable_tk:
        (cl_ver_major, cl_ver_minor, vc_dir, needs_upgrade) = chooseVisualStudioDir()
        options = setVars(cl_ver_major, cl_ver_minor)
        updateVersions(vc_dir, options)
        generateAntBuildFiles(vc_dir)
        try:
            status = 0
            if not skip_vs:
                devenv_cmd_interactive = getVSCmd()
                devenv_cmd = getVSCmd( False )
                msbuild_cmd = getMSBuild()
                solution_file = r'%s' % os.path.join(gJugglerDir, vc_dir,
                                                     'Juggler.sln')
                if needs_upgrade:
                    doMSVCUpgrade(devenv_cmd, vc_dir, solution_file)
                if len(configs) > 0:
                    # Unattended build of each requested configuration.
                    for config in configs:
                        cmd = getBuildCommand(devenv_cmd, solution_file, config)
                        print "Launching %s" % " ".join(cmd)
                        subprocess.call(cmd)
                else:
                    # No configs requested: open the IDE interactively.
                    cmd = getIDECommand(devenv_cmd_interactive, solution_file)
                    print "Launching %s" % " ".join(cmd)
                    subprocess.call(cmd)
            # Decide whether to install VR Juggler (flag-driven when
            # unattended, otherwise interactive prompt).
            if gUnattended:
                if install == True:
                    print "Automatically proceeding with VR Juggler installation..."
                    proceed = 'y'
                else:
                    print "--install not specified, skipping VR Juggler installation..."
                    proceed = 'n'
            else:
                if install == True:
                    print "Proceeding with VR Juggler installation..."
                    proceed = 'y'
                else:
                    print "Proceed with VR Juggler installation [y]: ",
                    proceed = sys.stdin.readline().strip(" \n")
            if not (proceed == '' or proceed.lower().startswith('y')):
                sys.exit(EXIT_STATUS_SUCCESS)
            doInstall(options['prefix'],
                      os.path.join(gJugglerDir, vc_dir))
            # Same decision for the dependency installation.
            if gUnattended:
                if installDeps == True:
                    print "Automatically proceeding with VR Juggler dependency installation..."
                    proceed = 'y'
                else:
                    print "--install-deps not specified, skipping VR Juggler dependency installation..."
                    proceed = 'n'
            else:
                if installDeps == True:
                    print "Proceeding with VR Juggler dependency installation..."
                    proceed = 'y'
                else:
                    print "Proceed with VR Juggler dependency installation [y]: ",
                    proceed = sys.stdin.readline().strip(" \n")
            if proceed == '' or proceed.lower().startswith('y'):
                doDependencyInstall(options['deps-prefix'],
                                    os.path.join(gJugglerDir, vc_dir))
        except subprocess.CalledProcessError, cpErr:
            print "Could not execute: %s" % cpErr
            sys.exit(EXIT_STATUS_MSVS_START_ERROR)
        sys.exit(EXIT_STATUS_SUCCESS)
    else:
        root = Tkinter.Tk()
        my_app = GuiFrontEnd(root)
        root.mainloop()
def usage():
    """Print command-line usage information to stdout."""
    print "Usage: %s [OPTIONS]" % (sys.argv[0])
    print "Python script for building VR Juggler on Windows.\n"
    print "-c, --nogui              Disable the Tkinter GUI front end"
    print "                         (i.e., Run in command line mode)."
    print "--64                     Indicate that a 64-bit build will"
    print "                         be made."
    print "-b, --build=CONFIG       Do an unattended build"
    print "                         in the given configuration (may be"
    print "                         passed multiple times for more than one"
    print "                         config) - Valid configs:"
    print "                         %s" % ", ".join(gValidBuildConfigs)
    print "-n, --nobuild            Skip launching Visual Studio or the build tool."
    print "--jobs=NUMJOBS           Do not create more than NUMJOBS parallel processes"
    print "--install                Automatically install VR Juggler."
    print "--install-deps           Automatically install VR Juggler dependencies."
    print "--install-debug          Don't automatically skip installing pdb files."
    print "-a, --auto               Does not interactively ask for values of any options. Uses the Default values, or the options file if it exists. Implies -c"
    print "-o, --options-file=FILE  Uses FILE to Load/Save Options (defaults to options.cache)."
    print "-h, --help               Print this usage text and quit."
if __name__ == '__main__':
    try:
        main()
    except SystemExit, exitEx:
        # Map the numeric exit status to a human-readable description for
        # the final message.
        if exitEx.code == EXIT_STATUS_SUCCESS:
            status = 'successful completion'
        elif exitEx.code == EXIT_STATUS_NO_MSVS:
            status = 'no Visual Studio installation found'
        elif exitEx.code == EXIT_STATUS_MISSING_DATA_FILE:
            status = 'could not read data file required for compiling'
        elif exitEx.code == EXIT_STATUS_MSVS_START_ERROR:
            status = 'could not start Visual Studio'
        elif exitEx.code == EXIT_STATUS_INVALID_PATH:
            status = 'invalid directory structure'
        elif exitEx.code == EXIT_STATUS_MISSING_REQ_VALUE:
            status = 'required value not given'
        elif exitEx.code == EXIT_STATUS_UNSUPPORTED_COMPILER:
            status = 'unsupported compiler'
        elif exitEx.code == EXIT_STATUS_INVALID_ARGUMENT:
            status = 'invalid command line argument'
        else:
            status = 'error encountered'
        print "Exiting with status %d (%s)" % (exitEx.code, status)
        if not gUnattended:
            # Keep the console window open when run interactively.
            print "Press <ENTER> to quit ..."
            sys.stdin.readline()
        # Exit for real without throwing another SystemExit exception.
        os._exit(exitEx.code)
    except:
        # Any other exception: show the traceback and wait for the user
        # before the console window closes.
        info = sys.exc_info()
        traceback.print_exception(info[0], info[1], info[2])
        print "An exception was caught. Press <ENTER> to quit ..."
        sys.stdin.readline()
windflyer/apport | test/test_report.py | 1 | 97457 | # coding: UTF-8
import unittest, shutil, time, tempfile, os, subprocess, grp, atexit, re, sys
try:
from cStringIO import StringIO
StringIO # pyflakes
except ImportError:
from io import StringIO
import apport.report
import problem_report
import apport.packaging
# True when the twisted daemonizer is on $PATH; used to skip the
# twistd-specific interpreter-detection test below.
have_twistd = subprocess.call(['which', 'twistd'], stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) == 0
class T(unittest.TestCase):
    def test_add_package_info(self):
        '''add_package_info().'''

        # determine bash version
        bashversion = apport.packaging.get_version('bash')

        pr = apport.report.Report()
        pr.add_package_info('bash')
        self.assertEqual(pr['Package'], 'bash ' + bashversion.strip())
        self.assertEqual(pr['SourcePackage'], 'bash')
        self.assertTrue('libc' in pr['Dependencies'])

        # test without specifying a package, but with ExecutablePath
        pr = apport.report.Report()
        self.assertRaises(KeyError, pr.add_package_info)
        pr['ExecutablePath'] = '/bin/bash'
        pr.add_package_info()
        self.assertEqual(pr['Package'], 'bash ' + bashversion.strip())
        self.assertEqual(pr['SourcePackage'], 'bash')
        self.assertTrue('libc' in pr['Dependencies'])
        # check for stray empty lines
        self.assertTrue('\n\n' not in pr['Dependencies'])
        self.assertTrue('PackageArchitecture' in pr)

        # an executable that belongs to no package must not set Package
        pr = apport.report.Report()
        pr['ExecutablePath'] = '/nonexisting'
        pr.add_package_info()
        self.assertTrue('Package' not in pr)
def test_add_os_info(self):
'''add_os_info().'''
pr = apport.report.Report()
pr.add_os_info()
self.assertTrue(pr['Uname'].startswith('Linux'))
self.assertTrue(hasattr(pr['DistroRelease'], 'startswith'))
self.assertGreater(len(pr['DistroRelease']), 5)
self.assertTrue(pr['Architecture'])
# does not overwrite an already existing uname
pr['Uname'] = 'foonux 1.2'
dr = pr['DistroRelease']
del pr['DistroRelease']
pr.add_os_info()
self.assertEqual(pr['Uname'], 'foonux 1.2')
self.assertEqual(pr['DistroRelease'], dr)
def test_add_user_info(self):
'''add_user_info().'''
pr = apport.report.Report()
pr.add_user_info()
self.assertTrue('UserGroups' in pr)
# double-check that user group names are removed
for g in pr['UserGroups'].split():
self.assertTrue(grp.getgrnam(g).gr_gid < 1000)
self.assertTrue(grp.getgrgid(os.getgid()).gr_name not in pr['UserGroups'])
    def test_add_proc_info(self):
        '''add_proc_info().

        Exercises environment filtering, privilege handling, cmdline
        escaping, symlinked executables, and interpreter detection for
        shell and Python scripts.
        '''

        # check without additional safe environment variables
        pr = apport.report.Report()
        self.assertEqual(pr.pid, None)
        pr.add_proc_info()
        self.assertEqual(pr.pid, os.getpid())
        self.assertTrue(set(['ProcEnviron', 'ProcMaps', 'ProcCmdline',
                             'ProcMaps']).issubset(set(pr.keys())), 'report has required fields')
        if 'LANG' in os.environ:
            self.assertTrue('LANG=' + os.environ['LANG'] in pr['ProcEnviron'])
        else:
            self.assertFalse('LANG=' in pr['ProcEnviron'])
        self.assertTrue('USER' not in pr['ProcEnviron'])
        self.assertTrue('PWD' not in pr['ProcEnviron'])
        self.assertTrue('report.py' in pr['ExecutablePath'])
        self.assertEqual(int(pr['ExecutableTimestamp']),
                         int(os.stat(__file__).st_mtime))

        # check with one additional safe environment variable
        pr = apport.report.Report()
        pr.add_proc_info(extraenv=['PWD'])
        self.assertTrue('USER' not in pr['ProcEnviron'])
        if 'PWD' in os.environ:
            self.assertTrue('PWD=' + os.environ['PWD'] in pr['ProcEnviron'])

        # check process from other user
        restore_root = False
        if os.getuid() == 0:
            # temporarily drop to normal user "mail"
            os.setresuid(8, 8, -1)
            restore_root = True
        pr = apport.report.Report()
        self.assertRaises(OSError, pr.add_proc_info, 1)  # EPERM for init process
        if restore_root:
            os.setresuid(0, 0, -1)
        # even on failure, pid and readable /proc fields are recorded
        self.assertEqual(pr.pid, 1)
        self.assertTrue('Pid:\t1' in pr['ProcStatus'], pr['ProcStatus'])
        self.assertTrue(pr['ProcEnviron'].startswith('Error:'), pr['ProcEnviron'])
        self.assertTrue('InterpreterPath' not in pr)

        # check escaping of ProcCmdline
        p = subprocess.Popen(['cat', '/foo bar', '\\h', '\\ \\', '-'],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        assert p.pid
        # wait until /proc/pid/cmdline exists
        while True:
            with open('/proc/%i/cmdline' % p.pid) as fd:
                if fd.read():
                    break
                time.sleep(0.1)
        pr = apport.report.Report()
        pr.add_proc_info(pid=p.pid)
        self.assertEqual(pr.pid, p.pid)
        p.communicate(b'\n')
        self.assertEqual(pr['ProcCmdline'], 'cat /foo\ bar \\\\h \\\\\\ \\\\ -')
        self.assertEqual(pr['ExecutablePath'], '/bin/cat')
        self.assertTrue('InterpreterPath' not in pr)
        self.assertTrue('/bin/cat' in pr['ProcMaps'])
        self.assertTrue('[stack]' in pr['ProcMaps'])

        # check correct handling of executable symlinks
        assert os.path.islink('/bin/sh'), '/bin/sh needs to be a symlink for this test'
        p = subprocess.Popen(['sh'], stdin=subprocess.PIPE)
        assert p.pid
        # wait until /proc/pid/cmdline exists
        while True:
            with open('/proc/%i/cmdline' % p.pid) as fd:
                if fd.read():
                    break
                time.sleep(0.1)
        pr = apport.report.Report()
        pr.pid = p.pid
        pr.add_proc_info()
        p.communicate(b'exit\n')
        self.assertFalse('InterpreterPath' in pr, pr.get('InterpreterPath'))
        self.assertEqual(pr['ExecutablePath'], os.path.realpath('/bin/sh'))
        self.assertEqual(int(pr['ExecutableTimestamp']),
                         int(os.stat(os.path.realpath('/bin/sh')).st_mtime))

        # check correct handling of interpreted executables: shell
        p = subprocess.Popen(['zgrep', 'foo'], stdin=subprocess.PIPE)
        assert p.pid
        # wait until /proc/pid/cmdline exists
        while True:
            with open('/proc/%i/cmdline' % p.pid) as fd:
                if fd.read():
                    break
                time.sleep(0.1)
        pr = apport.report.Report()
        pr.add_proc_info(pid=p.pid)
        p.communicate(b'\n')
        self.assertTrue(pr['ExecutablePath'].endswith('bin/zgrep'))
        # the interpreter is whatever the script's shebang line names
        with open(pr['ExecutablePath']) as fd:
            self.assertEqual(pr['InterpreterPath'],
                             os.path.realpath(fd.readline().strip()[2:]))
        self.assertEqual(int(pr['ExecutableTimestamp']),
                         int(os.stat(pr['ExecutablePath']).st_mtime))
        self.assertTrue('[stack]' in pr['ProcMaps'])

        # check correct handling of interpreted executables: python
        (fd, testscript) = tempfile.mkstemp()
        os.write(fd, ('''#!/usr/bin/%s
import sys
sys.stdin.readline()
''' % os.getenv('PYTHON', 'python3')).encode('ascii'))
        os.close(fd)
        os.chmod(testscript, 0o755)
        p = subprocess.Popen([testscript], stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        assert p.pid
        # wait until /proc/pid/cmdline exists
        while True:
            with open('/proc/%i/cmdline' % p.pid) as fd:
                if fd.read():
                    break
                time.sleep(0.1)
        pr = apport.report.Report()
        pr.add_proc_info(pid=p.pid)
        p.communicate(b'\n')
        self.assertEqual(pr['ExecutablePath'], testscript)
        self.assertEqual(int(pr['ExecutableTimestamp']),
                         int(os.stat(testscript).st_mtime))
        os.unlink(testscript)
        self.assertTrue('python' in pr['InterpreterPath'])
        self.assertTrue('python' in pr['ProcMaps'])
        self.assertTrue('[stack]' in pr['ProcMaps'])

        # test process is gone, should complain about nonexisting PID
        self.assertRaises(ValueError, pr.add_proc_info, p.pid)
    def test_add_proc_info_nonascii(self):
        '''add_proc_info() for non-ASCII values'''

        # byte strings are used so the child's environment carries raw
        # (UTF-8 encoded) values rather than Python str objects
        lang = b'n\xc3\xb6_v\xc3\xb8lid'
        # one variable from each category (ignored/filtered/shown)
        p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                             env={'MYNAME': b'J\xc3\xbcrgen-Ren\xc3\xa9',
                                  'XDG_RUNTIME_DIR': b'/a\xc3\xafb',
                                  'LANG': lang})
        # give the child a moment to start so its /proc entries exist
        time.sleep(0.1)
        r = apport.report.Report()
        r.add_proc_environ(pid=p.pid)
        p.communicate(b'')
        self.assertTrue(lang in r['ProcEnviron'].encode('UTF-8'))
        # filtered variables only record that they were set, not the value
        self.assertTrue('XDG_RUNTIME_DIR=<set>' in r['ProcEnviron'], r['ProcEnviron'])
    def test_add_proc_info_current_desktop(self):
        '''add_proc_info() CurrentDesktop'''

        # without XDG_CURRENT_DESKTOP no CurrentDesktop field is added
        p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                             env={'LANG': 'xx_YY.UTF-8'})
        # give the child a moment to start so its /proc entries exist
        time.sleep(0.1)
        r = apport.report.Report()
        r.add_proc_info(pid=p.pid)
        p.communicate(b'')
        self.assertEqual(r['ProcEnviron'], 'LANG=xx_YY.UTF-8')
        self.assertFalse('CurrentDesktop' in r, r)

        # with XDG_CURRENT_DESKTOP its value is captured in CurrentDesktop
        p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                             env={'LANG': 'xx_YY.UTF-8',
                                  'XDG_CURRENT_DESKTOP': 'Pixel Pusher'})
        time.sleep(0.1)
        r = apport.report.Report()
        r.add_proc_info(pid=p.pid)
        p.communicate(b'')
        self.assertEqual(r['ProcEnviron'], 'LANG=xx_YY.UTF-8')
        self.assertEqual(r['CurrentDesktop'], 'Pixel Pusher')
def test_add_path_classification(self):
'''classification of $PATH.'''
# system default
p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
env={'PATH': '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games'})
time.sleep(0.1)
r = apport.report.Report()
r.add_proc_environ(pid=p.pid)
p.communicate(b'')
self.assertFalse('PATH' in r['ProcEnviron'],
'system default $PATH should be filtered out')
# no user paths
p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
env={'PATH': '/usr/sbin:/usr/bin:/sbin:/bin'})
time.sleep(0.1)
r = apport.report.Report()
r.add_proc_environ(pid=p.pid)
p.communicate(b'')
self.assertTrue('PATH=(custom, no user)' in r['ProcEnviron'],
'PATH is customized without user paths')
# user paths
p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
env={'PATH': '/home/pitti:/usr/sbin:/usr/bin:/sbin:/bin'})
time.sleep(0.1)
r = apport.report.Report()
r.add_proc_environ(pid=p.pid)
p.communicate(b'')
self.assertTrue('PATH=(custom, user)' in r['ProcEnviron'],
'PATH is customized with user paths')
    def test_check_interpreted(self):
        '''_check_interpreted().

        Runs as an unprivileged user (dropping from root if necessary) so
        that the access checks on /etc/shadow vs. /etc/passwd behave as
        they would for a normal crash report.
        '''

        restore_root = False
        if os.getuid() == 0:
            # temporarily drop to normal user "mail"
            os.setresuid(8, 8, -1)
            restore_root = True

        try:
            # standard ELF binary
            f = tempfile.NamedTemporaryFile()
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/gedit'
            pr['ProcStatus'] = 'Name:\tgedit'
            pr['ProcCmdline'] = 'gedit\0/' + f.name
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/usr/bin/gedit')
            self.assertFalse('InterpreterPath' in pr)
            f.close()

            # bogus argv[0]
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/bin/dash'
            pr['ProcStatus'] = 'Name:\tznonexisting'
            pr['ProcCmdline'] = 'nonexisting\0/foo'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/bin/dash')
            self.assertFalse('InterpreterPath' in pr)

            # standard sh script
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/bin/dash'
            pr['ProcStatus'] = 'Name:\tzgrep'
            pr['ProcCmdline'] = '/bin/sh\0/bin/zgrep\0foo'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/bin/zgrep')
            self.assertEqual(pr['InterpreterPath'], '/bin/dash')

            # standard sh script when being called explicitly with interpreter
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/bin/dash'
            pr['ProcStatus'] = 'Name:\tdash'
            pr['ProcCmdline'] = '/bin/sh\0/bin/zgrep\0foo'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/bin/zgrep')
            self.assertEqual(pr['InterpreterPath'], '/bin/dash')

            # special case mono scheme: beagled-helper (use zgrep to make the test
            # suite work if mono or beagle are not installed)
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/mono'
            pr['ProcStatus'] = 'Name:\tzgrep'
            pr['ProcCmdline'] = 'zgrep\0--debug\0/bin/zgrep'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/bin/zgrep')
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/mono')

            # special case mono scheme: banshee (use zgrep to make the test
            # suite work if mono or beagle are not installed)
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/mono'
            pr['ProcStatus'] = 'Name:\tzgrep'
            pr['ProcCmdline'] = 'zgrep\0/bin/zgrep'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/bin/zgrep')
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/mono')

            # fail on files we shouldn't have access to when name!=argv[0]
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python'
            pr['ProcStatus'] = 'Name:\tznonexisting'
            pr['ProcCmdline'] = 'python\0/etc/shadow'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/usr/bin/python')
            self.assertFalse('InterpreterPath' in pr)

            # succeed on files we should have access to when name!=argv[0]
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python'
            pr['ProcStatus'] = 'Name:\tznonexisting'
            pr['ProcCmdline'] = 'python\0/etc/passwd'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python')
            self.assertEqual(pr['ExecutablePath'], '/etc/passwd')

            # fail on files we shouldn't have access to when name==argv[0]
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python'
            pr['ProcStatus'] = 'Name:\tshadow'
            pr['ProcCmdline'] = '../etc/shadow'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/usr/bin/python')
            self.assertFalse('InterpreterPath' in pr)

            # succeed on files we should have access to when name==argv[0]
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python'
            pr['ProcStatus'] = 'Name:\tpasswd'
            pr['ProcCmdline'] = '../etc/passwd'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python')
            self.assertEqual(pr['ExecutablePath'], '/bin/../etc/passwd')

            # interactive python process
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python'
            pr['ProcStatus'] = 'Name:\tpython'
            pr['ProcCmdline'] = 'python'
            pr._check_interpreted()
            self.assertEqual(pr['ExecutablePath'], '/usr/bin/python')
            self.assertFalse('InterpreterPath' in pr)

            # python script (abuse /bin/bash since it must exist)
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python'
            pr['ProcStatus'] = 'Name:\tbash'
            pr['ProcCmdline'] = 'python\0/bin/bash'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python')
            self.assertEqual(pr['ExecutablePath'], '/bin/bash')

            # python script with options (abuse /bin/bash since it must exist)
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python'
            pr['ProcStatus'] = 'Name:\tbash'
            pr['ProcCmdline'] = 'python\0-OO\0/bin/bash'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python')
            self.assertEqual(pr['ExecutablePath'], '/bin/bash')

            # python script with a versioned interpreter
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python2.7'
            pr['ProcStatus'] = 'Name:\tbash'
            pr['ProcCmdline'] = '/usr/bin/python\0/bin/bash'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python2.7')
            self.assertEqual(pr['ExecutablePath'], '/bin/bash')

            # python script through -m
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python2.7'
            pr['ProcStatus'] = 'Name:\tpython'
            pr['ProcCmdline'] = 'python\0-tt\0-m\0apport/report\0-v'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python2.7')
            self.assertTrue('report' in pr['ExecutablePath'],
                            'expecting "report" in ExecutablePath "%s"' % pr['ExecutablePath'])

            # python script through -m, with dot separator; top-level module
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python3'
            pr['ProcStatus'] = 'Name:\tpython3'
            pr['ProcCmdline'] = 'python\0-m\0re\0install'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python3')
            self.assertTrue('re.py' in pr['ExecutablePath'], pr['ExecutablePath'])

            # python script through -m, with dot separator; sub-level module
            pr = apport.report.Report()
            pr['ExecutablePath'] = '/usr/bin/python3'
            pr['ProcStatus'] = 'Name:\tpython3'
            pr['ProcCmdline'] = 'python\0-m\0distutils.cmd\0foo'
            pr._check_interpreted()
            self.assertEqual(pr['InterpreterPath'], '/usr/bin/python3')
            self.assertTrue('distutils/cmd.py' in pr['ExecutablePath'], pr['ExecutablePath'])
        finally:
            # always restore root privileges for the remaining tests
            if restore_root:
                os.setresuid(0, 0, -1)
    @unittest.skipUnless(have_twistd, 'twisted is not installed')
    def test_check_interpreted_twistd(self):
        '''_check_interpreted() for programs ran through twistd

        The referenced Launchpad bugs cover the different twistd invocation
        styles (-y/--python, positional plugin name).
        '''

        # LP#761374
        pr = apport.report.Report()
        pr['ExecutablePath'] = '/usr/bin/python2.7'
        pr['ProcStatus'] = 'Name:\ttwistd'
        pr['ProcCmdline'] = '/usr/bin/python\0/usr/bin/twistd\0--uid\0root\0--gid\0root\0--pidfile\0/var/run/nanny.pid\0-r\0glib2\0--logfile\0/var/log/nanny.log\0-y\0/usr/share/nanny/daemon/nanny.tap'
        pr._check_interpreted()
        self.assertEqual(pr['ExecutablePath'], '/usr/share/nanny/daemon/nanny.tap')
        self.assertEqual(pr['InterpreterPath'], '/usr/bin/twistd')

        # LP#625039
        pr = apport.report.Report()
        pr['ExecutablePath'] = '/usr/bin/python2.7'
        pr['ProcStatus'] = 'Name:\ttwistd'
        pr['ProcCmdline'] = '/usr/bin/python\0/usr/bin/twistd\0--pidfile=/var/run/apt-p2p//apt-p2p.pid\0--rundir=/var/run/apt-p2p/\0--python=/usr/sbin/apt-p2p\0--logfile=/var/log/apt-p2p.log\0--no_save'
        pr._check_interpreted()
        self.assertEqual(pr['ExecutablePath'], '/usr/sbin/apt-p2p')
        self.assertEqual(pr['InterpreterPath'], '/usr/bin/twistd')

        # somewhere from LP#755025
        pr = apport.report.Report()
        pr['ExecutablePath'] = '/usr/bin/python2.7'
        pr['ProcStatus'] = 'Name:\ttwistd'
        pr['ProcCmdline'] = '/usr/bin/python\0/usr/bin/twistd\0-r\0gtk2\0--pidfile\0/tmp/vmc.pid\0-noy\0/usr/share/vodafone-mobile-connect/gtk-tap.py\0-l\0/dev/null'
        pr._check_interpreted()
        self.assertEqual(pr['ExecutablePath'], '/usr/share/vodafone-mobile-connect/gtk-tap.py')
        self.assertEqual(pr['InterpreterPath'], '/usr/bin/twistd')

        # LP#725383 -> not practical to determine file here
        pr = apport.report.Report()
        pr['ExecutablePath'] = '/usr/bin/python2.7'
        pr['ProcStatus'] = 'Name:\ttwistd'
        pr['ProcCmdline'] = '/usr/bin/python\0/usr/bin/twistd\0--pidfile=/var/run/poker-network-server.pid\0--logfile=/var/log/poker-network-server.log\0--no_save\0--reactor=poll\0pokerserver'
        pr._check_interpreted()
        self.assertTrue('ExecutablePath' in pr)
        self.assertTrue('UnreportableReason' in pr)
        self.assertEqual(pr['InterpreterPath'], '/usr/bin/twistd')
@classmethod
def _generate_sigsegv_report(klass, file=None, signal='11', code='''
int f(x) {
    int* p = 0; *p = x;
    return x+1;
}
int main() { return f(42); }
'''):
    '''Create a test executable which will die with a SIGSEGV, generate a
    core dump for it, create a problem report with those two arguments
    (ExecutablePath and CoreDump) and call add_gdb_info().

    If file is given, the report is written into it. Return the apport.report.Report.'''
    # NOTE: the default C code crashes in f() at crash.c:3; callers'
    # StacktraceTop expectations depend on those exact line numbers.
    workdir = None
    orig_cwd = os.getcwd()
    pr = apport.report.Report()
    try:
        # build and crash the binary in a scratch dir, removed at exit
        workdir = tempfile.mkdtemp()
        atexit.register(shutil.rmtree, workdir)
        os.chdir(workdir)
        # create a test executable (compiled with -g so frames resolve)
        with open('crash.c', 'w') as fd:
            fd.write(code)
        assert subprocess.call(['gcc', '-g', 'crash.c', '-o', 'crash']) == 0
        assert os.path.exists('crash')
        # call it through gdb and dump core
        gdb = subprocess.Popen(['gdb', '--batch', '--ex', 'run', '--ex',
                                'generate-core-file core', './crash'], stdout=subprocess.PIPE)
        gdb.communicate()
        klass._validate_core('core')
        pr['ExecutablePath'] = os.path.join(workdir, 'crash')
        pr['CoreDump'] = (os.path.join(workdir, 'core'),)
        pr['Signal'] = signal
        pr.add_gdb_info()
        if file:
            pr.write(file)
            file.flush()
    finally:
        # always restore the caller's working directory
        os.chdir(orig_cwd)
    return pr
@classmethod
def _validate_core(klass, core_path):
    '''Assert that core_path exists and is a readable ELF core dump.'''
    # flush pending writes so the freshly dumped core is fully on disk
    subprocess.check_call(['sync'])
    assert os.path.exists(core_path)
    # 'readelf -n' only succeeds on a well-formed ELF file with note sections
    proc = subprocess.Popen(['readelf', '-n', core_path],
                            stdout=subprocess.PIPE)
    proc.communicate()
    assert proc.returncode == 0
def _validate_gdb_fields(self, pr):
    '''Assert that report pr carries sane gdb-generated fields.

    Uses assertIn/assertNotIn/assertIsNone instead of assertTrue(x in y)
    so failures show the offending values.
    '''
    self.assertIn('Stacktrace', pr)
    self.assertIn('ThreadStacktrace', pr)
    self.assertIn('StacktraceTop', pr)
    self.assertIn('Registers', pr)
    self.assertIn('Disassembly', pr)
    # symbols must have been found, and gdb chatter must be filtered out
    self.assertNotIn('(no debugging symbols found)', pr['Stacktrace'])
    self.assertNotIn('Core was generated by', pr['Stacktrace'], pr['Stacktrace'])
    # no duplicated #0 frame at the start of the stack trace
    self.assertIsNone(re.match(r'(?s)(^|.*\n)#0 [^\n]+\n#0 ',
                               pr['Stacktrace']))
    # both traces contain at least two resolved frames with addresses
    self.assertIn('#0 0x', pr['Stacktrace'])
    self.assertIn('#1 0x', pr['Stacktrace'])
    self.assertIn('#0 0x', pr['ThreadStacktrace'])
    self.assertIn('#1 0x', pr['ThreadStacktrace'])
    self.assertIn('Thread 1 (', pr['ThreadStacktrace'])
    # StacktraceTop is limited to the topmost five frames
    self.assertLessEqual(len(pr['StacktraceTop'].splitlines()), 5)
def test_add_gdb_info(self):
    '''add_gdb_info() with core dump file reference.'''
    pr = apport.report.Report()
    # should not throw an exception for missing fields
    pr.add_gdb_info()
    # normal crash: frame locations match the default crash.c from
    # _generate_sigsegv_report() (f at line 3, main at line 6)
    pr = self._generate_sigsegv_report()
    self._validate_gdb_fields(pr)
    self.assertEqual(pr['StacktraceTop'], 'f (x=42) at crash.c:3\nmain () at crash.c:6', pr['StacktraceTop'])
    self.assertFalse('AssertionMessage' in pr)
    # crash where gdb generates output on stderr: call through a NULL
    # function pointer, which gdb cannot disassemble
    pr = self._generate_sigsegv_report(code='''
int main() {
    void     (*function)(void);
    function = 0;
    function();
}
''')
    self._validate_gdb_fields(pr)
    self.assertTrue('Cannot access memory at address 0x0' in pr['Disassembly'], pr['Disassembly'])
    self.assertFalse('AssertionMessage' in pr)
def test_add_gdb_info_load(self):
    '''add_gdb_info() with inline core dump.'''
    # write a full report (including the core dump) to a temporary file ...
    report_file = tempfile.NamedTemporaryFile()
    self._generate_sigsegv_report(report_file)
    report_file.seek(0)
    # ... then load it back and let add_gdb_info() work on the
    # deserialized (inline) core dump
    pr = apport.report.Report()
    with open(report_file.name, 'rb') as report_fd:
        pr.load(report_fd)
    pr.add_gdb_info()
    self._validate_gdb_fields(pr)
def test_add_gdb_info_damaged(self):
    '''add_gdb_info() with damaged core dump'''
    pr = self._generate_sigsegv_report()
    # drop the fields derived from the intact core so we can verify that
    # they are NOT regenerated from the damaged one
    del pr['Stacktrace']
    del pr['StacktraceTop']
    del pr['ThreadStacktrace']
    del pr['Disassembly']
    # truncate core file
    os.truncate(pr['CoreDump'][0], 10000)
    # a truncated core must raise, not produce bogus traces
    self.assertRaises(IOError, pr.add_gdb_info)
    self.assertNotIn('Stacktrace', pr)
    self.assertNotIn('StacktraceTop', pr)
    self.assertIn('core is truncated', pr['UnreportableReason'])
def test_add_zz_parse_segv_details(self):
    '''parse-segv produces sensible results'''
    # serialize a SIGSEGV report so it can be reloaded with varying fields
    rep = tempfile.NamedTemporaryFile()
    self._generate_sigsegv_report(rep)
    rep.seek(0)

    # non-SEGV signal: the segv hook must not produce an analysis
    pr = apport.report.Report()
    with open(rep.name, 'rb') as f:
        pr.load(f)
    pr['Signal'] = '1'
    pr.add_hooks_info('fake_ui')
    # was: assertTrue('SegvAnalysis' not in pr.keys()) — assertNotIn on the
    # mapping itself is the idiomatic, diagnosable form
    self.assertNotIn('SegvAnalysis', pr)

    # missing Architecture: analysis is skipped with an explanation
    pr = apport.report.Report()
    with open(rep.name, 'rb') as f:
        pr.load(f)
    pr.add_hooks_info('fake_ui')
    self.assertIn('Skipped: missing required field "Architecture"',
                  pr['SegvAnalysis'], pr['SegvAnalysis'])

    # missing ProcMaps: still skipped, next reason reported
    pr.add_os_info()
    pr.add_hooks_info('fake_ui')
    self.assertIn('Skipped: missing required field "ProcMaps"',
                  pr['SegvAnalysis'], pr['SegvAnalysis'])

    # with full info the analysis runs; the crash address (NULL deref) is
    # outside any mapped VMA region of the current process
    pr.add_proc_info()
    pr.add_hooks_info('fake_ui')
    self.assertIn('not located in a known VMA region',
                  pr['SegvAnalysis'], pr['SegvAnalysis'])
def test_add_gdb_info_script(self):
    '''add_gdb_info() with a script.'''
    (fd, script) = tempfile.mkstemp()
    # the helper script dumps core next to itself
    coredump = os.path.join(os.path.dirname(script), 'core')
    assert not os.path.exists(coredump)
    try:
        os.close(fd)

        # create a test script which produces a core dump for us
        with open(script, 'w') as fd:
            fd.write('''#!/bin/bash
cd `dirname $0`
ulimit -c unlimited
kill -SEGV $$
''')
        os.chmod(script, 0o755)

        # call script and verify that it gives us a proper ELF core dump
        assert subprocess.call([script]) != 0
        self._validate_core(coredump)

        pr = apport.report.Report()
        pr['InterpreterPath'] = '/bin/bash'
        pr['ExecutablePath'] = script
        pr['CoreDump'] = (coredump,)
        pr.add_gdb_info()
    finally:
        # the core only exists if the script ran far enough; guard the
        # unlink so an earlier failure is not masked by FileNotFoundError
        if os.path.exists(coredump):
            os.unlink(coredump)
        os.unlink(script)

    self._validate_gdb_fields(pr)
    # the trace must point into bash's interpreter loop
    self.assertTrue('libc.so' in pr['Stacktrace'] or 'in execute_command' in pr['Stacktrace'])
def test_add_gdb_info_abort(self):
    '''add_gdb_info() with SIGABRT/assert()

    If these come from an assert(), the report should have the assertion
    message. Otherwise it should be marked as not reportable.
    '''
    # Each helper script compiles a tiny C program to $0.bin and runs it so
    # that it dumps 'core' into the current directory.  The cleanup guards
    # the unlinks of $0.bin/core: those files do not exist if gcc or the
    # script failed, and an unguarded unlink would mask the real failure.

    # abort with assert
    (fd, script) = tempfile.mkstemp()
    assert not os.path.exists('core')
    try:
        os.close(fd)

        # create a test script which produces a core dump for us
        with open(script, 'w') as fd:
            fd.write('''#!/bin/sh
gcc -o $0.bin -x c - <<EOF
#include <assert.h>
int main() { assert(1 < 0); }
EOF
ulimit -c unlimited
$0.bin 2>/dev/null
''')
        os.chmod(script, 0o755)

        # call script and verify that it gives us a proper ELF core dump
        assert subprocess.call([script]) != 0
        self._validate_core('core')

        pr = apport.report.Report()
        pr['ExecutablePath'] = script + '.bin'
        pr['CoreDump'] = ('core',)
        pr.add_gdb_info()
    finally:
        os.unlink(script)
        if os.path.exists(script + '.bin'):
            os.unlink(script + '.bin')
        if os.path.exists('core'):
            os.unlink('core')

    self._validate_gdb_fields(pr)
    # assertion text must be extracted verbatim, cleaned of shell noise
    self.assertTrue("<stdin>:2: main: Assertion `1 < 0' failed." in
                    pr['AssertionMessage'], pr['AssertionMessage'])
    self.assertFalse(pr['AssertionMessage'].startswith('$'), pr['AssertionMessage'])
    self.assertFalse('= 0x' in pr['AssertionMessage'], pr['AssertionMessage'])
    self.assertFalse(pr['AssertionMessage'].endswith('\\n'), pr['AssertionMessage'])

    # abort with internal error (glibc fortify buffer overflow check)
    (fd, script) = tempfile.mkstemp()
    assert not os.path.exists('core')
    try:
        os.close(fd)

        # create a test script which produces a core dump for us
        with open(script, 'w') as fd:
            fd.write('''#!/bin/sh
gcc -O2 -D_FORTIFY_SOURCE=2 -o $0.bin -x c - <<EOF
#include <string.h>
int main(int argc, char *argv[]) {
    char buf[8];
    strcpy(buf, argv[1]);
    return 0;
}
EOF
ulimit -c unlimited
LIBC_FATAL_STDERR_=1 $0.bin aaaaaaaaaaaaaaaa 2>/dev/null
''')
        os.chmod(script, 0o755)

        # call script and verify that it gives us a proper ELF core dump
        assert subprocess.call([script]) != 0
        self._validate_core('core')

        pr = apport.report.Report()
        pr['ExecutablePath'] = script + '.bin'
        pr['CoreDump'] = ('core',)
        pr.add_gdb_info()
    finally:
        os.unlink(script)
        if os.path.exists(script + '.bin'):
            os.unlink(script + '.bin')
        if os.path.exists('core'):
            os.unlink('core')

    self._validate_gdb_fields(pr)
    self.assertTrue("** buffer overflow detected ***: %s.bin terminated" % (script) in
                    pr['AssertionMessage'], pr['AssertionMessage'])
    self.assertFalse(pr['AssertionMessage'].startswith('$'), pr['AssertionMessage'])
    self.assertFalse('= 0x' in pr['AssertionMessage'], pr['AssertionMessage'])
    self.assertFalse(pr['AssertionMessage'].endswith('\\n'), pr['AssertionMessage'])

    # abort without assertion: plain abort() must not yield a message
    (fd, script) = tempfile.mkstemp()
    assert not os.path.exists('core')
    try:
        os.close(fd)

        # create a test script which produces a core dump for us
        with open(script, 'w') as fd:
            fd.write('''#!/bin/sh
gcc -o $0.bin -x c - <<EOF
#include <stdlib.h>
int main() { abort(); }
EOF
ulimit -c unlimited
$0.bin 2>/dev/null
''')
        os.chmod(script, 0o755)

        # call script and verify that it gives us a proper ELF core dump
        assert subprocess.call([script]) != 0
        self._validate_core('core')

        pr = apport.report.Report()
        pr['ExecutablePath'] = script + '.bin'
        pr['CoreDump'] = ('core',)
        pr.add_gdb_info()
    finally:
        os.unlink(script)
        if os.path.exists(script + '.bin'):
            os.unlink(script + '.bin')
        if os.path.exists('core'):
            os.unlink('core')

    self._validate_gdb_fields(pr)
    self.assertFalse('AssertionMessage' in pr, pr.get('AssertionMessage'))
def test_add_gdb_info_abort_glib(self):
    '''add_gdb_info() with glib assertion'''
    (fd, script) = tempfile.mkstemp()
    assert not os.path.exists('core')
    try:
        os.close(fd)

        # create a test script which produces a core dump for us
        with open(script, 'w') as fd:
            fd.write('''#!/bin/sh
gcc -o $0.bin -x c - `pkg-config --cflags --libs glib-2.0` <<EOF
#include <glib.h>
int main() { g_assert_cmpint(1, <, 0); }
EOF
ulimit -c unlimited
$0.bin 2>/dev/null
''')
        # call script and verify that it gives us a proper ELF core dump
        # (invoked via /bin/sh, so the script needs no execute bit)
        assert subprocess.call(['/bin/sh', script]) != 0
        self._validate_core('core')

        pr = apport.report.Report()
        pr['ExecutablePath'] = script + '.bin'
        pr['CoreDump'] = ('core',)
        pr.add_gdb_info()
    finally:
        os.unlink(script)
        # guard: these files do not exist if gcc/pkg-config or the script
        # failed, and an unguarded unlink would mask the real failure
        if os.path.exists(script + '.bin'):
            os.unlink(script + '.bin')
        if os.path.exists('core'):
            os.unlink('core')

    self._validate_gdb_fields(pr)
    self.assertTrue(pr['AssertionMessage'].startswith('ERROR:<stdin>:2:main: assertion failed (1 < 0):'),
                    pr['AssertionMessage'])
def test_add_gdb_info_abort_libnih(self):
    '''add_gdb_info() with libnih assertion'''
    (fd, script) = tempfile.mkstemp()
    assert not os.path.exists('core')
    try:
        os.close(fd)

        # create a test script which produces a core dump for us
        with open(script, 'w') as fd:
            fd.write('''#!/bin/sh
gcc -o $0.bin -x c - `pkg-config --cflags --libs libnih` <<EOF
#include <libnih.h>
int main() { nih_assert (1 < 0); }
EOF
ulimit -c unlimited
$0.bin 2>/dev/null
''')
        # call script and verify that it gives us a proper ELF core dump
        # (invoked via /bin/sh, so the script needs no execute bit)
        assert subprocess.call(['/bin/sh', script]) != 0
        self._validate_core('core')

        pr = apport.report.Report()
        pr['ExecutablePath'] = script + '.bin'
        pr['CoreDump'] = ('core',)
        pr.add_gdb_info()
    finally:
        os.unlink(script)
        # guard: these files do not exist if gcc/pkg-config or the script
        # failed, and an unguarded unlink would mask the real failure
        if os.path.exists(script + '.bin'):
            os.unlink(script + '.bin')
        if os.path.exists('core'):
            os.unlink('core')

    self._validate_gdb_fields(pr)
    self.assertTrue('Assertion failed in main: 1 < 0' in pr['AssertionMessage'],
                    pr['AssertionMessage'])
def test_search_bug_patterns(self):
    '''search_bug_patterns().'''
    patterns = tempfile.NamedTemporaryFile(prefix='apport-')
    # create some test patterns: a report matches a <pattern> when ALL of
    # its <re> clauses match the corresponding report fields
    patterns.write(b'''<?xml version="1.0"?>
<patterns>
    <pattern url="http://bugtracker.net/bugs/1">
        <re key="Package">^bash </re>
        <re key="Foo">ba.*r</re>
    </pattern>
    <pattern url="http://bugtracker.net/bugs/2">
        <re key="Package">^bash 1-2$</re>
        <re key="Foo">write_(hello|goodbye)</re>
    </pattern>
    <pattern url="http://bugtracker.net/bugs/3">
        <re key="Package">^coreutils </re>
        <re key="Bar">^1$</re>
    </pattern>
    <pattern url="http://bugtracker.net/bugs/4">
        <re key="Package">^coreutils </re>
        <re></re>
        <re key="Bar">*</re> <!-- invalid RE -->
        <re key="broken">+[1^</re>
    </pattern>
    <pattern url="http://bugtracker.net/bugs/5">
        <re key="SourcePackage">^bazaar$</re>
        <re key="LogFile">AssertionError</re>
    </pattern>
    <pattern url="http://bugtracker.net/bugs/6">
        <re key="Package">^update-notifier</re>
        <re key="LogFile">AssertionError \xe2\x80\xbd</re>
    </pattern>
</patterns>''')
    patterns.flush()

    # invalid XML, to exercise the parser error path
    invalid = tempfile.NamedTemporaryFile(prefix='apport-')
    invalid.write(b'''<?xml version="1.0"?>
</patterns>''')
    invalid.flush()

    # create some reports
    r_bash = apport.report.Report()
    r_bash['Package'] = 'bash 1-2'
    r_bash['Foo'] = 'bazaar'

    r_bazaar = apport.report.Report()
    r_bazaar['Package'] = 'bazaar 2-1'
    r_bazaar['SourcePackage'] = 'bazaar'
    r_bazaar['LogFile'] = 'AssertionError'

    r_coreutils = apport.report.Report()
    r_coreutils['Package'] = 'coreutils 1'
    r_coreutils['Bar'] = '1'

    r_invalid = apport.report.Report()
    r_invalid['Package'] = 'invalid 1'

    # non-ASCII (UTF-8 encoded bytes) field value
    r_unicode = apport.report.Report()
    r_unicode['Package'] = 'update-notifier'
    r_unicode['LogFile'] = b'AssertionError \xe2\x80\xbd'

    pattern_url = 'file://' + patterns.name

    # positive match cases
    self.assertEqual(r_bash.search_bug_patterns(pattern_url),
                     'http://bugtracker.net/bugs/1')
    r_bash['Foo'] = 'write_goodbye'
    self.assertEqual(r_bash.search_bug_patterns(pattern_url),
                     'http://bugtracker.net/bugs/2')
    self.assertEqual(r_coreutils.search_bug_patterns(pattern_url),
                     'http://bugtracker.net/bugs/3')
    self.assertEqual(r_bazaar.search_bug_patterns(pattern_url),
                     'http://bugtracker.net/bugs/5')
    self.assertEqual(r_unicode.search_bug_patterns(pattern_url),
                     'http://bugtracker.net/bugs/6')

    # also works for CompressedValues
    r_bash_compressed = r_bash.copy()
    r_bash_compressed['Foo'] = problem_report.CompressedValue(b'bazaar')
    self.assertEqual(r_bash_compressed.search_bug_patterns(pattern_url),
                     'http://bugtracker.net/bugs/1')

    # also works for binary values
    r_bash_utf8 = r_bash.copy()
    r_bash_utf8['Foo'] = b'bazaar'
    self.assertEqual(r_bash_utf8.search_bug_patterns(pattern_url),
                     'http://bugtracker.net/bugs/1')

    # negative match cases
    r_bash['Package'] = 'bash-static 1-2'
    self.assertEqual(r_bash.search_bug_patterns(pattern_url), None)
    r_bash['Package'] = 'bash 1-21'
    self.assertEqual(r_bash.search_bug_patterns(pattern_url), None,
                     'does not match on wrong bash version')
    r_bash['Foo'] = 'zz'
    self.assertEqual(r_bash.search_bug_patterns(pattern_url), None,
                     'does not match on wrong Foo value')
    r_bash['Foo'] = b'zz'
    self.assertEqual(r_bash.search_bug_patterns(pattern_url), None,
                     'does not match on wrong Foo UTF-8 value')
    r_bash['Foo'] = b'\x01\xFF'
    self.assertEqual(r_bash.search_bug_patterns(pattern_url), None,
                     'does not match on wrong Foo binary value')
    r_coreutils['Bar'] = '11'
    self.assertEqual(r_coreutils.search_bug_patterns(pattern_url), None,
                     'does not match on wrong Bar value')
    r_bazaar['SourcePackage'] = 'launchpad'
    self.assertEqual(r_bazaar.search_bug_patterns(pattern_url), None,
                     'does not match on wrong source package')
    r_bazaar['LogFile'] = ''
    self.assertEqual(r_bazaar.search_bug_patterns(pattern_url), None,
                     'does not match on empty attribute')

    # various errors to check for robustness (no exceptions, just None
    # return value)
    del r_coreutils['Bar']
    self.assertEqual(r_coreutils.search_bug_patterns(pattern_url), None,
                     'does not match on nonexisting key')
    self.assertEqual(r_invalid.search_bug_patterns('file://' + invalid.name), None,
                     'gracefully handles invalid XML')
    r_coreutils['Package'] = 'other 2'
    self.assertEqual(r_bash.search_bug_patterns('file:///nonexisting/directory/'), None,
                     'gracefully handles nonexisting base path')
    # existing host, but no bug patterns
    self.assertEqual(r_bash.search_bug_patterns('http://security.ubuntu.com/'), None,
                     'gracefully handles base path without bug patterns')
    # nonexisting host
    self.assertEqual(r_bash.search_bug_patterns('http://nonexisting.domain/'), None,
                     'gracefully handles nonexisting URL domain')
def test_add_hooks_info(self):
    '''add_hooks_info().'''
    # redirect both hook directories to temporary ones for the test
    orig_hook_dir = apport.report._hook_dir
    apport.report._hook_dir = tempfile.mkdtemp()
    orig_common_hook_dir = apport.report._common_hook_dir
    apport.report._common_hook_dir = tempfile.mkdtemp()
    try:
        # package hook 'foo': runs only for Package == foo; raising
        # StopIteration signals "abort processing" to add_hooks_info()
        with open(os.path.join(apport.report._hook_dir, 'foo.py'), 'w') as fd:
            fd.write('''
import sys
def add_info(report):
    report['Field1'] = 'Field 1'
    report['Field2'] = 'Field 2\\nBla'
    if 'Spethial' in report:
        raise StopIteration
''')
        # common hooks run for every report
        with open(os.path.join(apport.report._common_hook_dir, 'foo1.py'), 'w') as fd:
            fd.write('''
def add_info(report):
    report['CommonField1'] = 'CommonField 1'
    if report['Package'] == 'commonspethial':
        raise StopIteration
''')
        with open(os.path.join(apport.report._common_hook_dir, 'foo2.py'), 'w') as fd:
            fd.write('''
def add_info(report):
    report['CommonField2'] = 'CommonField 2'
''')
        # hooks may take an optional second "ui" argument
        with open(os.path.join(apport.report._common_hook_dir, 'foo3.py'), 'w') as fd:
            fd.write('''
def add_info(report, ui):
    report['CommonField3'] = str(ui)
''')
        # should only catch .py files
        with open(os.path.join(apport.report._common_hook_dir, 'notme'), 'w') as fd:
            fd.write('''
def add_info(report):
    report['BadField'] = 'XXX'
''')
        # package with no matching package hook: only common hooks run
        r = apport.report.Report()
        r['Package'] = 'bar'
        # should not throw any exceptions
        self.assertEqual(r.add_hooks_info('fake_ui'), False)
        self.assertEqual(set(r.keys()),
                         set(['ProblemType', 'Date', 'Package',
                              'CommonField1', 'CommonField2',
                              'CommonField3']),
                         'report has required fields')

        # versioned package name, still no matching package hook
        r = apport.report.Report()
        r['Package'] = 'baz 1.2-3'
        # should not throw any exceptions
        self.assertEqual(r.add_hooks_info('fake_ui'), False)
        self.assertEqual(set(r.keys()),
                         set(['ProblemType', 'Date', 'Package',
                              'CommonField1', 'CommonField2',
                              'CommonField3']),
                         'report has required fields')

        # package 'foo' (unversioned): package hook runs as well
        r = apport.report.Report()
        r['Package'] = 'foo'
        self.assertEqual(r.add_hooks_info('fake_ui'), False)
        self.assertEqual(set(r.keys()),
                         set(['ProblemType', 'Date', 'Package', 'Field1',
                              'Field2', 'CommonField1', 'CommonField2',
                              'CommonField3']),
                         'report has required fields')
        self.assertEqual(r['Field1'], 'Field 1')
        self.assertEqual(r['Field2'], 'Field 2\nBla')
        self.assertEqual(r['CommonField1'], 'CommonField 1')
        self.assertEqual(r['CommonField2'], 'CommonField 2')
        self.assertEqual(r['CommonField3'], 'fake_ui')

        # package 'foo' with version: hook matching strips the version
        r = apport.report.Report()
        r['Package'] = 'foo 4.5-6'
        self.assertEqual(r.add_hooks_info('fake_ui'), False)
        self.assertEqual(set(r.keys()),
                         set(['ProblemType', 'Date', 'Package', 'Field1',
                              'Field2', 'CommonField1', 'CommonField2',
                              'CommonField3']),
                         'report has required fields')
        self.assertEqual(r['Field1'], 'Field 1')
        self.assertEqual(r['Field2'], 'Field 2\nBla')
        self.assertEqual(r['CommonField1'], 'CommonField 1')
        self.assertEqual(r['CommonField2'], 'CommonField 2')

        # test hook abort: StopIteration from a hook makes it return True
        r['Spethial'] = '1'
        self.assertEqual(r.add_hooks_info('fake_ui'), True)
        r = apport.report.Report()
        r['Package'] = 'commonspethial'
        self.assertEqual(r.add_hooks_info('fake_ui'), True)

        # source package hook: source_<pkg>.py matched via SourcePackage
        with open(os.path.join(apport.report._hook_dir, 'source_foo.py'), 'w') as fd:
            fd.write('''
def add_info(report, ui):
    report['Field1'] = 'Field 1'
    report['Field2'] = 'Field 2\\nBla'
    if report['Package'] == 'spethial':
        raise StopIteration
''')
        r = apport.report.Report()
        r['SourcePackage'] = 'foo'
        r['Package'] = 'libfoo 3'
        self.assertEqual(r.add_hooks_info('fake_ui'), False)
        self.assertEqual(set(r.keys()),
                         set(['ProblemType', 'Date', 'Package',
                              'SourcePackage', 'Field1', 'Field2',
                              'CommonField1', 'CommonField2',
                              'CommonField3']),
                         'report has required fields')
        self.assertEqual(r['Field1'], 'Field 1')
        self.assertEqual(r['Field2'], 'Field 2\nBla')
        self.assertEqual(r['CommonField1'], 'CommonField 1')
        self.assertEqual(r['CommonField2'], 'CommonField 2')
        self.assertEqual(r['CommonField3'], 'fake_ui')

        # test hook abort
        r['Package'] = 'spethial'
        self.assertEqual(r.add_hooks_info('fake_ui'), True)
    finally:
        # remove the temporary hook dirs and restore the module globals
        shutil.rmtree(apport.report._hook_dir)
        shutil.rmtree(apport.report._common_hook_dir)
        apport.report._hook_dir = orig_hook_dir
        apport.report._common_hook_dir = orig_common_hook_dir
def test_add_hooks_info_opt(self):
    '''add_hooks_info() for a package in /opt'''
    # redirect all hook lookup directories to temporary ones
    orig_hook_dir = apport.report._hook_dir
    apport.report._hook_dir = tempfile.mkdtemp()
    orig_common_hook_dir = apport.report._common_hook_dir
    apport.report._common_hook_dir = tempfile.mkdtemp()
    orig_opt_dir = apport.report._opt_dir
    apport.report._opt_dir = tempfile.mkdtemp()
    try:
        # hooks of /opt packages live under
        # <opt>/<vendor>/<pkg>/share/apport/package-hooks/
        opt_hook_dir = os.path.join(apport.report._opt_dir,
                                    'foolabs.example.com', 'foo', 'share',
                                    'apport', 'package-hooks')
        os.makedirs(opt_hook_dir)
        with open(os.path.join(opt_hook_dir, 'source_foo.py'), 'w') as fd:
            fd.write('''def add_info(report, ui):
    report['SourceHook'] = '1'
''')
        with open(os.path.join(opt_hook_dir, 'foo-bin.py'), 'w') as fd:
            fd.write('''def add_info(report, ui):
    report['BinHook'] = '1'
''')
        r = apport.report.Report()
        r['Package'] = 'foo-bin 0.2'
        r['SourcePackage'] = 'foo'
        # ExecutablePath inside the /opt prefix is what routes hook lookup
        # to opt_hook_dir
        r['ExecutablePath'] = '%s/foolabs.example.com/foo/bin/frob' % apport.report._opt_dir

        self.assertEqual(r.add_hooks_info('fake_ui'), False)
        self.assertEqual(r['SourceHook'], '1')
    finally:
        # clean up the temporary dirs and restore the module globals
        shutil.rmtree(apport.report._opt_dir)
        shutil.rmtree(apport.report._hook_dir)
        shutil.rmtree(apport.report._common_hook_dir)
        apport.report._hook_dir = orig_hook_dir
        apport.report._common_hook_dir = orig_common_hook_dir
        apport.report._opt_dir = orig_opt_dir
def test_add_hooks_info_errors(self):
    '''add_hooks_info() with errors in hooks'''
    orig_hook_dir = apport.report._hook_dir
    apport.report._hook_dir = tempfile.mkdtemp()
    orig_common_hook_dir = apport.report._common_hook_dir
    apport.report._common_hook_dir = tempfile.mkdtemp()
    # capture stderr to verify the exceptions get printed
    orig_stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        # binary hook raises ZeroDivisionError on its line 3
        with open(os.path.join(apport.report._hook_dir, 'fooprogs.py'), 'w') as fd:
            fd.write('''def add_info(report, ui):
    report['BinHookBefore'] = '1'
    1/0
    report['BinHookAfter'] = '1'
''')
        # source hook raises NameError on its line 3
        with open(os.path.join(apport.report._hook_dir, 'source_foo.py'), 'w') as fd:
            fd.write('''def add_info(report, ui):
    report['SourceHookBefore'] = '1'
    unknown()
    report['SourceHookAfter'] = '1'
''')
        r = apport.report.Report()
        r['Package'] = 'fooprogs 0.2'
        r['SourcePackage'] = 'foo'
        r['ExecutablePath'] = '/bin/foo-cli'
        self.assertEqual(r.add_hooks_info('fake_ui'), False)

        # should have the data until the crash
        self.assertEqual(r['BinHookBefore'], '1')
        self.assertEqual(r['SourceHookBefore'], '1')

        # should print the exceptions to stderr
        err = sys.stderr.getvalue()
        self.assertIn('ZeroDivisionError:', err)
        self.assertIn("name 'unknown' is not defined", err)

        # should also add the exceptions to the report, each hook's
        # traceback kept separate in its own HookError_* field
        self.assertTrue('NameError:' in r['HookError_source_foo'],
                        r['HookError_source_foo'])
        self.assertTrue('line 3, in add_info' in r['HookError_source_foo'],
                        r['HookError_source_foo'])
        self.assertFalse('ZeroDivisionError' in r['HookError_source_foo'],
                         r['HookError_source_foo'])
        self.assertTrue('ZeroDivisionError:' in r['HookError_fooprogs'],
                        r['HookError_fooprogs'])
        # FIX: this assertion previously re-checked HookError_source_foo
        # (copy-paste error) although its message argument already referred
        # to HookError_fooprogs; check the fooprogs traceback as intended.
        self.assertTrue('line 3, in add_info' in r['HookError_fooprogs'],
                        r['HookError_fooprogs'])
        self.assertFalse('NameError:' in r['HookError_fooprogs'],
                         r['HookError_fooprogs'])
    finally:
        sys.stderr = orig_stderr
        shutil.rmtree(apport.report._hook_dir)
        shutil.rmtree(apport.report._common_hook_dir)
        apport.report._hook_dir = orig_hook_dir
        apport.report._common_hook_dir = orig_common_hook_dir
def test_ignoring(self):
    '''mark_ignore() and check_ignored().'''
    # NOTE(review): the attribute chain 'apport.report.apport.report._ignore_file'
    # looks redundant but reaches the same module global; kept as-is.
    orig_ignore_file = apport.report.apport.report._ignore_file
    workdir = tempfile.mkdtemp()
    apport.report.apport.report._ignore_file = os.path.join(workdir, 'ignore.xml')
    try:
        # two dummy "executables"; ignore entries are tied to the
        # executable path and its mtime
        with open(os.path.join(workdir, 'bash'), 'w') as fd:
            fd.write('bash')
        with open(os.path.join(workdir, 'crap'), 'w') as fd:
            fd.write('crap')

        bash_rep = apport.report.Report()
        bash_rep['ExecutablePath'] = os.path.join(workdir, 'bash')
        crap_rep = apport.report.Report()
        crap_rep['ExecutablePath'] = os.path.join(workdir, 'crap')
        # must be able to deal with executables that do not exist any more
        cp_rep = apport.report.Report()
        cp_rep['ExecutablePath'] = os.path.join(workdir, 'cp')

        # no ignores initially
        self.assertEqual(bash_rep.check_ignored(), False)
        self.assertEqual(crap_rep.check_ignored(), False)
        self.assertEqual(cp_rep.check_ignored(), False)

        # ignore crap now
        crap_rep.mark_ignore()
        self.assertEqual(bash_rep.check_ignored(), False)
        self.assertEqual(crap_rep.check_ignored(), True)
        self.assertEqual(cp_rep.check_ignored(), False)

        # ignore bash now
        bash_rep.mark_ignore()
        self.assertEqual(bash_rep.check_ignored(), True)
        self.assertEqual(crap_rep.check_ignored(), True)
        self.assertEqual(cp_rep.check_ignored(), False)

        # poke crap so that it has a newer timestamp; a changed executable
        # invalidates its ignore entry
        time.sleep(1)
        with open(os.path.join(workdir, 'crap'), 'w') as fd:
            fd.write('crapnew')
        self.assertEqual(bash_rep.check_ignored(), True)
        self.assertEqual(crap_rep.check_ignored(), False)
        self.assertEqual(cp_rep.check_ignored(), False)

        # do not complain about an empty ignore file
        with open(apport.report.apport.report._ignore_file, 'w') as fd:
            fd.write('')
        self.assertEqual(bash_rep.check_ignored(), False)
        self.assertEqual(crap_rep.check_ignored(), False)
        self.assertEqual(cp_rep.check_ignored(), False)

        # does not crash if the executable went away under our feet
        crap_rep['ExecutablePath'] = '/non existing'
        crap_rep.mark_ignore()
        self.assertEqual(os.path.getsize(apport.report.apport.report._ignore_file), 0)
    finally:
        shutil.rmtree(workdir)
        apport.report.apport.report._ignore_file = orig_ignore_file
def test_blacklisting(self):
    '''check_ignored() for system-wise blacklist.'''
    orig_blacklist_dir = apport.report._blacklist_dir
    apport.report._blacklist_dir = tempfile.mkdtemp()
    orig_ignore_file = apport.report._ignore_file
    apport.report._ignore_file = '/nonexistant'

    def write_blacklist(name, content, mode='w'):
        # small helper: (over)write one file in the temporary blacklist dir
        with open(os.path.join(apport.report._blacklist_dir, name), mode) as blfile:
            blfile.write(content)

    try:
        report_bash = apport.report.Report()
        report_bash['ExecutablePath'] = '/bin/bash'
        report_crap = apport.report.Report()
        report_crap['ExecutablePath'] = '/bin/crap'

        # no ignores initially
        self.assertEqual(report_bash.check_ignored(), False)
        self.assertEqual(report_crap.check_ignored(), False)

        # should not stumble over comments
        write_blacklist('README', '# Ignore file\n#/bin/bash\n')

        # no ignores on nonmatching paths
        write_blacklist('bl1', '/bin/bas\n/bin/bashh\nbash\nbin/bash\n')
        self.assertEqual(report_bash.check_ignored(), False)
        self.assertEqual(report_crap.check_ignored(), False)

        # ignore crap now
        write_blacklist('bl_2', '/bin/crap\n')
        self.assertEqual(report_bash.check_ignored(), False)
        self.assertEqual(report_crap.check_ignored(), True)

        # ignore bash now (append to existing list file)
        write_blacklist('bl1', '/bin/bash\n', mode='a')
        self.assertEqual(report_bash.check_ignored(), True)
        self.assertEqual(report_crap.check_ignored(), True)
    finally:
        shutil.rmtree(apport.report._blacklist_dir)
        apport.report._blacklist_dir = orig_blacklist_dir
        apport.report._ignore_file = orig_ignore_file
def test_whitelisting(self):
    '''check_ignored() for system-wise whitelist.'''
    orig_whitelist_dir = apport.report._whitelist_dir
    apport.report._whitelist_dir = tempfile.mkdtemp()
    # NOTE(review): redundant-looking attribute chain, kept as-is
    orig_ignore_file = apport.report.apport.report._ignore_file
    apport.report.apport.report._ignore_file = '/nonexistant'
    try:
        bash_rep = apport.report.Report()
        bash_rep['ExecutablePath'] = '/bin/bash'
        crap_rep = apport.report.Report()
        crap_rep['ExecutablePath'] = '/bin/crap'

        # no ignores without any whitelist; with a whitelist present,
        # anything NOT listed is ignored
        self.assertEqual(bash_rep.check_ignored(), False)
        self.assertEqual(crap_rep.check_ignored(), False)

        # should not stumble over comments
        with open(os.path.join(apport.report._whitelist_dir, 'README'), 'w') as fd:
            fd.write('# Ignore file\n#/bin/bash\n')

        # accepts matching paths
        with open(os.path.join(apport.report._whitelist_dir, 'wl1'), 'w') as fd:
            fd.write('/bin/bash\n')
        self.assertEqual(bash_rep.check_ignored(), False)
        self.assertEqual(crap_rep.check_ignored(), True)

        # also accept crap now
        with open(os.path.join(apport.report._whitelist_dir, 'wl_2'), 'w') as fd:
            fd.write('/bin/crap\n')
        self.assertEqual(bash_rep.check_ignored(), False)
        self.assertEqual(crap_rep.check_ignored(), False)

        # only complete matches accepted (prefixes/substrings don't count)
        with open(os.path.join(apport.report._whitelist_dir, 'wl1'), 'w') as fd:
            fd.write('/bin/bas\n/bin/bashh\nbash\n')
        self.assertEqual(bash_rep.check_ignored(), True)
        self.assertEqual(crap_rep.check_ignored(), False)
    finally:
        shutil.rmtree(apport.report._whitelist_dir)
        apport.report._whitelist_dir = orig_whitelist_dir
        apport.report.apport.report._ignore_file = orig_ignore_file
def test_has_useful_stacktrace(self):
    '''has_useful_stacktrace().'''
    r = apport.report.Report()
    # no StacktraceTop field at all
    self.assertFalse(r.has_useful_stacktrace())

    # (StacktraceTop, expected usefulness) pairs; expected values mirror
    # the original assertTrue/assertFalse sequence
    cases = [
        ('', False),
        ('?? ()', False),
        ('?? ()\n?? ()', False),
        ('read () from /lib/libc.6.so\n?? ()', False),
        ('read () from /lib/libc.6.so\n?? ()\n?? ()\n?? ()', False),
        ('read () from /lib/libc.6.so\nfoo (i=1) from /usr/lib/libfoo.so', True),
        ('read () from /lib/libc.6.so\nfoo (i=1) from /usr/lib/libfoo.so\n?? ()', True),
        ('read () from /lib/libc.6.so\nfoo (i=1) from /usr/lib/libfoo.so\n?? ()\n?? ()', True),
        ('read () from /lib/libc.6.so\n?? ()\nfoo (i=1) from /usr/lib/libfoo.so\n?? ()\n?? ()', False),
    ]
    for trace, expect_useful in cases:
        r['StacktraceTop'] = trace
        if expect_useful:
            self.assertTrue(r.has_useful_stacktrace(), trace)
        else:
            self.assertFalse(r.has_useful_stacktrace(), trace)
def test_standard_title(self):
'''standard_title().'''
report = apport.report.Report()
self.assertEqual(report.standard_title(), None)
# named signal crash
report['Signal'] = '11'
report['ExecutablePath'] = '/bin/bash'
report['StacktraceTop'] = '''foo()
bar(x=3)
baz()
'''
self.assertEqual(report.standard_title(),
'bash crashed with SIGSEGV in foo()')
# unnamed signal crash
report['Signal'] = '42'
self.assertEqual(report.standard_title(),
'bash crashed with signal 42 in foo()')
# do not crash on empty StacktraceTop
report['StacktraceTop'] = ''
self.assertEqual(report.standard_title(),
'bash crashed with signal 42')
# do not create bug title with unknown function name
report['StacktraceTop'] = '??()\nfoo()'
self.assertEqual(report.standard_title(),
'bash crashed with signal 42 in foo()')
# if we do not know any function name, don't mention ??
report['StacktraceTop'] = '??()\n??()'
self.assertEqual(report.standard_title(),
'bash crashed with signal 42')
# assertion message
report['Signal'] = '6'
report['ExecutablePath'] = '/bin/bash'
report['AssertionMessage'] = 'foo.c:42 main: i > 0'
self.assertEqual(report.standard_title(),
'bash assert failure: foo.c:42 main: i > 0')
# Python crash
report = apport.report.Report()
report['ExecutablePath'] = '/usr/share/apport/apport-gtk'
report['Traceback'] = '''Traceback (most recent call last):
File "/usr/share/apport/apport-gtk", line 202, in <module>
app.run_argv()
File "/var/lib/python-support/python2.5/apport/ui.py", line 161, in run_argv
self.run_crashes()
File "/var/lib/python-support/python2.5/apport/ui.py", line 104, in run_crashes
self.run_crash(f)
File "/var/lib/python-support/python2.5/apport/ui.py", line 115, in run_crash
response = self.ui_present_crash(desktop_entry)
File "/usr/share/apport/apport-gtk", line 67, in ui_present_crash
subprocess.call(['pgrep', '-x',
NameError: global name 'subprocess' is not defined'''
self.assertEqual(report.standard_title(),
"apport-gtk crashed with NameError in ui_present_crash(): global name 'subprocess' is not defined")
# slightly weird Python crash
report = apport.report.Report()
report['ExecutablePath'] = '/usr/share/apport/apport-gtk'
report['Traceback'] = '''TypeError: Cannot create a consistent method resolution
order (MRO) for bases GObject, CanvasGroupableIface, CanvasGroupable'''
self.assertEqual(report.standard_title(),
'apport-gtk crashed with TypeError: Cannot create a consistent method resolution')
# Python crash with custom message
report = apport.report.Report()
report['ExecutablePath'] = '/usr/share/apport/apport-gtk'
report['Traceback'] = '''Traceback (most recent call last):
File "/x/foo.py", line 242, in setup_chooser
raise "Moo"
Mo?o[a-1]'''
self.assertEqual(report.standard_title(), 'apport-gtk crashed with Mo?o[a-1] in setup_chooser()')
# Python crash with custom message with newlines (LP #190947)
report = apport.report.Report()
report['ExecutablePath'] = '/usr/share/apport/apport-gtk'
report['Traceback'] = '''Traceback (most recent call last):
File "/x/foo.py", line 242, in setup_chooser
raise "\nKey: "+key+" isn't set.\nRestarting AWN usually solves this issue\n"
Key: /apps/avant-window-navigator/app/active_png isn't set.
Restarting AWN usually solves this issue'''
t = report.standard_title()
self.assertTrue(t.startswith('apport-gtk crashed with'))
self.assertTrue(t.endswith('setup_chooser()'))
# Python crash at top level in module
report = apport.report.Report()
report['ExecutablePath'] = '/usr/bin/gnome-about'
report['Traceback'] = '''Traceback (most recent call last):
File "/usr/bin/gnome-about", line 30, in <module>
import pygtk
File "/usr/lib/pymodules/python2.6/pygtk.py", line 28, in <module>
import nonexistent
ImportError: No module named nonexistent
'''
self.assertEqual(report.standard_title(),
"gnome-about crashed with ImportError in /usr/lib/pymodules/python2.6/pygtk.py: No module named nonexistent")
# Python crash at top level in main program
report = apport.report.Report()
report['ExecutablePath'] = '/usr/bin/dcut'
report['Traceback'] = '''Traceback (most recent call last):
File "/usr/bin/dcut", line 28, in <module>
import nonexistent
ImportError: No module named nonexistent
'''
self.assertEqual(report.standard_title(),
"dcut crashed with ImportError in __main__: No module named nonexistent")
# package install problem
report = apport.report.Report('Package')
report['Package'] = 'bash'
# no ErrorMessage
self.assertEqual(report.standard_title(),
'package bash failed to install/upgrade')
# empty ErrorMessage
report['ErrorMessage'] = ''
self.assertEqual(report.standard_title(),
'package bash failed to install/upgrade')
# nonempty ErrorMessage
report['ErrorMessage'] = 'botched\nnot found\n'
self.assertEqual(report.standard_title(),
'package bash failed to install/upgrade: not found')
# matching package/system architectures
report['Signal'] = '11'
report['ExecutablePath'] = '/bin/bash'
report['StacktraceTop'] = '''foo()
bar(x=3)
baz()
'''
report['PackageArchitecture'] = 'amd64'
report['Architecture'] = 'amd64'
self.assertEqual(report.standard_title(),
'bash crashed with SIGSEGV in foo()')
# non-native package (on multiarch)
report['PackageArchitecture'] = 'i386'
self.assertEqual(report.standard_title(),
'bash crashed with SIGSEGV in foo() [non-native i386 package]')
# Arch: all package (matches every system architecture)
report['PackageArchitecture'] = 'all'
self.assertEqual(report.standard_title(),
'bash crashed with SIGSEGV in foo()')
report = apport.report.Report('KernelOops')
report['OopsText'] = '------------[ cut here ]------------\nkernel BUG at /tmp/oops.c:5!\ninvalid opcode: 0000 [#1] SMP'
self.assertEqual(report.standard_title(), 'kernel BUG at /tmp/oops.c:5!')
def test_obsolete_packages(self):
'''obsolete_packages().'''
report = apport.report.Report()
self.assertEqual(report.obsolete_packages(), [])
# should work without Dependencies
report['Package'] = 'bash 0'
self.assertEqual(report.obsolete_packages(), ['bash'])
report['Package'] = 'bash 0 [modified: /bin/bash]'
self.assertEqual(report.obsolete_packages(), ['bash'])
report['Package'] = 'bash ' + apport.packaging.get_available_version('bash')
self.assertEqual(report.obsolete_packages(), [])
report['Dependencies'] = 'coreutils 0\ncron 0\n'
self.assertEqual(report.obsolete_packages(), ['coreutils', 'cron'])
report['Dependencies'] = 'coreutils %s [modified: /bin/mount]\ncron 0\n' % \
apport.packaging.get_available_version('coreutils')
self.assertEqual(report.obsolete_packages(), ['cron'])
report['Dependencies'] = 'coreutils %s\ncron %s\n' % (
apport.packaging.get_available_version('coreutils'),
apport.packaging.get_available_version('cron'))
self.assertEqual(report.obsolete_packages(), [])
    def test_gen_stacktrace_top(self):
        '''_gen_stacktrace_top().

        Checks that the most interesting frames of 'Stacktrace' are copied
        into 'StacktraceTop': signal handler frames, glib logging/assertion
        helpers, XError plumbing, and libc helper frames get chopped off.
        '''
        # nothing to chop off
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0x10000488 in h (p=0x0) at crash.c:25
#1 0x100004c8 in g (x=1, y=42) at crash.c:26
#2 0x10000514 in f (x=1) at crash.c:27
#3 0x10000530 in e (x=1) at crash.c:28
#4 0x10000530 in d (x=1) at crash.c:29
#5 0x10000530 in c (x=1) at crash.c:30
#6 0x10000550 in main () at crash.c:31
'''
        r._gen_stacktrace_top()
        # only the five topmost frames are kept
        self.assertEqual(r['StacktraceTop'], '''h (p=0x0) at crash.c:25
g (x=1, y=42) at crash.c:26
f (x=1) at crash.c:27
e (x=1) at crash.c:28
d (x=1) at crash.c:29''')
        # nothing to chop off: some addresses missing (LP #269133)
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 h (p=0x0) at crash.c:25
#1 0x100004c8 in g (x=1, y=42) at crash.c:26
#2 f (x=1) at crash.c:27
#3 0x10000530 in e (x=1) at crash.c:28
#4 0x10000530 in d (x=1) at crash.c:29
#5 0x10000530 in c (x=1) at crash.c:30
#6 0x10000550 in main () at crash.c:31
'''
        r._gen_stacktrace_top()
        self.assertEqual(r['StacktraceTop'], '''h (p=0x0) at crash.c:25
g (x=1, y=42) at crash.c:26
f (x=1) at crash.c:27
e (x=1) at crash.c:28
d (x=1) at crash.c:29''')
        # single signal handler invocation
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0x10000488 in raise () from /lib/libpthread.so.0
#1 0x100004c8 in ??
#2 <signal handler called>
#3 0x10000530 in e (x=1) at crash.c:28
#4 0x10000530 in d (x=1) at crash.c:29
#5 0x10000530 in c (x=1) at crash.c:30
#6 0x10000550 in main () at crash.c:31
'''
        r._gen_stacktrace_top()
        # everything up to and including <signal handler called> is dropped
        self.assertEqual(r['StacktraceTop'], '''e (x=1) at crash.c:28
d (x=1) at crash.c:29
c (x=1) at crash.c:30
main () at crash.c:31''')
        # single signal handler invocation: some addresses missing
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0x10000488 in raise () from /lib/libpthread.so.0
#1 ??
#2 <signal handler called>
#3 0x10000530 in e (x=1) at crash.c:28
#4 d (x=1) at crash.c:29
#5 0x10000530 in c (x=1) at crash.c:30
#6 0x10000550 in main () at crash.c:31
'''
        r._gen_stacktrace_top()
        self.assertEqual(r['StacktraceTop'], '''e (x=1) at crash.c:28
d (x=1) at crash.c:29
c (x=1) at crash.c:30
main () at crash.c:31''')
        # stacked signal handler; should only cut the first one
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0x10000488 in raise () from /lib/libpthread.so.0
#1 0x100004c8 in ??
#2 <signal handler called>
#3 0x10000530 in e (x=1) at crash.c:28
#4 0x10000530 in d (x=1) at crash.c:29
#5 0x10000123 in raise () from /lib/libpthread.so.0
#6 <signal handler called>
#7 0x10000530 in c (x=1) at crash.c:30
#8 0x10000550 in main () at crash.c:31
'''
        r._gen_stacktrace_top()
        # the second, nested <signal handler called> stays in the output
        self.assertEqual(r['StacktraceTop'], '''e (x=1) at crash.c:28
d (x=1) at crash.c:29
raise () from /lib/libpthread.so.0
<signal handler called>
c (x=1) at crash.c:30''')
        # Gnome assertion; should unwind the logs and assert call
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0xb7d39cab in IA__g_logv (log_domain=<value optimized out>, log_level=G_LOG_LEVEL_ERROR,
format=0xb7d825f0 "file %s: line %d (%s): assertion failed: (%s)", args1=0xbfee8e3c "xxx") at /build/buildd/glib2.0-2.13.5/glib/gmessages.c:493
#1 0xb7d39f29 in IA__g_log (log_domain=0xb7edbfd0 "libgnomevfs", log_level=G_LOG_LEVEL_ERROR,
format=0xb7d825f0 "file %s: line %d (%s): assertion failed: (%s)") at /build/buildd/glib2.0-2.13.5/glib/gmessages.c:517
#2 0xb7d39fa6 in IA__g_assert_warning (log_domain=0xb7edbfd0 "libgnomevfs", file=0xb7ee1a26 "gnome-vfs-volume.c", line=254,
pretty_function=0xb7ee1920 "gnome_vfs_volume_unset_drive_private", expression=0xb7ee1a39 "volume->priv->drive == drive")
at /build/buildd/glib2.0-2.13.5/glib/gmessages.c:552
No locals.
#3 0xb7ec6c11 in gnome_vfs_volume_unset_drive_private (volume=0x8081a30, drive=0x8078f00) at gnome-vfs-volume.c:254
__PRETTY_FUNCTION__ = "gnome_vfs_volume_unset_drive_private"
#4 0x08054db8 in _gnome_vfs_volume_monitor_disconnected (volume_monitor=0x8070400, drive=0x8078f00) at gnome-vfs-volume-monitor.c:963
vol_list = (GList *) 0x8096d30
current_vol = (GList *) 0x8097470
#5 0x0805951e in _hal_device_removed (hal_ctx=0x8074da8, udi=0x8093be4 "/org/freedesktop/Hal/devices/volume_uuid_92FC9DFBFC9DDA35")
at gnome-vfs-hal-mounts.c:1316
backing_udi = <value optimized out>
#6 0xb7ef1ead in filter_func (connection=0x8075288, message=0x80768d8, user_data=0x8074da8) at libhal.c:820
udi = <value optimized out>
object_path = 0x8076d40 "/org/freedesktop/Hal/Manager"
error = {name = 0x0, message = 0x0, dummy1 = 1, dummy2 = 0, dummy3 = 0, dummy4 = 1, dummy5 = 0, padding1 = 0xb7e50c00}
#7 0xb7e071d2 in dbus_connection_dispatch (connection=0x8075288) at dbus-connection.c:4267
#8 0xb7e33dfd in ?? () from /usr/lib/libdbus-glib-1.so.2'''
        r._gen_stacktrace_top()
        # g_logv/g_log/g_assert_warning frames are unwound
        self.assertEqual(r['StacktraceTop'], '''gnome_vfs_volume_unset_drive_private (volume=0x8081a30, drive=0x8078f00) at gnome-vfs-volume.c:254
_gnome_vfs_volume_monitor_disconnected (volume_monitor=0x8070400, drive=0x8078f00) at gnome-vfs-volume-monitor.c:963
_hal_device_removed (hal_ctx=0x8074da8, udi=0x8093be4 "/org/freedesktop/Hal/devices/volume_uuid_92FC9DFBFC9DDA35")
filter_func (connection=0x8075288, message=0x80768d8, user_data=0x8074da8) at libhal.c:820
dbus_connection_dispatch (connection=0x8075288) at dbus-connection.c:4267''')
        # XError (taken from LP#848808)
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0x007cf416 in __kernel_vsyscall ()
No symbol table info available.
#1 0x01017c8f in __GI_raise (sig=6) at ../nptl/sysdeps/unix/sysv/linux/raise.c:64
#2 0x0101b2b5 in __GI_abort () at abort.c:92
#3 0x0807daab in meta_bug (format=0x80b0c60 "Unexpected X error: %s serial %ld error_code %d request_code %d minor_code %d)\n") at core/util.c:398
#4 0x0806989c in x_error_handler (error=0xbf924acc, xdisplay=0x9104b88) at core/errors.c:247
#5 x_error_handler (xdisplay=0x9104b88, error=0xbf924acc) at core/errors.c:203
#6 0x00e97d3b in _XError (dpy=0x9104b88, rep=0x9131840) at ../../src/XlibInt.c:1583
#7 0x00e9490d in handle_error (dpy=0x9104b88, err=0x9131840, in_XReply=0) at ../../src/xcb_io.c:212
#8 0x00e94967 in handle_response (dpy=0x9104b88, response=0x9131840, in_XReply=0) at ../../src/xcb_io.c:324
#9 0x00e952fe in _XReadEvents (dpy=0x9104b88) at ../../src/xcb_io.c:425
#10 0x00e93663 in XWindowEvent (dpy=0x9104b88, w=16777220, mask=4194304, event=0xbf924c6c) at ../../src/WinEvent.c:79
#11 0x0806071c in meta_display_get_current_time_roundtrip (display=0x916d7d0) at core/display.c:1217
#12 0x08089f64 in meta_window_show (window=0x91ccfc8) at core/window.c:2165
#13 implement_showing (window=0x91ccfc8, showing=1) at core/window.c:1583
#14 0x080879cc in meta_window_flush_calc_showing (window=0x91ccfc8) at core/window.c:1806'''
        r._gen_stacktrace_top()
        # the abort/X error handler plumbing is skipped down to the caller
        self.assertEqual(r['StacktraceTop'], '''meta_display_get_current_time_roundtrip (display=0x916d7d0) at core/display.c:1217
meta_window_show (window=0x91ccfc8) at core/window.c:2165
implement_showing (window=0x91ccfc8, showing=1) at core/window.c:1583
meta_window_flush_calc_showing (window=0x91ccfc8) at core/window.c:1806''')
        # another XError (taken from LP#834403)
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 g_logv (log_domain=0x7fd41db08a46 "Gdk", log_level=<optimized out>, format=0x7fd41db12e87 "%s", args1=0x7fff50bf0c18) at /build/buildd/glib2.0-2.29.16/./glib/gmessages.c:577
#1 0x00007fd42006bb92 in g_log (log_domain=<optimized out>, log_level=<optimized out>, format=<optimized out>) at /build/buildd/glib2.0-2.29.16/./glib/gmessages.c:591
#2 0x00007fd41dae86f3 in _gdk_x11_display_error_event (display=<optimized out>, error=<optimized out>) at /build/buildd/gtk+3.0-3.1.12/./gdk/x11/gdkdisplay-x11.c:2374
#3 0x00007fd41daf5647 in gdk_x_error (error=0x7fff50bf0dc0, xdisplay=<optimized out>) at /build/buildd/gtk+3.0-3.1.12/./gdk/x11/gdkmain-x11.c:312
#4 gdk_x_error (xdisplay=<optimized out>, error=0x7fff50bf0dc0) at /build/buildd/gtk+3.0-3.1.12/./gdk/x11/gdkmain-x11.c:275
#5 0x00007fd41d5a301f in _XError (dpy=0x2425370, rep=<optimized out>) at ../../src/XlibInt.c:1583
#6 0x00007fd41d59fdd1 in handle_error (dpy=0x2425370, err=0x7fd408707980, in_XReply=<optimized out>) at ../../src/xcb_io.c:212
#7 0x00007fd41d5a0d27 in _XReply (dpy=0x2425370, rep=0x7fff50bf0f60, extra=0, discard=0) at ../../src/xcb_io.c:698
#8 0x00007fd41d5852fb in XGetWindowProperty (dpy=0x2425370, w=0, property=348, offset=0, length=2, delete=<optimized out>, req_type=348, actual_type=0x7fff50bf1038, actual_format=0x7fff50bf105c, nitems=0x7fff50bf1040, bytesafter=0x7fff50bf1048, prop=0x7fff50bf1050) at ../../src/GetProp.c:61
#9 0x00007fd41938269e in window_is_xembed (w=<optimized out>, d=<optimized out>) at canberra-gtk-module.c:373
#10 dispatch_sound_event (d=0x32f6a30) at canberra-gtk-module.c:454
#11 dispatch_queue () at canberra-gtk-module.c:815'''
        r._gen_stacktrace_top()
        self.assertEqual(r['StacktraceTop'], '''XGetWindowProperty (dpy=0x2425370, w=0, property=348, offset=0, length=2, delete=<optimized out>, req_type=348, actual_type=0x7fff50bf1038, actual_format=0x7fff50bf105c, nitems=0x7fff50bf1040, bytesafter=0x7fff50bf1048, prop=0x7fff50bf1050) at ../../src/GetProp.c:61
window_is_xembed (w=<optimized out>, d=<optimized out>) at canberra-gtk-module.c:373
dispatch_sound_event (d=0x32f6a30) at canberra-gtk-module.c:454
dispatch_queue () at canberra-gtk-module.c:815''')
        # problem with too old gdb, only assertion, nothing else
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0x00987416 in __kernel_vsyscall ()
No symbol table info available.
#1 0x00ebecb1 in *__GI_raise (sig=6)
selftid = 945
#2 0x00ec218e in *__GI_abort () at abort.c:59
save_stage = Unhandled dwarf expression opcode 0x9f
'''
        r._gen_stacktrace_top()
        # nothing interesting remains after chopping -> empty result
        self.assertEqual(r['StacktraceTop'], '')
        # ignore uninteresting frames
        r = apport.report.Report()
        r['Stacktrace'] = '''#0 0x00987416 in __kernel_vsyscall ()
#1 __strchr_sse42 () at strchr.S:97
#2 h (p=0x0) at crash.c:25
#3 0x100004c8 in g (x=1, y=42) at crash.c:26
#4 0x10000999 in __memmove_ssse3 ()
#5 f (x=1) at crash.c:27
#6 0x10000530 in e (x=1) at crash.c:28
#7 0x10000999 in __strlen_sse2_back () at strchr.S:42
#8 0x10000530 in d (x=1) at crash.c:29
#9 0x10000530 in c (x=1) at crash.c:30
#10 0x10000550 in main () at crash.c:31
'''
        r._gen_stacktrace_top()
        # libc helper frames (__strchr_sse42, __memmove_ssse3, ...) are skipped
        self.assertEqual(r['StacktraceTop'], '''h (p=0x0) at crash.c:25
g (x=1, y=42) at crash.c:26
f (x=1) at crash.c:27
e (x=1) at crash.c:28
d (x=1) at crash.c:29''')
    def test_crash_signature(self):
        '''crash_signature().

        Exercises the signature generation for signal crashes, Python
        tracebacks, kernel crashes, assertion failures, and kernel oopses.
        '''
        r = apport.report.Report()
        # a report without any crash data has no signature
        self.assertEqual(r.crash_signature(), None)
        # signal crashes
        r['Signal'] = '42'
        r['ExecutablePath'] = '/bin/crash'
        r['StacktraceTop'] = '''foo_bar (x=1) at crash.c:28
d01 (x=1) at crash.c:29
raise () from /lib/libpthread.so.0
<signal handler called>
__frob::~frob (x=1) at crash.c:30'''
        self.assertEqual(r.crash_signature(), '/bin/crash:42:foo_bar:d01:raise:<signal handler called>:__frob::~frob')
        # an unresolved '??' frame makes the signature unusable
        r['StacktraceTop'] = '''foo_bar (x=1) at crash.c:28
??
raise () from /lib/libpthread.so.0
<signal handler called>
__frob (x=1) at crash.c:30'''
        self.assertEqual(r.crash_signature(), None)
        r['StacktraceTop'] = ''
        self.assertEqual(r.crash_signature(), None)
        # Python crashes
        del r['Signal']
        r['Traceback'] = '''Traceback (most recent call last):
File "test.py", line 7, in <module>
print(_f(5))
File "test.py", line 5, in _f
return g_foo00(x+1)
File "test.py", line 2, in g_foo00
return x/0
ZeroDivisionError: integer division or modulo by zero'''
        self.assertEqual(r.crash_signature(), '/bin/crash:ZeroDivisionError:test.py@7:_f:g_foo00')
        # sometimes Python traces do not have file references
        r['Traceback'] = 'TypeError: function takes exactly 0 arguments (1 given)'
        self.assertEqual(r.crash_signature(), '/bin/crash:TypeError')
        # a malformed traceback does not produce a signature
        r['Traceback'] = 'FooBar'
        self.assertEqual(r.crash_signature(), None)
        # kernel
        r['ProblemType'] = 'KernelCrash'
        r['Stacktrace'] = '''
crash 4.0-8.9
GNU gdb 6.1
GDB is free software, covered by the GNU General Public License, and you are
welcome to change it and/or distribute copies of it under certain conditions.
Type "show copying" to see the conditions.
There is absolutely no warranty for GDB. Type "show warranty" for details.
This GDB was configured as "i686-pc-linux-gnu"...
KERNEL: /usr/lib/debug/boot/vmlinux-2.6.31-2-generic
DUMPFILE: /tmp/tmpRJZy_O
CPUS: 1
DATE: Thu Jul 9 12:58:08 2009
UPTIME: 00:00:57
LOAD AVERAGE: 0.15, 0.05, 0.02
TASKS: 173
NODENAME: egon-desktop
RELEASE: 2.6.31-2-generic
VERSION: #16-Ubuntu SMP Mon Jul 6 20:38:51 UTC 2009
MACHINE: i686 (2137 Mhz)
MEMORY: 2 GB
PANIC: "[ 57.879776] Oops: 0002 [#1] SMP " (check log for details)
PID: 0
COMMAND: "swapper"
TASK: c073c180 [THREAD_INFO: c0784000]
CPU: 0
STATE: TASK_RUNNING (PANIC)
PID: 0 TASK: c073c180 CPU: 0 COMMAND: "swapper"
#0 [c0785ba0] sysrq_handle_crash at c03917a3
[RA: c03919c6 SP: c0785ba0 FP: c0785ba0 SIZE: 4]
c0785ba0: c03919c6
#1 [c0785ba0] __handle_sysrq at c03919c4
[RA: c0391a91 SP: c0785ba4 FP: c0785bc8 SIZE: 40]
c0785ba4: c06d4bab c06d42d2 f6534000 00000004
c0785bb4: 00000086 0000002e 00000001 f6534000
c0785bc4: c0785bcc c0391a91
#2 [c0785bc8] handle_sysrq at c0391a8c
[RA: c0389961 SP: c0785bcc FP: c0785bd0 SIZE: 8]
c0785bcc: c0785c0c c0389961
#3 [c0785bd0] kbd_keycode at c038995c
[RA: c0389b8b SP: c0785bd4 FP: c0785c10 SIZE: 64]
c0785bd4: c056f96a c0785be4 00000096 c07578c0
c0785be4: 00000001 f6ac6e00 f6ac6e00 00000001
c0785bf4: 00000000 00000000 0000002e 0000002e
c0785c04: 00000001 f70d6850 c0785c1c c0389b8b
#4 [c0785c10] kbd_event at c0389b86
[RA: c043140c SP: c0785c14 FP: c0785c20 SIZE: 16]
c0785c14: c0758040 f6910900 c0785c3c c043140c
#5 [c0785c20] input_pass_event at c0431409
[RA: c04332ce SP: c0785c24 FP: c0785c40 SIZE: 32]
c0785c24: 00000001 0000002e 00000001 f70d6000
c0785c34: 00000001 0000002e c0785c64 c04332ce
#6 [c0785c40] input_handle_event at c04332c9
[RA: c0433ac6 SP: c0785c44 FP: c0785c68 SIZE: 40]
c0785c44: 00000001 ffff138d 0000003d 00000001
c0785c54: f70d6000 00000001 f70d6000 0000002e
c0785c64: c0785c84 c0433ac6
#7 [c0785c68] input_event at c0433ac1
[RA: c0479806 SP: c0785c6c FP: c0785c88 SIZE: 32]
c0785c6c: 00000001 00000092 f70d677c f70d70b4
c0785c7c: 0000002e f70d7000 c0785ca8 c0479806
#8 [c0785c88] hidinput_hid_event at c0479801
[RA: c0475b31 SP: c0785c8c FP: c0785cac SIZE: 36]
c0785c8c: 00000001 00000007 c0785c00 f70d6000
c0785c9c: f70d70b4 f70d5000 f70d7000 c0785cc4
c0785cac: c0475b31
[RA: 0 SP: c0785ffc FP: c0785ffc SIZE: 0]
PID PPID CPU TASK ST %MEM VSZ RSS COMM
> 0 0 0 c073c180 RU 0.0 0 0 [swapper]
1 0 1 f7038000 IN 0.1 3096 1960 init
2 0 0 f7038c90 IN 0.0 0 0 [kthreadd]
271 2 1 f72bf110 IN 0.0 0 0 [bluetooth]
325 2 1 f71c25b0 IN 0.0 0 0 [khungtaskd]
1404 2 0 f6b5bed0 IN 0.0 0 0 [kpsmoused]
1504 2 1 f649cb60 IN 0.0 0 0 [hd-audio0]
2055 1 0 f6a18000 IN 0.0 1824 536 getty
2056 1 0 f6a1d7f0 IN 0.0 1824 536 getty
2061 1 0 f6a1f110 IN 0.1 3132 1604 login
2062 1 1 f6a18c90 IN 0.0 1824 540 getty
2063 1 1 f6b58c90 IN 0.0 1824 540 getty
2130 1 0 f6b5f110 IN 0.0 2200 1032 acpid
2169 1 0 f69ebed0 IN 0.0 2040 664 syslogd
2192 1 1 f65b3ed0 IN 0.0 1976 532 dd
2194 1 1 f6b5a5b0 IN 0.1 3996 2712 klogd
2217 1 0 f6b74b60 IN 0.1 3008 1120 dbus-daemon
2248 1 0 f65b7110 IN 0.2 6896 4304 hald
2251 1 1 f65b3240 IN 0.1 19688 2604 console-kit-dae
RUNQUEUES[0]: c6002320
RT PRIO_ARRAY: c60023c0
CFS RB_ROOT: c600237c
PID: 9 TASK: f703f110 CPU: 0 COMMAND: "events/0"
'''
        # kernel signatures are built from the call chain function names
        self.assertEqual(r.crash_signature(), 'kernel:sysrq_handle_crash:__handle_sysrq:handle_sysrq:kbd_keycode:kbd_event:input_pass_event:input_handle_event:input_event:hidinput_hid_event')
        # assertion failures
        r = apport.report.Report()
        r['Signal'] = '6'
        r['ExecutablePath'] = '/bin/bash'
        r['AssertionMessage'] = 'foo.c:42 main: i > 0'
        # the assertion message itself becomes the signature suffix
        self.assertEqual(r.crash_signature(), '/bin/bash:foo.c:42 main: i > 0')
        # kernel oops
        report = apport.report.Report('KernelOops')
        report['OopsText'] = '''
BUG: unable to handle kernel paging request at ffffb4ff
IP: [<c11e4690>] ext4_get_acl+0x80/0x210
*pde = 01874067 *pte = 00000000
Oops: 0000 [#1] SMP
Modules linked in: bnep rfcomm bluetooth dm_crypt olpc_xo1 scx200_acb snd_cs5535audio snd_ac97_codec ac97_bus snd_pcm snd_seq_midi snd_rawmidi snd_seq_midi_event snd_seq snd_timer snd_seq_device snd cs5535_gpio soundcore snd_page_alloc binfmt_misc geode_aes cs5535_mfd geode_rng msr vesafb usbhid hid 8139too pata_cs5536 8139cp
Pid: 1798, comm: gnome-session-c Not tainted 3.0.0-11-generic #17-Ubuntu First International Computer, Inc. ION603/ION603
EIP: 0060:[<c11e4690>] EFLAGS: 00010286 CPU: 0
EIP is at ext4_get_acl+0x80/0x210
EAX: f5d3009c EBX: f5d30088 ECX: 00000000 EDX: f5d301d8
ESI: ffffb4ff EDI: 00008000 EBP: f29b3dc8 ESP: f29b3da4
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
Process gnome-session-c (pid: 1798, ti=f29b2000 task=f2bd72c0 task.ti=f29b2000)
Stack:
f29b3db0 c113bb90 f5d301d8 f29b3de4 c11b9016 f5d3009c f5d30088 f5d30088
00000001 f29b3ddc c11e4cca 00000001 f5d30088 000081ed f29b3df0 c11313b7
00000021 00000021 f5d30088 f29b3e08 c1131b45 c11e4c80 f5d30088 00000021
Call Trace:
[<c113bb90>] ? d_splice_alias+0x40/0x50
[<c11b9016>] ? ext4_lookup.part.30+0x56/0x120
[<c11e4cca>] ext4_check_acl+0x4a/0x90
[<c11313b7>] acl_permission_check+0x97/0xa0
[<c1131b45>] generic_permission+0x25/0xc0
[<c11e4c80>] ? ext4_xattr_set_acl+0x160/0x160
[<c1131c79>] inode_permission+0x99/0xd0
[<c11e4c80>] ? ext4_xattr_set_acl+0x160/0x160
[<c1131d1b>] may_open+0x6b/0x110
[<c1134566>] do_last+0x1a6/0x640
[<c113595d>] path_openat+0x9d/0x350
[<c10de692>] ? unlock_page+0x42/0x50
[<c10fb960>] ? __do_fault+0x3b0/0x4b0
[<c1135c41>] do_filp_open+0x31/0x80
[<c124c743>] ? aa_dup_task_context+0x33/0x60
[<c1250eed>] ? apparmor_cred_prepare+0x2d/0x50
[<c112e9ef>] open_exec+0x2f/0x110
[<c112eef7>] ? check_unsafe_exec+0xb7/0xf0
[<c112efba>] do_execve_common+0x8a/0x270
[<c112f1b7>] do_execve+0x17/0x20
[<c100a0a7>] sys_execve+0x37/0x70
[<c15336ae>] ptregs_execve+0x12/0x18
[<c152c8d4>] ? syscall_call+0x7/0xb
Code: 8d 76 00 8d 93 54 01 00 00 8b 32 85 f6 74 e2 8d 43 14 89 55 e4 89 45 f0 e8 2e 7e 34 00 8b 55 e4 8b 32 83 fe ff 74 07 85 f6 74 03 <3e> ff 06 8b 45 f0 e8 25 19 e4 ff 90 83 fe ff 75 b5 81 ff 00 40
EIP: [<c11e4690>] ext4_get_acl+0x80/0x210 SS:ESP 0068:f29b3da4
CR2: 00000000ffffb4ff
---[ end trace b567e6a3070ffb42 ]---
'''
        # oops signature: bug description plus the '?'-free call trace entries
        self.assertEqual(report.crash_signature(), 'kernel paging request:ext4_get_acl+0x80/0x210:ext4_check_acl+0x4a/0x90:acl_permission_check+0x97/0xa0:generic_permission+0x25/0xc0:inode_permission+0x99/0xd0:may_open+0x6b/0x110:do_last+0x1a6/0x640:path_openat+0x9d/0x350:do_filp_open+0x31/0x80:open_exec+0x2f/0x110:do_execve_common+0x8a/0x270:do_execve+0x17/0x20:sys_execve+0x37/0x70:ptregs_execve+0x12/0x18')
def test_nonascii_data(self):
'''methods get along with non-ASCII data'''
# fake os.uname() into reporting a non-ASCII name
uname = os.uname()
uname = (uname[0], b't\xe2\x99\xaax'.decode('UTF-8'), uname[2], uname[3], uname[4])
orig_uname = os.uname
os.uname = lambda: uname
try:
pr = apport.report.Report()
utf8_val = b'\xc3\xa4 ' + uname[1].encode('UTF-8') + b' \xe2\x99\xa5 '
pr['ProcUnicodeValue'] = utf8_val.decode('UTF-8')
pr['ProcByteArrayValue'] = utf8_val
pr.anonymize()
exp_utf8 = b'\xc3\xa4 hostname \xe2\x99\xa5 '
self.assertEqual(pr['ProcUnicodeValue'], exp_utf8.decode('UTF-8'))
self.assertEqual(pr['ProcByteArrayValue'], exp_utf8)
finally:
os.uname = orig_uname
    def test_address_to_offset(self):
        '''_address_to_offset()'''

        pr = apport.report.Report()
        # without a ProcMaps field resolution is a programming error
        self.assertRaises(AssertionError, pr._address_to_offset, 0)
        pr['ProcMaps'] = '''
00400000-004df000 r-xp 00000000 08:02 1044485 /bin/bash
006de000-006df000 r--p 000de000 08:02 1044485 /bin/bash
01596000-01597000 rw-p 00000000 00:00 0
01597000-015a4000 rw-p 00000000 00:00 0 [heap]
7f491f868000-7f491f88a000 r-xp 00000000 08:02 526219 /lib/x86_64-linux-gnu/libtinfo.so.5.9
7f491fa8f000-7f491fc24000 r-xp 00000000 08:02 522605 /lib/x86_64-linux-gnu/libc-2.13.so
7f491fc24000-7f491fe23000 ---p 00195000 08:02 522605 /lib/with spaces !/libfoo.so
7fff6e57b000-7fff6e59c000 rw-p 00000000 00:00 0 [stack]
7fff6e5ff000-7fff6e600000 r-xp 00000000 00:00 0 [vdso]
ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
'''
        # addresses inside a mapping resolve to "<path>+<hex offset>"
        self.assertEqual(pr._address_to_offset(0x41d703), '/bin/bash+1d703')
        self.assertEqual(pr._address_to_offset(0x00007f491fac5687),
                         '/lib/x86_64-linux-gnu/libc-2.13.so+36687')
        # boundary checks around the 006de000-006df000 mapping
        self.assertEqual(pr._address_to_offset(0x006ddfff), None)
        self.assertEqual(pr._address_to_offset(0x006de000), '/bin/bash+0')
        self.assertEqual(pr._address_to_offset(0x006df000), '/bin/bash+1000')
        self.assertEqual(pr._address_to_offset(0x006df001), None)
        # unmapped addresses do not resolve
        self.assertEqual(pr._address_to_offset(0), None)
        self.assertEqual(pr._address_to_offset(0x10), None)
        # paths with spaces must be handled
        self.assertEqual(pr._address_to_offset(0x7f491fc24010),
                         '/lib/with spaces !/libfoo.so+10')
    def test_address_to_offset_arm(self):
        '''_address_to_offset() for ARM /proc/pid/maps'''

        pr = apport.report.Report()
        pr['ProcMaps'] = '''
00008000-0000e000 r-xp 00000000 08:01 13243326 /usr/lib/dconf/dconf-service
00017000-00038000 rw-p 00000000 00:00 0 [heap]
40017000-4001d000 rw-p 00000000 00:00 0
40026000-400f2000 r-xp 00000000 08:01 13110792 /usr/lib/arm-linux-gnueabihf/libgio-2.0.so.0.3400.0
400f2000-400f9000 ---p 000cc000 08:01 13110792 /usr/lib/arm-linux-gnueabihf/libgio-2.0.so.0.3400.0
4020d000-4020f000 rw-p 00000000 00:00 0
4020f000-402e5000 r-xp 00000000 08:01 13108294 /lib/arm-linux-gnueabihf/libc-2.15.so
402e5000-402ed000 ---p 000d6000 08:01 13108294 /lib/arm-linux-gnueabihf/libc-2.15.so
40d21000-40e00000 ---p 00000000 00:00 0
befdf000-bf000000 rw-p 00000000 00:00 0 [stack]
ffff0000-ffff1000 r-xp 00000000 00:00 0 [vectors]
'''
        # 32-bit addresses resolve just like the x86-64 case
        self.assertEqual(pr._address_to_offset(0x402261e6),
                         '/lib/arm-linux-gnueabihf/libc-2.15.so+171e6')
        self.assertEqual(pr._address_to_offset(0x4002601F),
                         '/usr/lib/arm-linux-gnueabihf/libgio-2.0.so.0.3400.0+1f')
def test_address_to_offset_live(self):
'''_address_to_offset() for current /proc/pid/maps'''
# this primarily checks that the parser actually gets along with the
# real /proc/pid/maps and not just with our static test case above
pr = apport.report.Report()
pr.add_proc_info()
self.assertEqual(pr._address_to_offset(0), None)
res = pr._address_to_offset(int(pr['ProcMaps'].split('-', 1)[0], 16) + 5)
self.assertEqual(res.split('+', 1)[1], '5')
self.assertTrue('python' in res.split('+', 1)[0])
    def test_crash_signature_addresses(self):
        '''crash_signature_addresses()'''

        pr = apport.report.Report()
        # nothing set -> no signature
        self.assertEqual(pr.crash_signature_addresses(), None)
        pr['ExecutablePath'] = '/bin/bash'
        pr['Signal'] = '42'
        pr['ProcMaps'] = '''
00400000-004df000 r-xp 00000000 08:02 1044485 /bin/bash
006de000-006df000 r--p 000de000 08:02 1044485 /bin/bash
01596000-01597000 rw-p 00000000 00:00 0
01597000-015a4000 rw-p 00000000 00:00 0 [heap]
7f491f868000-7f491f88a000 r-xp 00000000 08:02 526219 /lib/x86_64-linux-gnu/libtinfo.so.5.9
7f491fa8f000-7f491fc24000 r-xp 00000000 08:02 522605 /lib/x86_64-linux-gnu/libc-2.13.so
7f491fc24000-7f491fe23000 ---p 00195000 08:02 522605 /lib/with spaces !/libfoo.so
7fff6e57b000-7fff6e59c000 rw-p 00000000 00:00 0 [stack]
7fff6e5ff000-7fff6e600000 r-xp 00000000 00:00 0 [vdso]
ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
'''
        # no Stacktrace field
        self.assertEqual(pr.crash_signature_addresses(), None)
        # good stack trace
        pr['Stacktrace'] = '''
#0 0x00007f491fac5687 in kill () at ../sysdeps/unix/syscall-template.S:82
No locals.
#1 0x000000000043fd51 in kill_pid ()
#2 g_main_context_iterate (context=0x1731680) at gmain.c:3068
#3 0x000000000042eb76 in ?? ()
#4 0x00000000004324d8 in ??
No symbol table info available.
#5 0x00000000004707e3 in parse_and_execute ()
#6 0x000000000041d703 in _start ()
'''
        # every address resolves -> full "<path>+<offset>" chain
        self.assertEqual(pr.crash_signature_addresses(),
                         '/bin/bash:42:/lib/x86_64-linux-gnu/libc-2.13.so+36687:/bin/bash+3fd51:/bin/bash+2eb76:/bin/bash+324d8:/bin/bash+707e3:/bin/bash+1d703')
        # all resolvable, but too short
        pr['Stacktrace'] = '#0 0x00007f491fac5687 in kill () at ../sysdeps/unix/syscall-template.S:82'
        self.assertEqual(pr.crash_signature_addresses(), None)
        # one unresolvable, but long enough
        pr['Stacktrace'] = '''
#0 0x00007f491fac5687 in kill () at ../sysdeps/unix/syscall-template.S:82
No locals.
#1 0x000001000043fd51 in kill_pid ()
#2 g_main_context_iterate (context=0x1731680) at gmain.c:3068
#3 0x000000000042eb76 in ?? ()
#4 0x00000000004324d8 in ??
No symbol table info available.
#5 0x00000000004707e3 in parse_and_execute ()
#6 0x000000000041d715 in main ()
#7 0x000000000041d703 in _start ()
'''
        sig = pr.crash_signature_addresses()
        self.assertNotEqual(sig, None)
        # one true unresolvable, and some "low address" artifacts; should be
        # identical to the one above
        pr['Stacktrace'] = '''
#0 0x00007f491fac5687 in kill () at ../sysdeps/unix/syscall-template.S:82
No locals.
#1 0x000001000043fd51 in kill_pid ()
#2 0x0000000000000010 in ??
#3 g_main_context_iterate (context=0x1731680) at gmain.c:3068
#4 0x000000000042eb76 in ?? ()
#5 0x0000000000000000 in ?? ()
#6 0x0000000000000421 in ?? ()
#7 0x00000000004324d8 in ??
No symbol table info available.
#8 0x00000000004707e3 in parse_and_execute ()
#9 0x000000000041d715 in main ()
#10 0x000000000041d703 in _start ()
'''
        self.assertEqual(pr.crash_signature_addresses(), sig)
        # two unresolvables, 2/7 is too much
        pr['Stacktrace'] = '''
#0 0x00007f491fac5687 in kill () at ../sysdeps/unix/syscall-template.S:82
No locals.
#1 0x000001000043fd51 in kill_pid ()
#2 g_main_context_iterate (context=0x1731680) at gmain.c:3068
#3 0x000001000042eb76 in ?? ()
#4 0x00000000004324d8 in ??
No symbol table info available.
#5 0x00000000004707e3 in parse_and_execute ()
#6 0x000000000041d715 in main ()
#7 0x000000000041d703 in _start ()
'''
        self.assertEqual(pr.crash_signature_addresses(), None)
def test_missing_uid(self):
'''check_ignored() works for removed user'''
orig_getuid = os.getuid
os.getuid = lambda: 123456789
try:
pr = apport.report.Report()
pr['ExecutablePath'] = '/bin/bash'
pr.check_ignored()
finally:
os.getuid = orig_getuid
def test_suspend_resume(self):
pr = apport.report.Report()
pr['ProblemType'] = 'KernelOops'
pr['Failure'] = 'suspend/resume'
pr['MachineType'] = 'Cray XT5'
pr['dmi.bios.version'] = 'ABC123 (1.0)'
expected = 'suspend/resume:Cray XT5:ABC123 (1.0)'
self.assertEqual(expected, pr.crash_signature())
# There will not always be a BIOS version
del pr['dmi.bios.version']
expected = 'suspend/resume:Cray XT5'
self.assertEqual(expected, pr.crash_signature())
def test_get_logind_session(self):
ret = apport.Report.get_logind_session(os.getpid())
if ret is None:
# ensure that we don't run under logind, and thus the None is
# justified
with open('/proc/self/cgroup') as f:
contents = f.read()
sys.stdout.write('[not running under logind] ')
sys.stdout.flush()
self.assertNotIn('name=systemd:/user', contents)
return
(session, timestamp) = ret
self.assertNotEqual(session, '')
# session start must be >= 2014-01-01 and "now"
self.assertLess(timestamp, time.time())
self.assertGreater(timestamp,
time.mktime(time.strptime('2014-01-01', '%Y-%m-%d')))
def test_get_timestamp(self):
r = apport.Report()
self.assertAlmostEqual(r.get_timestamp(), time.time(), delta=2)
r['Date'] = 'Thu Jan 9 12:00:00 2014'
# delta is ±12 hours, as this depends on the timezone that the test is
# run in
self.assertAlmostEqual(r.get_timestamp(), 1389265200.0, delta=43200)
del r['Date']
self.assertEqual(r.get_timestamp(), None)
# Allow running this test module directly: python test_report.py
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
jmartinm/invenio-master | modules/bibindex/lib/tokenizers/BibIndexAuthorCountTokenizer.py | 8 | 2105 | # -*- coding:utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibIndexAuthorCountTokenizer: counts number of authors for any publication
given by recID. Will look at tags: '100__a' and '700__a', which are:
'first author name' and 'additional author name'.
"""
from invenio.bibindex_engine_utils import get_field_count
from invenio.bibindex_tokenizers.BibIndexMultiFieldTokenizer import BibIndexMultiFieldTokenizer
class BibIndexAuthorCountTokenizer(BibIndexMultiFieldTokenizer):
    """
    Returns a number of authors who created a publication
    with given recID in the database.
    Takes recID of the record as an argument to tokenizing function.
    Calculates terms based on information from multiple tags.

    For more information on this type of tokenizers take a look on
    the BibIndexMultiFieldTokenizer base class.
    """

    def __init__(self, stemming_language = None, remove_stopwords = False, remove_html_markup = False, remove_latex_markup = False):
        # The text-processing options of the common tokenizer interface are
        # accepted for signature compatibility but intentionally unused:
        # this tokenizer only counts fields, it never processes their text.
        # MARC author-name tags: 100__a (first author), 700__a (additional).
        self.tags = ['100__a', '700__a']

    def tokenize(self, recID):
        """Uses get_field_count from bibindex_engine_utils
           for finding a number of authors of a publication and pass it in the list"""
        # the count is returned as a single-element list of str, matching
        # the "list of terms" contract of tokenize() implementations
        return [str(get_field_count(recID, self.tags)),]

    def get_tokenizing_function(self, wordtable_type):
        # only one tokenizing strategy exists, regardless of wordtable_type
        return self.tokenize
| gpl-2.0 |
lvdongr/spark | examples/src/main/python/mllib/tf_idf_example.py | 126 | 2066 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import HashingTF, IDF
# $example off$
# Note: the "$example on$/$example off$" markers delimit the snippet that is
# extracted into the Spark documentation -- keep them intact.
if __name__ == "__main__":
    sc = SparkContext(appName="TFIDFExample")  # SparkContext

    # $example on$
    # Load documents (one per line).
    documents = sc.textFile("data/mllib/kmeans_data.txt").map(lambda line: line.split(" "))

    hashingTF = HashingTF()
    tf = hashingTF.transform(documents)

    # While applying HashingTF only needs a single pass to the data, applying IDF needs two passes:
    # First to compute the IDF vector and second to scale the term frequencies by IDF.
    tf.cache()
    idf = IDF().fit(tf)
    tfidf = idf.transform(tf)

    # spark.mllib's IDF implementation provides an option for ignoring terms
    # which occur in less than a minimum number of documents.
    # In such cases, the IDF for these terms is set to 0.
    # This feature can be used by passing the minDocFreq value to the IDF constructor.
    idfIgnore = IDF(minDocFreq=2).fit(tf)
    tfidfIgnore = idfIgnore.transform(tf)
    # $example off$

    # Print both weightings so the effect of minDocFreq is visible.
    print("tfidf:")
    for each in tfidf.collect():
        print(each)

    print("tfidfIgnore:")
    for each in tfidfIgnore.collect():
        print(each)

    sc.stop()
| apache-2.0 |
jlegendary/servo | python/mach/mach/commands/settings.py | 96 | 1799 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
from textwrap import TextWrapper
from mach.decorators import (
CommandProvider,
Command,
)
#@CommandProvider
class Settings(object):
    """Interact with settings for mach.

    For now this only exposes read-only views of the available settings.
    Eventually this module will also support editing settings, wizard-style
    config creation, and similar conveniences.
    """

    def __init__(self, context):
        self.settings = context.settings

    @Command('settings-list', category='devenv',
        description='Show available config settings.')
    def list_settings(self):
        """Print every known setting as 'section.option -- summary'."""
        for section in sorted(self.settings):
            options = self.settings[section]
            for option in sorted(options):
                summary, _ = self.settings.option_help(section, option)
                print('%s.%s -- %s' % (section, option, summary))

    @Command('settings-create', category='devenv',
        description='Print a new settings file with usage info.')
    def create(self):
        """Emit a fully documented settings file skeleton.

        Every option is printed commented out (';option =') under its
        '[section]' header, preceded by its full help text wrapped as
        '# '-prefixed comment lines.
        """
        wrapper = TextWrapper(initial_indent='# ', subsequent_indent='# ')
        for section in sorted(self.settings):
            print('[%s]' % section)
            print('')
            for option in sorted(self.settings[section]):
                _, description = self.settings.option_help(section, option)
                print(wrapper.fill(description))
                print(';%s =' % option)
                print('')
| mpl-2.0 |
lmazuel/ansible | lib/ansible/executor/task_queue_manager.py | 7 | 14616 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.stats import AggregateStats
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.vars.hostvars import HostVars
from ansible.vars.reserved import warn_if_reserved
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskQueueManager']
class TaskQueueManager:

    '''
    This class handles the multiprocessing requirements of Ansible by
    creating a pool of worker forks, a result handler fork, and a
    manager object with shared datastructures/queues for coordinating
    work between all processes.

    The queue manager is responsible for loading the play strategy plugin,
    which dispatches the Play's tasks to hosts.
    '''

    # Run result flags. Apart from RUN_OK/RUN_UNKNOWN_ERROR these are bit
    # flags, so a strategy can OR several failure conditions together.
    RUN_OK = 0
    RUN_ERROR = 1
    RUN_FAILED_HOSTS = 2
    RUN_UNREACHABLE_HOSTS = 4
    RUN_FAILED_BREAK_PLAY = 8
    RUN_UNKNOWN_ERROR = 255

    def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self._stats = AggregateStats()
        self.passwords = passwords
        self._stdout_callback = stdout_callback
        self._run_additional_callbacks = run_additional_callbacks
        self._run_tree = run_tree

        # Callback plugins are loaded lazily by load_callbacks().
        self._callbacks_loaded = False
        self._callback_plugins = []
        self._start_at_done = False

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()
        self._listening_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # A temporary file (opened pre-fork) used by connection
        # plugins for inter-process locking.
        self._connection_lockfile = tempfile.TemporaryFile()

    def _initialize_processes(self, num):
        # One [worker_process, result_queue] slot per fork; the process
        # entries start as None and are filled in during the run.
        self._workers = []

        for i in range(num):
            rslt_q = multiprocessing.Queue()
            self._workers.append([None, rslt_q])

    def _initialize_notified_handlers(self, play):
        '''
        Clears and initializes the shared notified handlers dict with entries
        for each handler in the play, which is an empty array that will contain
        inventory hostnames for those hosts triggering the handler.
        '''

        # Zero the dictionary first by removing any entries there.
        # Proxied dicts don't support iteritems, so we have to use keys()
        self._notified_handlers.clear()
        self._listening_handlers.clear()

        def _process_block(b):
            # Recursively flatten nested blocks into a plain task list.
            temp_list = []
            for t in b.block:
                if isinstance(t, Block):
                    temp_list.extend(_process_block(t))
                else:
                    temp_list.append(t)
            return temp_list

        handler_list = []
        for handler_block in play.handlers:
            handler_list.extend(_process_block(handler_block))

        # then initialize it with the given handler list
        for handler in handler_list:
            if handler._uuid not in self._notified_handlers:
                self._notified_handlers[handler._uuid] = []
            if handler.listen:
                # 'listen' may be a single topic or a list of topics; map
                # each topic to the handlers subscribed to it.
                listeners = handler.listen
                if not isinstance(listeners, list):
                    listeners = [listeners]
                for listener in listeners:
                    if listener not in self._listening_handlers:
                        self._listening_handlers[listener] = []
                    self._listening_handlers[listener].append(handler._uuid)

    def load_callbacks(self):
        '''
        Loads all available callbacks, with the exception of those which
        utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
        only one such callback plugin will be loaded.
        '''

        if self._callbacks_loaded:
            return

        stdout_callback_loaded = False
        if self._stdout_callback is None:
            self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK

        if isinstance(self._stdout_callback, CallbackBase):
            # Caller supplied an already-instantiated callback object.
            stdout_callback_loaded = True
        elif isinstance(self._stdout_callback, string_types):
            if self._stdout_callback not in callback_loader:
                raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
            else:
                self._stdout_callback = callback_loader.get(self._stdout_callback)
                stdout_callback_loaded = True
        else:
            raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")

        for callback_plugin in callback_loader.all(class_only=True):
            if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
                # we only allow one callback of type 'stdout' to be loaded, so check
                # the name of the current plugin and type to see if we need to skip
                # loading this callback plugin
                callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
                callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
                (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
                if callback_type == 'stdout':
                    if callback_name != self._stdout_callback or stdout_callback_loaded:
                        continue
                    stdout_callback_loaded = True
                elif callback_name == 'tree' and self._run_tree:
                    # the 'tree' callback is forced on when --tree is used
                    pass
                elif not self._run_additional_callbacks or (callback_needs_whitelist and (
                        C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
                    continue

                self._callback_plugins.append(callback_plugin())

        self._callbacks_loaded = True

    def run(self, play):
        '''
        Iterates over the roles/tasks in a play, using the given (or default)
        strategy for queueing tasks. The default is the linear strategy, which
        operates like classic Ansible by keeping all hosts in lock-step with
        a given task (meaning no hosts move on to the next task until all hosts
        are done with the current task).
        '''

        if not self._callbacks_loaded:
            self.load_callbacks()

        all_vars = self._variable_manager.get_vars(play=play)
        warn_if_reserved(all_vars)
        templar = Templar(loader=self._loader, variables=all_vars)

        new_play = play.copy()
        new_play.post_validate(templar)
        new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers

        self.hostvars = HostVars(
            inventory=self._inventory,
            variable_manager=self._variable_manager,
            loader=self._loader,
        )

        # Fork # of forks, # of hosts or serial, whichever is lowest
        num_hosts = len(self._inventory.get_hosts(new_play.hosts, ignore_restrictions=True))

        max_serial = 0
        if new_play.serial:
            # the play has not been post_validated here, so we may need
            # to convert the scalar value to a list at this point
            serial_items = new_play.serial
            if not isinstance(serial_items, list):
                serial_items = [serial_items]
            max_serial = max([pct_to_int(x, num_hosts) for x in serial_items])

        contenders = [self._options.forks, max_serial, num_hosts]
        contenders = [v for v in contenders if v is not None and v > 0]
        self._initialize_processes(min(contenders))

        play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
        for callback_plugin in self._callback_plugins:
            if hasattr(callback_plugin, 'set_play_context'):
                callback_plugin.set_play_context(play_context)

        self.send_callback('v2_playbook_on_play_start', new_play)

        # initialize the shared dictionary containing the notified handlers
        self._initialize_notified_handlers(new_play)

        # load the specified strategy (or the default linear one)
        strategy = strategy_loader.get(new_play.strategy, self)
        if strategy is None:
            raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

        # build the iterator
        iterator = PlayIterator(
            inventory=self._inventory,
            play=new_play,
            play_context=play_context,
            variable_manager=self._variable_manager,
            all_vars=all_vars,
            start_at_done=self._start_at_done,
        )

        # Because the TQM may survive multiple play runs, we start by marking
        # any hosts as failed in the iterator here which may have been marked
        # as failed in previous runs. Then we clear the internal list of failed
        # hosts so we know what failed this round.
        for host_name in self._failed_hosts.keys():
            host = self._inventory.get_host(host_name)
            iterator.mark_host_failed(host)

        self.clear_failed_hosts()

        # during initialization, the PlayContext will clear the start_at_task
        # field to signal that a matching task was found, so check that here
        # and remember it so we don't try to skip tasks on future plays
        if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
            self._start_at_done = True

        # and run the play using the strategy and cleanup on way out
        play_return = strategy.run(iterator, play_context)

        # now re-save the hosts that failed from the iterator to our internal list
        for host_name in iterator.get_failed_hosts():
            self._failed_hosts[host_name] = True

        strategy.cleanup()
        self._cleanup_processes()
        return play_return

    def cleanup(self):
        # Full shutdown: stop the run, close the shared result queue and
        # terminate any remaining worker processes.
        display.debug("RUNNING CLEANUP")
        self.terminate()
        self._final_q.close()
        self._cleanup_processes()

    def _cleanup_processes(self):
        # Close per-worker result queues and terminate live workers.
        # hasattr guard: _workers only exists after _initialize_processes().
        if hasattr(self, '_workers'):
            for (worker_prc, rslt_q) in self._workers:
                rslt_q.close()
                if worker_prc and worker_prc.is_alive():
                    try:
                        worker_prc.terminate()
                    except AttributeError:
                        pass

    def clear_failed_hosts(self):
        # Reset the record of hosts that failed during the previous run.
        self._failed_hosts = dict()

    def get_inventory(self):
        return self._inventory

    def get_variable_manager(self):
        return self._variable_manager

    def get_loader(self):
        return self._loader

    def get_workers(self):
        # Return a shallow copy so callers cannot mutate our worker list.
        return self._workers[:]

    def terminate(self):
        # Cooperative termination flag; checked elsewhere during the run.
        self._terminated = True

    def has_dead_workers(self):
        # Returns True if any worker process died from SIGKILL (-9),
        # SIGSEGV (-11) or SIGTERM (-15), e.g.:
        # [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
        # <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>
        defunct = False
        for (idx, x) in enumerate(self._workers):
            if hasattr(x[0], 'exitcode'):
                if x[0].exitcode in [-9, -11, -15]:
                    defunct = True
        return defunct

    def send_callback(self, method_name, *args, **kwargs):
        # Dispatch a callback event to the stdout callback plus every other
        # loaded callback plugin, tolerating per-plugin failures.
        for callback_plugin in [self._stdout_callback] + self._callback_plugins:
            # a plugin that set self.disabled to True will not be called
            # see osx_say.py example for such a plugin
            if getattr(callback_plugin, 'disabled', False):
                continue

            # try to find v2 method, fallback to v1 method, ignore callback if no method found
            methods = []
            for possible in [method_name, 'v2_on_any']:
                gotit = getattr(callback_plugin, possible, None)
                if gotit is None:
                    gotit = getattr(callback_plugin, possible.replace('v2_', ''), None)
                if gotit is not None:
                    methods.append(gotit)

            for method in methods:
                try:
                    method(*args, **kwargs)
                except Exception as e:
                    # TODO: add config toggle to make this fatal or not?
                    display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
                    from traceback import format_tb
                    from sys import exc_info
                    display.debug('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
| gpl-3.0 |
waytai/odoo | addons/project/report/project_report.py | 279 | 5789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class report_project_task_user(osv.osv):
    """Read-only reporting model backed by the SQL view built in init()."""
    _name = "report.project.task.user"
    _description = "Tasks by user and project"
    _auto = False  # no table is created automatically; init() builds a view
    _columns = {
        'name': fields.char('Task Summary', readonly=True),
        'user_id': fields.many2one('res.users', 'Assigned To', readonly=True),
        'reviewer_id': fields.many2one('res.users', 'Reviewer', readonly=True),
        'date_start': fields.datetime('Assignation Date', readonly=True),
        'no_of_days': fields.integer('# of Days', size=128, readonly=True),
        'date_end': fields.datetime('Ending Date', readonly=True),
        'date_deadline': fields.date('Deadline', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'project_id': fields.many2one('project.project', 'Project', readonly=True),
        'hours_planned': fields.float('Planned Hours', readonly=True),
        'hours_effective': fields.float('Effective Hours', readonly=True),
        'hours_delay': fields.float('Avg. Plan.-Eff.', readonly=True),
        'remaining_hours': fields.float('Remaining Hours', readonly=True),
        'progress': fields.float('Progress', readonly=True, group_operator='avg'),
        'total_hours': fields.float('Total Hours', readonly=True),
        'closing_days': fields.float('Days to Close', digits=(16,2), readonly=True, group_operator="avg",
                                       help="Number of Days to close the task"),
        'opening_days': fields.float('Days to Assign', digits=(16,2), readonly=True, group_operator="avg",
                                       help="Number of Days to Open the task"),
        'delay_endings_days': fields.float('Overpassed Deadline', digits=(16,2), readonly=True),
        'nbr': fields.integer('# of Tasks', readonly=True),  # TDE FIXME master: rename into nbr_tasks
        'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')],
            string='Priority', size=1, readonly=True),
        'state': fields.selection([('normal', 'In Progress'),('blocked', 'Blocked'),('done', 'Ready for next stage')],'Status', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Contact', readonly=True),
        'stage_id': fields.many2one('project.task.type', 'Stage'),
    }
    _order = 'name desc, project_id'

    def init(self, cr):
        """(Re)create the report_project_task_user SQL view.

        Runs at module install/update time.  The view exposes one row per
        active project_task with derived measures: durations converted from
        epoch seconds to days (no_of_days, closing_days, opening_days,
        delay_endings_days) and the task's hour/progress figures."""
        tools.sql.drop_view_if_exists(cr, 'report_project_task_user')
        cr.execute("""
            CREATE view report_project_task_user as
              SELECT
                    (select 1 ) AS nbr,
                    t.id as id,
                    t.date_start as date_start,
                    t.date_end as date_end,
                    t.date_last_stage_update as date_last_stage_update,
                    t.date_deadline as date_deadline,
                    abs((extract('epoch' from (t.write_date-t.date_start)))/(3600*24))  as no_of_days,
                    t.user_id,
                    t.reviewer_id,
                    progress as progress,
                    t.project_id,
                    t.effective_hours as hours_effective,
                    t.priority,
                    t.name as name,
                    t.company_id,
                    t.partner_id,
                    t.stage_id as stage_id,
                    t.kanban_state as state,
                    remaining_hours as remaining_hours,
                    total_hours as total_hours,
                    t.delay_hours as hours_delay,
                    planned_hours as hours_planned,
                    (extract('epoch' from (t.write_date-t.create_date)))/(3600*24)  as closing_days,
                    (extract('epoch' from (t.date_start-t.create_date)))/(3600*24)  as opening_days,
                    (extract('epoch' from (t.date_deadline-(now() at time zone 'UTC'))))/(3600*24)  as delay_endings_days
              FROM project_task t
                WHERE t.active = 'true'
                GROUP BY
                    t.id,
                    remaining_hours,
                    t.effective_hours,
                    progress,
                    total_hours,
                    planned_hours,
                    hours_delay,
                    create_date,
                    write_date,
                    date_start,
                    date_end,
                    date_deadline,
                    date_last_stage_update,
                    t.user_id,
                    t.reviewer_id,
                    t.project_id,
                    t.priority,
                    name,
                    t.company_id,
                    t.partner_id,
                    stage_id
        """)
| agpl-3.0 |
joergpatz/PokemonGo-Bot | pokemongo_bot/test/follow_cluster_test.py | 18 | 2041 | import unittest, pickle, os
from mock import patch
from pokemongo_bot.cell_workers.follow_cluster import FollowCluster
class FollowClusterTestCase(unittest.TestCase):
    """Tests for FollowCluster.work() against the pickled example forts."""

    # Cluster centre of the example forts for radius=50, lured=False.
    _EXPECTED = (37.397183750142624, -5.9932912500000013)

    def _run_follow_cluster(self, mock_pokemongo_bot, position):
        """Shared fixture: put the mocked bot at *position*, run
        FollowCluster.work() once, assert the returned destination is the
        expected cluster centre, and return the worker for further
        state assertions."""
        forts_path = os.path.join(os.path.dirname(__file__), 'resources', 'example_forts.pickle')
        with open(forts_path, 'rb') as forts:
            ex_forts = pickle.load(forts)
        config = {'radius': 50, 'lured': False}
        mock_pokemongo_bot.position = position
        mock_pokemongo_bot.config.walk = 4.16
        mock_pokemongo_bot.get_forts.return_value = ex_forts

        follow_cluster = FollowCluster(mock_pokemongo_bot, config)
        result = follow_cluster.work()

        self.assertAlmostEqual(self._EXPECTED[0], result[0], delta=0.000000000010000)
        self.assertAlmostEqual(self._EXPECTED[1], result[1], delta=0.000000000010000)
        return follow_cluster

    @patch('pokemongo_bot.PokemonGoBot')
    def testWorkAway(self, mock_pokemongo_bot):
        """Bot away from the cluster: destination returned, not arrived yet."""
        follow_cluster = self._run_follow_cluster(mock_pokemongo_bot, (37.396787, -5.994587))
        self.assertFalse(follow_cluster.is_at_destination)
        self.assertFalse(follow_cluster.announced)

    @patch('pokemongo_bot.PokemonGoBot')
    def testWorkArrived(self, mock_pokemongo_bot):
        """Bot already at the cluster centre: work() reports arrival."""
        follow_cluster = self._run_follow_cluster(mock_pokemongo_bot, (37.39718375014263, -5.9932912500000013))
        self.assertTrue(follow_cluster.is_at_destination)
        self.assertFalse(follow_cluster.announced)
| mit |
imincik/pkg-qgis-1.8 | python/plugins/fTools/tools/doGeoprocessing.py | 1 | 66760 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
# Geoprocessing functions adapted from 'Geoprocessing Plugin',
# (C) 2008 by Dr. Horst Duester, Stefan Ziegler
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_frmGeoprocessing import Ui_Dialog
import ftools_utils
import sys
class GeoprocessingDialog( QDialog, Ui_Dialog ):
    def __init__( self, iface, function ):
        """Build the geoprocessing dialog.

        iface -- the QGIS interface object.
        function -- integer selecting the operation (1=Buffer, 2=Convex hull,
        3=Difference, 4=Dissolve, 5=Intersection, 6=Union, 7=Symmetrical
        difference, 8=Clip; see the mapping comment above geoprocessing()).
        """
        QDialog.__init__( self, iface.mainWindow() )
        self.iface = iface
        self.setupUi( self )
        # The buffer-distance box only accepts numeric input.
        self.param.setValidator(QDoubleValidator(self.param))
        self.myFunction = function
        QObject.connect( self.btnBrowse, SIGNAL( "clicked()" ), self.outFile )
        QObject.connect( self.inShapeA, SIGNAL( "currentIndexChanged(QString)" ), self.checkA )
        QObject.connect( self.inShapeB, SIGNAL( "currentIndexChanged(QString)" ), self.checkB )
        # Buffer (1), convex hull (2) and dissolve (4) use the attribute
        # combo, so refresh it whenever the input layer changes.
        if function == 4 or function == 1 or function == 2:
            QObject.connect( self.inShapeA, SIGNAL( "currentIndexChanged(QString)" ), self.update )
        self.manageGui()
        self.success = False
        self.cancel_close = self.buttonBox_2.button( QDialogButtonBox.Close )
        self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok )
        self.progressBar.setValue (0 )
def checkA( self ):
inputLayer = unicode( self.inShapeA.currentText() )
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName( inputLayer )
if changedLayer.selectedFeatureCount() != 0:
self.useSelectedA.setCheckState( Qt.Checked )
else:
self.useSelectedA.setCheckState( Qt.Unchecked )
def checkB( self ):
inputLayer = unicode( self.inShapeB.currentText() )
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName( inputLayer )
if changedLayer.selectedFeatureCount() != 0:
self.useSelectedB.setCheckState( Qt.Checked )
else:
self.useSelectedB.setCheckState( Qt.Unchecked )
    def update( self ):
        """Refresh the attribute combo with the fields of input layer A.

        For dissolve (function 4) an extra '--- Dissolve all ---'
        pseudo-entry is appended after the real fields."""
        self.attrib.clear()
        inputLayer = unicode( self.inShapeA.currentText() )
        if inputLayer != "":
            changedLayer = ftools_utils.getVectorLayerByName( inputLayer )
            changedField = changedLayer.dataProvider().fields()
            # NOTE(review): assumes the QGIS 1.x API where fields() returns a
            # dict of index -> QgsField, so iterating yields indexes -- confirm
            # before porting to the 2.x QgsFields API.
            for i in changedField:
                self.attrib.addItem( unicode( changedField[i].name() ) )
            if self.myFunction == 4:
                self.attrib.addItem( "--- " + self.tr( "Dissolve all" ) + " ---" )
    def accept( self ):
        """Validate the dialog inputs, then launch the geoprocessing run.

        Shows a warning and returns without running when a required input
        is missing: input layer, second layer (when visible), buffer value,
        dissolve field, output path, or a 'Use selected' checkbox with no
        actual selection."""
        if self.inShapeA.currentText() == "":
            QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify an input layer" ) )
        elif self.inShapeB.isVisible() and self.inShapeB.currentText() == "":
            QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify a difference/intersect/union layer" ) )
        elif self.param.isEnabled() and self.param.isVisible() and self.param.text() == "":
            QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify valid buffer value" ) )
        elif self.attrib.isEnabled() and self.attrib.isVisible() and self.attrib.currentText() == "":
            QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify dissolve field" ) )
        elif self.outShape.text() == "":
            QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify output shapefile" ) )
        else:
            changedLayerA = ftools_utils.getVectorLayerByName( self.inShapeA.currentText() )
            changedLayerB = ftools_utils.getVectorLayerByName( self.inShapeB.currentText() )
            # check for selection in layer A
            if self.useSelectedA.isChecked() and changedLayerA.selectedFeatureCount() == 0:
                QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "No features selected, please uncheck 'Use selected' or make a selection" ) )
            # check for selection in layer B
            elif self.inShapeB.isVisible() and self.useSelectedB.isChecked() and changedLayerB.selectedFeatureCount() == 0:
                QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "No features selected, please uncheck 'Use selected' or make a selection" ) )
            else:
                self.outShape.clear()
                if self.attrib.isEnabled():
                    # dissolve / hull-by-field: pass the chosen field name as
                    # the operation parameter
                    self.geoprocessing( self.inShapeA.currentText(), self.inShapeB.currentText(),
                        unicode( self.attrib.currentText() ), self.mergeOutput.checkState(), self.useSelectedA.checkState(),
                        self.useSelectedB.checkState(), self.spnSegments.value() )
                else:
                    # otherwise the parameter is the buffer distance (or None
                    # when the operation takes no parameter)
                    if self.param.isEnabled() and self.param.isVisible():
                        parameter = float( self.param.text() )
                    else:
                        parameter = None
                    self.geoprocessing( self.inShapeA.currentText(), self.inShapeB.currentText(),
                        parameter, self.mergeOutput.checkState(), self.useSelectedA.checkState(), self.useSelectedB.checkState(), self.spnSegments.value() )
def outFile( self ):
self.outShape.clear()
( self.shapefileName, self.encoding ) = ftools_utils.saveDialog( self )
if self.shapefileName is None or self.encoding is None:
return
self.outShape.setText( QString( self.shapefileName ) )
    def manageGui( self ):
        """Adapt the dialog widgets to the selected operation.

        Hides/renames the second-layer combo, parameter box, radio buttons,
        segments spinner and merge checkbox depending on self.myFunction,
        sets the window title, then (re)populates the layer combos."""
        if self.myFunction == 1: # Buffer
            self.label_2.hide()
            self.inShapeB.hide()
            self.useSelectedB.hide()
            self.label_4.hide()
            self.setWindowTitle( self.tr( "Buffer(s)" ) )
        elif self.myFunction == 2: # Convex hull
            self.label_2.hide()
            self.inShapeB.hide()
            self.useSelectedB.hide()
            self.rdoBuffer.setText( self.tr( "Create single minimum convex hull" ) )
            self.rdoField.setText( self.tr( "Create convex hulls based on input field" ) )
            self.label_4.hide()
            self.param.hide()
            self.lblSegments.hide()
            self.spnSegments.hide()
            self.setWindowTitle( self.tr( "Convex hull(s)" ) )
            self.mergeOutput.hide()
        elif self.myFunction == 4: # Dissolve
            self.label_2.hide()
            self.inShapeB.hide()
            self.useSelectedB.hide()
            self.rdoBuffer.hide()
            self.attrib.setEnabled( True )
            self.param.hide()
            self.rdoField.hide()
            self.mergeOutput.hide()
            self.lblSegments.hide()
            self.spnSegments.hide()
            self.setWindowTitle( self.tr( "Dissolve" ) )
        else:
            # Two-layer operations: difference, intersect, symmetrical
            # difference, clip, union.
            self.rdoBuffer.hide()
            self.param.hide()
            self.label_4.hide()
            self.rdoField.hide()
            self.attrib.hide()
            self.mergeOutput.hide()
            self.lblSegments.hide()
            self.spnSegments.hide()
            if self.myFunction == 3: # Difference
                self.label_2.setText( self.tr( "Difference layer" ) )
                self.setWindowTitle( self.tr( "Difference" ) )
            elif self.myFunction == 5: # Intersect
                self.label_2.setText( self.tr( "Intersect layer" ) )
                self.setWindowTitle( self.tr( "Intersect" ) )
            elif self.myFunction == 7: # Symmetrical difference
                self.label_2.setText( self.tr( "Difference layer" ) )
                self.setWindowTitle( self.tr( "Symetrical difference" ) )
                self.useSelectedA.hide()
                self.useSelectedB.hide()
            elif self.myFunction == 8: # Clip
                self.label_2.setText( self.tr( "Clip layer" ) )
                self.setWindowTitle( self.tr( "Clip" ) )
            else: # Union
                self.label_2.setText( self.tr( "Union layer" ) )
                self.setWindowTitle( self.tr( "Union" ) )
                self.useSelectedA.hide()
                self.useSelectedB.hide()
            self.resize(381, 100)
        self.populateLayers()
def populateLayers( self ):
myListA = []
myListB = []
self.inShapeA.clear()
self.inShapeB.clear()
if self.myFunction == 4:
myListA = ftools_utils.getLayerNames( [ QGis.Polygon ] )
myListB = []
else:
myListA = ftools_utils.getLayerNames( [ QGis.Point, QGis.Line, QGis.Polygon ] )
myListB = ftools_utils.getLayerNames( [ QGis.Point, QGis.Line, QGis.Polygon ] )
self.inShapeA.addItems( myListA )
self.inShapeB.addItems( myListB )
#1: Buffer
#2: Convex Hull
#3: Difference
#4: Dissolve
#5: Intersection
#6: Union
    #7: Symmetrical Difference
#8: Clip
    def geoprocessing( self, myLayerA, myLayerB, myParam, myMerge, mySelectionA, mySelectionB, mySegments ):
        """Start the background worker thread for the chosen operation.

        Deletes any pre-existing output shapefile first (aborting with a
        warning if that fails), disables OK, wires the thread's progress/
        completion signals to the dialog slots and turns the Close button
        into Cancel for the duration of the run."""
        check = QFile( self.shapefileName )
        if check.exists():
            if not QgsVectorFileWriter.deleteShapeFile( self.shapefileName ):
                QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Unable to delete existing shapefile." ) )
                return
        self.buttonOk.setEnabled( False )
        self.testThread = geoprocessingThread( self.iface.mainWindow(), self, self.myFunction, myLayerA,
            myLayerB, myParam, myMerge, mySelectionA, mySelectionB, mySegments, self.shapefileName, self.encoding )
        QObject.connect( self.testThread, SIGNAL( "runFinished(PyQt_PyObject)" ), self.runFinishedFromThread )
        QObject.connect( self.testThread, SIGNAL( "runStatus(PyQt_PyObject)" ), self.runStatusFromThread )
        QObject.connect( self.testThread, SIGNAL( "runRange(PyQt_PyObject)" ), self.runRangeFromThread )
        self.cancel_close.setText( self.tr("Cancel") )
        QObject.connect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
        self.testThread.start()
        return True
    def cancelThread( self ):
        """Stop the running worker thread and re-enable the OK button."""
        self.testThread.stop()
        self.buttonOk.setEnabled( True )
  def runFinishedFromThread( self, results ):
    """Handle completion of the worker thread.

    results is the tuple (geos_ok, feature_ok, crs_match, error) returned
    by the geoprocessingThread routines: the first two booleans are False
    when a GEOS operation or a feature write raised; crs_match is None
    when CRS information was missing on an input layer; error is a
    writer/field-name error message or None.
    """
    self.testThread.stop()
    self.buttonOk.setEnabled( True )
    self.cancel_close.setText( self.tr("Close") )
    QObject.disconnect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
    out_text = ""
    # a non-None error means no output file was produced at all
    if results[3] is not None:
      QMessageBox.warning( self, self.tr( "Geoprocessing" ),
                self.tr( "No output created. File creation error:\n%1" )
                .arg( results[3] ) )
      return
    # any False/None flag downgrades the dialog text to a warning
    if (not results[2] is None and not results[2]) or not results[1] or not results [0]:
      out_text = self.tr( "\nWarnings:" )
      end_text = self.tr( "\nSome output geometries may be missing or invalid.\n\nWould you like to add the new layer anyway?" )
    else:
      out_text = "\n"
      end_text = self.tr( "\n\nWould you like to add the new layer to the TOC?" )
    # results[2]: False = CRS mismatch, None = CRS information missing
    if not results[2] is None:
      if not results[2]:
        out_text = out_text + self.tr( "\nInput CRS error: Different input coordinate reference systems detected, results may not be as expected.")
    else:
      out_text = out_text + self.tr( "\nInput CRS error: One or more input layers missing coordinate reference information, results may not be as expected.")
    if not results[1]:
      out_text = out_text + self.tr( "\nFeature geometry error: One or more output features ignored due to invalid geometry.")
    if not results[0]:
      out_text = out_text + self.tr( "\nGEOS geoprocessing error: One or more input features have invalid geometry.")
    addToTOC = QMessageBox.question( self, self.tr("Geoprocessing"), self.tr( "Created output shapefile:\n%1\n%2%3" ).arg( unicode( self.shapefileName ) ).arg( out_text ).arg( end_text ), QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton )
    if addToTOC == QMessageBox.Yes:
      if not ftools_utils.addShapeToCanvas( unicode( self.shapefileName ) ):
        QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Error loading output shapefile:\n%1" ).arg( unicode( self.shapefileName ) ))
      self.populateLayers()
  def runStatusFromThread( self, status ):
    """Advance the progress bar to the value reported by the thread."""
    self.progressBar.setValue( status )
  def runRangeFromThread( self, range_vals ):
    """Set the progress bar bounds from the (min, max) pair reported by
    the thread."""
    self.progressBar.setRange( range_vals[ 0 ], range_vals[ 1 ] )
class geoprocessingThread( QThread ):
  def __init__( self, parentThread, parentObject, function, myLayerA, myLayerB,
  myParam, myMerge, mySelectionA, mySelectionB, mySegments, myName, myEncoding ):
    """Store the job parameters; the actual work happens in run().

    function selects the operation (1 buffer, 2 convex hull, 3 difference,
    4 dissolve, 5 intersection, 6 union, 7 symmetrical difference,
    8 clip).  myParam is the buffer distance / group field, myMerge the
    dissolve-buffers flag, mySelectionA/B restrict processing to selected
    features, mySegments the buffer segment count, and myName/myEncoding
    describe the output shapefile.
    """
    QThread.__init__( self, parentThread )
    self.parent = parentObject
    # set True by run(), cleared by stop()
    self.running = False
    self.myFunction = function
    self.myLayerA = myLayerA
    self.myLayerB = myLayerB
    self.myParam = myParam
    self.myMerge = myMerge
    self.mySelectionA = mySelectionA
    self.mySelectionB = mySelectionB
    self.mySegments = int( mySegments )
    self.myName = myName
    self.myEncoding = myEncoding
def run( self ):
self.running = True
self.vlayerA = ftools_utils.getVectorLayerByName( self.myLayerA )
error = None
if self.myFunction == 1 or self.myFunction == 2 or self.myFunction == 4:
( self.myParam, useField ) = self.checkParameter( self.vlayerA, self.myParam )
if not self.myParam is None:
if self.myFunction == 1:
geos, feature, match, error = self.buffering( useField )
elif self.myFunction == 2:
geos, feature, match, error = self.convex_hull( useField )
elif self.myFunction == 4:
geos, feature, match, error = self.dissolve( useField )
else:
self.vlayerB = ftools_utils.getVectorLayerByName( self.myLayerB )
if self.myFunction == 3:
geos, feature, match, error = self.difference()
elif self.myFunction == 5:
geos, feature, match, error = self.intersect()
elif self.myFunction == 6:
geos, feature, match, error = self.union()
elif self.myFunction == 7:
geos, feature, match, error = self.symetrical_difference()
elif self.myFunction == 8:
geos, feature, match, error = self.clip()
self.emit( SIGNAL( "runFinished(PyQt_PyObject)" ), (geos, feature, match, error) )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
  def stop(self):
    """Mark the thread as no longer running.

    NOTE(review): the processing loops visible in this class do not poll
    self.running, so this does not interrupt a routine in progress —
    confirm intended behavior against the cancel handling in the dialog.
    """
    self.running = False
def buffering( self, useField ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrs = vproviderA.attributeIndexes()
vproviderA.select( allAttrs )
fields = vproviderA.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
QGis.WKBPolygon, vproviderA.crs() )
# check if writer was created properly, if not, return with error
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, True, writer.errorMessage()
outFeat = QgsFeature()
inFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# with dissolve
if self.myMerge:
first = True
for inFeat in selectionA:
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].doDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
if first:
tempGeom = QgsGeometry( outGeom )
first = False
else:
try:
tempGeom = tempGeom.combine( outGeom )
except:
GEOS_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
try:
outFeat.setGeometry( tempGeom )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
# without dissolve
else:
for inFeat in selectionA:
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].toDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
try:
outFeat.setGeometry( outGeom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# with dissolve
if self.myMerge:
first = True
while vproviderA.nextFeature( inFeat ):
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].toDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
if first:
tempGeom = QgsGeometry( outGeom )
first = False
else:
try:
tempGeom = tempGeom.combine( outGeom )
except:
GEOS_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
try:
outFeat.setGeometry( tempGeom )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
# without dissolve
else:
while vproviderA.nextFeature( inFeat ):
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].toDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
try:
outFeat.setGeometry( outGeom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, True, None
def convex_hull(self, useField ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select(allAttrsA)
fields = vproviderA.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
QGis.WKBPolygon, vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, True, writer.errorMessage()
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
if useField:
unique = ftools_utils.getUniqueValues( vproviderA, self.myParam )
nFeat = nFeat * len( unique )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for i in unique:
hull = []
first = True
outID = 0
for inFeat in selectionA:
atMap = inFeat.attributeMap()
idVar = atMap[ self.myParam ]
if idVar.toString().trimmed() == i.toString().trimmed():
if first:
outID = idVar
first = False
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
if len( hull ) >= 3:
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
(area, perim) = self.simpleMeasure( outGeom )
outFeat.addAttribute( 0, QVariant( outID ) )
outFeat.addAttribute( 1, QVariant( area ) )
outFeat.addAttribute( 2, QVariant( perim ) )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
continue
else:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
hull = []
for inFeat in selectionA:
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
if useField:
unique = ftools_utils.getUniqueValues( vproviderA, self.myParam )
nFeat = nFeat * len( unique )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for i in unique:
hull = []
first = True
outID = 0
while vproviderA.nextFeature( inFeat ):
atMap = inFeat.attributeMap()
idVar = atMap[ self.myParam ]
if idVar.toString().trimmed() == i.toString().trimmed():
if first:
outID = idVar
first = False
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
if len( hull ) >= 3:
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
(area, perim) = self.simpleMeasure( outGeom )
outFeat.addAttribute( 0, QVariant( outID ) )
outFeat.addAttribute( 1, QVariant( area ) )
outFeat.addAttribute( 2, QVariant( perim ) )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
continue
else:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
hull = []
while vproviderA.nextFeature( inFeat ):
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, True, None
  def dissolve( self, useField ):
    """Dissolve layer A's features into combined geometries.

    Without useField all features merge into one output feature carrying
    the first feature's attributes.  With useField one merged feature is
    written per unique value of the attribute at index self.myParam.
    Honours the feature selection when self.mySelectionA is set.

    Returns (geos_ok, feature_ok, True, error_or_None).
    """
    GEOS_EXCEPT = True
    FEATURE_EXCEPT = True
    vproviderA = self.vlayerA.dataProvider()
    allAttrsA = vproviderA.attributeIndexes()
    fields = vproviderA.fields()
    writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
    vproviderA.geometryType(), vproviderA.crs() )
    if writer.hasError():
      return GEOS_EXCEPT, FEATURE_EXCEPT, True, writer.errorMessage()
    inFeat = QgsFeature()
    outFeat = QgsFeature()
    nElement = 0
    attrs = None
    vproviderA.rewind()
    vproviderA.select( allAttrsA )
    # there is selection in input layer
    if self.mySelectionA:
      nFeat = self.vlayerA.selectedFeatureCount()
      selectionA = self.vlayerA.selectedFeatures()
      if not useField:
        self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
        self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
        first = True
        for inFeat in selectionA:
          nElement += 1
          self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
          if first:
            # first feature seeds the result geometry and the attributes
            attrs = inFeat.attributeMap()
            tmpInGeom = QgsGeometry( inFeat.geometry() )
            outFeat.setGeometry( tmpInGeom )
            first = False
          else:
            tmpInGeom = QgsGeometry( inFeat.geometry() )
            tmpOutGeom = QgsGeometry( outFeat.geometry() )
            try:
              tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
              outFeat.setGeometry( tmpOutGeom )
            except:
              GEOS_EXCEPT = False
              continue
        outFeat.setAttributeMap( attrs )
        writer.addFeature( outFeat )
      else:
        # NOTE(review): unique values come from the whole provider, not
        # the selection, so some passes may match nothing; 'add' stays
        # False for those and no feature is written.
        unique = vproviderA.uniqueValues( int( self.myParam ) )
        nFeat = nFeat * len( unique )
        self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
        self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
        for item in unique:
          first = True
          add = False
          vproviderA.rewind()
          vproviderA.select( allAttrsA )
          for inFeat in selectionA:
            nElement += 1
            self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
            atMap = inFeat.attributeMap()
            tempItem = atMap[ self.myParam ]
            if tempItem.toString().trimmed() == item.toString().trimmed():
              add = True
              if first:
                # NOTE(review): this bare QgsGeometry(...) call discards
                # its result; apparently a leftover — confirm.
                QgsGeometry( inFeat.geometry() )
                tmpInGeom = QgsGeometry( inFeat.geometry() )
                outFeat.setGeometry( tmpInGeom )
                first = False
                attrs = inFeat.attributeMap()
              else:
                tmpInGeom = QgsGeometry( inFeat.geometry() )
                tmpOutGeom = QgsGeometry( outFeat.geometry() )
                try:
                  tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
                  outFeat.setGeometry( tmpOutGeom )
                except:
                  GEOS_EXCEPT = False
                  add = False
          if add:
            outFeat.setAttributeMap( attrs )
            writer.addFeature( outFeat )
    # there is no selection in input layer
    else:
      nFeat = vproviderA.featureCount()
      if not useField:
        self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
        self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
        first = True
        while vproviderA.nextFeature( inFeat ):
          nElement += 1
          self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
          if first:
            attrs = inFeat.attributeMap()
            tmpInGeom = QgsGeometry( inFeat.geometry() )
            outFeat.setGeometry( tmpInGeom )
            first = False
          else:
            tmpInGeom = QgsGeometry( inFeat.geometry() )
            tmpOutGeom = QgsGeometry( outFeat.geometry() )
            try:
              tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
              outFeat.setGeometry( tmpOutGeom )
            except:
              GEOS_EXCEPT = False
              continue
        outFeat.setAttributeMap( attrs )
        writer.addFeature( outFeat )
      else:
        unique = vproviderA.uniqueValues( int( self.myParam ) )
        nFeat = nFeat * len( unique )
        self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
        self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
        for item in unique:
          first = True
          # 'add' defaults True here (unlike the selection branch above,
          # which never sets it on match): every unique value from the
          # full scan matches at least one feature, so the write happens
          # unless a GEOS exception clears the flag.
          add = True
          vproviderA.rewind()
          vproviderA.select( allAttrsA )
          while vproviderA.nextFeature( inFeat ):
            nElement += 1
            self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
            atMap = inFeat.attributeMap()
            tempItem = atMap[ self.myParam ]
            if tempItem.toString().trimmed() == item.toString().trimmed():
              if first:
                # NOTE(review): result of this call is discarded — confirm.
                QgsGeometry( inFeat.geometry() )
                tmpInGeom = QgsGeometry( inFeat.geometry() )
                outFeat.setGeometry( tmpInGeom )
                first = False
                attrs = inFeat.attributeMap()
              else:
                tmpInGeom = QgsGeometry( inFeat.geometry() )
                tmpOutGeom = QgsGeometry( outFeat.geometry() )
                try:
                  tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
                  outFeat.setGeometry( tmpOutGeom )
                except:
                  GEOS_EXCEPT = False
                  add = False
          if add:
            outFeat.setAttributeMap( attrs )
            writer.addFeature( outFeat )
    del writer
    return GEOS_EXCEPT, FEATURE_EXCEPT, True, None
  def difference( self ):
    """Write layer A minus layer B: each A feature with the geometry of
    every overlapping B feature subtracted, keeping A's attributes.

    Honours feature selections on either layer when self.mySelectionA /
    self.mySelectionB are set.

    Returns (geos_ok, feature_ok, crs_match, error_or_None); crs_match
    is None when either layer lacks a valid CRS.
    """
    GEOS_EXCEPT = True
    FEATURE_EXCEPT = True
    vproviderA = self.vlayerA.dataProvider()
    allAttrsA = vproviderA.attributeIndexes()
    vproviderA.select( allAttrsA )
    vproviderB = self.vlayerB.dataProvider()
    allAttrsB = vproviderB.attributeIndexes()
    vproviderB.select( allAttrsB )
    fields = vproviderA.fields()
    # check for crs compatibility
    crsA = vproviderA.crs()
    crsB = vproviderB.crs()
    if not crsA.isValid() or not crsB.isValid():
      crs_match = None
    else:
      crs_match = crsA == crsB
    writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
    vproviderA.geometryType(), vproviderA.crs() )
    if writer.hasError():
      return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
    inFeatA = QgsFeature()
    inFeatB = QgsFeature()
    outFeat = QgsFeature()
    nElement = 0
    # spatial index over layer B for bounding-box candidate lookup
    index = ftools_utils.createIndex( vproviderB )
    vproviderB.rewind()
    vproviderB.select( allAttrsB )
    # there is selection in input layer
    if self.mySelectionA:
      nFeat = self.vlayerA.selectedFeatureCount()
      selectionA = self.vlayerA.selectedFeatures()
      self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
      self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
      # we have selection in overlay layer
      if self.mySelectionB:
        selectionB = self.vlayerB.selectedFeaturesIds()
        for inFeatA in selectionA:
          nElement += 1
          self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
          add = True
          geom = QgsGeometry( inFeatA.geometry() )
          diff_geom = QgsGeometry( geom )
          atMap = inFeatA.attributeMap()
          intersects = index.intersects( geom.boundingBox() )
          for id in intersects:
            # is intersect feature in selection
            if id in selectionB:
              vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
              tmpGeom = QgsGeometry( inFeatB.geometry() )
              try:
                if diff_geom.intersects( tmpGeom ):
                  diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
              except:
                GEOS_EXCEPT = False
                add = False
                break
          if add:
            try:
              outFeat.setGeometry( diff_geom )
              outFeat.setAttributeMap( atMap )
              writer.addFeature( outFeat )
            except:
              FEATURE_EXCEPT = False
              continue
      # we have no selection in overlay layer
      else:
        for inFeatA in selectionA:
          nElement += 1
          self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
          add = True
          geom = QgsGeometry( inFeatA.geometry() )
          diff_geom = QgsGeometry( geom )
          atMap = inFeatA.attributeMap()
          intersects = index.intersects( geom.boundingBox() )
          for id in intersects:
            vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
            tmpGeom = QgsGeometry( inFeatB.geometry() )
            try:
              if diff_geom.intersects( tmpGeom ):
                diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
            except:
              GEOS_EXCEPT = False
              add = False
              break
          if add:
            try:
              outFeat.setGeometry( diff_geom )
              outFeat.setAttributeMap( atMap )
              writer.addFeature( outFeat )
            except:
              FEATURE_EXCEPT = False
              continue
    # there is no selection in input layer
    else:
      nFeat = vproviderA.featureCount()
      self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
      self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
      vproviderA.rewind()
      vproviderA.select( allAttrsA )
      # we have selection in overlay layer
      if self.mySelectionB:
        selectionB = self.vlayerB.selectedFeaturesIds()
        while vproviderA.nextFeature( inFeatA ):
          nElement += 1
          add = True
          self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
          geom = QgsGeometry( inFeatA.geometry() )
          diff_geom = QgsGeometry( geom )
          atMap = inFeatA.attributeMap()
          intersects = index.intersects( geom.boundingBox() )
          for id in intersects:
            # now check if id in selection
            if id in selectionB:
              vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
              tmpGeom = QgsGeometry( inFeatB.geometry() )
              try:
                if diff_geom.intersects( tmpGeom ):
                  diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
              except:
                GEOS_EXCEPT = False
                add = False
                break
          if add:
            try:
              outFeat.setGeometry( diff_geom )
              outFeat.setAttributeMap( atMap )
              writer.addFeature( outFeat )
            except:
              FEATURE_EXCEPT = False
              continue
      # we have no selection in overlay layer
      else:
        while vproviderA.nextFeature( inFeatA ):
          nElement += 1
          add = True
          self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
          geom = QgsGeometry( inFeatA.geometry() )
          diff_geom = QgsGeometry( geom )
          atMap = inFeatA.attributeMap()
          intersects = index.intersects( geom.boundingBox() )
          for id in intersects:
            vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
            tmpGeom = QgsGeometry( inFeatB.geometry() )
            try:
              if diff_geom.intersects( tmpGeom ):
                diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
            except:
              GEOS_EXCEPT = False
              add = False
              break
          if add:
            try:
              outFeat.setGeometry( diff_geom )
              outFeat.setAttributeMap( atMap )
              writer.addFeature( outFeat )
            except:
              FEATURE_EXCEPT = False
              continue
    del writer
    return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
def intersect( self ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select( allAttrsA )
vproviderB = self.vlayerB.dataProvider()
allAttrsB = vproviderB.attributeIndexes()
vproviderB.select( allAttrsB )
# check for crs compatibility
crsA = vproviderA.crs()
crsB = vproviderB.crs()
if not crsA.isValid() or not crsB.isValid():
crs_match = None
else:
crs_match = crsA == crsB
fields = ftools_utils.combineVectorFields( self.vlayerA, self.vlayerB )
longNames = ftools_utils.checkFieldNameLength( fields )
if not longNames.isEmpty():
message = QString( 'Following field names are longer than 10 characters:\n%1' ).arg( longNames.join( '\n' ) )
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, message
writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
nElement = 0
index = ftools_utils.createIndex( vproviderB )
vproviderB.rewind()
vproviderB.select( allAttrsB )
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 0:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
# Geometry list: prevents writing error
# in geometries of different types
# produced by the intersection
# fix #3549
gList = ftools_utils.getGeomType( geom.wkbType() )
if int_geom.wkbType() in gList:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
# we don't have selection in overlay layer
else:
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 0:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
gList = ftools_utils.getGeomType( geom.wkbType() )
if int_geom.wkbType() in gList:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
EATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
vproviderA.rewind()
vproviderA.select( allAttrsA )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 0:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
gList = ftools_utils.getGeomType( geom.wkbType() )
if int_geom.wkbType() in gList:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
# we have no selection in overlay layer
else:
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 0:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
gList = ftools_utils.getGeomType( geom.wkbType() )
if int_geom.wkbType() in gList:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
  def union( self ):
    """Write the union of layers A and B.

    Pass 1 walks layer A: for each feature it writes the intersection
    pieces with overlapping B features (combined attributes) plus the
    remainder of A not covered by B (A attributes only).  Pass 2 walks
    layer B and writes each B feature minus A, with the B attributes
    shifted past A's field count.

    Returns (geos_ok, feature_ok, crs_match, error_or_None); crs_match
    is None when either layer lacks a valid CRS.
    """
    GEOS_EXCEPT = True
    FEATURE_EXCEPT = True
    vproviderA = self.vlayerA.dataProvider()
    allAttrsA = vproviderA.attributeIndexes()
    vproviderA.select( allAttrsA )
    vproviderB = self.vlayerB.dataProvider()
    allAttrsB = vproviderB.attributeIndexes()
    vproviderB.select( allAttrsB )
    # check for crs compatibility
    crsA = vproviderA.crs()
    crsB = vproviderB.crs()
    if not crsA.isValid() or not crsB.isValid():
      crs_match = None
    else:
      crs_match = crsA == crsB
    fields = ftools_utils.combineVectorFields( self.vlayerA, self.vlayerB )
    longNames = ftools_utils.checkFieldNameLength( fields )
    if not longNames.isEmpty():
      message = QString( 'Following field names are longer than 10 characters:\n%1' ).arg( longNames.join( '\n' ) )
      return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, message
    writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
    vproviderA.geometryType(), vproviderA.crs() )
    if writer.hasError():
      return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
    inFeatA = QgsFeature()
    inFeatB = QgsFeature()
    outFeat = QgsFeature()
    # indexA indexes layer B (queried while walking A) and indexB
    # indexes layer A (queried while walking B)
    indexA = ftools_utils.createIndex( vproviderB )
    indexB = ftools_utils.createIndex( vproviderA )
    vproviderA.rewind()
    vproviderA.select( allAttrsA )
    vproviderB.rewind()
    vproviderB.select(allAttrsB)
    nFeat = vproviderA.featureCount() * vproviderB.featureCount()
    self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
    self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
    count = 0
    nElement = 0
    # pass 1: layer A against layer B
    while vproviderA.nextFeature( inFeatA ):
      self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
      nElement += 1
      found = False
      geom = QgsGeometry( inFeatA.geometry() )
      diff_geom = QgsGeometry( geom )
      atMapA = inFeatA.attributeMap()
      intersects = indexA.intersects( geom.boundingBox() )
      if len( intersects ) < 1:
        # no B candidates at all: copy the A feature through unchanged
        try:
          outFeat.setGeometry( geom )
          outFeat.setAttributeMap( atMapA )
          writer.addFeature( outFeat )
        except:
          # this really shouldn't happen, as we
          # haven't edited the input geom at all
          FEATURE_EXCEPT = False
      else:
        for id in intersects:
          count += 1
          vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
          atMapB = inFeatB.attributeMap()
          tmpGeom = QgsGeometry( inFeatB.geometry() )
          try:
            if geom.intersects( tmpGeom ):
              found = True
              int_geom = geom.intersection( tmpGeom )
              if int_geom is None:
                # There was a problem creating the intersection
                GEOS_EXCEPT = False
                int_geom = QgsGeometry()
              else:
                int_geom = QgsGeometry(int_geom)
              if diff_geom.intersects( tmpGeom ):
                diff_geom = diff_geom.difference( tmpGeom )
                if diff_geom is None:
                  # It's possible there was an error here?
                  diff_geom = QgsGeometry()
                else:
                  diff_geom = QgsGeometry(diff_geom)
              if int_geom.wkbType() == 0:
                # intersection produced different geometry types
                temp_list = int_geom.asGeometryCollection()
                for i in temp_list:
                  if i.type() == geom.type():
                    int_geom = QgsGeometry( i )
                try:
                  outFeat.setGeometry( int_geom )
                  outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
                  writer.addFeature( outFeat )
                except Exception, err:
                  FEATURE_EXCEPT = False
              else:
                # Geometry list: prevents writing error
                # in geometries of different types
                # produced by the intersection
                # fix #3549
                gList = ftools_utils.getGeomType( geom.wkbType() )
                if int_geom.wkbType() in gList:
                  try:
                    outFeat.setGeometry( int_geom )
                    outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
                    writer.addFeature( outFeat )
                  except Exception, err:
                    FEATURE_EXCEPT = False
            else:
              # this only happends if the bounding box
              # intersects, but the geometry doesn't
              try:
                outFeat.setGeometry( geom )
                outFeat.setAttributeMap( atMapA )
                writer.addFeature( outFeat )
              except:
                # also shoudn't ever happen
                FEATURE_EXCEPT = False
          except Exception, err:
            GEOS_EXCEPT = False
            found = False
        if found:
          # write what is left of the A feature after subtracting B
          try:
            if diff_geom.wkbType() == 0:
              temp_list = diff_geom.asGeometryCollection()
              for i in temp_list:
                if i.type() == geom.type():
                  diff_geom = QgsGeometry( i )
            outFeat.setGeometry( diff_geom )
            outFeat.setAttributeMap( atMapA )
            writer.addFeature( outFeat )
          except Exception, err:
            FEATURE_EXCEPT = False
    # pass 2: layer B minus layer A.  B attributes are shifted past A's
    # field count so they land in the combined-field positions.
    length = len( vproviderA.fields().values() )
    vproviderB.rewind()
    vproviderB.select(allAttrsB)
    # NOTE(review): inFeatA is deliberately reused to iterate layer B here.
    while vproviderB.nextFeature( inFeatA ):
      add = False
      geom = QgsGeometry( inFeatA.geometry() )
      diff_geom = QgsGeometry( geom )
      atMap = inFeatA.attributeMap().values()
      atMap = dict( zip( range( length, length + len( atMap ) ), atMap ) )
      intersects = indexB.intersects( geom.boundingBox() )
      if len(intersects) < 1:
        try:
          outFeat.setGeometry( geom )
          outFeat.setAttributeMap( atMap )
          writer.addFeature( outFeat )
        except Exception, err:
          FEATURE_EXCEPT = False
      else:
        for id in intersects:
          vproviderA.featureAtId( int( id ), inFeatB , True, allAttrsA )
          tmpGeom = QgsGeometry( inFeatB.geometry() )
          try:
            if diff_geom.intersects( tmpGeom ):
              add = True
              diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
            else:
              # this only happends if the bounding box
              # intersects, but the geometry doesn't
              # NOTE(review): this writes diff_geom once per
              # non-intersecting candidate — confirm duplicates are intended.
              outFeat.setGeometry( diff_geom )
              outFeat.setAttributeMap( atMap )
              writer.addFeature( outFeat )
          except Exception, err:
            add = False
            GEOS_EXCEPT = False
        if add:
          try:
            outFeat.setGeometry( diff_geom )
            outFeat.setAttributeMap( atMap )
            writer.addFeature( outFeat )
          except Exception, err:
            FEATURE_EXCEPT = False
      self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
      nElement += 1
    del writer
    return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
  def symetrical_difference( self ):
    """Write the symmetrical difference of vlayerA and vlayerB to a new
    shapefile (self.myName).

    Pass 1 keeps the parts of A features not covered by B; pass 2 keeps
    the parts of B features not covered by A.  B's attribute values are
    re-keyed past A's field count so both layers' attributes fit in the
    combined field list.

    Returns ( GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, message ): the two
    flags are False when a geometry/feature-level exception occurred,
    crs_match is None when either CRS is invalid, and message is an
    error string or None.
    """
    GEOS_EXCEPT = True
    FEATURE_EXCEPT = True
    vproviderA = self.vlayerA.dataProvider()
    allAttrsA = vproviderA.attributeIndexes()
    vproviderA.select( allAttrsA )
    vproviderB = self.vlayerB.dataProvider()
    allAttrsB = vproviderB.attributeIndexes()
    vproviderB.select( allAttrsB )
    # check for crs compatibility
    crsA = vproviderA.crs()
    crsB = vproviderB.crs()
    if not crsA.isValid() or not crsB.isValid():
      crs_match = None
    else:
      crs_match = crsA == crsB
    # combined attribute table: A's fields followed by B's
    fields = ftools_utils.combineVectorFields( self.vlayerA, self.vlayerB )
    longNames = ftools_utils.checkFieldNameLength( fields )
    if not longNames.isEmpty():
      # shapefile DBF format limits field names to 10 characters
      message = QString( 'Following field names are longer than 10 characters:\n%1' ).arg( longNames.join( '\n' ) )
      return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, message
    writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
    vproviderA.geometryType(), vproviderA.crs() )
    if writer.hasError():
      return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
    inFeatA = QgsFeature()
    inFeatB = QgsFeature()
    outFeat = QgsFeature()
    # spatial indexes: indexA indexes layer B (queried in pass 1),
    # indexB indexes layer A (queried in pass 2)
    indexA = ftools_utils.createIndex( vproviderB )
    indexB = ftools_utils.createIndex( vproviderA )
    vproviderA.rewind()
    vproviderA.select( allAttrsA )
    vproviderB.rewind()
    vproviderB.select(allAttrsB)
    # NOTE(review): progress range is |A|*|B| but the two passes emit only
    # |A|+|B| steps -- looks off; confirm intended progress semantics
    nFeat = vproviderA.featureCount() * vproviderB.featureCount()
    nElement = 0
    self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
    self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
    # pass 1: A minus B
    while vproviderA.nextFeature( inFeatA ):
      nElement += 1
      self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
      add = True
      geom = QgsGeometry( inFeatA.geometry() )
      diff_geom = QgsGeometry( geom )
      atMapA = inFeatA.attributeMap()
      intersects = indexA.intersects( geom.boundingBox() )
      for id in intersects:
        vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
        tmpGeom = QgsGeometry( inFeatB.geometry() )
        try:
          if diff_geom.intersects( tmpGeom ):
            diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
        except:
          # difference() failed: drop this feature and flag a GEOS error
          add = False
          GEOS_EXCEPT = False
          break
      if add:
        try:
          outFeat.setGeometry( diff_geom )
          outFeat.setAttributeMap( atMapA )
          writer.addFeature( outFeat )
        except:
          FEATURE_EXCEPT = False
          continue
    # pass 2: B minus A; re-key B's attribute map to start after A's fields
    length = len( vproviderA.fields().values() )
    vproviderB.rewind()
    vproviderB.select(allAttrsB)
    while vproviderB.nextFeature( inFeatA ):
      nElement += 1
      self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
      add = True
      geom = QgsGeometry( inFeatA.geometry() )
      diff_geom = QgsGeometry( geom )
      atMap = inFeatA.attributeMap().values()
      atMap = dict( zip( range( length, length + len( atMap ) ), atMap ) )
      intersects = indexB.intersects( geom.boundingBox() )
      for id in intersects:
        vproviderA.featureAtId( int( id ), inFeatB , True, allAttrsA )
        tmpGeom = QgsGeometry( inFeatB.geometry() )
        try:
          if diff_geom.intersects( tmpGeom ):
            diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
        except:
          add = False
          GEOS_EXCEPT = False
          break
      if add:
        try:
          outFeat.setGeometry( diff_geom )
          outFeat.setAttributeMap( atMap )
          writer.addFeature( outFeat )
        except:
          FEATURE_EXCEPT = False
          continue
    del writer
    return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
def clip( self ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select( allAttrsA )
vproviderB = self.vlayerB.dataProvider()
allAttrsB = vproviderB.attributeIndexes()
vproviderB.select( allAttrsB )
# check for crs compatibility
crsA = vproviderA.crs()
crsB = vproviderB.crs()
if not crsA.isValid() or not crsB.isValid():
crs_match = None
else:
crs_match = crsA == crsB
fields = vproviderA.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding, fields,
vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
index = ftools_utils.createIndex( vproviderB )
vproviderA.rewind()
vproviderA.select( allAttrsA )
vproviderB.rewind()
vproviderB.select( allAttrsB )
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
int_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
found = False
first = True
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 0:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEAT_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
# we have no selection in overlay layer
else:
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
found = False
first = True
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 0:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEAT_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
found = False
first = True
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 0:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEAT_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
# we have no selection in overlay layer
else:
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
first = True
found = False
if len( intersects ) > 0:
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 0:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEAT_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
def checkParameter( self, layer, param ):
if self.myFunction == 1:
if type( param ) == unicode:
check = layer.dataProvider().fieldNameIndex( param )
if check == -1:
return ( None, False )
else:
return ( check, True )
else:
if type( param ) == float or type( param ) == int:
return ( param, False )
else:
return ( None, False )
elif self.myFunction == 2:
if not param is None:
if type( param ) == unicode:
check = layer.dataProvider().fieldNameIndex( param )
if check == -1:
return ( None, False )
else:
return ( check, True )
else:
return ( None, False )
else:
return ( True, False )
elif self.myFunction == 4:
if type( param ) == unicode:
check = layer.dataProvider().fieldNameIndex( param )
if check == -1:
return ( check, False )
else:
return ( check, True )
else:
return ( None, False )
def simpleMeasure( self, inGeom ):
if inGeom.wkbType() == QGis.WKBPoint:
pt = QgsPoint()
pt = inGeom.asPoint()
attr1 = pt.x()
attr2 = pt.y()
else:
measure = QgsDistanceArea()
attr1 = measure.measure(inGeom)
if inGeom.type() == QGis.Polygon:
attr2 = self.perimMeasure( inGeom, measure )
else:
attr2 = attr1
return ( attr1, attr2 )
def perimMeasure( self, inGeom, measure ):
value = 0.00
if inGeom.isMultipart():
poly = inGeom.asMultiPolygon()
for k in poly:
for j in k:
value = value + measure.measureLine( j )
else:
poly = inGeom.asPolygon()
for k in poly:
value = value + measure.measureLine( k )
return value
| gpl-2.0 |
wikimedia/operations-debs-statsite | integ/test_integ.py | 1 | 14228 | import os
import os.path
import socket
import textwrap
import subprocess
import contextlib
import sys
import tempfile
import time
import random
try:
import pytest
except ImportError:
print >> sys.stderr, "Integ tests require pytests!"
sys.exit(1)
def pytest_funcarg__servers(request):
    """Start a statsite instance and return ( tcp_conn, udp_conn, output_path ).

    Writes a throwaway config (1s flush interval, one histogram prefix)
    into a temp dir, launches ./statsite streaming its flushes into
    *output_path* via `cat`, and connects one TCP and one UDP socket to
    the randomly chosen port.  Cleanup (process kill) is registered as a
    pytest finalizer.  Raises EnvironmentError when the TCP connection
    cannot be established after three attempts.
    """
    # Create tmpdir and delete after
    tmpdir = tempfile.mkdtemp()
    # Make the command: statsite pipes each flush into this file
    output = "%s/output" % tmpdir
    cmd = "cat >> %s" % output
    # Write the configuration; the same random port serves TCP and UDP
    port = random.randrange(10000, 65000)
    config_path = os.path.join(tmpdir, "config.cfg")
    conf = """[statsite]
flush_interval = 1
port = %d
udp_port = %d
stream_cmd = %s
[histogram1]
prefix=has_hist
min=10
max=90
width=10
""" % (port, port, cmd)
    open(config_path, "w").write(conf)
    # Start the process; poll() returning None means it did not exit at once
    proc = subprocess.Popen("./statsite -f %s" % config_path, shell=True)
    proc.poll()
    assert proc.returncode is None
    # Define a cleanup handler (best-effort: errors are swallowed)
    def cleanup():
        try:
            proc.kill()
            proc.wait()
            #shutil.rmtree(tmpdir)
        except:
            print proc
            pass
    request.addfinalizer(cleanup)
    # Make a connection to the server, retrying while it boots
    connected = False
    for x in xrange(3):
        try:
            conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn.settimeout(1)
            conn.connect(("localhost", port))
            connected = True
            break
        except Exception, e:
            print e
            time.sleep(0.5)
    # Die now
    if not connected:
        raise EnvironmentError("Failed to connect!")
    # Make a second connection over UDP
    conn2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    conn2.connect(("localhost", port))
    # Return the connection
    return conn, conn2, output
def wait_file(path, timeout=5):
    """Block until *path* exists and is non-empty, or *timeout* seconds pass.

    Raises Exception when the file never appears.  A file that appears
    but stays empty past the timeout is tolerated: the second wait just
    gives up silently (matching the original best-effort behavior).
    """
    deadline = time.time() + timeout
    # phase 1: wait for the file to exist at all
    while time.time() < deadline and not os.path.isfile(path):
        time.sleep(0.1)
    if not os.path.isfile(path):
        raise Exception("Timed out waiting for file %s" % path)
    # phase 2: wait for the first bytes to land in it
    while time.time() < deadline and os.path.getsize(path) == 0:
        time.sleep(0.1)
class TestInteg(object):
    """Integration tests exercising statsite's TCP interface.

    Each test pushes metrics through the TCP connection from the
    ``servers`` funcarg, waits for the 1-second flush to land in the
    stream-command output file, and asserts on its contents.  Timestamp
    assertions accept both ``now`` and ``now - 1`` because the flush can
    land on either side of a second boundary.
    """
    def test_kv(self, servers):
        "Tests adding kv pairs"
        server, _, output = servers
        server.sendall("tubez:100|kv\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("kv.tubez|100.000000|%d\n" % now, "kv.tubez|100.000000|%d\n" % (now - 1))
    def test_gauges(self, servers):
        "Tests adding gauges; the last value written wins"
        server, _, output = servers
        server.sendall("g1:1|g\n")
        server.sendall("g1:50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.g1|50.000000|%d\n" % now, "gauges.g1|50.000000|%d\n" % (now - 1))
    def test_gauges_delta(self, servers):
        "Tests positive gauge deltas, which accumulate"
        server, _, output = servers
        server.sendall("gd:+50|g\n")
        server.sendall("gd:+50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|100.000000|%d\n" % now, "gauges.gd|100.000000|%d\n" % (now - 1))
    def test_gauges_delta_neg(self, servers):
        "Tests negative gauge deltas, which accumulate downwards"
        server, _, output = servers
        server.sendall("gd:-50|g\n")
        server.sendall("gd:-50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|-100.000000|%d\n" % now, "gauges.gd|-100.000000|%d\n" % (now - 1))
    def test_counters(self, servers):
        "Tests that counter values are summed across a flush interval"
        server, _, output = servers
        server.sendall("foobar:100|c\n")
        server.sendall("foobar:200|c\n")
        server.sendall("foobar:300|c\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|600.000000|%d\n" % (now),
                       "counts.foobar|600.000000|%d\n" % (now - 1))
    def test_counters_sample(self, servers):
        "Tests counters with a sample rate: values are scaled by 1/rate"
        server, _, output = servers
        server.sendall("foobar:100|c|@0.1\n")
        server.sendall("foobar:200|c|@0.1\n")
        server.sendall("foobar:300|c|@0.1\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|6000.000000|%d\n" % (now),
                       "counts.foobar|6000.000000|%d\n" % (now - 1))
    def test_meters_alias(self, servers):
        "Tests adding timing data with the 'h' alias"
        server, _, output = servers
        msg = ""
        for x in xrange(100):
            msg += "val:%d|h\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        # statistics over the values 0..99
        assert "timers.val.sum|4950" in out
        assert "timers.val.sum_sq|328350" in out
        assert "timers.val.mean|49.500000" in out
        assert "timers.val.lower|0.000000" in out
        assert "timers.val.upper|99.000000" in out
        assert "timers.val.count|100" in out
        assert "timers.val.stdev|29.011492" in out
        assert "timers.val.median|49.000000" in out
        assert "timers.val.p95|95.000000" in out
        assert "timers.val.p99|99.000000" in out
        assert "timers.val.rate|4950" in out
        assert "timers.val.sample_rate|100" in out
    def test_meters(self, servers):
        "Tests timer ('ms') statistics over the values 0..99"
        server, _, output = servers
        msg = ""
        for x in xrange(100):
            msg += "noobs:%d|ms\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        assert "timers.noobs.sum|4950" in out
        assert "timers.noobs.sum_sq|328350" in out
        assert "timers.noobs.mean|49.500000" in out
        assert "timers.noobs.lower|0.000000" in out
        assert "timers.noobs.upper|99.000000" in out
        assert "timers.noobs.count|100" in out
        assert "timers.noobs.stdev|29.011492" in out
        assert "timers.noobs.median|49.000000" in out
        assert "timers.noobs.p95|95.000000" in out
        assert "timers.noobs.p99|99.000000" in out
        assert "timers.noobs.rate|4950" in out
        assert "timers.noobs.sample_rate|100" in out
    def test_histogram(self, servers):
        "Tests keys matching the [histogram1] prefix get binned output"
        server, _, output = servers
        msg = ""
        for x in xrange(100):
            msg += "has_hist.test:%d|ms\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        # config: min=10 max=90 width=10 -> 10 values per bin
        assert "timers.has_hist.test.histogram.bin_<10.00|10" in out
        assert "timers.has_hist.test.histogram.bin_10.00|10" in out
        assert "timers.has_hist.test.histogram.bin_20.00|10" in out
        assert "timers.has_hist.test.histogram.bin_30.00|10" in out
        assert "timers.has_hist.test.histogram.bin_40.00|10" in out
        assert "timers.has_hist.test.histogram.bin_50.00|10" in out
        assert "timers.has_hist.test.histogram.bin_60.00|10" in out
        assert "timers.has_hist.test.histogram.bin_70.00|10" in out
        assert "timers.has_hist.test.histogram.bin_80.00|10" in out
        assert "timers.has_hist.test.histogram.bin_>90.00|10" in out
    def test_sets(self, servers):
        "Tests that sets count unique values"
        server, _, output = servers
        server.sendall("zip:foo|s\n")
        server.sendall("zip:bar|s\n")
        server.sendall("zip:baz|s\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("sets.zip|3|%d\n" % now, "sets.zip|3|%d\n" % (now - 1))
class TestIntegUDP(object):
    """Integration tests exercising statsite's UDP interface.

    Mirrors TestInteg but pushes metrics through the UDP socket (second
    element of the ``servers`` funcarg).  Timestamp assertions accept
    both ``now`` and ``now - 1`` because the flush can land on either
    side of a second boundary.
    """
    def test_kv(self, servers):
        "Tests adding kv pairs"
        _, server, output = servers
        server.sendall("tubez:100|kv\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("kv.tubez|100.000000|%d\n" % now, "kv.tubez|100.000000|%d\n" % (now - 1))
    def test_gauges(self, servers):
        "Tests adding gauges; the last value written wins"
        _, server, output = servers
        server.sendall("g1:1|g\n")
        server.sendall("g1:50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.g1|50.000000|%d\n" % now, "gauges.g1|50.000000|%d\n" % (now - 1))
    def test_gauges_delta(self, servers):
        "Tests positive gauge deltas, which accumulate"
        _, server, output = servers
        server.sendall("gd:+50|g\n")
        server.sendall("gd:+50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|100.000000|%d\n" % now, "gauges.gd|100.000000|%d\n" % (now - 1))
    def test_gauges_delta_neg(self, servers):
        "Tests negative gauge deltas, which accumulate downwards"
        _, server, output = servers
        server.sendall("gd:-50|g\n")
        server.sendall("gd:-50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|-100.000000|%d\n" % now, "gauges.gd|-100.000000|%d\n" % (now - 1))
    def test_bad_kv(self, servers):
        "Tests adding a bad value, followed by a valid kv pair"
        _, server, output = servers
        # the junk datagram must not poison the following valid metric
        server.sendall("this is junk data\n")
        server.sendall("tubez:100|kv\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("kv.tubez|100.000000|%d\n" % now, "kv.tubez|100.000000|%d\n" % (now - 1))
    def test_counters(self, servers):
        "Tests that counter values are summed across a flush interval"
        _, server, output = servers
        server.sendall("foobar:100|c\n")
        server.sendall("foobar:200|c\n")
        server.sendall("foobar:300|c\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|600.000000|%d\n" % (now),
                       "counts.foobar|600.000000|%d\n" % (now - 1))
    def test_counters_sample(self, servers):
        "Tests counters with a sample rate: values are scaled by 1/rate"
        _, server, output = servers
        server.sendall("foobar:100|c|@0.1\n")
        server.sendall("foobar:200|c|@0.1\n")
        server.sendall("foobar:300|c|@0.1\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|6000.000000|%d\n" % (now),
                       "counts.foobar|6000.000000|%d\n" % (now - 1))
    def test_counters_no_newlines(self, servers):
        "Tests adding counters without a trailing new line"
        _, server, output = servers
        # each datagram is a complete metric even without the newline
        server.sendall("zip:100|c")
        server.sendall("zip:200|c")
        server.sendall("zip:300|c")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.zip|600.000000|%d\n" % (now),
                       "counts.zip|600.000000|%d\n" % (now - 1))
    def test_meters(self, servers):
        "Tests timer ('ms') statistics over the values 0..99"
        _, server, output = servers
        msg = ""
        for x in xrange(100):
            msg += "noobs:%d|ms\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        assert "timers.noobs.sum|4950" in out
        assert "timers.noobs.sum_sq|328350" in out
        assert "timers.noobs.mean|49.500000" in out
        assert "timers.noobs.lower|0.000000" in out
        assert "timers.noobs.upper|99.000000" in out
        assert "timers.noobs.count|100" in out
        assert "timers.noobs.stdev|29.011492" in out
        assert "timers.noobs.median|49.000000" in out
        assert "timers.noobs.p95|95.000000" in out
        assert "timers.noobs.p99|99.000000" in out
        assert "timers.noobs.rate|4950" in out
        assert "timers.noobs.sample_rate|100" in out
    def test_sets(self, servers):
        "Tests that sets count unique values"
        _, server, output = servers
        server.sendall("zip:foo|s\n")
        server.sendall("zip:bar|s\n")
        server.sendall("zip:baz|s\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("sets.zip|3|%d\n" % now, "sets.zip|3|%d\n" % (now - 1))
class TestIntegBindAddress(object):
    """Integration tests for statsite's bind_address configuration option.

    Bug fixes: (1) both ``run`` and ``test_ipv4_used`` referenced ``p``
    in ``finally`` before it was guaranteed to be bound, turning a
    Popen failure into a masking NameError; (2) ``test_ipv4_used``
    probed the typo address '127.0.0.0', making its assertion vacuous.
    """
    @contextlib.contextmanager
    def run(self, addr, port=None):
        """Start statsite bound to *addr* and yield the chosen port.

        The child process and the temporary config file are always
        cleaned up, even when startup itself raises.
        """
        port = port if port else random.randrange(10000, 65000)
        fh = tempfile.NamedTemporaryFile()
        conf = '''\
        [statsite]
        port = %d
        udp_port = %s
        bind_address = %s\n'''
        fh.write(textwrap.dedent(conf % (port, port, addr)))
        fh.flush()
        p = None
        try:
            p = subprocess.Popen('./statsite -f %s' % fh.name, shell=True)
            time.sleep(0.3)
            yield port
        finally:
            # p stays None when Popen itself failed; don't mask that error
            if p is not None:
                p.kill()
            fh.close()
    def islistening(self, addr, port, command='statsite'):
        """Return True when *command* holds both a TCP and a UDP socket
        on addr:port, according to lsof."""
        try:
            cmd = 'lsof -FnPc -nP -i @%s:%s' % (addr, port)
            out = subprocess.check_output(cmd, shell=True)
        except subprocess.CalledProcessError:
            # lsof exits non-zero when nothing matches
            return False
        return (command in out) and ('PTCP' in out) and ('PUDP' in out)
    def test_ipv4_localhost(self):
        with self.run('127.0.0.1') as port:
            assert self.islistening('127.0.0.1', port), 'not listening'
    def test_ipv4_any(self):
        with self.run('0.0.0.0') as port:
            assert self.islistening('0.0.0.0', port), 'not listening'
    def test_ipv4_bogus(self):
        # unparseable and non-local addresses must not end up bound
        with self.run('a.b.c.d') as port:
            assert not self.islistening('0.0.0.0', port), 'should not be listening'
        with self.run('1.0.1.0') as port:
            assert not self.islistening('1.0.1.0', port), 'should not be listening'
    def test_ipv4_used(self):
        p = None
        try:
            port = random.randrange(10000, 65000)
            # occupy the port first so statsite's bind must fail
            p = subprocess.Popen(['nc', '-l', '127.0.0.1', str(port)])
            with self.run('127.0.0.1', port):
                # was '127.0.0.0' (typo), which made this check vacuous
                assert not self.islistening('127.0.0.1', port), 'should not be listening'
        finally:
            if p is not None:
                p.kill()
if __name__ == "__main__":
    # Run only the TCP/UDP integration tests by default and exit with
    # pytest's status code.
    sys.exit(pytest.main(args="-k TestInteg."))
| bsd-3-clause |
10clouds/edx-platform | common/test/acceptance/pages/lms/annotation_component.py | 159 | 3645 | """
Annotation Component Page.
"""
from bok_choy.page_object import PageObject
from selenium.webdriver import ActionChains
class AnnotationComponentPage(PageObject):
    """
    Page object for interacting with an annotation component in the LMS.
    """
    url = None
    # Index of the problem currently being worked on; set by
    # click_reply_annotation.
    active_problem = 0

    def is_browser_on_page(self):
        """The page is considered loaded once the annotation title exists."""
        return self.q(css='.annotatable-title').present

    @property
    def component_name(self):
        """
        Name of the annotation component, read from its title element.
        """
        return self.q(css='.annotatable-title').text[0]

    def click_reply_annotation(self, problem):
        """
        Hover over the annotation span for *problem*, then follow its
        "Reply to Annotation" link and remember the problem as active.
        """
        span_css = '.annotatable-span[data-problem-id="{}"]'.format(problem)
        self.mouse_hover(self.browser.find_element_by_css_selector(span_css))
        self.wait_for_element_visibility(span_css, "Reply to Annotation link is visible")
        reply_css = '.annotatable-reply[data-problem-id="{}"]'.format(problem)
        self.q(css=reply_css).click()
        self.active_problem = problem

    def active_problem_selector(self, sub_selector):
        """
        Build a css selector scoped to the active problem's container,
        with *sub_selector* appended.
        """
        vert_css = '.vert-{}'.format(self.active_problem + 1)
        container_id = self.q(css=vert_css).map(
            lambda el: el.get_attribute('data-id')).results[0]
        return 'div[data-problem-id="{}"] {}'.format(container_id, sub_selector)

    def mouse_hover(self, element):
        """
        Move the mouse pointer over the given element.
        """
        ActionChains(self.browser).move_to_element(element).perform()

    def check_scroll_to_problem(self):
        """
        Whether the active problem's input area is currently visible.
        """
        return self.q(css=self.active_problem_selector('.annotation-input')).visible

    def answer_problem(self):
        """
        Fill in and submit the correct answer for the active problem.
        """
        self.q(css=self.active_problem_selector('.comment')).fill('Test Response')
        answer_css = self.active_problem_selector('.tag[data-id="{}"]'.format(self.active_problem))
        # Selenium will first move the element into view then click on it.
        self.q(css=answer_css).click()
        # Wait for the click to take effect, which is after the class is applied.
        self.wait_for(lambda: 'selected' in self.q(css=answer_css).attrs('class')[0], description='answer selected')
        # Click the "Check" button.
        self.q(css=self.active_problem_selector('.check')).click()
        # This will trigger a POST to problem_check so wait until the response is returned.
        self.wait_for_ajax()

    def check_feedback(self):
        """
        Wait for the active problem's 'correct' marker and report whether
        it is visible.
        """
        correct_css = self.active_problem_selector('.tag-status.correct')
        self.wait_for_element_visibility(correct_css, "Correct is visible")
        return self.q(css=correct_css).visible

    def click_return_to_annotation(self):
        """
        Follow the active problem's "Return to Annotation" link.
        """
        self.q(css=self.active_problem_selector('.annotation-return')).click()

    def check_scroll_to_annotation(self):
        """
        Whether the annotation component header is currently visible.
        """
        return self.q(css='.annotation-header').visible
| agpl-3.0 |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/netlogon/netr_SamBaseInfo.py | 1 | 3596 | # encoding: utf-8
# module samba.dcerpc.netlogon
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/netlogon.so
# by generator 1.135
""" netlogon DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class netr_SamBaseInfo(__talloc.Object):
    """Binding stub for the NETLOGON ``netr_SamBaseInfo`` structure.

    Auto-generated wrapper (see the generator note at the top of this
    module); each property below mirrors one field of the wire
    structure.  Do not edit by hand.
    """
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    account_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    acct_flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    allow_password_change = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    bad_password_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    domain_sid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    failed_logon_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    force_password_change = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    full_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    groups = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    home_directory = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    home_drive = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    key = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    kickoff_time = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    last_failed_logon = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    last_password_change = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    last_successful_logon = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    LMSessKey = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    logoff_time = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    logon_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    logon_domain = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    logon_script = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    logon_server = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    logon_time = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    primary_gid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    profile_path = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    rid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    sub_auth_status = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    user_flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
| gpl-2.0 |
bengovernment/pyfawn | python-challenge/inputs/input-puzzle_5.py | 27 | 5002 | (lp0
(lp1
(S' '
p2
I95
tp3
aa(lp4
(g2
I14
tp5
a(S'#'
p6
I5
tp7
a(g2
I70
tp8
a(g6
I5
tp9
a(g2
I1
tp10
aa(lp11
(g2
I15
tp12
a(g6
I4
tp13
a(g2
I71
tp14
a(g6
I4
tp15
a(g2
I1
tp16
aa(lp17
(g2
I15
tp18
a(g6
I4
tp19
a(g2
I71
tp20
a(g6
I4
tp21
a(g2
I1
tp22
aa(lp23
(g2
I15
tp24
a(g6
I4
tp25
a(g2
I71
tp26
a(g6
I4
tp27
a(g2
I1
tp28
aa(lp29
(g2
I15
tp30
a(g6
I4
tp31
a(g2
I71
tp32
a(g6
I4
tp33
a(g2
I1
tp34
aa(lp35
(g2
I15
tp36
a(g6
I4
tp37
a(g2
I71
tp38
a(g6
I4
tp39
a(g2
I1
tp40
aa(lp41
(g2
I15
tp42
a(g6
I4
tp43
a(g2
I71
tp44
a(g6
I4
tp45
a(g2
I1
tp46
aa(lp47
(g2
I15
tp48
a(g6
I4
tp49
a(g2
I71
tp50
a(g6
I4
tp51
a(g2
I1
tp52
aa(lp53
(g2
I6
tp54
a(g6
I3
tp55
a(g2
I6
tp56
a(g6
I4
tp57
a(g2
I3
tp58
a(g6
I3
tp59
a(g2
I9
tp60
a(g6
I3
tp61
a(g2
I7
tp62
a(g6
I5
tp63
a(g2
I3
tp64
a(g6
I3
tp65
a(g2
I4
tp66
a(g6
I5
tp67
a(g2
I3
tp68
a(g6
I3
tp69
a(g2
I10
tp70
a(g6
I3
tp71
a(g2
I7
tp72
a(g6
I4
tp73
a(g2
I1
tp74
aa(lp75
(g2
I3
tp76
a(g6
I3
tp77
a(g2
I3
tp78
a(g6
I2
tp79
a(g2
I4
tp80
a(g6
I4
tp81
a(g2
I1
tp82
a(g6
I7
tp83
a(g2
I5
tp84
a(g6
I2
tp85
a(g2
I2
tp86
a(g6
I3
tp87
a(g2
I6
tp88
a(g6
I4
tp89
a(g2
I1
tp90
a(g6
I7
tp91
a(g2
I3
tp92
a(g6
I4
tp93
a(g2
I1
tp94
a(g6
I7
tp95
a(g2
I5
tp96
a(g6
I3
tp97
a(g2
I2
tp98
a(g6
I3
tp99
a(g2
I5
tp100
a(g6
I4
tp101
a(g2
I1
tp102
aa(lp103
(g2
I2
tp104
a(g6
I3
tp105
a(g2
I5
tp106
a(g6
I3
tp107
a(g2
I2
tp108
a(g6
I5
tp109
a(g2
I4
tp110
a(g6
I4
tp111
a(g2
I3
tp112
a(g6
I3
tp113
a(g2
I3
tp114
a(g6
I4
tp115
a(g2
I4
tp116
a(g6
I5
tp117
a(g2
I4
tp118
a(g6
I4
tp119
a(g2
I2
tp120
a(g6
I5
tp121
a(g2
I4
tp122
a(g6
I4
tp123
a(g2
I3
tp124
a(g6
I3
tp125
a(g2
I5
tp126
a(g6
I3
tp127
a(g2
I3
tp128
a(g6
I4
tp129
a(g2
I1
tp130
aa(lp131
(g2
I1
tp132
a(g6
I3
tp133
a(g2
I11
tp134
a(g6
I4
tp135
a(g2
I5
tp136
a(g6
I4
tp137
a(g2
I3
tp138
a(g6
I3
tp139
a(g2
I4
tp140
a(g6
I3
tp141
a(g2
I4
tp142
a(g6
I4
tp143
a(g2
I5
tp144
a(g6
I4
tp145
a(g2
I2
tp146
a(g6
I4
tp147
a(g2
I5
tp148
a(g6
I4
tp149
a(g2
I2
tp150
a(g6
I3
tp151
a(g2
I6
tp152
a(g6
I4
tp153
a(g2
I2
tp154
a(g6
I4
tp155
a(g2
I1
tp156
aa(lp157
(g2
I1
tp158
a(g6
I3
tp159
a(g2
I11
tp160
a(g6
I4
tp161
a(g2
I5
tp162
a(g6
I4
tp163
a(g2
I10
tp164
a(g6
I3
tp165
a(g2
I4
tp166
a(g6
I4
tp167
a(g2
I5
tp168
a(g6
I4
tp169
a(g2
I2
tp170
a(g6
I4
tp171
a(g2
I5
tp172
a(g6
I4
tp173
a(g2
I2
tp174
a(g6
I3
tp175
a(g2
I7
tp176
a(g6
I3
tp177
a(g2
I2
tp178
a(g6
I4
tp179
a(g2
I1
tp180
aa(lp181
(g6
I4
tp182
a(g2
I11
tp183
a(g6
I4
tp184
a(g2
I5
tp185
a(g6
I4
tp186
a(g2
I5
tp187
a(g6
I2
tp188
a(g2
I3
tp189
a(g6
I3
tp190
a(g2
I4
tp191
a(g6
I4
tp192
a(g2
I5
tp193
a(g6
I4
tp194
a(g2
I2
tp195
a(g6
I4
tp196
a(g2
I5
tp197
a(g6
I4
tp198
a(g2
I1
tp199
a(g6
I4
tp200
a(g2
I7
tp201
a(g6
I3
tp202
a(g2
I2
tp203
a(g6
I4
tp204
a(g2
I1
tp205
aa(lp206
(g6
I4
tp207
a(g2
I11
tp208
a(g6
I4
tp209
a(g2
I5
tp210
a(g6
I4
tp211
a(g2
I3
tp212
a(g6
I10
tp213
a(g2
I4
tp214
a(g6
I4
tp215
a(g2
I5
tp216
a(g6
I4
tp217
a(g2
I2
tp218
a(g6
I4
tp219
a(g2
I5
tp220
a(g6
I4
tp221
a(g2
I1
tp222
a(g6
I14
tp223
a(g2
I2
tp224
a(g6
I4
tp225
a(g2
I1
tp226
aa(lp227
(g6
I4
tp228
a(g2
I11
tp229
a(g6
I4
tp230
a(g2
I5
tp231
a(g6
I4
tp232
a(g2
I2
tp233
a(g6
I3
tp234
a(g2
I4
tp235
a(g6
I4
tp236
a(g2
I4
tp237
a(g6
I4
tp238
a(g2
I5
tp239
a(g6
I4
tp240
a(g2
I2
tp241
a(g6
I4
tp242
a(g2
I5
tp243
a(g6
I4
tp244
a(g2
I1
tp245
a(g6
I4
tp246
a(g2
I12
tp247
a(g6
I4
tp248
a(g2
I1
tp249
aa(lp250
(g6
I4
tp251
a(g2
I11
tp252
a(g6
I4
tp253
a(g2
I5
tp254
a(g6
I4
tp255
a(g2
I1
tp256
a(g6
I4
tp257
a(g2
I5
tp258
a(g6
I3
tp259
a(g2
I4
tp260
a(g6
I4
tp261
a(g2
I5
tp262
a(g6
I4
tp263
a(g2
I2
tp264
a(g6
I4
tp265
a(g2
I5
tp266
a(g6
I4
tp267
a(g2
I1
tp268
a(g6
I4
tp269
a(g2
I12
tp270
a(g6
I4
tp271
a(g2
I1
tp272
aa(lp273
(g2
I1
tp274
a(g6
I3
tp275
a(g2
I11
tp276
a(g6
I4
tp277
a(g2
I5
tp278
a(g6
I4
tp279
a(g2
I1
tp280
a(g6
I4
tp281
a(g2
I5
tp282
a(g6
I3
tp283
a(g2
I4
tp284
a(g6
I4
tp285
a(g2
I5
tp286
a(g6
I4
tp287
a(g2
I2
tp288
a(g6
I4
tp289
a(g2
I5
tp290
a(g6
I4
tp291
a(g2
I2
tp292
a(g6
I3
tp293
a(g2
I12
tp294
a(g6
I4
tp295
a(g2
I1
tp296
aa(lp297
(g2
I2
tp298
a(g6
I3
tp299
a(g2
I6
tp300
a(g6
I2
tp301
a(g2
I2
tp302
a(g6
I4
tp303
a(g2
I5
tp304
a(g6
I4
tp305
a(g2
I2
tp306
a(g6
I3
tp307
a(g2
I4
tp308
a(g6
I4
tp309
a(g2
I4
tp310
a(g6
I4
tp311
a(g2
I5
tp312
a(g6
I4
tp313
a(g2
I2
tp314
a(g6
I4
tp315
a(g2
I5
tp316
a(g6
I4
tp317
a(g2
I3
tp318
a(g6
I3
tp319
a(g2
I6
tp320
a(g6
I2
tp321
a(g2
I3
tp322
a(g6
I4
tp323
a(g2
I1
tp324
aa(lp325
(g2
I3
tp326
a(g6
I3
tp327
a(g2
I4
tp328
a(g6
I2
tp329
a(g2
I3
tp330
a(g6
I4
tp331
a(g2
I5
tp332
a(g6
I4
tp333
a(g2
I3
tp334
a(g6
I11
tp335
a(g2
I3
tp336
a(g6
I4
tp337
a(g2
I5
tp338
a(g6
I4
tp339
a(g2
I2
tp340
a(g6
I4
tp341
a(g2
I5
tp342
a(g6
I4
tp343
a(g2
I4
tp344
a(g6
I3
tp345
a(g2
I4
tp346
a(g6
I2
tp347
a(g2
I4
tp348
a(g6
I4
tp349
a(g2
I1
tp350
aa(lp351
(g2
I6
tp352
a(g6
I3
tp353
a(g2
I5
tp354
a(g6
I6
tp355
a(g2
I4
tp356
a(g6
I5
tp357
a(g2
I4
tp358
a(g6
I2
tp359
a(g2
I4
tp360
a(g6
I4
tp361
a(g2
I1
tp362
a(g6
I6
tp363
a(g2
I4
tp364
a(g6
I11
tp365
a(g2
I4
tp366
a(g6
I5
tp367
a(g2
I6
tp368
a(g6
I3
tp369
a(g2
I6
tp370
a(g6
I6
tp371
aa(lp372
(g2
I95
tp373
aa. | mit |
minhtuancn/odoo | addons/resource/resource.py | 81 | 42822 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
import pytz
class resource_calendar(osv.osv):
    """ Calendar model for a resource. It has
     - attendance_ids: list of resource.calendar.attendance that are a working
                       interval in a given weekday.
     - leave_ids: list of leaves linked to this calendar. A leave can be general
                  or linked to a specific resource, depending on its resource_id.

    All methods in this class use intervals. An interval is a tuple holding
    (begin_datetime, end_datetime). A list of intervals is therefore a list of
    tuples, holding several intervals of work or leaves. """
    _name = "resource.calendar"
    _description = "Resource Calendar"

    _columns = {
        # Human-readable calendar name.
        'name': fields.char("Name", required=True),
        # Owning company (optional).
        'company_id': fields.many2one('res.company', 'Company', required=False),
        # Weekly working-time slots; one attendance per weekday/hour range.
        'attendance_ids': fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time', copy=True),
        'manager': fields.many2one('res.users', 'Workgroup Manager'),
        # Leaves attached to this calendar; a leave is generic when its
        # resource_id is unset, resource-specific otherwise.
        'leave_ids': fields.one2many(
            'resource.calendar.leaves', 'calendar_id', 'Leaves',
            help=''
        ),
    }
    _defaults = {
        # Default to the current user's company.
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
    }

    # --------------------------------------------------
    # Utility methods
    # --------------------------------------------------
def interval_clean(self, intervals):
    """ Sort datetime intervals and merge those that overlap or touch.

    :param list intervals: (begin, end) tuples, in any order
    :return list: intervals ordered by begin, with overlaps merged """
    ordered = sorted(intervals, key=itemgetter(0))
    merged = []
    # `running` is the interval currently being extended, as a mutable pair.
    running = None
    for item in ordered:
        if running is None:
            running = [item[0], item[1]]
        elif item[0] > running[1]:
            # Strictly disjoint: flush the running interval and restart.
            merged.append(tuple(running))
            running = [item[0], item[1]]
        elif item[1] > running[1]:
            # Overlapping (or touching) and extending: stretch the end.
            running[1] = item[1]
    if running is not None:
        merged.append(tuple(running))
    return merged
def interval_remove_leaves(self, interval, leave_intervals):
    """ Subtract leave intervals from a base interval.

    The leaves are first cleaned (sorted, overlaps merged); the base
    interval is then split around every leave that intersects it.

    :param tuple interval: (begin datetime, end datetime) base interval
    :param list leave_intervals: (begin, end) tuples to subtract; may be None
    :return list: ordered (begin, end) tuples covering the parts of the base
                  interval not overlapped by any leave """
    if not interval:
        return interval
    ordered_leaves = self.interval_clean(leave_intervals or [])
    pieces = []
    # `window` is the not-yet-consumed tail of the base interval.
    window = [interval[0], interval[1]]
    for leave in ordered_leaves:
        if leave[1] <= window[0]:
            # Leave ends before the window: irrelevant, try the next one.
            continue
        if leave[0] >= window[1]:
            # Leaves are ordered; nothing further can intersect.
            break
        if window[0] < leave[0] < window[1]:
            # Leave starts inside the window: keep the part before it and
            # restart the window right after the leave.
            pieces.append((window[0], leave[0]))
            window = [leave[1], interval[1]]
        if window[0] <= leave[1]:
            # Window starts inside the leave: push its start past the leave.
            window[0] = leave[1]
    if window and window[0] < interval[1]:
        # Keep the final remainder unless leaves pushed it out of range.
        pieces.append((window[0], window[1]))
    return pieces
def interval_schedule_hours(self, intervals, hour, remove_at_end=True):
    """ Schedule hours in intervals. The last matching interval is truncated
    to match the specified hours.

    It is possible to truncate the last interval at its beginning or ending.
    However this does nothing on the given interval order that should be
    submitted accordingly.

    :param list intervals: a list of tuples (beginning datetime, ending datetime)
    :param int/float hour: number of hours to schedule. It will be converted
                           into a timedelta, but should be submitted as an
                           int or float.
    :param boolean remove_at_end: remove extra hours at the end of the last
                                  matching interval. Otherwise, do it at the
                                  beginning.
    :return list results: a list of intervals. If the number of hours to schedule
    is greater than the possible scheduling in the intervals, no extra-scheduling
    is done, and results == intervals. """
    results = []
    res = datetime.timedelta()
    limit = datetime.timedelta(hours=hour)
    for interval in intervals:
        res += interval[1] - interval[0]
        if res > limit and remove_at_end:
            # Overshot: shorten this interval's end. limit - res is negative,
            # so the relativedelta offset moves the end backwards.
            # NOTE(review): `seconds` is presumably a module-level helper that
            # converts a timedelta to float seconds -- it is not visible in
            # this chunk; confirm it is defined elsewhere in the module.
            interval = (interval[0], interval[1] + relativedelta(seconds=seconds(limit-res)))
        elif res > limit:
            # Overshot with remove_at_end=False: push this interval's start
            # forward by the excess instead.
            interval = (interval[0] + relativedelta(seconds=seconds(res-limit)), interval[1])
        results.append(interval)
        if res > limit:
            # Enough hours placed; remaining intervals are dropped.
            break
    return results
# --------------------------------------------------
# Date and hours computation
# --------------------------------------------------
def get_attendances_for_weekdays(self, cr, uid, id, weekdays, context=None):
    """ Given a list of weekdays, return matching resource.calendar.attendance
    records of the calendar.

    :param list weekdays: integer weekdays (0 = Monday .. 6 = Sunday)
    :return list: attendance records whose dayofweek falls in ``weekdays`` """
    # Fix: forward the caller's context (it was previously hard-coded to
    # context=None, silently dropping e.g. timezone/lang information).
    calendar = self.browse(cr, uid, id, context=context)
    return [att for att in calendar.attendance_ids if int(att.dayofweek) in weekdays]
def get_weekdays(self, cr, uid, id, default_weekdays=None, context=None):
    """ Return the list of weekdays that contain at least one working interval.
    If no id is given (no calendar), return default weekdays.

    :param list default_weekdays: fallback when no calendar id is given;
                                  defaults to Monday-Friday ([0, 1, 2, 3, 4]).
    :return list: distinct integer weekdays (set-derived, order not guaranteed) """
    if id is None:
        return default_weekdays if default_weekdays is not None else [0, 1, 2, 3, 4]
    # Fix: forward the caller's context (previously hard-coded to None,
    # dropping e.g. timezone/lang information).
    calendar = self.browse(cr, uid, id, context=context)
    weekdays = set()
    for attendance in calendar.attendance_ids:
        weekdays.add(int(attendance.dayofweek))
    return list(weekdays)
def get_next_day(self, cr, uid, id, day_date, context=None):
    """ Get following date of day_date, based on resource.calendar. If no
    calendar is provided, just return the next day.

    :param int id: id of a resource.calendar; when falsy, simply add one day.
    :param date day_date: current day as a date
    :return date: next working day of the calendar, or just the next day """
    if not id:
        return day_date + relativedelta(days=1)
    weekdays = self.get_weekdays(cr, uid, id, context)
    today = day_date.weekday()
    # Index of the last calendar weekday not after today (-1 when none are).
    pos = -1
    for candidate in weekdays:
        if candidate > today:
            break
        pos += 1
    # The weekday right after that one (wrapping to next week) is the target.
    target = weekdays[(pos + 1) % len(weekdays)]
    offset = target - today
    if offset < 0:
        offset += 7
    return day_date + relativedelta(days=offset)
def get_previous_day(self, cr, uid, id, day_date, context=None):
    """ Get previous date of day_date, based on resource.calendar. If no
    calendar is provided, just return the previous day.

    :param int id: id of a resource.calendar; when falsy, simply remove one day.
    :param date day_date: current day as a date
    :return date: previous working day of the calendar, or just the previous day """
    if not id:
        return day_date + relativedelta(days=-1)
    weekdays = self.get_weekdays(cr, uid, id, context)
    weekdays.reverse()
    today = day_date.weekday()
    # Index of the last (descending) calendar weekday not before today.
    pos = -1
    for candidate in weekdays:
        if candidate < today:
            break
        pos += 1
    # The next entry in descending order (wrapping to previous week).
    target = weekdays[(pos + 1) % len(weekdays)]
    offset = target - today
    if offset > 0:
        offset -= 7
    return day_date + relativedelta(days=offset)
def get_leave_intervals(self, cr, uid, id, resource_id=None,
                        start_datetime=None, end_datetime=None,
                        context=None):
    """Collect the leaves of the calendar as (start, end) datetime tuples.

    :param int resource_id: when set, keep generic leaves plus the leaves of
                            that specific resource; otherwise keep generic
                            leaves only.
    :param datetime start_datetime: when set, drop leaves ending before it.
    :param datetime end_datetime: when set, drop leaves beginning after it.
    :return list: (start_datetime, end_datetime) leave intervals
    """
    record = self.browse(cr, uid, id, context=context)
    kept = []
    for leave in record.leave_ids:
        # Resource-specific leave belonging to another resource: skip it.
        if leave.resource_id and leave.resource_id.id != resource_id:
            continue
        begin = datetime.datetime.strptime(leave.date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT)
        if end_datetime and begin > end_datetime:
            continue
        end = datetime.datetime.strptime(leave.date_to, tools.DEFAULT_SERVER_DATETIME_FORMAT)
        if start_datetime and end < start_datetime:
            continue
        kept.append((begin, end))
    return kept
def get_working_intervals_of_day(self, cr, uid, id, start_dt=None, end_dt=None,
                                 leaves=None, compute_leaves=False, resource_id=None,
                                 default_interval=None, context=None):
    """ Get the working intervals of the day based on calendar. This method
    handles leaves that either come directly from the leaves parameter or
    are computed on demand.

    :param int id: resource.calendar id; take the first one if is a list
    :param datetime start_dt: beginning of the computation window; any working
                              interval beginning before start_dt is truncated.
                              If not set, defaults to end_dt (or today) at
                              00:00:00.
    :param datetime end_dt: end of the computation window; any working
                            interval ending after end_dt is truncated.
                            If not set, defaults to start_dt at 23:59:59.
    :param list leaves: a list of tuples(start_datetime, end_datetime) that
                        represent leaves.
    :param boolean compute_leaves: if set and if leaves is None, compute the
                                   leaves based on calendar and resource.
                                   If leaves is None and compute_leaves false
                                   no leaves are taken into account.
    :param int resource_id: the id of the resource to take into account when
                            computing the leaves. If not set, only general
                            leaves are computed. If set, generic and
                            specific leaves are computed.
    :param tuple default_interval: if no id, try to return a default working
                                   day using default_interval[0] as beginning
                                   hour, and default_interval[1] as ending hour.
                                   Example: default_interval = (8, 16).
                                   Otherwise, a void list of working intervals
                                   is returned when id is None.
    :return list intervals: a list of tuples (start_datetime, end_datetime)
                            of work intervals """
    if isinstance(id, (list, tuple)):
        id = id[0]

    # Compute start_dt, end_dt (with default values if not set) and the
    # off-window "work limits" used to truncate intervals outside the window.
    work_limits = []
    if start_dt is None and end_dt is not None:
        start_dt = end_dt.replace(hour=0, minute=0, second=0)
    elif start_dt is None:
        start_dt = datetime.datetime.now().replace(hour=0, minute=0, second=0)
    else:
        work_limits.append((start_dt.replace(hour=0, minute=0, second=0), start_dt))
    if end_dt is None:
        end_dt = start_dt.replace(hour=23, minute=59, second=59)
    else:
        work_limits.append((end_dt, end_dt.replace(hour=23, minute=59, second=59)))
    assert start_dt.date() == end_dt.date(), 'get_working_intervals_of_day is restricted to one day'

    intervals = []
    work_dt = start_dt.replace(hour=0, minute=0, second=0)

    # No calendar: try to use the default_interval, then return directly.
    if id is None:
        working_interval = []
        if default_interval:
            working_interval = (start_dt.replace(hour=default_interval[0], minute=0, second=0), start_dt.replace(hour=default_interval[1], minute=0, second=0))
        intervals = self.interval_remove_leaves(working_interval, work_limits)
        return intervals

    working_intervals = []
    # Attendance hours are expressed in the user's timezone; convert each
    # slot to a naive UTC datetime before interval arithmetic.
    tz_info = fields.datetime.context_timestamp(cr, uid, work_dt, context=context).tzinfo
    for calendar_working_day in self.get_attendances_for_weekdays(cr, uid, id, [start_dt.weekday()], context):
        x = work_dt.replace(hour=int(calendar_working_day.hour_from))
        y = work_dt.replace(hour=int(calendar_working_day.hour_to))
        x = x.replace(tzinfo=tz_info).astimezone(pytz.UTC).replace(tzinfo=None)
        y = y.replace(tzinfo=tz_info).astimezone(pytz.UTC).replace(tzinfo=None)
        working_interval = (x, y)
        working_intervals += self.interval_remove_leaves(working_interval, work_limits)

    # Find leave intervals. Fix: forward the caller's context (it was
    # previously hard-coded to context=None, dropping timezone/lang info).
    if leaves is None and compute_leaves:
        leaves = self.get_leave_intervals(cr, uid, id, resource_id=resource_id, context=context)

    # Filter the working intervals according to the leaves.
    for interval in working_intervals:
        work_intervals = self.interval_remove_leaves(interval, leaves)
        intervals += work_intervals
    return intervals
def get_working_hours_of_date(self, cr, uid, id, start_dt=None, end_dt=None,
                              leaves=None, compute_leaves=False, resource_id=None,
                              default_interval=None, context=None):
    """ Number of working hours within one day, as a float.

    Delegates to get_working_intervals_of_day and sums the durations of the
    returned intervals. """
    day_intervals = self.get_working_intervals_of_day(
        cr, uid, id,
        start_dt, end_dt, leaves,
        compute_leaves, resource_id,
        default_interval, context)
    total = datetime.timedelta()
    for interval in day_intervals:
        total += interval[1] - interval[0]
    # NOTE(review): `seconds` converts a timedelta to float seconds; it is
    # defined elsewhere in this module (not visible in this chunk).
    return seconds(total) / 3600.0
def get_working_hours(self, cr, uid, id, start_dt, end_dt, compute_leaves=False,
                      resource_id=None, default_interval=None, context=None):
    """ Sum working hours over every calendar weekday between start_dt and
    end_dt (inclusive), clamping the first and last day to the given
    datetimes. """
    total = 0.0
    stop = (end_dt + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0)
    for day in rrule.rrule(rrule.DAILY, dtstart=start_dt, until=stop,
                           byweekday=self.get_weekdays(cr, uid, id, context=context)):
        # Clamp the day's window to [start_dt, end_dt] at its edges.
        if start_dt and day.date() == start_dt.date():
            day_begin = start_dt
        else:
            day_begin = day.replace(hour=0, minute=0, second=0)
        if end_dt and day.date() == end_dt.date():
            day_end = end_dt
        else:
            day_end = day.replace(hour=23, minute=59, second=59)
        total += self.get_working_hours_of_date(
            cr, uid, id, start_dt=day_begin, end_dt=day_end,
            compute_leaves=compute_leaves, resource_id=resource_id,
            default_interval=default_interval,
            context=context)
    return total
# --------------------------------------------------
# Hours scheduling
# --------------------------------------------------
def _schedule_hours(self, cr, uid, id, hours, day_dt=None,
                    compute_leaves=False, resource_id=None,
                    default_interval=None, context=None):
    """ Schedule hours of work, using a calendar and an optional resource to
    compute working and leave days. This method can be used backwards, i.e.
    scheduling days before a deadline.

    :param int hours: number of hours to schedule. Use a negative number to
                      compute a backwards scheduling.
    :param datetime day_dt: reference date to compute working days. If hours
                            is > 0 it is the starting date; if hours is < 0
                            it is the ending date.
    :param boolean compute_leaves: if set, compute the leaves based on calendar
                                   and resource. Otherwise no leaves are taken
                                   into account.
    :param int resource_id: the id of the resource to take into account when
                            computing the leaves. If not set, only general
                            leaves are computed. If set, generic and
                            specific leaves are computed.
    :param tuple default_interval: if no id, try to return a default working
                                   day using default_interval[0] as beginning
                                   hour, and default_interval[1] as ending hour.
                                   Example: default_interval = (8, 16).
                                   Otherwise, a void list of working intervals
                                   is returned when id is None.
    :return list intervals: the working intervals of the scheduling.

    Note: Why not using rrule.rrule ? Because rrule does not seem to allow
    getting back in time.
    """
    if day_dt is None:
        day_dt = datetime.datetime.now()
    # Negative hours mean scheduling backwards from day_dt.
    backwards = (hours < 0)
    hours = abs(hours)
    intervals = []
    remaining_hours = hours * 1.0
    iterations = 0
    current_datetime = day_dt
    call_args = dict(compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context)
    # Walk one day per iteration until all hours are placed (compared with a
    # 2-decimal tolerance), hard-capped at 1000 iterations.
    while float_compare(remaining_hours, 0.0, precision_digits=2) in (1, 0) and iterations < 1000:
        if backwards:
            call_args['end_dt'] = current_datetime
        else:
            call_args['start_dt'] = current_datetime
        working_intervals = self.get_working_intervals_of_day(cr, uid, id, **call_args)
        if id is None and not working_intervals:  # no calendar -> consider working 8 hours
            remaining_hours -= 8.0
        elif working_intervals:
            # When backwards, consume the day's intervals from the latest one.
            if backwards:
                working_intervals.reverse()
            new_working_intervals = self.interval_schedule_hours(working_intervals, remaining_hours, not backwards)
            if backwards:
                new_working_intervals.reverse()
            # Subtract the whole day's available hours from the remainder.
            # NOTE(review): `seconds` is a module-level timedelta-to-seconds
            # helper defined outside this chunk -- confirm.
            res = datetime.timedelta()
            for interval in working_intervals:
                res += interval[1] - interval[0]
            remaining_hours -= (seconds(res) / 3600.0)
            if backwards:
                intervals = new_working_intervals + intervals
            else:
                intervals = intervals + new_working_intervals
        # get next day
        if backwards:
            current_datetime = datetime.datetime.combine(self.get_previous_day(cr, uid, id, current_datetime, context), datetime.time(23, 59, 59))
        else:
            current_datetime = datetime.datetime.combine(self.get_next_day(cr, uid, id, current_datetime, context), datetime.time())
        # avoid infinite loops
        iterations += 1
    return intervals
def schedule_hours_get_date(self, cr, uid, id, hours, day_dt=None,
                            compute_leaves=False, resource_id=None,
                            default_interval=None, context=None):
    """ Wrapper on _schedule_hours: return the beginning datetime of an hours
    scheduling, or False when nothing could be scheduled. """
    scheduled = self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
    if scheduled:
        return scheduled[0][0]
    return False
def schedule_hours(self, cr, uid, id, hours, day_dt=None,
                   compute_leaves=False, resource_id=None,
                   default_interval=None, context=None):
    """ Wrapper on _schedule_hours: return the working intervals of an hours
    scheduling. """
    scheduled = self._schedule_hours(
        cr, uid, id, hours, day_dt,
        compute_leaves, resource_id, default_interval, context)
    return scheduled
# --------------------------------------------------
# Days scheduling
# --------------------------------------------------
def _schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
                   resource_id=None, default_interval=None, context=None):
    """Schedule days of work, using a calendar and an optional resource to
    compute working and leave days. This method can be used backwards, i.e.
    scheduling days before a deadline.

    :param int days: number of days to schedule. Use a negative number to
                     compute a backwards scheduling.
    :param date day_date: reference date to compute working days. If days is > 0
                          date is the starting date. If days is < 0 date is the
                          ending date.
    :param boolean compute_leaves: if set, compute the leaves based on calendar
                                   and resource. Otherwise no leaves are taken
                                   into account.
    :param int resource_id: the id of the resource to take into account when
                            computing the leaves. If not set, only general
                            leaves are computed. If set, generic and
                            specific leaves are computed.
    :param tuple default_interval: if no id, try to return a default working
                                   day using default_interval[0] as beginning
                                   hour, and default_interval[1] as ending hour.
                                   Example: default_interval = (8, 16).
                                   Otherwise, a void list of working intervals
                                   is returned when id is None.
    :return list intervals: the working intervals of the scheduling.

    Implementation note: rrule.rrule is not used because rrule does not seem
    to allow getting back in time.
    """
    if day_date is None:
        day_date = datetime.datetime.now()
    # Negative days mean scheduling backwards from day_date.
    backwards = (days < 0)
    days = abs(days)
    intervals = []
    planned_days = 0
    iterations = 0
    # Start at midnight of the reference day; walk one calendar day per loop,
    # hard-capped at 1000 iterations.
    current_datetime = day_date.replace(hour=0, minute=0, second=0)
    while planned_days < days and iterations < 1000:
        working_intervals = self.get_working_intervals_of_day(
            cr, uid, id, current_datetime,
            compute_leaves=compute_leaves, resource_id=resource_id,
            default_interval=default_interval,
            context=context)
        if id is None or working_intervals:  # no calendar -> no working hours, but day is considered as worked
            planned_days += 1
            intervals += working_intervals
        # get next day
        if backwards:
            current_datetime = self.get_previous_day(cr, uid, id, current_datetime, context)
        else:
            current_datetime = self.get_next_day(cr, uid, id, current_datetime, context)
        # avoid infinite loops
        iterations += 1
    return intervals
def schedule_days_get_date(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the beginning/ending datetime of
a days scheduling. """
res = self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
return res and res[-1][1] or False
def schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the working intervals of a days
scheduling. """
return self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Compatibility / to clean / to remove
# --------------------------------------------------
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
    """ Number of working hours on ``day`` for the given calendar record.
    Used in hr_payroll/hr_payroll.py

    :deprecated: OpenERP saas-3. Use get_working_hours_of_date instead. Note:
    since saas-3, take hour/minutes into account, not just the whole day."""
    if isinstance(day, datetime.datetime):
        day = day.replace(hour=0, minute=0)
    # Fix: forward the caller's context instead of hard-coding context=None,
    # which silently dropped timezone/lang information.
    return self.get_working_hours_of_date(cr, uid, resource_calendar_id.id, start_dt=day, context=context)
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
""" Schedule hours backwards. Used in mrp_operations/mrp_operations.py.
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note: since
saas-3, counts leave hours instead of all-day leaves."""
return self.schedule_hours(
cr, uid, id, hours * -1.0,
day_dt=dt_from.replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
""" Used in mrp_operations/mrp_operations.py (default parameters) and in
interval_get()
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note:
Byday was not used. Since saas-3, counts Leave hours instead of all-day leaves."""
res = {}
for dt_str, hours, calendar_id in date_and_hours_by_cal:
result = self.schedule_hours(
cr, uid, calendar_id, hours,
day_dt=datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
res[(dt_str, hours, calendar_id)] = result
return res
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
""" Unifier of interval_get_multi. Used in: mrp_operations/mrp_operations.py,
crm/crm_lead.py (res given).
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
res = self.interval_get_multi(
cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
    """ Unused wrapper.

    :deprecated: OpenERP saas-3. Use get_working_hours instead."""
    return self._interval_hours_get(
        cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hour/minuts. Now counts leave hours instead of all-day leaves."""
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
class resource_calendar_attendance(osv.osv):
    """ A single weekly working-time slot of a resource.calendar: a weekday
    plus a [hour_from, hour_to] range expressed as float hours. """
    _name = "resource.calendar.attendance"
    _description = "Work Detail"

    _columns = {
        'name' : fields.char("Name", required=True),
        # Weekday stored as a string index: '0' = Monday ... '6' = Sunday.
        'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
        'date_from' : fields.date('Starting Date'),
        # Hours are floats, e.g. 8.5 means 08:30.
        'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
        'hour_to' : fields.float("Work to", required=True),
        'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True),
    }

    _order = 'dayofweek, hour_from'

    _defaults = {
        'dayofweek' : '0'
    }
def hours_time_string(hours):
    """ Format a float number of hours as an 'HH:MM' string (e.g. 8.5 -> '08:30'). """
    total_minutes = int(round(hours * 60))
    hh, mm = divmod(total_minutes, 60)
    return "%02d:%02d" % (hh, mm)
class resource_resource(osv.osv):
    """ A schedulable resource: either a human (optionally linked to a
    res.users account) or a piece of material, with an optional working-time
    calendar and an efficiency factor. """
    _name = "resource.resource"
    _description = "Resource Detail"

    _columns = {
        'name': fields.char("Name", required=True),
        'code': fields.char('Code', size=16, copy=False),
        'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
        'company_id' : fields.many2one('res.company', 'Company'),
        # Human resources are backed by a user account; material ones are not.
        'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
        'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
        # 1.0 = nominal productivity; see the help text for load semantics.
        'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depict the efficiency of the resource to complete tasks. e.g resource put alone on a phase of 5 days with 5 tasks assigned to him, will show a load of 100% for this phase by default, but if we put a efficiency of 200%, then his load will only be 50%."),
        'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"),
    }
    _defaults = {
        'resource_type' : 'user',
        'time_efficiency' : 1,
        'active' : True,
        # Default to the current user's company.
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
    }
def copy(self, cr, uid, id, default=None, context=None):
    """ Duplicate a resource; give the copy a distinct name unless the caller
    already provided one in ``default``. """
    if default is None:
        default = {}
    if not default.get('name'):
        source = self.browse(cr, uid, id, context=context)
        default['name'] = _('%s (copy)') % source.name
    return super(resource_resource, self).copy(cr, uid, id, default, context)
def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
    """
    Return a dict mapping user id -> resource descriptor (name, vacation
    day list, efficiency) for the resources allocated to the phase.
    NOTE: Used in project/project.py
    """
    user_pool = self.pool.get('res.users')
    result = {}
    for user in user_pool.browse(cr, uid, user_ids, context=context):
        descriptor = {
            'name': user.name,
            'vacation': [],
            'efficiency': 1.0,
        }
        result[user.id] = descriptor
        matching_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
        for resource in self.browse(cr, uid, matching_ids, context=context):
            descriptor['efficiency'] = resource.time_efficiency
            # Only resources with their own calendar contribute vacations.
            if resource.calendar_id.id:
                leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource.calendar_id.id, context=context)
                descriptor['vacation'] += list(leaves)
    return result
def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
    """
    Compute the vacation from the working calendar of the resource.

    @param calendar_id : working calendar of the project
    @param resource_id : resource working on phase/task
    @param resource_calendar : working calendar of the resource
    :return: sorted list of 'YYYY-MM-DD' strings, one entry per leave day
    NOTE: used in project/project.py, and in generate_resources
    """
    leaves_pool = self.pool.get('resource.calendar.leaves')
    if resource_id:
        # Leaves of either calendar, restricted to this resource.
        domain = ['|', ('calendar_id', '=', calendar_id),
                  ('calendar_id', '=', resource_calendar),
                  ('resource_id', '=', resource_id)]
    else:
        # Generic leaves of the project calendar only.
        domain = [('calendar_id', '=', calendar_id),
                  ('resource_id', '=', False)]
    leave_ids = leaves_pool.search(cr, uid, domain, context=context)
    leaves = leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
    leave_list = []
    # Idiom fix: iterate the records directly with a plain loop instead of
    # the original index-based loop and side-effect list comprehension.
    for leave in leaves:
        dt_start = datetime.datetime.strptime(leave['date_from'], '%Y-%m-%d %H:%M:%S')
        dt_end = datetime.datetime.strptime(leave['date_to'], '%Y-%m-%d %H:%M:%S')
        # One entry per calendar day covered by the leave (inclusive).
        for offset in range(int((dt_end - dt_start).days) + 1):
            leave_list.append((dt_start + datetime.timedelta(days=offset)).strftime('%Y-%m-%d'))
    leave_list.sort()
    return leave_list
def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
    """
    Change the format of working calendar from 'Openerp' format to bring it into 'Faces' format.
    @param calendar_id : working calendar of the project
    @return : list of tuples such as [('mon', '8:00-12:00', '13:00-18:00'), ...,
              ('sat,sun', '8:00-8:00')], where a trailing zero-length time
              range marks the non-working days
    NOTE: used in project/project.py
    """
    if not calendar_id:
        # No calendar configured: fall back to a default Mon-Fri week
        # of 8:00-12:00 and 13:00-17:00 each day.
        return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
                ('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
    resource_attendance_pool = self.pool.get('resource.calendar.attendance')
    # Zero-length interval used below to flag non-working days.
    time_range = "8:00-8:00"
    non_working = ""
    week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
    wk_days = {}      # day-number -> day-name, for days that have attendances
    wk_time = {}      # day-name -> [day-name, 'from-to', 'from-to', ...]
    wktime_list = []
    wktime_cal = []
    week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
    weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
    # Convert time formats into appropriate format required
    # and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
    for week in weeks:
        res_str = ""
        day = None
        if week_days.get(week['dayofweek'],False):
            day = week_days[week['dayofweek']]
            wk_days[week['dayofweek']] = week_days[week['dayofweek']]
        else:
            raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
        # hours_time_string converts a float hour (e.g. 8.5) into 'H:MM'.
        # NOTE(review): helper defined elsewhere in this module - not visible here.
        hour_from_str = hours_time_string(week['hour_from'])
        hour_to_str = hours_time_string(week['hour_to'])
        res_str = hour_from_str + '-' + hour_to_str
        wktime_list.append((day, res_str))
    # Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
    for item in wktime_list:
        if wk_time.has_key(item[0]):
            wk_time[item[0]].append(item[1])
        else:
            # Seed the list with the day name so that tuple(v) below
            # yields ('mon', '8:00-12:00', ...).
            wk_time[item[0]] = [item[0]]
            wk_time[item[0]].append(item[1])
    for k,v in wk_time.items():
        wktime_cal.append(tuple(v))
    # Add the non-working days, e.g. ('sat,sun', '8:00-8:00'): remove every
    # day that has attendances; whatever remains in week_days is non-working.
    for k, v in wk_days.items():
        if week_days.has_key(k):
            week_days.pop(k)
    for v in week_days.itervalues():
        non_working += v + ','
    if non_working:
        # Strip the trailing comma and flag these days with the empty range.
        wktime_cal.append((non_working[:-1], time_range))
    return wktime_cal
class resource_calendar_leaves(osv.osv):
    """Leave (vacation/holiday) periods attached to a working calendar.

    A record with no resource is a generic, company-wide holiday; a record
    with a resource applies to that resource only.
    """
    _name = "resource.calendar.leaves"
    _description = "Leave Detail"
    _columns = {
        'name' : fields.char("Name"),
        'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
        'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
        'date_from' : fields.datetime('Start Date', required=True),
        'date_to' : fields.datetime('End Date', required=True),
        'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
    }

    def check_dates(self, cr, uid, ids, context=None):
        """Constraint helper: a leave may not end before it starts."""
        for leave in self.browse(cr, uid, ids, context=context):
            if leave.date_from and leave.date_to and leave.date_from > leave.date_to:
                return False
        return True

    _constraints = [
        # Fixed user-facing message grammar: "lower then" -> "lower than".
        (check_dates, 'Error! leave start-date must be lower than leave end-date.', ['date_from', 'date_to'])
    ]

    def onchange_resource(self, cr, uid, ids, resource, context=None):
        """When the resource changes, default calendar_id to the resource's
        own working calendar; clear it when no resource is selected."""
        result = {}
        if resource:
            resource_pool = self.pool.get('resource.resource')
            result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
            return {'value': result}
        return {'value': {'calendar_id': []}}
def seconds(td):
    """Return the total length of timedelta *td* in seconds as a float,
    preserving microsecond precision."""
    assert isinstance(td, datetime.timedelta)
    total_microseconds = (td.days * 24 * 3600 + td.seconds) * 10 ** 6 + td.microseconds
    return total_microseconds / 1e6
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sosreport/sos | sos/report/plugins/ipa.py | 2 | 6983 | # Copyright (C) 2007 Red Hat, Inc., Kent Lamb <klamb@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate
from glob import glob
class Ipa(Plugin, RedHatPlugin):
    """Collect configuration files and logs for FreeIPA / IdM servers and
    clients, including the embedded Dogtag PKI (CA/KRA) instance when
    present. Key material and password files are explicitly excluded."""

    short_desc = 'Identity, policy, audit'

    plugin_name = 'ipa'
    profiles = ('identity', 'apache')

    ipa_server = False
    ipa_client = False

    # Trigger the plugin if the IPA config dir or any IPA package exists.
    files = ('/etc/ipa',)
    packages = ('ipa-server', 'ipa-client', 'freeipa-server', 'freeipa-client')

    def check_ipa_server_version(self):
        """Return "v4" or "v3" depending on the installed PKI layout,
        or None when neither layout is detected."""
        if self.is_installed("pki-server") \
                or self.path_exists("/var/lib/pki") \
                or self.path_exists("/usr/share/doc/ipa-server-4.2.0"):
            return "v4"
        elif self.is_installed("pki-common") \
                or self.path_exists("/var/lib/pki-ca/"):
            return "v3"
        return None

    def ca_installed(self):
        """Return True when a Dogtag CA instance exists (v4 or v3 layout)."""
        # Follow the same checks as IPA CA installer code
        if self.path_exists("%s/conf/ca/CS.cfg" % self.pki_tomcat_dir_v4) \
                or self.path_exists("%s/conf/CS.cfg" % self.pki_tomcat_dir_v3):
            return True

    def ipa_server_installed(self):
        """Return True when an IPA/FreeIPA server package is installed."""
        if self.is_installed("ipa-server") \
                or self.is_installed("freeipa-server"):
            return True

    def retrieve_pki_logs(self, ipa_version):
        """Collect Dogtag CA (and, on v4, KRA) logs for the given layout."""
        if ipa_version == "v4":
            self.add_copy_spec([
                "/var/log/pki/pki-tomcat/ca/debug*",
                "/var/log/pki/pki-tomcat/ca/system",
                "/var/log/pki/pki-tomcat/ca/transactions",
                "/var/log/pki/pki-tomcat/ca/selftests.log",
                "/var/log/pki/pki-tomcat/catalina.*",
                "/var/log/pki/pki-ca-spawn.*",
                "/var/log/pki/pki-tomcat/kra/debug*",
                "/var/log/pki/pki-tomcat/kra/system",
                "/var/log/pki/pki-tomcat/kra/transactions",
                "/var/log/pki/pki-kra-spawn.*"
            ])
        elif ipa_version == "v3":
            self.add_copy_spec([
                "/var/log/pki-ca/debug",
                "/var/log/pki-ca/system",
                "/var/log/pki-ca/transactions",
                "/var/log/pki-ca/selftests.log",
                "/var/log/pki-ca/catalina.*",
                "/var/log/pki/pki-ca-spawn.*"
            ])

    def setup(self):
        """Register file copies and command outputs to collect."""
        self.pki_tomcat_dir_v4 = "/var/lib/pki/pki-tomcat"
        self.pki_tomcat_dir_v3 = "/var/lib/pki-ca"

        self.pki_tomcat_conf_dir_v4 = "/etc/pki/pki-tomcat/ca"
        self.pki_tomcat_conf_dir_v3 = "/etc/pki-ca"

        # Returns "v3", "v4", or None
        ipa_version = self.check_ipa_server_version()

        if self.ipa_server_installed():
            self._log_debug("IPA server install detected")
            self._log_debug("IPA version is [%s]" % ipa_version)

            self.add_copy_spec([
                "/var/log/ipaserver-install.log",
                "/var/log/ipaserver-kra-install.log",
                "/var/log/ipareplica-install.log",
                "/var/log/ipareplica-ca-install.log",
                "/var/log/ipa-custodia.audit.log"
            ])

        if self.ca_installed():
            self._log_debug("CA is installed: retrieving PKI logs")
            self.retrieve_pki_logs(ipa_version)

        # Client-side and common files are collected unconditionally.
        self.add_copy_spec([
            "/var/log/ipaclient-install.log",
            "/var/log/ipaupgrade.log",
            "/var/log/krb5kdc.log",
            "/var/log/dirsrv/slapd-*/logs/access",
            "/var/log/dirsrv/slapd-*/logs/errors",
            "/etc/dirsrv/slapd-*/dse.ldif",
            "/etc/dirsrv/slapd-*/schema/99user.ldif",
            "/etc/hosts",
            "/etc/httpd/alias/*",
            "/etc/named.*",
            "/etc/ipa/ca.crt",
            "/etc/ipa/default.conf",
            "/etc/ipa/kdcproxy/kdcproxy.conf",
            "/etc/ipa/kdcproxy/ipa-kdc-proxy.conf",
            "/etc/ipa/kdcproxy.conf",
            "/root/.ipa/log/cli.log",
            "/var/lib/certmonger/requests/[0-9]*",
            "/var/lib/certmonger/cas/[0-9]*",
            "/var/lib/ipa/ra-agent.pem",
            "/var/lib/ipa/certs/httpd.crt",
            "/var/kerberos/krb5kdc/kdc.crt",
            "/var/lib/ipa/sysrestore/sysrestore.state",
            "/var/log/ipa/healthcheck/healthcheck.log*"
        ])

        # Make sure to use the right PKI config and NSS DB folders.
        # NOTE(review): when ipa_version is None this falls through to the
        # v3 paths - presumably a harmless default; the files simply won't
        # exist on such systems.
        if ipa_version == "v4":
            self.pki_tomcat_dir = self.pki_tomcat_dir_v4
            self.pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v4
        else:
            self.pki_tomcat_dir = self.pki_tomcat_dir_v3
            self.pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v3

        self.add_cmd_output("certutil -L -d %s/alias" % self.pki_tomcat_dir)
        self.add_copy_spec("%s/CS.cfg" % self.pki_tomcat_conf_dir)

        # Never collect private keys, PINs, or password files.
        self.add_forbidden_path([
            "/etc/pki/nssdb/key*",
            "/etc/dirsrv/slapd-*/key*",
            "/etc/dirsrv/slapd-*/pin.txt",
            "/etc/dirsrv/slapd-*/pwdfile.txt",
            "/etc/httpd/alias/ipasession.key",
            "/etc/httpd/alias/key*",
            "/etc/httpd/alias/pin.txt",
            "/etc/httpd/alias/pwdfile.txt",
            "/etc/named.keytab",
            "%s/alias/key*" % self.pki_tomcat_dir,
            "%s/flatfile.txt" % self.pki_tomcat_conf_dir,
            "%s/password.conf" % self.pki_tomcat_conf_dir,
        ])

        self.add_cmd_output([
            "ls -la /etc/dirsrv/slapd-*/schema/",
            "certutil -L -d /etc/httpd/alias/",
            "pki-server cert-find --show-all",
            "pki-server subsystem-cert-validate ca",
            "klist -ket /etc/dirsrv/ds.keytab",
            "klist -ket /etc/httpd/conf/ipa.keytab",
            "klist -ket /var/lib/ipa/gssproxy/http.keytab"
        ])

        # Only run getcert when the certmonger service is present.
        getcert_pred = SoSPredicate(self,
                                    services=['certmonger'])
        self.add_cmd_output("getcert list", pred=getcert_pred)

        for certdb_directory in glob("/etc/dirsrv/slapd-*/"):
            self.add_cmd_output("certutil -L -d %s" % certdb_directory)
        return

    def postproc(self):
        """Scrub passwords and PINs from the collected output."""
        match = r"(\s*arg \"password )[^\"]*"
        subst = r"\1********"
        self.do_file_sub("/etc/named.conf", match, subst)

        self.do_cmd_output_sub("getcert list",
                               r"(pin=)'(\d+)'",
                               r"\1'***'")

        request_logs = "/var/lib/certmonger/requests/[0-9]*"
        for request_log in glob(request_logs):
            self.do_file_sub(request_log,
                             r"(key_pin=)(\d+)",
                             r"\1***")
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
Mithrilwoodrat/QtConky | src/Meter.py | 1 | 5521 | #coding=utf-8
import sys
from math import sin, cos
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from PyQt4.QtCore import QPoint
from PyQt4.QtCore import QTimer
from PyQt4.QtGui import QPainter
from PyQt4.QtGui import QColor
from PyQt4.QtGui import QPolygon
from PyQt4.QtCore import QString
# PyQt4 compatibility shim: older releases expose the UnicodeUTF8 encoding
# flag, newer ones removed it (translate() then takes no encoding argument).
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        """Translate *text* passing the UTF-8 encoding flag (older PyQt4)."""
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        """Translate *text* without an encoding argument (newer PyQt4)."""
        return QtGui.QApplication.translate(context, text, disambig)
class Meter(QtGui.QWidget):
    """
    a PyQt instance of QtMeter from Qt example code

    An analog gauge widget: a crown ring, a dial with major/minor tick
    marks and scale numbers, a text logo, a numeric readout box, and a
    rotating needle. Repaints itself every 200 ms via a QTimer.

    NOTE(review): 'scaleMijor' (minor), 'drawBackgroud' and
    'drawNumbericValue' are misspelled in the original API and are kept
    as-is for compatibility.
    """
    def __init__(self,parent=None):
        QtGui.QWidget.__init__(self,parent)
        self.value = 0            # current reading shown by the needle
        self.minValue = 0         # dial minimum
        self.maxValue = 100       # dial maximum
        self.logo = "CPU"         # text drawn above the dial center
        self.scaleMajor = 10      # number of major (labelled) divisions
        self.scaleMijor = 10      # minor ticks per major division
        self.startAngle = 60      # dead angle before the first tick (degrees)
        self.endAngle = 60        # dead angle after the last tick (degrees)
        self.crownColor = Qt.blue
        self.foreground = Qt.green
        self.background = Qt.black
        # Repaint periodically; QWidget.update() schedules a paintEvent.
        self.timer = QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(200)
        self.resize(200,200)

    def updateValue(self):
        """Hook for subclasses to refresh self.value before each repaint."""
        pass

    def paintEvent(self,QPaintEvent):
        """Repaint the whole gauge, scaled to fit the widget square."""
        self.updateValue()
        # Use the smaller dimension so the dial stays round, and scale the
        # 200x200 logical coordinate system to the actual widget size.
        self.side = min(self.width(),self.height())
        self.painter = QPainter()
        self.painter.begin(self)
        self.painter.setRenderHint(QPainter.Antialiasing)
        self.painter.translate(self.width()/2,self.height()/2)
        self.painter.scale(self.side / 200.0, self.side / 200.0)
        self.painter.setPen(Qt.NoPen)
        self.drawCrown()
        self.drawBackgroud()
        self.drawLogo()
        self.drawScale()
        self.drawScaleNum()
        self.drawNumbericValue()
        self.drawPointer()
        self.painter.end()

    def setValue(self,updatefun):
        """Set the current reading by calling *updatefun* once."""
        self.value = updatefun()

    def setLogo(self,logo):
        """Set the text drawn above the dial center."""
        self.logo = logo

    def drawCrown(self):
        """Draw the outer ring of the gauge."""
        self.painter.save()
        self.painter.setPen(QtGui.QPen(self.crownColor, 3))
        self.painter.drawEllipse(-92, -92, 184, 184)
        self.painter.restore()

    def drawBackgroud(self):
        """Fill the dial face."""
        self.painter.save()
        self.painter.setBrush(self.background)
        self.painter.drawEllipse(-92, -92, 184, 184)
        self.painter.restore()

    def drawScale(self):
        """Draw the tick marks; long ticks on major divisions."""
        self.painter.save()
        self.painter.rotate(self.startAngle)
        self.painter.setPen(self.foreground)
        steps = self.scaleMajor * self.scaleMijor
        angleStep = (360.0 - self.startAngle - self.endAngle) /steps
        pen = QtGui.QPen(self.painter.pen())
        for i in xrange(steps+1):
            if i % self.scaleMajor == 0:
                # Major tick: thicker and longer.
                pen.setWidth(1)
                self.painter.setPen(pen)
                self.painter.drawLine(0, 62, 0, 72)
            else:
                pen.setWidth(0)
                self.painter.setPen(pen)
                self.painter.drawLine(0, 62, 0, 65)
            self.painter.rotate(angleStep)
        self.painter.restore()

    def drawScaleNum(self):
        """Draw the numeric labels beside each major tick."""
        self.painter.save()
        self.painter.setPen(self.foreground)
        startRad = (360 - self.startAngle - 90) * (3.14 / 180)
        deltaRad = (360 - self.startAngle - self.endAngle) * (3.14 / 180) / self.scaleMajor
        fm = QtGui.QFontMetricsF(self.font())
        for i in xrange(self.scaleMajor+1):
            sina = sin(startRad - i * deltaRad)
            cosa = cos(startRad - i * deltaRad)
            tmpVal = 1.0 * i *((self.maxValue - self.minValue) / self.scaleMajor) + self.minValue
            numstr = QString( "%1" ).arg(tmpVal)
            # Center each label on its tick at radius 82.
            w = fm.size(Qt.TextSingleLine,numstr).width()
            h = fm.size(Qt.TextSingleLine,numstr).height()
            x = 82 * cosa - w / 2
            y = -82 * sina + h / 4
            self.painter.drawText(x, y, numstr)
        self.painter.restore()

    def drawLogo(self):
        """Draw the logo text centered above the dial center."""
        self.painter.save()
        self.painter.setPen(self.foreground)
        self.painter.setBrush(self.foreground)
        logostr = QString(self.logo)
        fm = QtGui.QFontMetricsF(self.font())
        w = fm.size(Qt.TextSingleLine,logostr).width()
        self.painter.drawText(-w / 2, -30, logostr)
        self.painter.restore()

    def drawNumbericValue(self):
        """Draw the boxed numeric readout below the dial center."""
        self.painter.save()
        color = QtGui.QColor(150, 150, 200)
        pen = self.painter.pen()
        pen.setWidth(3)
        self.painter.setPen(pen)
        self.painter.setPen(color)
        self.painter.drawRect(-30, 30, 60, 14)
        cpustr = QString("%1").arg(self.value)
        fm = QtGui.QFontMetricsF(self.font())
        w = fm.size(Qt.TextSingleLine,cpustr).width()
        self.painter.setPen(self.foreground)
        self.painter.drawText(-w / 2, 42, cpustr)
        self.painter.restore()

    def drawPointer(self):
        """Draw the needle, rotated proportionally to the current value."""
        self.painter.save()
        self.pointerHand=QPolygon([-2,0, 2,0, 0,60])
        self.pointerColor = QColor(127 , 0, 127)
        self.painter.setBrush(self.pointerColor)
        self.painter.rotate(self.startAngle)
        # Map value linearly onto the usable sweep of the dial.
        degRotate = (360.0 - self.startAngle - self.endAngle)/(self.maxValue - self.minValue)*(self.value - self.minValue)
        self.painter.rotate(degRotate)
        self.painter.drawConvexPolygon(self.pointerHand)
        self.painter.restore()
| gpl-2.0 |
coderbone/SickRage-alt | lib/tornado/test/httpclient_test.py | 18 | 27293 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import base64
import binascii
from contextlib import closing
import copy
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8, native_str
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
    """Returns "Hello <name>!" as text/plain; *name* defaults to "world"."""
    def get(self):
        name = self.get_argument("name", "world")
        self.set_header("Content-Type", "text/plain")
        self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
    """Echoes the form-encoded arguments ``arg1`` and ``arg2``."""
    def post(self):
        self.finish("Post arg1: %s, arg2: %s" % (
            self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
    """Echoes the raw PUT request body."""
    def put(self):
        self.write("Put body: ")
        self.write(self.request.body)
class RedirectHandler(RequestHandler):
    """Redirects to the ``url`` argument using the ``status`` code given
    (default 302); the redirect response deliberately carries a body."""
    def prepare(self):
        self.write('redirects can have bodies too')
        self.redirect(self.get_argument("url"),
                      status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
    """Sends "asdf" and "qwer" as two separate chunks with a pause between
    them, for exercising streaming callbacks."""
    @gen.coroutine
    def get(self):
        self.write("asdf")
        self.flush()
        # Wait a bit to ensure the chunks are sent and received separately.
        yield gen.sleep(0.01)
        self.write("qwer")
class AuthHandler(RequestHandler):
    """Echoes the Authorization header back to the client."""
    def get(self):
        self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
    """Redirects /countdown/N to /countdown/N-1 until reaching zero, for
    testing redirect following."""
    def get(self, count):
        count = int(count)
        if count > 0:
            self.redirect(self.reverse_url("countdown", count - 1))
        else:
            self.write("Zero")
class EchoPostHandler(RequestHandler):
    """Echoes the raw POST body unchanged."""
    def post(self):
        self.write(self.request.body)
class UserAgentHandler(RequestHandler):
    """Echoes the client's User-Agent header, or a placeholder if unset."""
    def get(self):
        self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
    """Returns a 304 that (against the RFC's SHOULD NOT) keeps its
    Content-Length header, to simulate misbehaving servers."""
    def get(self):
        self.set_status(304)
        self.set_header('Content-Length', 42)

    def _clear_headers_for_304(self):
        # Tornado strips content-length from 304 responses, but here we
        # want to simulate servers that include the headers anyway.
        pass
class PatchHandler(RequestHandler):
    """Echoes the PATCH request body."""
    def patch(self):
        "Return the request payload - so we can check it is being kept"
        self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
    """Responds to every standard method (plus the nonstandard OTHER)
    by echoing the method name."""
    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)

    def method(self):
        self.write(self.request.method)
    # One shared implementation for every supported verb.
    get = post = put = delete = options = patch = other = method
class SetHeaderHandler(RequestHandler):
    """Sets response headers from paired ``k``/``v`` query arguments."""
    def get(self):
        # Use get_arguments for keys to get strings, but
        # request.arguments for values to get bytes.
        for k, v in zip(self.get_arguments('k'),
                        self.request.arguments['v']):
            self.set_header(k, v)
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    """Implementation-agnostic HTTP client tests, run against the default
    HTTPClient here and re-run by each implementation's own test suite.

    NOTE(review): the two inline raw-HTTP byte strings below need blank
    lines for valid HTTP framing (end of headers, chunk boundaries);
    they are written flush-left because they are string content, not code.
    """
    def get_app(self):
        return Application([
            url("/hello", HelloWorldHandler),
            url("/post", PostHandler),
            url("/put", PutHandler),
            url("/redirect", RedirectHandler),
            url("/chunk", ChunkHandler),
            url("/auth", AuthHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/echopost", EchoPostHandler),
            url("/user_agent", UserAgentHandler),
            url("/304_with_content_length", ContentLength304Handler),
            url("/all_methods", AllMethodsHandler),
            url('/patch', PatchHandler),
            url('/set_header', SetHeaderHandler),
        ], gzip=True)

    def test_patch_receives_payload(self):
        body = b"some patch data"
        response = self.fetch("/patch", method='PATCH', body=body)
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, body)

    @skipOnTravis
    def test_hello_world(self):
        response = self.fetch("/hello")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["Content-Type"], "text/plain")
        self.assertEqual(response.body, b"Hello world!")
        self.assertEqual(int(response.request_time), 0)

        response = self.fetch("/hello?name=Ben")
        self.assertEqual(response.body, b"Hello Ben!")

    def test_streaming_callback(self):
        # streaming_callback is also tested in test_chunked
        chunks = []
        response = self.fetch("/hello",
                              streaming_callback=chunks.append)
        # with streaming_callback, data goes to the callback and not response.body
        self.assertEqual(chunks, [b"Hello world!"])
        self.assertFalse(response.body)

    def test_post(self):
        response = self.fetch("/post", method="POST",
                              body="arg1=foo&arg2=bar")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_chunked(self):
        response = self.fetch("/chunk")
        self.assertEqual(response.body, b"asdfqwer")

        chunks = []
        response = self.fetch("/chunk",
                              streaming_callback=chunks.append)
        self.assertEqual(chunks, [b"asdf", b"qwer"])
        self.assertFalse(response.body)

    def test_chunked_close(self):
        # test case in which chunks spread read-callback processing
        # over several ioloop iterations, but the connection is already closed.
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                if b"HTTP/1." not in request_data:
                    self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)

            def accept_callback(conn, address):
                # fake an HTTP server using chunked encoding where the final chunks
                # and connection close all happen at once
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.body, b"12")
            self.io_loop.remove_handler(sock.fileno())

    def test_streaming_stack_context(self):
        chunks = []
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def streaming_cb(chunk):
            chunks.append(chunk)
            if chunk == b'qwer':
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', streaming_callback=streaming_cb)

        self.assertEqual(chunks, [b'asdf', b'qwer'])
        self.assertEqual(1, len(exc_info))
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_basic_auth(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_basic_auth_explicit_mode(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame",
                                    auth_mode="basic").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_unsupported_auth_mode(self):
        # curl and simple clients handle errors a bit differently; the
        # important thing is that they don't fall back to basic auth
        # on an unknown mode.
        with ExpectLog(gen_log, "uncaught exception", required=False):
            with self.assertRaises((ValueError, HTTPError)):
                response = self.fetch("/auth", auth_username="Aladdin",
                                      auth_password="open sesame",
                                      auth_mode="asdf")
                response.rethrow()

    def test_follow_redirect(self):
        response = self.fetch("/countdown/2", follow_redirects=False)
        self.assertEqual(302, response.code)
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))

        response = self.fetch("/countdown/2")
        self.assertEqual(200, response.code)
        self.assertTrue(response.effective_url.endswith("/countdown/0"))
        self.assertEqual(b"Zero", response.body)

    def test_credentials_in_url(self):
        url = self.get_url("/auth").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                         response.body)

    def test_body_encoding(self):
        unicode_body = u"\xe9"
        byte_body = binascii.a2b_hex(b"e9")

        # unicode string in body gets converted to utf8
        response = self.fetch("/echopost", method="POST", body=unicode_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "2")
        self.assertEqual(response.body, utf8(unicode_body))

        # byte strings pass through directly
        response = self.fetch("/echopost", method="POST",
                              body=byte_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

        # Mixing unicode in headers and byte string bodies shouldn't
        # break anything
        response = self.fetch("/echopost", method="POST", body=byte_body,
                              headers={"Content-Type": "application/blah"},
                              user_agent=u"foo")
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

    def test_types(self):
        response = self.fetch("/hello")
        self.assertEqual(type(response.body), bytes)
        self.assertEqual(type(response.headers["Content-Type"]), str)
        self.assertEqual(type(response.code), int)
        self.assertEqual(type(response.effective_url), str)

    def test_header_callback(self):
        first_line = []
        headers = {}
        chunks = []

        def header_callback(header_line):
            if header_line.startswith('HTTP/1.1 101'):
                # Upgrading to HTTP/2
                pass
            elif header_line.startswith('HTTP/'):
                first_line.append(header_line)
            elif header_line != '\r\n':
                k, v = header_line.split(':', 1)
                headers[k.lower()] = v.strip()

        def streaming_callback(chunk):
            # All header callbacks are run before any streaming callbacks,
            # so the header data is available to process the data as it
            # comes in.
            self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
            chunks.append(chunk)

        self.fetch('/chunk', header_callback=header_callback,
                   streaming_callback=streaming_callback)
        self.assertEqual(len(first_line), 1, first_line)
        self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
        self.assertEqual(chunks, [b'asdf', b'qwer'])

    def test_header_callback_stack_context(self):
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def header_callback(header_line):
            if header_line.lower().startswith('content-type:'):
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', header_callback=header_callback)
        self.assertEqual(len(exc_info), 1)
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_configure_defaults(self):
        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
        # Construct a new instance of the configured client class
        client = self.http_client.__class__(self.io_loop, force_instance=True,
                                            defaults=defaults)
        try:
            client.fetch(self.get_url('/user_agent'), callback=self.stop)
            response = self.wait()
            self.assertEqual(response.body, b'TestDefaultUserAgent')
        finally:
            client.close()

    def test_header_types(self):
        # Header values may be passed as character or utf8 byte strings,
        # in a plain dictionary or an HTTPHeaders object.
        # Keys must always be the native str type.
        # All combinations should have the same results on the wire.
        for value in [u"MyUserAgent", b"MyUserAgent"]:
            for container in [dict, HTTPHeaders]:
                headers = container()
                headers['User-Agent'] = value
                resp = self.fetch('/user_agent', headers=headers)
                self.assertEqual(
                    resp.body, b"MyUserAgent",
                    "response=%r, value=%r, container=%r" %
                    (resp.body, value, container))

    def test_multi_line_headers(self):
        # Multi-line http headers are rare but rfc-allowed
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                if b"HTTP/1." not in request_data:
                    self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(b"\n", b"\r\n"), callback=stream.close)

            def accept_callback(conn, address):
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
            self.io_loop.remove_handler(sock.fileno())

    def test_304_with_content_length(self):
        # According to the spec 304 responses SHOULD NOT include
        # Content-Length or other entity headers, but some servers do it
        # anyway.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        response = self.fetch('/304_with_content_length')
        self.assertEqual(response.code, 304)
        self.assertEqual(response.headers['Content-Length'], '42')

    def test_final_callback_stack_context(self):
        # The final callback should be run outside of the httpclient's
        # stack_context. We want to ensure that there is not stack_context
        # between the user's callback and the IOLoop, so monkey-patch
        # IOLoop.handle_callback_exception and disable the test harness's
        # context with a NullContext.
        # Note that this does not apply to secondary callbacks (header
        # and streaming_callback), as errors there must be seen as errors
        # by the http client so it can clean up the connection.
        exc_info = []

        def handle_callback_exception(callback):
            exc_info.append(sys.exc_info())
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            self.http_client.fetch(self.get_url('/hello'),
                                   lambda response: 1 / 0)
        self.wait()
        self.assertEqual(exc_info[0][0], ZeroDivisionError)

    @gen_test
    def test_future_interface(self):
        response = yield self.http_client.fetch(self.get_url('/hello'))
        self.assertEqual(response.body, b'Hello world!')

    @gen_test
    def test_future_http_error(self):
        with self.assertRaises(HTTPError) as context:
            yield self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(context.exception.code, 404)
        self.assertEqual(context.exception.response.code, 404)

    @gen_test
    def test_future_http_error_no_raise(self):
        response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
        self.assertEqual(response.code, 404)

    @gen_test
    def test_reuse_request_from_response(self):
        # The response.request attribute should be an HTTPRequest, not
        # a _RequestProxy.
        # This test uses self.http_client.fetch because self.fetch calls
        # self.get_url on the input unconditionally.
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')

    def test_all_methods(self):
        for method in ['GET', 'DELETE', 'OPTIONS']:
            response = self.fetch('/all_methods', method=method)
            self.assertEqual(response.body, utf8(method))
        for method in ['POST', 'PUT', 'PATCH']:
            response = self.fetch('/all_methods', method=method, body=b'')
            self.assertEqual(response.body, utf8(method))
        response = self.fetch('/all_methods', method='HEAD')
        self.assertEqual(response.body, b'')
        response = self.fetch('/all_methods', method='OTHER',
                              allow_nonstandard_methods=True)
        self.assertEqual(response.body, b'OTHER')

    def test_body_sanity_checks(self):
        # These methods require a body.
        for method in ('POST', 'PUT', 'PATCH'):
            with self.assertRaises(ValueError) as context:
                resp = self.fetch('/all_methods', method=method)
                resp.rethrow()
            self.assertIn('must not be None', str(context.exception))

            resp = self.fetch('/all_methods', method=method,
                              allow_nonstandard_methods=True)
            self.assertEqual(resp.code, 200)

        # These methods don't allow a body.
        for method in ('GET', 'DELETE', 'OPTIONS'):
            with self.assertRaises(ValueError) as context:
                resp = self.fetch('/all_methods', method=method, body=b'asdf')
                resp.rethrow()
            self.assertIn('must be None', str(context.exception))

            # In most cases this can be overridden, but curl_httpclient
            # does not allow body with a GET at all.
            if method != 'GET':
                resp = self.fetch('/all_methods', method=method, body=b'asdf',
                                  allow_nonstandard_methods=True)
                resp.rethrow()
                self.assertEqual(resp.code, 200)

    # This test causes odd failures with the combination of
    # curl_httpclient (at least with the version of libcurl available
    # on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
    # curl decides the response came back too soon and closes the connection
    # to start again. It does this *before* telling the socket callback to
    # unregister the FD. Some IOLoop implementations have special kernel
    # integration to discover this immediately. Tornado's IOLoops
    # ignore errors on remove_handler to accommodate this behavior, but
    # Twisted's reactor does not. The removeReader call fails and so
    # do all future removeAll calls (which our tests do at cleanup).
    #
    # def test_post_307(self):
    #    response = self.fetch("/redirect?status=307&url=/post",
    #                          method="POST", body=b"arg1=foo&arg2=bar")
    #    self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_put_307(self):
        response = self.fetch("/redirect?status=307&url=/put",
                              method="PUT", body=b"hello")
        response.rethrow()
        self.assertEqual(response.body, b"Put body: hello")

    def test_non_ascii_header(self):
        # Non-ascii headers are sent as latin1.
        response = self.fetch("/set_header?k=foo&v=%E9")
        response.rethrow()
        self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
class RequestProxyTest(unittest.TestCase):
    """Tests _RequestProxy attribute resolution: request attributes win
    over defaults, defaults fill gaps, unknown attributes raise."""
    def test_request_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          user_agent='foo'),
                              dict())
        self.assertEqual(proxy.user_agent, 'foo')

    def test_default_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')

    def test_both_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          proxy_host='foo'),
                              dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')

    def test_neither_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        self.assertIs(proxy.auth_username, None)

    def test_bad_attribute(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        with self.assertRaises(AttributeError):
            proxy.foo

    def test_defaults_none(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
        self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
    """Tests the str() representation of HTTPResponse."""
    def test_str(self):
        response = HTTPResponse(HTTPRequest('http://example.com'),
                                200, headers={}, buffer=BytesIO())
        s = str(response)
        self.assertTrue(s.startswith('HTTPResponse('))
        self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
    """Tests for the blocking HTTPClient facade.

    Runs a real HTTPServer on a private IOLoop in a background thread so
    the synchronous client in the test thread has a live endpoint.
    """
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        # Server side: its own IOLoop, started on a worker thread below.
        self.server_ioloop = IOLoop()
        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)
        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()
        # Client side: HTTPClient creates and owns its own internal IOLoop.
        self.http_client = HTTPClient()
    def tearDown(self):
        def stop_server():
            # Runs on the server's IOLoop thread; stop accepting first.
            self.server.stop()
            # Delay the shutdown of the IOLoop by one iteration because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).
            self.server_ioloop.add_callback(self.server_ioloop.stop)
        # Ordering matters: stop the server loop, join its thread, then
        # release the client and finally close the loop's file descriptors.
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)
    def get_url(self, path):
        """Return an absolute URL for *path* on the test server."""
        return 'http://127.0.0.1:%d%s' % (self.port, path)
    def test_sync_client(self):
        # A plain blocking fetch returns the handler's body directly.
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)
    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
    """Unit tests for HTTPRequest attribute handling."""

    def test_headers(self):
        # Headers passed to the constructor are exposed unchanged.
        req = HTTPRequest('http://example.com', headers={'foo': 'bar'})
        self.assertEqual(req.headers, {'foo': 'bar'})

    def test_headers_setter(self):
        # Assigning a dict replaces the header collection wholesale.
        req = HTTPRequest('http://example.com')
        req.headers = {'bar': 'baz'}
        self.assertEqual(req.headers, {'bar': 'baz'})

    def test_null_headers_setter(self):
        # Assigning None normalizes to an empty header collection.
        req = HTTPRequest('http://example.com')
        req.headers = None
        self.assertEqual(req.headers, {})

    def test_body(self):
        # A str body given at construction is stored utf-8 encoded.
        req = HTTPRequest('http://example.com', body='foo')
        self.assertEqual(req.body, utf8('foo'))

    def test_body_setter(self):
        # The same encoding applies when the body is assigned later.
        req = HTTPRequest('http://example.com')
        req.body = 'foo'
        self.assertEqual(req.body, utf8('foo'))

    def test_if_modified_since(self):
        # if_modified_since is rendered into the conditional-request header.
        stamp = datetime.datetime.utcnow()
        req = HTTPRequest('http://example.com', if_modified_since=stamp)
        self.assertEqual(req.headers,
                         {'If-Modified-Since': format_timestamp(stamp)})
class HTTPErrorTestCase(unittest.TestCase):
    """Tests for HTTPError formatting and copying."""

    def test_copy(self):
        # copy.copy must yield a distinct object carrying the same code.
        original = HTTPError(403)
        duplicate = copy.copy(original)
        self.assertIsNot(original, duplicate)
        self.assertEqual(original.code, duplicate.code)

    def test_plain_error(self):
        # With no response attached, str() and repr() both show the
        # status code plus its standard reason phrase.
        err = HTTPError(403)
        self.assertEqual(str(err), "HTTP 403: Forbidden")
        self.assertEqual(repr(err), "HTTP 403: Forbidden")

    def test_error_with_response(self):
        # rethrow() on a failed response raises an equivalent HTTPError.
        resp = HTTPResponse(HTTPRequest('http://example.com/'), 403)
        with self.assertRaises(HTTPError) as cm:
            resp.rethrow()
        err = cm.exception
        self.assertEqual(str(err), "HTTP 403: Forbidden")
        self.assertEqual(repr(err), "HTTP 403: Forbidden")
| gpl-3.0 |
kageiit/buck | python-dsl/buck_parser/select_support.py | 5 | 1872 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, with_statement
class SelectorValue:
    """A single ``select()`` expression.

    Holds the mapping of configuration conditions to values, plus the
    optional message reported when no condition matches.
    """

    def __init__(self, conditions, no_match_message):
        self.__conditions = conditions
        self.__no_match_message = no_match_message

    def conditions(self):
        """Return the condition-to-value mapping of this select."""
        return self.__conditions

    def no_match_message(self):
        """Return the custom no-match error message, if any."""
        return self.__no_match_message
class SelectorList:
    """An ordered concatenation of selector values and plain objects.

    Stores the raw representation of ``select()`` statements so the
    actual attribute values can be resolved later, once a configuration
    is available. Supports ``+`` on either side: another SelectorList is
    spliced in element-wise, any other object is appended/prepended as a
    single item. Both operators return a new SelectorList.
    """

    def __init__(self, items):
        self.__items = items

    def items(self):
        """Return the underlying item list."""
        return self.__items

    def __radd__(self, obj):
        prefix = obj.items() if isinstance(obj, SelectorList) else [obj]
        return SelectorList(prefix + self.__items)

    def __add__(self, obj):
        suffix = obj.items() if isinstance(obj, SelectorList) else [obj]
        return SelectorList(self.__items + suffix)
| apache-2.0 |
run2/citytour | 4symantec/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py | 505 | 1421 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
    """Non-recursive html5lib tree walker over ``xml.dom`` DOM trees."""

    def getNodeDetails(self, node):
        # Translate a DOM node into the html5lib token-tuple format.
        kind = node.nodeType
        if kind == Node.DOCUMENT_TYPE_NODE:
            return _base.DOCTYPE, node.name, node.publicId, node.systemId
        if kind in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
            return _base.TEXT, node.nodeValue
        if kind == Node.ELEMENT_NODE:
            # Attributes are keyed by (namespace, local-name); the
            # namespace component is None for un-namespaced attributes.
            attrs = {}
            for name in list(node.attributes.keys()):
                attr_node = node.getAttributeNode(name)
                if attr_node.namespaceURI:
                    key = (attr_node.namespaceURI, attr_node.localName)
                else:
                    key = (None, attr_node.name)
                attrs[key] = attr_node.value
            return (_base.ELEMENT, node.namespaceURI, node.nodeName,
                    attrs, node.hasChildNodes())
        if kind == Node.COMMENT_NODE:
            return _base.COMMENT, node.nodeValue
        if kind in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
            return (_base.DOCUMENT,)
        return _base.UNKNOWN, node.nodeType

    def getFirstChild(self, node):
        return node.firstChild

    def getNextSibling(self, node):
        return node.nextSibling

    def getParentNode(self, node):
        return node.parentNode
| mit |
jbhuang0604/WSL | lib/roi_data_layer/layer.py | 43 | 5930 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""The data layer used during training to train a Fast R-CNN network.
RoIDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from roi_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
class RoIDataLayer(caffe.Layer):
    """Fast R-CNN data layer used for training.

    Feeds image blobs, RoIs and labels (plus optional bbox-regression
    targets) to the net, drawing cfg.TRAIN.IMS_PER_BATCH images per
    forward pass from a shuffled roidb.
    """
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # Reshuffle once the remaining images cannot fill a whole batch.
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def _get_next_minibatch(self):
        """Return the blobs to be used for the next minibatch.
        If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
        separate process and made available through self._blob_queue.
        """
        if cfg.TRAIN.USE_PREFETCH:
            # Blocks until the BlobFetcher process has a minibatch ready.
            return self._blob_queue.get()
        else:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            return get_minibatch(minibatch_db, self._num_classes)
    def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            # Bounded queue keeps the prefetcher at most 10 minibatches
            # ahead of the consumer.
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exits
            def cleanup():
                print 'Terminating BlobFetcher'
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
    def setup(self, bottom, top):
        """Setup the RoIDataLayer."""
        # parse the layer parameter string, which must be valid YAML
        layer_params = yaml.load(self.param_str_)
        self._num_classes = layer_params['num_classes']
        # Map blob names to their index within the `top` vector.
        self._name_to_top_map = {
            'data': 0,
            'rois': 1,
            'labels': 2}
        # data blob: holds a batch of N images, each with 3 channels
        # The height and width (100 x 100) are dummy values
        top[0].reshape(1, 3, 100, 100)
        # rois blob: holds R regions of interest, each is a 5-tuple
        # (n, x1, y1, x2, y2) specifying an image batch index n and a
        # rectangle (x1, y1, x2, y2)
        top[1].reshape(1, 5)
        # labels blob: R categorical labels in [0, ..., K] for K foreground
        # classes plus background
        top[2].reshape(1)
        if cfg.TRAIN.BBOX_REG:
            self._name_to_top_map['bbox_targets'] = 3
            self._name_to_top_map['bbox_loss_weights'] = 4
            # bbox_targets blob: R bounding-box regression targets with 4
            # targets per class
            top[3].reshape(1, self._num_classes * 4)
            # bbox_loss_weights blob: At most 4 targets per roi are active;
            # this binary vector specifies the subset of active targets
            top[4].reshape(1, self._num_classes * 4)
    def forward(self, bottom, top):
        """Get blobs and copy them into this layer's top blob vector."""
        blobs = self._get_next_minibatch()
        for blob_name, blob in blobs.iteritems():
            top_ind = self._name_to_top_map[blob_name]
            # Reshape net's input blobs
            top[top_ind].reshape(*(blob.shape))
            # Copy data into net's input blobs
            top[top_ind].data[...] = blob.astype(np.float32, copy=False)
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
class BlobFetcher(Process):
    """Experimental class for prefetching blobs in a separate process.

    Runs forever once started; producing into the bounded queue blocks
    when the consumer (RoIDataLayer) falls behind.
    """
    def __init__(self, queue, roidb, num_classes):
        super(BlobFetcher, self).__init__()
        self._queue = queue
        self._roidb = roidb
        self._num_classes = num_classes
        self._perm = None
        self._cur = 0
        self._shuffle_roidb_inds()
        # fix the random seed for reproducibility
        np.random.seed(cfg.RNG_SEED)
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        # TODO(rbg): remove duplicated code
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # TODO(rbg): remove duplicated code
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def run(self):
        """Produce minibatches into the queue until terminated."""
        print 'BlobFetcher started'
        while True:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            blobs = get_minibatch(minibatch_db, self._num_classes)
            self._queue.put(blobs)
| mit |
julien78910/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/funnyordie.py | 27 | 2538 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class FunnyOrDieIE(InfoExtractor):
    """Extractor for funnyordie.com video, article and embed pages."""
    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
    _TESTS = [{
        'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
        'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
        'info_dict': {
            'id': '0732f586d7',
            'ext': 'mp4',
            'title': 'Heart-Shaped Box: Literal Video Version',
            'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',
            'thumbnail': 're:^http:.*\.jpg$',
        },
    }, {
        'url': 'http://www.funnyordie.com/embed/e402820827',
        'info_dict': {
            'id': 'e402820827',
            'ext': 'mp4',
            'title': 'Please Use This Song (Jon Lajoie)',
            'description': 'Please use this to sell something. www.jonlajoie.com',
            'thumbnail': 're:^http:.*\.jpg$',
        },
    }, {
        'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # Collect (base-url, extension) pairs from the page's <source> tags.
        links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
        if not links:
            raise ExtractorError('No media links available for %s' % video_id)
        # Sort mp4 sources last so they end up preferred by downstream
        # format selection.
        links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)
        # Bitrates are embedded as a comma-separated list in a csmil URL.
        bitrates = self._html_search_regex(r'<source src="[^"]+/v,((?:\d+,)+)\.mp4\.csmil', webpage, 'video bitrates')
        bitrates = [int(b) for b in bitrates.rstrip(',').split(',')]
        bitrates.sort()
        # One format entry per (bitrate, source) combination.
        formats = []
        for bitrate in bitrates:
            for link in links:
                formats.append({
                    'url': '%s%d.%s' % (link[0], bitrate, link[1]),
                    'format_id': '%s-%d' % (link[1], bitrate),
                    'vbr': bitrate,
                })
        # Metadata (title/description/thumbnail) comes from the embedded
        # Facebook post JSON blob.
        post_json = self._search_regex(
            r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details')
        post = json.loads(post_json)
        return {
            'id': video_id,
            'title': post['name'],
            'description': post.get('description'),
            'thumbnail': post.get('picture'),
            'formats': formats,
        }
| gpl-3.0 |
tboyce021/home-assistant | homeassistant/components/google_pubsub/__init__.py | 9 | 2655 | """Support for Google Cloud Pub/Sub."""
import datetime
import json
import logging
import os
from typing import Any, Dict
from google.cloud import pubsub_v1
import voluptuous as vol
from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNAVAILABLE, STATE_UNKNOWN
from homeassistant.core import Event, HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import FILTER_SCHEMA
_LOGGER = logging.getLogger(__name__)
DOMAIN = "google_pubsub"
CONF_PROJECT_ID = "project_id"
CONF_TOPIC_NAME = "topic_name"
CONF_SERVICE_PRINCIPAL = "credentials_json"
CONF_FILTER = "filter"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PROJECT_ID): cv.string,
vol.Required(CONF_TOPIC_NAME): cv.string,
vol.Required(CONF_SERVICE_PRINCIPAL): cv.string,
vol.Required(CONF_FILTER): FILTER_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass: HomeAssistant, yaml_config: Dict[str, Any]) -> bool:
    """Activate Google Pub/Sub component.

    Creates a publisher from the configured service-account file and
    forwards every filtered state change to the configured topic.
    Returns False if the credentials file cannot be found.
    """
    config = yaml_config[DOMAIN]
    project_id = config[CONF_PROJECT_ID]
    topic_name = config[CONF_TOPIC_NAME]
    # The credentials path is resolved relative to the HA config directory.
    service_principal_path = os.path.join(
        hass.config.config_dir, config[CONF_SERVICE_PRINCIPAL]
    )
    if not os.path.isfile(service_principal_path):
        _LOGGER.error("Path to credentials file cannot be found")
        return False
    entities_filter = config[CONF_FILTER]
    publisher = pubsub_v1.PublisherClient.from_service_account_json(
        service_principal_path
    )
    topic_path = publisher.topic_path(  # pylint: disable=no-member
        project_id, topic_name
    )
    encoder = DateTimeJSONEncoder()
    def send_to_pubsub(event: Event):
        """Send states to Pub/Sub."""
        state = event.data.get("new_state")
        # Skip unknown/unavailable states and entities the filter excludes.
        if (
            state is None
            or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE)
            or not entities_filter(state.entity_id)
        ):
            return
        as_dict = state.as_dict()
        # NOTE(review): default=encoder.encode hands json.dumps a fully
        # serialized JSON *string* for unknown objects (e.g. datetimes),
        # which then gets re-encoded as a quoted string; encoder.default
        # looks like the intended hook -- confirm before changing.
        data = json.dumps(obj=as_dict, default=encoder.encode).encode("utf-8")
        publisher.publish(topic_path, data=data)
    hass.bus.listen(EVENT_STATE_CHANGED, send_to_pubsub)
    return True
class DateTimeJSONEncoder(json.JSONEncoder):
    """JSON encoder that additionally understands datetime objects.

    Datetimes are rendered as ISO-8601 strings; every other unknown type
    is delegated to the base class (which raises TypeError).
    """

    def default(self, o):
        """Return a JSON-serializable form of *o*."""
        if not isinstance(o, datetime.datetime):
            return super().default(o)
        return o.isoformat()
| apache-2.0 |
willingc/oh-mainline | mysite/search/migrations/0027_change_name_of_python_the_project.py | 17 | 4545 | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
    """Data migration: rename the project 'Python' to 'Python (project)'."""
    # presumably tells South not to run this data migration in --dry-run
    # mode, since it mutates rows rather than schema -- confirm.
    no_dry_run = True
    def forwards(self, orm):
        "Rename every Project literally named 'Python'."
        number_found = 0
        for project in orm.Project.objects.all():
            if project.name == 'Python':
                print "Changing a project:", project
                project.name = 'Python (project)'
                project.save()
                number_found += 1
        print "Total changed:", number_found
    def backwards(self, orm):
        "No reverse data migration is implemented."
    # Frozen ORM model definitions used by South to build `orm` above;
    # must match the models at the time this migration was written.
    models = {
        'search.bug': {
            'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
            'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
            'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'search.hitcountcache': {
            'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'hit_count': ('django.db.models.fields.IntegerField', [], {})
        },
        'search.project': {
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True'})
        }
    }
    complete_apps = ['search']
| agpl-3.0 |
dpendl00/headphones | lib/mako/compat.py | 13 | 4016 | import sys
import time
py3k = sys.version_info >= (3, 0)
py33 = sys.version_info >= (3, 3)
py26 = sys.version_info >= (2, 6)
py25 = sys.version_info >= (2, 5)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')
pypy = hasattr(sys, 'pypy_version_info')
if py3k:
    # Python 3: text is str, binary is bytes, and the renamed stdlib
    # modules are imported under common aliases.
    from io import StringIO
    import builtins as compat_builtins
    from urllib.parse import quote_plus, unquote_plus
    from html.entities import codepoint2name, name2codepoint
    string_types = str,
    binary_type = bytes
    text_type = str
    from io import BytesIO as byte_buffer

    def u(s):
        # Source literals are already unicode under py3.
        return s

    def octal(lit):
        # Build an octal int from its digit string, e.g. "777" -> 0o777.
        return eval("0o" + lit)

else:
    # Python 2 equivalents of the names above.
    import __builtin__ as compat_builtins
    try:
        # cStringIO is the faster C implementation when available.
        from cStringIO import StringIO
    except:
        from StringIO import StringIO
    byte_buffer = StringIO
    from urllib import quote_plus, unquote_plus
    from htmlentitydefs import codepoint2name, name2codepoint
    string_types = basestring,
    binary_type = str
    text_type = unicode

    def u(s):
        # Decode a utf-8 byte string to unicode under py2.
        return unicode(s, "utf-8")

    def octal(lit):
        # Py2 octal literals use a bare leading zero.
        return eval("0" + lit)
if py33:
    # Python 3.3+: the imp module is deprecated; use importlib machinery.
    from importlib import machinery

    def load_module(module_id, path):
        """Import and return the module at *path* under name *module_id*."""
        return machinery.SourceFileLoader(module_id, path).load_module()
else:
    import imp

    def load_module(module_id, path):
        """Import and return the module at *path* under name *module_id*."""
        fp = open(path, 'rb')
        try:
            return imp.load_source(module_id, path, fp)
        finally:
            fp.close()
def exception_as():
    """Return the exception instance currently being handled, if any."""
    _, value, _ = sys.exc_info()
    return value
try:
    import threading
    if py3k:
        import _thread as thread
    else:
        import thread
except ImportError:
    # Builds without thread support fall back to the dummy modules,
    # which provide the same API without real concurrency.
    import dummy_threading as threading
    if py3k:
        import _dummy_thread as thread
    else:
        import dummy_thread as thread

# Timer used for template performance measurement: time.clock is the
# higher-resolution choice on win32/jython, time.time elsewhere.
if win32 or jython:
    time_func = time.clock
else:
    time_func = time.time
try:
    from functools import partial
except:
    # Pre-2.5 fallback: a pure-Python equivalent of functools.partial.
    def partial(func, *args, **keywords):
        def newfunc(*fargs, **fkeywords):
            newkeywords = keywords.copy()
            newkeywords.update(fkeywords)
            return func(*(args + fargs), **newkeywords)
        return newfunc


if not py25:
    # Python 2.4 lacks the all() builtin; provide it.
    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True

    def exception_name(exc):
        """Return the class name of *exc* (or of an old-style exception)."""
        try:
            return exc.__class__.__name__
        except AttributeError:
            # Old-style string/class exceptions expose __name__ directly.
            return exc.__name__
else:
    all = all

    def exception_name(exc):
        """Return the class name of *exc*."""
        return exc.__class__.__name__
try:
    from inspect import CO_VARKEYWORDS, CO_VARARGS

    def inspect_func_args(fn):
        """Return (args, varargs, varkw, defaults) for *fn*.

        Reads the code object's flags directly, mirroring
        inspect.getargspec without importing the heavier inspect module.
        """
        if py3k:
            co = fn.__code__
        else:
            co = fn.func_code
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        varargs = None
        if co.co_flags & CO_VARARGS:
            varargs = co.co_varnames[nargs]
            nargs = nargs + 1
        varkw = None
        if co.co_flags & CO_VARKEYWORDS:
            varkw = co.co_varnames[nargs]
        if py3k:
            return args, varargs, varkw, fn.__defaults__
        else:
            return args, varargs, varkw, fn.func_defaults
except ImportError:
    # Fall back to the stdlib implementation if the CO_* flags are
    # unavailable.
    import inspect

    def inspect_func_args(fn):
        return inspect.getargspec(fn)

# callable() was removed in Python 3.0 (restored in 3.2); emulate it.
if py3k:
    def callable(fn):
        return hasattr(fn, '__call__')
else:
    callable = callable
################################################
# cross-compatible metaclass implementation
# Copyright (c) 2010-2012 Benjamin Peterson
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass."""
    name = "%sBase" % meta.__name__
    return meta(name, (base,), {})
################################################
def arg_stringname(func_arg):
    """Return the string name of a vararg/kwarg node.

    Python 3.4+ represents function arguments as ``_ast.arg`` objects
    (carrying an ``arg`` attribute) rather than plain name strings.
    """
    return func_arg.arg if hasattr(func_arg, 'arg') else str(func_arg)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.