| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
Pafcholini/emotion_kernel_tw_p | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
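# The try/except counting above leans on perf's autodict helper, which
# creates nested dict levels on first access. A rough stdlib sketch of the
# same comm -> pid -> syscall-id counting (illustrative, not part of this
# script):
#   from collections import defaultdict
#   counts = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
#   counts["bash"][1234][42] += 1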
| gpl-2.0 |
Ali-aqrabawi/ezclinic | lib/django/core/mail/backends/console.py | 696 | 1477 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import six
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def write_message(self, message):
msg = message.message()
msg_data = msg.as_bytes()
if six.PY3:
charset = msg.get_charset().get_output_charset() if msg.get_charset() else 'utf-8'
msg_data = msg_data.decode(charset)
self.stream.write('%s\n' % msg_data)
self.stream.write('-' * 79)
self.stream.write('\n')
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
msg_count = 0
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.write_message(message)
self.stream.flush() # flush after each message
msg_count += 1
if stream_created:
self.close()
except Exception:
if not self.fail_silently:
raise
return msg_count
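# Usage sketch (illustrative; assumes a configured Django settings module):
#   from django.core import mail
#   conn = mail.get_connection('django.core.mail.backends.console.EmailBackend')
#   mail.send_mail('Subject', 'Body', 'from@example.com',
#                  ['to@example.com'], connection=conn)
# Or select it project-wide via settings:
#   EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'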
| mit |
mnestis/provglish | provglish/tests/nl-test.py | 1 | 10674 | import unittest
import subprocess
from time import sleep
import os
import provglish
from provglish import prov
from provglish.nl import tools
from helper_funcs import load_fixture
nlgserv = None
def setUpModule():
global nlgserv
print "Starting up nlgserv..."
nlgserv = subprocess.Popen(["nlgserv", "localhost", "8080"],
stdout=open(os.devnull, "w"),
stderr=open(os.devnull, "w"),
preexec_fn=os.setsid)
sleep(60)
print "Commencing testing..."
def tearDownModule():
global nlgserv
print "Shutting down nlgserv..."
os.killpg(nlgserv.pid, subprocess.signal.SIGTERM)
nlgserv.wait()
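# Note: preexec_fn=os.setsid in setUpModule puts nlgserv in its own session
# (and process group), so os.killpg can tear down the server and any
# children it spawned with one signal. The generic pattern, as a sketch:
#   proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
#   # ... exercise the server ...
#   os.killpg(proc.pid, subprocess.signal.SIGTERM)
#   proc.wait()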
class TestServerSetup(unittest.TestCase):
def test_server_there(self):
sentence = {}
sentence["subject"] = "John"
sentence["verb"] = "kick"
sentence["object"] = "Steve"
output = tools.realise_sentence({"sentence": sentence})
self.assertEqual(output, "John kicks Steve.")
class TestUsage(unittest.TestCase):
def test_usage_string(self):
graph = load_fixture("nl_templates/usage.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.usage.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 3)
self.assertIn("Act1 used something at 2014-10-10T10:00:00+01:00.", strings)
self.assertIn("Act2 used ent2.", strings)
self.assertIn("Act3 used ent3 at 2014-11-11T11:00:00+00:00.", strings)
class TestGeneration(unittest.TestCase):
def test_generation_string(self):
graph = load_fixture("nl_templates/generation.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.generation.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 3)
self.assertIn("Ent1 was generated at 2014-10-10T11:00:00+01:00.", strings)
self.assertIn("Ent2 was generated by act2.", strings)
self.assertIn("Ent3 was generated at 2014-10-11T12:00:00+01:00 by act3.", strings)
class TestDelegation(unittest.TestCase):
def test_delegation_string(self):
graph = load_fixture("nl_templates/delegation.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.delegation.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 2)
self.assertIn("John acted on behalf of James.", strings)
self.assertIn("Jack did fetching on behalf of Jill.", strings)
class TestAssociation(unittest.TestCase):
def test_association_string(self):
graph = load_fixture("nl_templates/association.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.association.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Swimming was associated with John.", strings)
def test_association_qualified_string(self):
graph = load_fixture("nl_templates/association_qualified.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.association.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Swimming was associated with John.", strings)
class TestAgent(unittest.TestCase):
def test_agent_string(self):
graph = load_fixture("nl_templates/agent.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.agent.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Fred was an agent.", strings)
class TestAttribution(unittest.TestCase):
def test_attribution_string(self):
graph = load_fixture("nl_templates/attribution.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.attribution.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Entity was attributed to agent.", strings)
def test_attribution_qualified_string(self):
graph = load_fixture("nl_templates/attribution_qualified.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.attribution.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Entity was attributed to agent.", strings)
class TestCommunication(unittest.TestCase):
def test_communication_string(self):
graph = load_fixture("nl_templates/communication.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.communication.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Baking was informed by reading.", strings)
def test_communication_qualified_string(self):
graph = load_fixture("nl_templates/communication_qualified.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.communication.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Baking was informed by reading.", strings)
class TestEntity(unittest.TestCase):
def test_entity_string(self):
graph = load_fixture("nl_templates/entity.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.entity.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Ball was an entity.", strings)
class TestInvalidation(unittest.TestCase):
def test_invalidation_string(self):
graph = load_fixture("nl_templates/invalidation.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.invalidation.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 3)
self.assertIn("Ent1 was invalidated at 2014-10-10T11:00:00+01:00.", strings)
self.assertIn("Ent2 was invalidated by act2.", strings)
self.assertIn("Ent3 was invalidated at 2014-10-11T11:00:00+01:00 by act3.", strings)
class TestActivity(unittest.TestCase):
def test_activity(self):
graph = load_fixture("nl_templates/activity.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.activity.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Activity was an activity.", strings)
def test_activity_start(self):
graph = load_fixture("nl_templates/activity_start.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.activity_start.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Activity was an activity that started at 2011-11-16T16:05:00+00:00.", strings)
def test_activity_end(self):
graph = load_fixture("nl_templates/activity_end.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.activity_end.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Activity was an activity that ended at 2015-11-16T16:05:00+00:00.", strings)
def test_activity_times(self):
graph = load_fixture("nl_templates/activity_times.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.activity_duration.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
self.assertEqual(len(sentences), 1)
self.assertIn("Activity was an activity that started at 2015-11-16T16:05:00+00:00 and ended at 2016-11-16T16:05:00+00:00.", strings)
class TestEnd(unittest.TestCase):
def test_end(self):
graph = load_fixture("nl_templates/end.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.end.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
print strings
self.assertEqual(len(sentences), 4)
self.assertIn("The end of activity1 was triggered by trigger1.", strings)
self.assertIn("The activity2 was ended by ender2.", strings)
self.assertIn("The activity3 was ended at 2014-10-10T11:00:00+01:00.", strings)
self.assertIn("The activity4 was ended at 2014-10-10T11:00:00+01:00 by ender4.", strings)
class TestStart(unittest.TestCase):
def test_start(self):
graph = load_fixture("nl_templates/start.ttl")
graph = prov.load_prov_ontology(graph)
sentences = provglish.nl.templates.start.generate_sentences(graph)
strings = []
for sentence in sentences:
strings.append(str(sentence))
print strings
self.assertEqual(len(sentences), 4)
self.assertIn("The start of activity1 was triggered by trigger1.", strings)
self.assertIn("The activity2 was started by starter2.", strings)
self.assertIn("The activity3 was started at 2014-10-10T11:00:00+01:00.", strings)
self.assertIn("The activity4 was started at 2014-10-10T11:00:00+01:00 by starter4.", strings)
| mit |
renyi533/tensorflow | tensorflow/python/kernel_tests/concat_op_test.py | 9 | 29007 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConcatOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testHStack(self):
with self.session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 0)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], params[p1])
self.assertAllEqual(result[4:, :], params[p2])
@test_util.run_deprecated_v1
def testVStack(self):
with self.session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 1)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:, :4], params[p1])
self.assertAllEqual(result[:, 4:], params[p2])
def testInt32GPU(self):
with test_util.use_gpu():
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
c = array_ops.concat([x1, x2], 0)
result = self.evaluate(c)
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
def testRefType(self):
with test_util.use_gpu():
p1 = np.random.rand(4, 4).astype("f")
p2 = np.random.rand(4, 4).astype("f")
v1 = variables.Variable(p1)
v2 = variables.Variable(p2)
c = array_ops.concat([v1, v2], 0)
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(c)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], p1)
self.assertAllEqual(result[4:, :], p2)
def _testRandom(self, dtype):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
# Random number of tensors, but always > 1.
num_tensors = np.random.randint(2, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
if dtype == dtypes.bfloat16:
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.session(use_gpu=True):
p = []
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)
t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)
if dtype != dtype_feed:
concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
if dtype != dtype_feed:
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
ind[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
if dtype == dtype_feed:
self.assertAllEqual(result[ind], params[p[i]])
else:
self.assertAllClose(result[ind], params[p[i]], 0.01)
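# A tiny NumPy illustration of the slice-list indexing used above
# (illustrative sketch; np is the numpy import from the top of this file):
#   a, b = np.zeros((2, 3)), np.ones((2, 5))
#   r = np.concatenate([a, b], axis=1)
#   ind = [slice(0, 2), slice(3, 8)]   # ':' on rows, b's band on columns
#   assert (r[tuple(ind)] == b).all()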
@test_util.run_deprecated_v1
def testRandom(self):
self._testRandom(dtypes.bool)
self._testRandom(dtypes.float32)
self._testRandom(dtypes.int16)
self._testRandom(dtypes.int32)
self._testRandom(dtypes.int64)
self._testRandom(dtypes.bfloat16)
self._testRandom(dtypes.complex64)
self._testRandom(dtypes.complex128)
@test_util.run_deprecated_v1
def testInvalidConcatDimTypeAndShape(self):
a = variables.Variable(constant_op.constant(1.0, shape=[1]))
b = variables.Variable(constant_op.constant(2.0, shape=[1]))
with self.assertRaises(ValueError):
array_ops.concat(b, a)
with self.assertRaises(TypeError):
array_ops.concat(1, 4.2)
with self.assertRaises(ValueError):
array_ops.concat(1, a)
with self.assertRaises(TypeError):
array_ops.concat([a, b], a)
with self.assertRaises(ValueError):
array_ops.concat([a, b], [3])
with self.assertRaises(ValueError):
array_ops.concat([], 0)
# An integer tensor for shape dim should throw no error.
array_ops.concat(1, constant_op.constant(0, shape=[]))
# A non-scalar tensor for shape should throw ValueError.
with self.assertRaises(ValueError):
array_ops.concat(1, constant_op.constant(0, shape=[1]))
def _testGradientsSimple(self, dtype):
# Test both positive and negative concat axis.
# -2 and 1 correspond to the same axis for 3-dimensional tensors.
for axis in [-2, 1]:
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [10, x, 2]
t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
t += -1j * t
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtype))
c = array_ops.concat(inp_tensors, axis)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
grad_inp += -1j * grad_inp
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
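# Why re-concatenation is the right check: concat's gradient simply slices
# the upstream gradient back into per-input pieces along the concat axis,
# so joining those pieces along the same axis must reproduce grad_inp
# exactly.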
@test_util.run_deprecated_v1
def testGradientsSimple(self):
self._testGradientsSimple(dtypes.float32)
self._testGradientsSimple(dtypes.complex64)
@test_util.run_deprecated_v1
def testGradientsFirstDim(self):
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [x, 10, 2]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 0)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 0)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsLastDim(self):
# Test both positive and negative concat axis.
# -1 and 2 correspond to the same axis for 3-dimensional tensors.
for axis in [-1, 2]:
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [10, 2, x]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def _RunAndVerifyGradientsRandom(self):
# Random dims of rank 5
input_shape = np.random.randint(1, 5, size=5)
# Random number of tensors
num_tensors = np.random.randint(12, 20)
# Random dim to concat on
concat_dim = np.random.randint(5)
concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in concat_dim_sizes:
shape = input_shape
shape[concat_dim] = x
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(t.flatten(), shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, concat_dim)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsRandom(self):
for _ in range(5):
self._RunAndVerifyGradientsRandom()
@test_util.run_deprecated_v1
def testGradientWithUnknownInputDim(self):
with self.session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = array_ops.concat([x, y], 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], [x, y], [grad_tensor])
concated_grad = array_ops.concat(grad, 2)
params = {
x: np.random.rand(10, 2, 3).astype("f"),
y: np.random.rand(10, 2, 6).astype("f")
}
result = concated_grad.eval(feed_dict=params)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testShapeError(self):
# Rank doesn't match.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], 1)
# Dimensions don't match in a non-concat dim.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[1, 2, 1]),
constant_op.constant(20.0, shape=[3, 2, 1])
], 1)
# concat_dim out of range.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], 3)
# concat_dim out of range
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], -4)
@test_util.run_deprecated_v1
def testShapeWithUnknownConcatDim(self):
p1 = array_ops.placeholder(dtypes.float32)
c1 = constant_op.constant(10.0, shape=[4, 4, 4, 4])
p2 = array_ops.placeholder(dtypes.float32)
c2 = constant_op.constant(20.0, shape=[4, 4, 4, 4])
dim = array_ops.placeholder(dtypes.int32)
concat = array_ops.concat([p1, c1, p2, c2], dim)
self.assertEqual(4, concat.get_shape().ndims)
# All dimensions unknown.
concat2 = array_ops.concat([p1, p2], dim)
self.assertEqual(None, concat2.get_shape())
# Rank doesn't match.
c3 = constant_op.constant(30.0, shape=[4, 4, 4])
with self.assertRaises(ValueError):
array_ops.concat([p1, c1, p2, c3], dim)
@test_util.run_deprecated_v1
def testZeroSize(self):
# Verify that concat doesn't crash and burn for zero size inputs
np.random.seed(7)
with test_util.use_gpu():
for shape0 in (), (2,):
axis = len(shape0)
for shape1 in (), (3,):
for n0 in 0, 1, 2:
for n1 in 0, 1, 2:
x0 = np.random.randn(*(shape0 + (n0,) + shape1))
x1 = np.random.randn(*(shape0 + (n1,) + shape1))
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
xs = list(map(constant_op.constant, [x0, x1]))
c = array_ops.concat(xs, axis)
self.assertAllEqual(self.evaluate(c), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
dxs = self.evaluate(gradients_impl.gradients(c, xs, dc))
self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
@test_util.run_deprecated_v1
def testTensorConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [44, 7, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 0)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testTensorConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [20, 11, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 1)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [4, 7, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 0)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 1)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim2Grad(self):
x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]
output_shape = [4, 7, 6]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 2)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testIndexedSlicesConcatDim1Grad_UnknownInputDim(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
with self.cached_session():
x_1 = array_ops.placeholder(dtypes.float64)
x_2 = array_ops.placeholder(dtypes.float64)
x_3 = array_ops.placeholder(dtypes.float64)
xs = [x_1, x_2, x_3]
x_concat = array_ops.concat(xs, 1)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
params = {
x_1: np.random.random_sample(x_shapes[0]).astype(np.float64),
x_2: np.random.random_sample(x_shapes[1]).astype(np.float64),
x_3: np.random.random_sample(x_shapes[2]).astype(np.float64)
}
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape,
extra_feed_dict=params)
self.assertLess(err, 1e-11)
def testConcatTuple(self):
c1 = np.random.rand(4, 4)
c2 = np.random.rand(4, 4)
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(
self.evaluate(concat_list_t), self.evaluate(concat_tuple_t))
@test_util.run_deprecated_v1
def testConcatNoScalars(self):
scalar = constant_op.constant(7)
dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
array_ops.concat([scalar, scalar, scalar], dim)
# Important because the GPU implementation could fail if
# shared memory is not large enough for all the inputs
@test_util.run_deprecated_v1
def testConcatLargeNumberOfTensors(self):
with self.session(use_gpu=True):
for concat_dim in range(2):
params = {}
p = []
shape = np.array([7, 13])
if test.is_gpu_available():
num_tensors = 5000
else:
num_tensors = 500
for i in np.arange(num_tensors):
input_shape = shape
placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
p.append(placeholder)
params[placeholder] = np.random.rand(*input_shape).astype(np.float32)
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
index[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
self.assertAllEqual(result[index], params[p[i]])
def testConcatEmpty(self):
with test_util.use_gpu():
t1 = []
t2 = []
output = gen_array_ops.concat_v2([t1, t2], 0)
self.assertFalse(self.evaluate(output)) # Checks that output is empty
@test_util.run_deprecated_v1
def testConcatInvalidAxis(self):
with self.assertRaises(ValueError):
with test_util.use_gpu():
t1 = [1]
t2 = [2]
gen_array_ops.concat_v2([t1, t2], 1).eval()
def testConcatNegativeAxis(self):
with test_util.use_gpu():
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
c = gen_array_ops.concat_v2([t1, t2], -2)
self.assertEqual([4, 3], c.get_shape().as_list())
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)
c = gen_array_ops.concat_v2([t1, t2], -1)
self.assertEqual([2, 6], c.get_shape().as_list())
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
def _testGradientsForAxis(
self, inp_tensors, axis, output_shape, feed_dict=None):
with self.cached_session():
c = array_ops.concat(inp_tensors, axis)
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = concated_grad.eval(feed_dict=feed_dict)
self.assertAllEqual(result, grad_inp)
def _testIndexedSlicesGradientsForAxis(
self, inp_tensors, axis, output_shape, gather_indexes, feed_dict=None):
with self.cached_session():
c = array_ops.gather(
array_ops.concat(inp_tensors, axis), gather_indexes)
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.gather(
array_ops.concat(grad, axis), gather_indexes)
result = concated_grad.eval(feed_dict=feed_dict)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsNegativeAxis(self):
x1 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
x2 = [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]
inp_tensors = [constant_op.constant(x1, shape=(2, 3), dtype=dtypes.float32),
constant_op.constant(x2, shape=(2, 3), dtype=dtypes.float32)]
# Test concat gradient with axis == -2
self._testGradientsForAxis(inp_tensors, -2, output_shape=[4, 3])
# Test concat gradient with unknown-shape tensors.
x1_placeholder = array_ops.placeholder(dtypes.float32)
x2_placeholder = array_ops.placeholder(dtypes.float32)
inp_tensors_placeholders = [x1_placeholder, x2_placeholder]
feed_dict = {x1_placeholder: x1, x2_placeholder: x2}
self._testGradientsForAxis(
inp_tensors_placeholders, -1, output_shape=[2, 6], feed_dict=feed_dict)
# Test IndexedSlices concat gradient.
self._testIndexedSlicesGradientsForAxis(
inp_tensors, -2, output_shape=[2, 3], gather_indexes=[2, 0])
# We don't support calculating IndexedSlices concat gradient for
# negative indexes when rank is not known.
with self.assertRaises(ValueError):
self._testIndexedSlicesGradientsForAxis(
inp_tensors_placeholders, -2, output_shape=[2, 3],
gather_indexes=[2, 0], feed_dict=feed_dict)
def testConcatAxisType(self):
for dtype in [dtypes.int32, dtypes.int64]:
with test_util.use_gpu():
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
c = gen_array_ops.concat_v2([t1, t2],
constant_op.constant(1, dtype=dtype))
self.assertEqual([2, 6], c.get_shape().as_list())
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
class ConcatOffsetTest(test.TestCase):
def testBasic(self):
with test_util.use_gpu():
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
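# concat_offset yields, for each input, its starting coordinate in the
# concatenated result: zero everywhere except along cdim, where it is the
# running sum of the preceding sizes. A plain-Python sketch of that rule:
#   sizes, offsets, acc = [3, 7, 20], [], 0   # dim-1 sizes of s0, s1, s2
#   for s in sizes:
#       offsets.append([0, acc, 0])
#       acc += s
#   # offsets == [[0, 0, 0], [0, 3, 0], [0, 10, 0]]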
@test_util.run_deprecated_v1
def testNotVector(self):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should be a vector"):
self.evaluate(off)
@test_util.run_deprecated_v1
def testConcatDimOutOfRange(self):
cdim = constant_op.constant(4, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Concat dim is out of range: 4 vs. 3"):
self.evaluate(off)
@test_util.run_deprecated_v1
def testDimMismatch(self):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should contain 3 elem"):
self.evaluate(off)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testSizeMismatch(self):
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 10], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
r"and doesn't match input 0 with shape \[2 3 5\]."):
self.evaluate(off)
def testNegativeDim(self):
with test_util.use_gpu():
cdim = constant_op.constant(-2, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
cdim = constant_op.constant(-3, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([1, 3, 5], dtypes.int32)
s2 = constant_op.constant([3, 3, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])
def testCreateMemDecBlockedFormat(self):
"""Try to create the mkl concat operation
when one of the inputs' memory descriptors is in blocked format
"""
if test_util.IsMklEnabled():
s0 = np.ones((1, 8188, 4092, 1), dtype=np.uint8).astype(np.float32)
s1 = array_ops.strided_slice(
s0, [0, 1, 1, 0], [0, -1, -1, 0], [1, 1, 1, 1],
begin_mask=9,
end_mask=9)
s2 = array_ops.slice(s1, [0, 0, 0, 0], [-1, -1, -1, 1])
s3_1 = array_ops.slice(s2, [0, 4, 4, 0], [-1, 8178, 4082, 1])
s3_2 = array_ops.slice(s2, [0, 4, 4, 0], [-1, 8178, 4082, 1])
filter4_1 = constant_op.constant([[[[1.18, -0.51]]]])
s4_1 = nn_ops.conv2d(
s3_1, filter4_1, strides=[1, 1, 1, 1], padding="VALID")
filter4_2 = constant_op.constant([[[[1.38, -0.11]]]])
s4_2 = nn_ops.conv2d(
s3_2, filter4_2, strides=[1, 1, 1, 1], padding="VALID")
s5_1 = array_ops.slice(s4_1, [0, 6, 6, 0], [-1, 1, 1, -1])
s5_2 = array_ops.slice(s4_2, [0, 6, 6, 0], [-1, 1, 1, -1])
x_concat = array_ops.concat([s5_1, s5_2], 3)
self.evaluate(
x_concat
) # This test is only meant to check the creation is not crashed
if __name__ == "__main__":
test.main()
| apache-2.0 |
ModdedPA/android_external_chromium | testing/gtest/test/gtest_list_tests_unittest.py | 1068 | 5415 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
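# Example invocation exercised by this script (illustrative):
#   ./gtest_list_tests_unittest_ --gtest_list_tests --gtest_filter=Foo*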
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output: the expected output after running the command;
other_flag: a different flag to be passed to the command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
msg = ('when %s is %s, the output of "%s" is "%s".' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))
if expected_output is not None:
self.assert_(output == expected_output, msg)
else:
self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_FILTER_FOO,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
LaoZhongGu/kbengine | kbe/res/scripts/common/Lib/threading.py | 4 | 36652 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Debug support (adapted from ihooks.py).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self._verbose = verbose
def _note(self, format, *args):
if self._verbose:
format = format % args
# Issue #4188: calling current_thread() can incur an infinite
# recursion if it has to create a DummyThread on the fly.
ident = _get_ident()
try:
name = _active[ident].name
except KeyError:
name = "<OS thread %d>" % ident
format = "%s: %s\n" % (name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
global _profile_hook
_profile_hook = func
def settrace(func):
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(verbose=None, *args, **kwargs):
if verbose is None:
verbose = _VERBOSE
if (__debug__ and verbose) or _CRLock is None:
return _PyRLock(verbose, *args, **kwargs)
return _CRLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self._count)
def acquire(self, blocking=True, timeout=-1):
me = _get_ident()
if self._owner == me:
self._count = self._count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
if self._owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == _get_ident()
_PyRLock = _RLock
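# Reentrancy in a nutshell: the owning thread may acquire() again without
# blocking and must release() once per acquire(). Illustrative sketch:
#   lock = RLock()
#   with lock:
#       with lock:      # same thread re-enters; _count goes 1 -> 2
#           pass        # both releases happen on exit, freeing the lock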
def Condition(*args, **kwargs):
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = []
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self._lock.acquire(0):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
if __debug__:
self._note("%s.wait(): got it", self)
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self._waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
return gotit
finally:
self._acquire_restore(saved_state)
def wait_for(self, predicate, timeout=None):
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
if __debug__:
self._note("%s.wait_for(%r, %r): Timed out.",
self, predicate, timeout)
break
if __debug__:
self._note("%s.wait_for(%r, %r): Waiting with timeout=%s.",
self, predicate, timeout, waittime)
self.wait(waittime)
result = predicate()
else:
if __debug__:
self._note("%s.wait_for(%r, %r): Success.",
self, predicate, timeout)
return result
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self._waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
self.notify(len(self._waiters))
notifyAll = notify_all
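# Canonical wait/notify usage: re-check the predicate in a loop (or use
# wait_for), since a waiter can wake after the state it cares about has
# already changed again. Illustrative sketch:
#   cond, items = Condition(), []
#   def consumer():
#       with cond:
#           while not items:
#               cond.wait()
#           items.pop()
#   def producer():
#       with cond:
#           items.append(1)
#           cond.notify()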
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
self._cond.acquire()
while self._value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self._value)
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value = self._value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self._value)
rc = True
self._cond.release()
return rc
__enter__ = acquire
def release(self):
self._cond.acquire()
self._value = self._value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self._value)
self._cond.notify()
self._cond.release()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
return _Semaphore.release(self)
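# Typical semaphore usage: bound concurrent access to a fixed-size pool.
# Illustrative sketch:
#   pool = BoundedSemaphore(3)   # at most three holders at a time
#   def worker():
#       with pool:               # acquire on entry, release on exit
#           pass                 # use the shared resource here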
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self._cond = Condition(Lock())
self._flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self._cond.__init__()
def is_set(self):
return self._flag
isSet = is_set
def set(self):
self._cond.acquire()
try:
self._flag = True
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
self._cond.acquire()
try:
self._flag = False
finally:
self._cond.release()
def wait(self, timeout=None):
self._cond.acquire()
try:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
finally:
self._cond.release()
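# An Event is a level-triggered flag: wait() returns immediately once set()
# has been called, even for threads that start waiting later. Illustrative
# sketch:
#   ready = Event()
#   def waiter():
#       ready.wait()    # blocks until the flag is set
#   ready.set()         # releases current and future waiters until clear()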
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier(_Verbose):
"""
Barrier. Useful for synchronizing a fixed number of threads
at known synchronization points. Threads block on 'wait()' and are
simultaneously awoken once they have all made that call.
"""
def __init__(self, parties, action=None, timeout=None, verbose=None):
"""
Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called
by one of the threads after they have all entered the
barrier and just prior to releasing them all.
If a 'timeout' is provided, it is used as the default for
all subsequent 'wait()' calls.
"""
_Verbose.__init__(self, verbose)
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""
Wait for the barrier. When the specified number of threads have
started waiting, they are all simultaneously awoken. If an 'action'
was provided for the barrier, one of the threads will have executed
that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
# Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""
Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""
Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and
threads attempting to 'wait()' will have BrokenBarrierError
raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state and all parties are awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""
Return the number of threads required to trip the barrier.
"""
return self._parties
@property
def n_waiting(self):
"""
Return the number of threads that are currently waiting at the barrier.
"""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""
Return True if the barrier is in a broken state
"""
return self._state == -2
#exception raised by the Barrier class
class BrokenBarrierError(RuntimeError): pass
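# Typical barrier usage: 'parties' threads rendezvous at wait(); the last
# arrival trips the barrier and every thread gets a distinct index in
# [0, parties). Illustrative sketch:
#   b = Barrier(3)
#   def phase_worker():
#       i = b.wait()    # blocks until all three threads have arrived
#       if i == 0:
#           pass        # exactly one thread can do per-phase bookkeeping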
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# For debug and leak testing
_dangling = WeakSet()
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
#XXX __exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
self._daemonic = self._set_daemon()
self._ident = None
self._started = Event()
self._stopped = False
self._block = Condition(Lock())
self._initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self._stderr = _sys.stderr
_dangling.add(self)
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_block'): # DummyThread deletes _block
self._block.__init__()
self._started._reset_internal_locks()
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
if self._stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
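    # Illustrative sketch (not part of the original module): the two usual
    # ways to run code in a Thread -- pass a target callable, or subclass
    # and override run(). The names below are hypothetical.
    #
    #   def task(msg):
    #       print(msg)
    #
    #   t = Thread(target=task, args=("hello",))
    #   t.start()
    #   t.join()
    #
    #   class Worker(Thread):
    #       def run(self):
    #           ...  # work happens here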
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = _get_ident()
def _bootstrap_inner(self):
try:
self._set_ident()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if __debug__:
self._note("%s._bootstrap(): thread started", self)
if _trace_hook:
self._note("%s._bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s._bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s._bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s._bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self._stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self._exc_info()
try:
print((
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):"), file=self._stderr)
print((
"Traceback (most recent call last):"), file=self._stderr)
while exc_tb:
print((
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
exc_tb = exc_tb.tb_next
print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s._bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
#XXX self.__exc_clear()
pass
finally:
with _active_limbo_lock:
self._stop()
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def _stop(self):
self._block.acquire()
self._stopped = True
self._block.notify_all()
self._block.release()
def _delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with _dummy_thread:
#
# Must take care to not raise an exception if _dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). _dummy_thread.get_ident() always returns -1 since
# there is only one thread if _dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from _dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self._stopped:
self._note("%s.join(): waiting until thread stops", self)
self._block.acquire()
try:
if timeout is None:
while not self._stopped:
self._block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self._stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self._block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self._block.release()
@property
def name(self):
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
assert self._initialized, "Thread.__init__() not called"
return self._ident
def is_alive(self):
assert self._initialized, "Thread.__init__() not called"
return self._started.is_set() and not self._stopped
isAlive = is_alive
@property
def daemon(self):
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to the previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread._block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._block
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _set_daemon(self):
return True
def _stop(self):
pass
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
currentThread = current_thread
def active_count():
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
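# Illustrative sketch (not part of the original module): inspecting live
# threads with the helpers defined above.
#
#   print(current_thread().name)   # e.g. "MainThread"
#   print(active_count())          # number of Thread objects alive
#   for t in enumerate():          # note: this shadows the builtin enumerate()
#       print(t.name, t.daemon)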
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
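# Illustrative sketch (not part of the original module): attributes set on a
# `local` instance are visible only to the thread that set them.
#
#   storage = local()
#   storage.counter = 0     # independent per thread; other threads raise
#                           # AttributeError until they assign their own value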
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _active.values():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
| lgpl-3.0 |
JimCircadian/ansible | lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py | 58 | 12457 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Alen Komic
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_proxy
short_description: Create/delete/get/update Zabbix proxies
description:
- This module allows you to create, modify, get and delete Zabbix proxy entries.
version_added: "2.5"
author:
- "Alen Komic"
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.3"
options:
proxy_name:
description:
- Name of the proxy in Zabbix.
required: true
description:
description:
            - Description of the proxy.
required: false
status:
description:
            - Type of proxy. (5 - active, 6 - passive)
required: false
choices: ['active', 'passive']
default: "active"
tls_connect:
description:
- Connections to proxy.
required: false
choices: ['no_encryption','PSK','certificate']
default: 'no_encryption'
tls_accept:
description:
- Connections from proxy.
required: false
choices: ['no_encryption','PSK','certificate']
default: 'no_encryption'
tls_issuer:
description:
- Certificate issuer.
required: false
tls_subject:
description:
- Certificate subject.
required: false
tls_psk_identity:
description:
- PSK identity. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
required: false
tls_psk:
description:
- The preshared key, at least 32 hex digits. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
required: false
state:
description:
- State of the proxy.
            - On C(present), it will create the proxy if it does not exist, or update it if the associated data is different.
- On C(absent) will remove a proxy if it exists.
required: false
choices: ['present', 'absent']
default: "present"
interface:
description:
            - Dictionary with params for the interface when the proxy is in passive mode.
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- U(https://www.zabbix.com/documentation/3.2/manual/api/reference/proxy/object#proxy_interface)
required: false
default: {}
extends_documentation_fragment:
- zabbix
'''
EXAMPLES = '''
- name: Create a new proxy or update an existing proxies info
local_action:
module: zabbix_proxy
server_url: http://monitor.example.com
login_user: username
login_password: password
proxy_name: ExampleProxy
description: ExampleProxy
status: active
state: present
interface:
type: 0
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
class Proxy(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
self.existing_data = None
def proxy_exists(self, proxy_name):
result = self._zapi.proxy.get({
'output': 'extend', 'selectInterface': 'extend',
'filter': {'host': proxy_name}})
if len(result) > 0 and 'proxyid' in result[0]:
self.existing_data = result[0]
return result[0]['proxyid']
else:
return result
def add_proxy(self, data):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {}
for item in data:
if data[item]:
parameters[item] = data[item]
            proxy_ids_list = self._zapi.proxy.create(parameters)
            # exit_json() raises SystemExit, so capture the new proxy id
            # before reporting success; the return value is only reachable
            # when exit_json() is mocked out (e.g. in tests).
            proxy_id = None
            if len(proxy_ids_list) >= 1:
                proxy_id = proxy_ids_list['proxyids'][0]
            self._module.exit_json(changed=True,
                                   result="Successfully added proxy %s (%s)" %
                                          (data['host'], data['status']))
            return proxy_id
except Exception as e:
self._module.fail_json(msg="Failed to create proxy %s: %s" %
(data['host'], e))
def delete_proxy(self, proxy_id, proxy_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.proxy.delete([proxy_id])
self._module.exit_json(changed=True,
result="Successfully deleted" +
" proxy %s" % proxy_name)
except Exception as e:
self._module.fail_json(msg="Failed to delete proxy %s: %s" %
(proxy_name, str(e)))
def compile_interface_params(self, new_interface):
old_interface = {}
if 'interface' in self.existing_data and \
len(self.existing_data['interface']) > 0:
old_interface = self.existing_data['interface']
final_interface = old_interface.copy()
final_interface.update(new_interface)
final_interface = dict((k, str(v)) for k, v in final_interface.items())
if final_interface != old_interface:
return final_interface
else:
return {}
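    # Illustrative sketch (not part of the original module; values are
    # hypothetical): how compile_interface_params decides whether an update
    # is needed.
    #
    #   existing interface: {'ip': '10.0.0.1', 'port': '10050'}
    #   new_interface:      {'port': 10051}
    #   -> returns {'ip': '10.0.0.1', 'port': '10051'}   (changed: full dict)
    #
    #   new_interface:      {'port': 10050}
    #   -> returns {}                                    (unchanged: no-op)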
def update_proxy(self, proxy_id, data):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'proxyid': proxy_id}
for item in data:
if data[item] and item in self.existing_data and \
self.existing_data[item] != data[item]:
parameters[item] = data[item]
if 'interface' in parameters:
parameters.pop('interface')
if 'interface' in data and data['status'] == '6':
new_interface = self.compile_interface_params(data['interface'])
if len(new_interface) > 0:
parameters['interface'] = new_interface
if len(parameters) > 1:
self._zapi.proxy.update(parameters)
self._module.exit_json(
changed=True,
result="Successfully updated proxy %s (%s)" %
(data['host'], proxy_id)
)
else:
self._module.exit_json(changed=False)
except Exception as e:
self._module.fail_json(msg="Failed to update proxy %s: %s" %
(data['host'], e))
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
proxy_name=dict(type='str', required=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False,
default=None, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
status=dict(default="active", choices=['active', 'passive']),
state=dict(default="present", choices=['present', 'absent']),
description=dict(type='str', required=False),
tls_connect=dict(default='no_encryption',
choices=['no_encryption', 'PSK', 'certificate']),
tls_accept=dict(default='no_encryption',
choices=['no_encryption', 'PSK', 'certificate']),
tls_issuer=dict(type='str', required=False, default=None),
tls_subject=dict(type='str', required=False, default=None),
tls_psk_identity=dict(type='str', required=False, default=None),
tls_psk=dict(type='str', required=False, default=None),
timeout=dict(type='int', default=10),
interface=dict(type='dict', required=False, default={})
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing requried zabbix-api module" +
" (check docs or install with:" +
" pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
proxy_name = module.params['proxy_name']
description = module.params['description']
status = module.params['status']
tls_connect = module.params['tls_connect']
tls_accept = module.params['tls_accept']
tls_issuer = module.params['tls_issuer']
tls_subject = module.params['tls_subject']
tls_psk_identity = module.params['tls_psk_identity']
tls_psk = module.params['tls_psk']
state = module.params['state']
timeout = module.params['timeout']
interface = module.params['interface']
    # convert status string to the Zabbix numeric code: active -> 5, passive -> 6
status = 6 if status == "passive" else 5
if tls_connect == 'certificate':
tls_connect = 4
elif tls_connect == 'PSK':
tls_connect = 2
else:
tls_connect = 1
if tls_accept == 'certificate':
tls_accept = 4
elif tls_accept == 'PSK':
tls_accept = 2
else:
tls_accept = 1
zbx = None
# login to zabbix
try:
zbx = ZabbixAPI(server_url, timeout=timeout,
user=http_login_user,
passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
proxy = Proxy(module, zbx)
# check if proxy already exists
proxy_id = proxy.proxy_exists(proxy_name)
if proxy_id:
if state == "absent":
# remove proxy
proxy.delete_proxy(proxy_id, proxy_name)
else:
proxy.update_proxy(proxy_id, {
'host': proxy_name,
'description': description,
'status': str(status),
'tls_connect': str(tls_connect),
'tls_accept': str(tls_accept),
'tls_issuer': tls_issuer,
'tls_subject': tls_subject,
'tls_psk_identity': tls_psk_identity,
'tls_psk': tls_psk,
'interface': interface
})
else:
if state == "absent":
# the proxy is already deleted.
module.exit_json(changed=False)
proxy_id = proxy.add_proxy(data={
'host': proxy_name,
'description': description,
'status': str(status),
'tls_connect': str(tls_connect),
'tls_accept': str(tls_accept),
'tls_issuer': tls_issuer,
'tls_subject': tls_subject,
'tls_psk_identity': tls_psk_identity,
'tls_psk': tls_psk,
'interface': interface
})
if __name__ == '__main__':
main()
| gpl-3.0 |
wrongerror/landsite | landsite/settings.py | 1 | 3848 | """
Django settings for landsite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
reload(sys)
sys.setdefaultencoding('utf8')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DEFAULT_FILE_STORAGE = 'django_hashedfilenamestorage.storage.HashedFilenameFileSystemStorage'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=q0#x6=md#j1tpr*-6+cb8^-)lu48s+@tyie#(j+)3&07rshby'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
APPEND_SLASH = True
# Application definition
INSTALLED_APPS = (
'grappelli',
'filebrowser',
'tinymce',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.home',
'apps.product',
'apps.service',
'apps.case',
'apps.civilization',
'apps.aboutus',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'profiling.middleware.RedirectMiddleware',
)
ROOT_URLCONF = 'landsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'landsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'landsite',
'USER': 'ubuntu',
'ATOMIC_REQUESTS': True
},
}
#Template
# TEMPLATES = [
# {
# 'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'APP_DIRS': True,
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-cn'  # Django language codes use dashes; 'zh_cn' is not recognized
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = '/static/'
# Media Files
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
#tinymce
TINYMCE_DEFAULT_CONFIG = {
'plugins': "table,spellchecker,paste,searchreplace",
'theme': "advanced",
'cleanup_on_startup': True,
'custom_undo_redo_levels': 10,
# 'width': 1000,
'height': 650,
}
TINYMCE_SPELLCHECKER = True
TINYMCE_COMPRESSOR = True
TINYMCE_FILEBROWSER = True
FILEBROWSER_DIRECTORY = "uploads/"
import re
Redirect_USER_AGENTS = (re.compile(r'msie\s*[2-7]', re.IGNORECASE), )
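# Illustrative sketch (not part of the original settings; the model is
# hypothetical): with django-tinymce configured above, a model can expose a
# rich-text field that picks up these TINYMCE_* settings.
#
#   from django.db import models
#   from tinymce.models import HTMLField
#
#   class Article(models.Model):
#       body = HTMLField()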
| mit |
selimnairb/2014-02-25-swctest | lessons/misc-pandas/an-introduction-to-pandas.py | 1 | 2397 |
# These exercises assume `data` was loaded earlier in the lesson, e.g.
# (the CSV filename here is hypothetical):
#   import pandas as pd
#   data = pd.read_csv("weather_year.csv", parse_dates=["EDT"])
from datetime import datetime

## Exercise 1:
# How would we get the second to last date (EDT) in the dataset?
# Combine head() and tail()
last_two_dates = data.EDT.tail(2)
second_to_last_date = last_two_dates.head(1)
print second_to_last_date
## Exercise 2:
# What is the range of temperatures in the dataset?
hottest_temp = data.max_temp.max() # Highest of the highs
coldest_temp = data.min_temp.min() # Lowest of the lows
print "Temperature range:", hottest_temp - coldest_temp, "degrees F"
# Temperature range: 105 degrees F
## Exercise 3:
# Print out the cloud cover for each day in May.
# *Hint: you can make datetime objects with the `datetime(year, month, day)` function*
datetime(2012, 5, 1) # May 1st of 2012
data[datetime(2012, 5, 1):datetime(2012, 5, 31)].cloud_cover
## Exercise 4:
# Was there any November rain?
d = datetime(2012, 1, 1)
d.strftime("%B")
november_rain = False
for date_idx, row in data.iterrows():
if date_idx.strftime("%B") == "November" and "Rain" in row["events"]:
november_rain = True
if november_rain:
print "There was rain in November"
else:
print "There was *not* rain in November"
## Exercise 5:
# We'll replace "T" with a very small number, and convert the rest of the strings to floats:
# Convert precipitation to floating point number
# "T" means "trace of precipitation"
def precipitation_to_float(precip_str):
if precip_str == "T":
return 1e-10 # Very small value
return float(precip_str)
data.precipitation = data.precipitation.apply(precipitation_to_float)
data.precipitation.head()
## Exercise 6:
# Was the mean temperature more variable on days with rain and snow than on days with just rain or just snow?
days_with_rain = data[data.rain == True]
days_with_snow = data[data.snow == True]
rain_std = days_with_rain.mean_temp.std()
snow_std = days_with_snow.mean_temp.std()
if rain_std > snow_std:
print "Rainy days were more variable"
elif snow_std > rain_std:
print "Snowy days were more variable"
else:
print "They were the same"
## Exercise 7:
# Add the mean temperature to the previous plot using a green line. Also, add a legend with the `legend()` method of `ax`.
ax = data.max_temp.plot(title="Min and Max Temperatures")
data.min_temp.plot(style="red", ax=ax)
data.mean_temp.plot(style="green", ax=ax)
ax.set_ylabel("Temperature (F)")
| bsd-2-clause |
kapil-malik/airflow | airflow/executors/__init__.py | 12 | 1209 | import logging
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.celery_executor import CeleryExecutor
from airflow.executors.sequential_executor import SequentialExecutor
# TODO Fix this emergency fix
try:
from airflow.contrib.executors.mesos_executor import MesosExecutor
except:
pass
from airflow.utils import AirflowException
_EXECUTOR = conf.get('core', 'EXECUTOR')
if _EXECUTOR == 'LocalExecutor':
DEFAULT_EXECUTOR = LocalExecutor()
elif _EXECUTOR == 'CeleryExecutor':
DEFAULT_EXECUTOR = CeleryExecutor()
elif _EXECUTOR == 'SequentialExecutor':
DEFAULT_EXECUTOR = SequentialExecutor()
elif _EXECUTOR == 'MesosExecutor':
DEFAULT_EXECUTOR = MesosExecutor()
else:
# Loading plugins
from airflow.plugins_manager import executors as _executors
for _executor in _executors:
globals()[_executor.__name__] = _executor
if _EXECUTOR in globals():
DEFAULT_EXECUTOR = globals()[_EXECUTOR]()
else:
raise AirflowException("Executor {0} not supported.".format(_EXECUTOR))
logging.info("Using executor " + _EXECUTOR)
| apache-2.0 |
xuxiao19910803/edx | cms/djangoapps/contentstore/tests/utils.py | 28 | 18019 | # pylint: disable=no-member
'''
Utilities for contentstore tests
'''
import json
import textwrap
from mock import Mock
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import Client
from opaque_keys.edx.locations import SlashSeparatedCourseKey, AssetLocation
from contentstore.utils import reverse_url # pylint: disable=import-error
from student.models import Registration # pylint: disable=import-error
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.tests.utils import ProceduralCourseTestMixin
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
def parse_json(response):
"""Parse response, which is assumed to be json"""
return json.loads(response.content)
def user(email):
"""look up a user by email"""
return User.objects.get(email=email)
def registration(email):
"""look up registration object by email"""
return Registration.objects.get(user__email=email)
class AjaxEnabledTestClient(Client):
"""
Convenience class to make testing easier.
"""
def ajax_post(self, path, data=None, content_type="application/json", **kwargs):
"""
Convenience method for client post which serializes the data into json and sets the accept type
to json
"""
if not isinstance(data, basestring):
data = json.dumps(data or {})
kwargs.setdefault("HTTP_X_REQUESTED_WITH", "XMLHttpRequest")
kwargs.setdefault("HTTP_ACCEPT", "application/json")
return self.post(path=path, data=data, content_type=content_type, **kwargs)
def get_html(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to html
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="text/html", **extra)
def get_json(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to json
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="application/json", **extra)
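# Illustrative sketch (not part of the original module; the URL is
# hypothetical): posting JSON to a Studio handler with the client above.
#
#   client = AjaxEnabledTestClient()
#   client.login(username=user.username, password=password)
#   resp = client.ajax_post('/xblock/', data={'category': 'html'})
#   assert resp.status_code == 200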
class CourseTestCase(ProceduralCourseTestMixin, ModuleStoreTestCase):
"""
Base class for Studio tests that require a logged in user and a course.
Also provides helper methods for manipulating and verifying the course.
"""
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client can log them in.
The test user is created in the ModuleStoreTestCase setUp method.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
self.user_password = super(CourseTestCase, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=self.user_password)
self.course = CourseFactory.create()
def create_non_staff_authed_user_client(self, authenticate=True):
"""
Create a non-staff user, log them in (if authenticate=True), and return the client, user to use for testing.
"""
nonstaff, password = self.create_non_staff_user()
client = AjaxEnabledTestClient()
if authenticate:
client.login(username=nonstaff.username, password=password)
nonstaff.is_authenticated = lambda: authenticate
return client, nonstaff
def reload_course(self):
"""
Reloads the course object from the database
"""
self.course = self.store.get_course(self.course.id)
def save_course(self):
"""
Updates the course object in the database
"""
self.course.save()
self.store.update_item(self.course, self.user.id)
TEST_VERTICAL = 'vertical_test'
ORPHAN_DRAFT_VERTICAL = 'orphan_draft_vertical'
ORPHAN_DRAFT_HTML = 'orphan_draft_html'
PRIVATE_VERTICAL = 'a_private_vertical'
PUBLISHED_VERTICAL = 'a_published_vertical'
SEQUENTIAL = 'vertical_sequential'
DRAFT_HTML = 'draft_html'
DRAFT_VIDEO = 'draft_video'
LOCKED_ASSET_KEY = AssetLocation.from_deprecated_string('/c4x/edX/toy/asset/sample_static.txt')
def import_and_populate_course(self):
"""
Imports the test toy course and populates it with additional test data
"""
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=content_store)
course_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
# create an Orphan
# We had a bug where orphaned draft nodes caused export to fail. This is here to cover that case.
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
vertical.location = vertical.location.replace(name='no_references')
self.store.update_item(vertical, self.user.id, allow_not_found=True)
orphan_vertical = self.store.get_item(vertical.location)
self.assertEqual(orphan_vertical.location.name, 'no_references')
self.assertEqual(len(orphan_vertical.children), len(vertical.children))
# create an orphan vertical and html; we already don't try to import
# the orphaned vertical, but we should make sure we don't import
# the orphaned vertical's child html, too
orphan_draft_vertical = self.store.create_item(
self.user.id, course_id, 'vertical', self.ORPHAN_DRAFT_VERTICAL
)
orphan_draft_html = self.store.create_item(
self.user.id, course_id, 'html', self.ORPHAN_DRAFT_HTML
)
orphan_draft_vertical.children.append(orphan_draft_html.location)
self.store.update_item(orphan_draft_vertical, self.user.id)
# create a Draft vertical
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
draft_vertical = self.store.convert_to_draft(vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(draft_vertical))
# create a Private (draft only) vertical
private_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PRIVATE_VERTICAL)
self.assertFalse(self.store.has_published_version(private_vertical))
# create a Published (no draft) vertical
public_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PUBLISHED_VERTICAL)
public_vertical = self.store.publish(public_vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(public_vertical))
# add the new private and new public as children of the sequential
sequential = self.store.get_item(course_id.make_usage_key('sequential', self.SEQUENTIAL))
sequential.children.append(private_vertical.location)
sequential.children.append(public_vertical.location)
self.store.update_item(sequential, self.user.id)
# create an html and video component to make drafts:
draft_html = self.store.create_item(self.user.id, course_id, 'html', self.DRAFT_HTML)
draft_video = self.store.create_item(self.user.id, course_id, 'video', self.DRAFT_VIDEO)
# add them as children to the public_vertical
public_vertical.children.append(draft_html.location)
public_vertical.children.append(draft_video.location)
self.store.update_item(public_vertical, self.user.id)
# publish changes to vertical
self.store.publish(public_vertical.location, self.user.id)
# convert html/video to draft
self.store.convert_to_draft(draft_html.location, self.user.id)
self.store.convert_to_draft(draft_video.location, self.user.id)
# lock an asset
content_store.set_attr(self.LOCKED_ASSET_KEY, 'locked', True)
# create a non-portable link - should be rewritten in new courses
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
new_data = html_module.data = html_module.data.replace(
'/static/',
'/c4x/{0}/{1}/asset/'.format(course_id.org, course_id.course)
)
self.store.update_item(html_module, self.user.id)
html_module = self.store.get_item(html_module.location)
self.assertEqual(new_data, html_module.data)
return course_id
def check_populated_course(self, course_id):
"""
Verifies the content of the given course, per data that was populated in import_and_populate_course
"""
items = self.store.get_items(
course_id,
qualifiers={'category': 'vertical'},
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.check_verticals(items)
def verify_item_publish_state(item, publish_state):
"""Verifies the publish state of the item is as expected."""
self.assertEqual(self.store.has_published_version(item), publish_state)
def get_and_verify_publish_state(item_type, item_name, publish_state):
"""
Gets the given item from the store and verifies the publish state
of the item is as expected.
"""
item = self.store.get_item(course_id.make_usage_key(item_type, item_name))
verify_item_publish_state(item, publish_state)
return item
# verify draft vertical has a published version with published children
vertical = get_and_verify_publish_state('vertical', self.TEST_VERTICAL, True)
for child in vertical.get_children():
verify_item_publish_state(child, True)
# verify that it has a draft too
self.assertTrue(getattr(vertical, "is_draft", False))
# make sure that we don't have a sequential that is in draft mode
sequential = get_and_verify_publish_state('sequential', self.SEQUENTIAL, True)
self.assertFalse(getattr(sequential, "is_draft", False))
# verify that we have the private vertical
private_vertical = get_and_verify_publish_state('vertical', self.PRIVATE_VERTICAL, False)
# verify that we have the public vertical
public_vertical = get_and_verify_publish_state('vertical', self.PUBLISHED_VERTICAL, True)
# verify that we have the draft html
draft_html = self.store.get_item(course_id.make_usage_key('html', self.DRAFT_HTML))
self.assertTrue(getattr(draft_html, 'is_draft', False))
# verify that we have the draft video
draft_video = self.store.get_item(course_id.make_usage_key('video', self.DRAFT_VIDEO))
self.assertTrue(getattr(draft_video, 'is_draft', False))
# verify verticals are children of sequential
for vert in [vertical, private_vertical, public_vertical]:
self.assertIn(vert.location, sequential.children)
# verify draft html is the child of the public vertical
self.assertIn(draft_html.location, public_vertical.children)
# verify draft video is the child of the public vertical
self.assertIn(draft_video.location, public_vertical.children)
# verify textbook exists
course = self.store.get_course(course_id)
self.assertGreater(len(course.textbooks), 0)
# verify asset attributes of locked asset key
self.assertAssetsEqual(self.LOCKED_ASSET_KEY, self.LOCKED_ASSET_KEY.course_key, course_id)
# verify non-portable links are rewritten
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
self.assertIn('/static/foo.jpg', html_module.data)
return course
def assertCoursesEqual(self, course1_id, course2_id):
"""
Verifies the content of the two given courses are equal
"""
course1_items = self.store.get_items(course1_id)
course2_items = self.store.get_items(course2_id)
self.assertGreater(len(course1_items), 0) # ensure it found content instead of [] == []
if len(course1_items) != len(course2_items):
course1_block_ids = set([item.location.block_id for item in course1_items])
course2_block_ids = set([item.location.block_id for item in course2_items])
raise AssertionError(
u"Course1 extra blocks: {}; course2 extra blocks: {}".format(
course1_block_ids - course2_block_ids, course2_block_ids - course1_block_ids
)
)
for course1_item in course1_items:
course1_item_loc = course1_item.location
course2_item_loc = course2_id.make_usage_key(course1_item_loc.block_type, course1_item_loc.block_id)
if course1_item_loc.block_type == 'course':
# mongo uses the run as the name, split uses 'course'
store = self.store._get_modulestore_for_courselike(course2_id) # pylint: disable=protected-access
new_name = 'course' if isinstance(store, SplitMongoModuleStore) else course2_item_loc.run
course2_item_loc = course2_item_loc.replace(name=new_name)
course2_item = self.store.get_item(course2_item_loc)
# compare published state
self.assertEqual(
self.store.has_published_version(course1_item),
self.store.has_published_version(course2_item)
)
# compare data
self.assertEqual(hasattr(course1_item, 'data'), hasattr(course2_item, 'data'))
if hasattr(course1_item, 'data'):
self.assertEqual(course1_item.data, course2_item.data)
# compare meta-data
self.assertEqual(own_metadata(course1_item), own_metadata(course2_item))
# compare children
self.assertEqual(course1_item.has_children, course2_item.has_children)
if course1_item.has_children:
expected_children = []
for course1_item_child in course1_item.children:
expected_children.append(
course2_id.make_usage_key(course1_item_child.block_type, course1_item_child.block_id)
)
self.assertEqual(expected_children, course2_item.children)
# compare assets
content_store = self.store.contentstore
course1_assets, count_course1_assets = content_store.get_all_content_for_course(course1_id)
_, count_course2_assets = content_store.get_all_content_for_course(course2_id)
self.assertEqual(count_course1_assets, count_course2_assets)
for asset in course1_assets:
asset_son = asset.get('content_son', asset['_id'])
self.assertAssetsEqual(asset_son, course1_id, course2_id)
def check_verticals(self, items):
""" Test getting the editing HTML for each vertical. """
# assert is here to make sure that the course being tested actually has verticals (units) to check.
self.assertGreater(len(items), 0, "Course has no verticals (units) to check")
for descriptor in items:
resp = self.client.get_html(get_url('container_handler', descriptor.location))
self.assertEqual(resp.status_code, 200)
def assertAssetsEqual(self, asset_son, course1_id, course2_id):
"""Verifies the asset of the given key has the same attributes in both given courses."""
content_store = contentstore()
category = asset_son.block_type if hasattr(asset_son, 'block_type') else asset_son['category']
filename = asset_son.block_id if hasattr(asset_son, 'block_id') else asset_son['name']
course1_asset_attrs = content_store.get_attrs(course1_id.make_asset_key(category, filename))
course2_asset_attrs = content_store.get_attrs(course2_id.make_asset_key(category, filename))
self.assertEqual(len(course1_asset_attrs), len(course2_asset_attrs))
for key, value in course1_asset_attrs.iteritems():
if key in ['_id', 'filename', 'uploadDate', 'content_son', 'thumbnail_location']:
pass
else:
self.assertEqual(value, course2_asset_attrs[key])
def mock_requests_get(*args, **kwargs):
"""
Returns mock responses for the youtube API.
"""
# pylint: disable=unused-argument
response_transcript_list = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
response_transcript = textwrap.dedent("""
<transcript>
<text start="100" dur="100">subs #1</text>
<text start="200" dur="40">subs #2</text>
<text start="240" dur="140">subs #3</text>
</transcript>
""")
if kwargs == {'params': {'lang': 'en', 'v': 'good_id_2'}}:
return Mock(status_code=200, text='')
elif kwargs == {'params': {'type': 'list', 'v': 'good_id_2'}}:
return Mock(status_code=200, text=response_transcript_list, content=response_transcript_list)
elif kwargs == {'params': {'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}}:
return Mock(status_code=200, text=response_transcript, content=response_transcript)
return Mock(status_code=404, text='')
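# Illustrative sketch (not part of the original module; the patch target and
# URL are hypothetical): tests typically substitute this helper for
# requests.get.
#
#   from mock import patch
#
#   with patch('requests.get', mock_requests_get):
#       resp = requests.get('http://video.google.com/timedtext',
#                           params={'lang': 'en', 'v': 'good_id_2'})
#       assert resp.status_code == 200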
def get_url(handler_name, key_value, key_name='usage_key_string', kwargs=None):
"""
Helper function for getting HTML for a page in Studio and checking that it does not error.
"""
return reverse_url(handler_name, key_name, key_value, kwargs)
| agpl-3.0 |
kobejean/tensorflow | tensorflow/contrib/slim/nets.py | 191 | 1609 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF-Slim Nets.
## Standard Networks.
@@alexnet_v2
@@inception_v1
@@inception_v1_base
@@inception_v2
@@inception_v2_base
@@inception_v3
@@inception_v3_base
@@overfeat
@@vgg_a
@@vgg_16
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,
# Collapse nets into a single namespace.
from tensorflow.contrib.slim.python.slim.nets import alexnet
from tensorflow.contrib.slim.python.slim.nets import inception
from tensorflow.contrib.slim.python.slim.nets import overfeat
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.slim.python.slim.nets import resnet_v2
from tensorflow.contrib.slim.python.slim.nets import vgg
from tensorflow.python.util.all_util import make_all
# pylint: enable=unused-import
__all__ = make_all(__name__)
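# Illustrative usage sketch (not part of the original module; the input shape
# is an assumption): building one of the collapsed nets.
#
#   import tensorflow as tf
#   from tensorflow.contrib.slim import nets
#
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   logits, end_points = nets.vgg.vgg_16(images, num_classes=1000)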
| apache-2.0 |
crosick/zhishu | ENV/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
HZ_cls = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
HZ_st = (
eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17
5,eError, 6,eError, 5, 5, 4,eError,# 18-1f
4,eError, 4, 4, 4,eError, 4,eError,# 20-27
4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
HZCharLenTable = (0, 0, 0, 0, 0, 0)
HZSMModel = {'classTable': HZ_cls,
'classFactor': 6,
'stateTable': HZ_st,
'charLenTable': HZCharLenTable,
'name': "HZ-GB-2312"}
ISO2022CN_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
5, 6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
'classFactor': 9,
'stateTable': ISO2022CN_st,
'charLenTable': ISO2022CNCharLenTable,
'name': "ISO-2022-CN"}
ISO2022JP_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError, 5,eError,eError,eError, 4,eError,eError,# 20-27
eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
'classFactor': 10,
'stateTable': ISO2022JP_st,
'charLenTable': ISO2022JPCharLenTable,
'name': "ISO-2022-JP"}
ISO2022KR_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_st = (
eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17
eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
'classFactor': 6,
'stateTable': ISO2022KR_st,
'charLenTable': ISO2022KRCharLenTable,
'name': "ISO-2022-KR"}
# flake8: noqa
| mit |
fergalmoran/Chrome2Kindle | server/html5lib/treebuilders/etree_lxml.py | 26 | 12625 | import new
import warnings
import re
import _base
from html5lib.constants import DataLossWarning
import html5lib.constants as constants
import etree as etree_builders
from html5lib import ihatexml
try:
import lxml.etree as etree
except ImportError:
pass
fullTree = True
"""Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self._childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
def _getChildNodes(self):
return self._childNodes
childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
finalText = None
filter = ihatexml.InfosetFilter()
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
#Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>"%element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">"""%(
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s"%(' '*(indent+2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent+2)
next_element = next_element.getnext()
elif isinstance(element, basestring):
#Text in a fragment
rv.append("|%s\"%s\""%(' '*indent, element))
else:
#Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent+2)
elif type(element.tag) == type(etree.Comment):
rv.append("|%s<!-- %s -->"%(' '*indent, element.text))
else:
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>"%(' '*indent, prefix,
filter.fromXmlName(tag)))
else:
rv.append("|%s<%s>"%(' '*indent,
filter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
for name, value in element.attrib.iteritems():
nsmatch = etree_builders.tag_regexp.match(name)
if nsmatch:
ns = nsmatch.group(1)
name = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append('|%s%s %s="%s"' % (' '*(indent+2),
prefix,
filter.fromXmlName(name),
value))
else:
rv.append('|%s%s="%s"' % (' '*(indent+2),
filter.fromXmlName(name),
value))
if element.text:
rv.append("|%s\"%s\"" %(' '*(indent+2), element.text))
indent += 2
for child in element.getchildren():
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" %(' '*(indent-2), element.tail))
serializeElement(element, 0)
if finalText is not None:
rv.append("|%s\"%s\""%(' '*2, finalText))
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
finalText = None
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>"%element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif type(element.tag) == type(etree.Comment):
rv.append("<!--%s-->"%(element.text,))
else:
#This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>"%(element.tag,))
else:
attr = " ".join(["%s=\"%s\""%(name, value)
for name, value in element.attrib.iteritems()])
rv.append("<%s %s>"%(element.tag, attr))
if element.text:
rv.append(element.text)
for child in element.getchildren():
serializeElement(child)
rv.append("</%s>"%(element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
if finalText is not None:
rv.append("%s\""%(' '*2, finalText))
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
def __init__(self, namespaceHTMLElements, fullTree = False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
filter = self.filter = ihatexml.InfosetFilter()
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(dict):
def __init__(self, element, value={}):
self._element = element
dict.__init__(self, value)
for key, value in self.iteritems():
if isinstance(key, tuple):
name = "{%s}%s"%(key[2], filter.coerceAttribute(key[1]))
else:
name = filter.coerceAttribute(key)
self._element._element.attrib[name] = value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if isinstance(key, tuple):
name = "{%s}%s"%(key[2], filter.coerceAttribute(key[1]))
else:
name = filter.coerceAttribute(key)
self._element._element.attrib[name] = value
class Element(builder.Element):
def __init__(self, name, namespace):
name = filter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = filter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return filter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, attributes):
self._attributes = Attributes(self, attributes)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = filter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def appendChild(self, child):
builder.Element.appendChild(self, child)
class Comment(builder.Comment):
def __init__(self, data):
data = filter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = filter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
        self.commentClass = Comment
#self.fragmentClass = builder.DocumentFragment
_base.TreeBuilder.__init__(self, namespaceHTMLElements)
def reset(self):
_base.TreeBuilder.reset(self)
self.insertComment = self.insertCommentInitial
self.initial_comments = []
self.doctype = None
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(element.getchildren())
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if not name or ihatexml.nonXmlNameBMPRegexp.search(name) or name[0] == '"':
warnings.warn("lxml cannot represent null or non-xml doctype", DataLossWarning)
doctype = self.doctypeClass(name, publicId, systemId)
self.doctype = doctype
def insertCommentInitial(self, data, parent=None):
self.initial_comments.append(data)
def insertRoot(self, token):
"""Create the document root"""
#Because of the way libxml2 works, it doesn't seem to be possible to
#alter information like the doctype after the tree has been parsed.
        #Therefore we need to use the built-in parser to create our initial
#tree, after which we can add elements like normal
docStr = ""
if self.doctype and self.doctype.name and not self.doctype.name.startswith('"'):
docStr += "<!DOCTYPE %s"%self.doctype.name
if (self.doctype.publicId is not None or
self.doctype.systemId is not None):
docStr += ' PUBLIC "%s" "%s"'%(self.doctype.publicId or "",
self.doctype.systemId or "")
docStr += ">"
docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
try:
root = etree.fromstring(docStr)
except etree.XMLSyntaxError:
print docStr
raise
#Append the initial comments:
for comment_token in self.initial_comments:
root.addprevious(etree.Comment(comment_token["data"]))
#Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Give the root element the right name
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s"%(namespace, name)
root.tag = etree_tag
#Add the root element to the internal child/open data structures
root_element = self.elementClass(name, namespace)
root_element._element = root
self.document._childNodes.append(root_element)
self.openElements.append(root_element)
#Reset to the default insert comment function
self.insertComment = super(TreeBuilder, self).insertComment
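# Illustrative usage sketch (not part of the original module): parsing a
# document with this treebuilder through html5lib's convenience API; the
# "lxml" treebuilder name resolves to this module when lxml is available.
if __name__ == "__main__":
    import html5lib
    doc = html5lib.parse("<!DOCTYPE html><p>Hello", treebuilder="lxml")
    print testSerializer(doc)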
| mit |
xgfone/homepage | homepage/homepage/django_packages/south/introspection_plugins/django_audit_log.py | 154 | 1436 | """
South introspection rules for django-audit-log
"""
from django.contrib.auth.models import User
from django.conf import settings
from south.modelsinspector import add_introspection_rules
if "audit_log" in settings.INSTALLED_APPS:
try:
# Try and import the field so we can see if audit_log is available
from audit_log.models import fields
# Make sure the `to` and `null` parameters will be ignored
rules = [(
(fields.LastUserField,),
[],
{
'to': ['rel.to', {'default': User}],
'null': ['null', {'default': True}],
},
)]
# Add the rules for the `LastUserField`
add_introspection_rules(
rules,
['^audit_log\.models\.fields\.LastUserField'],
)
except ImportError:
pass
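# Note (illustrative): each rule South consumes is a tuple of
# (field classes to match, positional-arg specs, keyword-arg specs). The
# keyword-arg dict above maps each kwarg name to [attribute path, options];
# supplying a 'default' tells South what to assume for that kwarg, so it can
# be left out of the frozen field definition.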
| bsd-3-clause |
wgcv/SWW-Crashphone | lib/python2.7/site-packages/django/contrib/gis/sitemaps/kml.py | 60 | 2510 | from django.apps import apps
from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
from django.contrib.gis.db.models.fields import GeometryField
from django.db import models
class KMLSitemap(Sitemap):
"""
A minimal hook to produce KML sitemaps.
"""
geo_format = 'kml'
def __init__(self, locations=None):
# If no locations specified, then we try to build for
# every model in installed applications.
self.locations = self._build_kml_sources(locations)
def _build_kml_sources(self, sources):
"""
        Goes through the given sources and returns a list of 3-tuples of
        the application label, model name, and field name of every
        GeometryField encountered in the sources.
        If no sources are provided, then all models are examined.
"""
kml_sources = []
if sources is None:
sources = apps.get_models()
for source in sources:
if isinstance(source, models.base.ModelBase):
for field in source._meta.fields:
if isinstance(field, GeometryField):
kml_sources.append((source._meta.app_label,
source._meta.model_name,
field.name))
elif isinstance(source, (list, tuple)):
if len(source) != 3:
                raise ValueError('Must specify a 3-tuple of (app_label, model_name, field_name).')
kml_sources.append(source)
else:
raise TypeError('KML Sources must be a model or a 3-tuple.')
return kml_sources
def get_urls(self, page=1, site=None):
"""
        This method is overridden so the appropriate `geo_format` attribute
is placed on each URL element.
"""
urls = Sitemap.get_urls(self, page=page, site=site)
for url in urls:
url['geo_format'] = self.geo_format
return urls
def items(self):
return self.locations
def location(self, obj):
return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
kwargs={'label': obj[0],
'model': obj[1],
'field_name': obj[2],
}
)
class KMZSitemap(KMLSitemap):
geo_format = 'kmz'
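# Illustrative usage sketch (not part of the original module): wiring a KML
# sitemap into a URLconf. The app and model names below are hypothetical; the
# 'django.contrib.gis.sitemaps.views.kml' view reversed in location() above
# must be routable for the generated URLs to resolve.
#
#     from django.conf.urls import url
#     from django.contrib.sitemaps.views import sitemap
#     from django.contrib.gis.sitemaps import KMLSitemap
#     from geoapp.models import City  # hypothetical model with a geometry field
#
#     urlpatterns = [
#         url(r'^sitemap\.xml$', sitemap,
#             {'sitemaps': {'kml': KMLSitemap([City])}}),
#     ]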
| apache-2.0 |
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/setuptools/command/easy_install.py | 201 | 86354 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise, PY2, PY3)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
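    # struct.calcsize("P") is the size of a C pointer: 8 on 64-bit builds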
return struct.calcsize("P") == 8
def samefile(p1, p2):
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
if PY2:
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
# the --user option seems to be an opt-in one,
# so the default should be False.
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir', 'script_dir', 'build_directory',
'site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
        normpath = list(map(normalize_path, sys.path))
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data', ])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); "
"f.close()\n" % (ok_file,))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
if (basename.lower() == 'python.exe' and
os.path.exists(alt)):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable:
self.install_site_py()
try:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps,
True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if file in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound as e:
raise DistutilsError(str(e))
except VersionConflict as e:
raise DistutilsError(e.report())
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = ("%r already exists in %s; build directory %s will not be "
"kept")
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
script_text = (ScriptWriter.get_header(script_text) +
self._load_template(dev_path) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,
os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink, (destination,), "Removing " +
destination)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m + " %s to %s") %
(os.path.basename(egg_path),
os.path.dirname(destination)))
update_dist_caches(destination,
fix_zipimporter_caches=new_dist_is_zipped)
except:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
'.egg')
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers(
[os.path.join(script_dir, args[0]) for args in
ScriptWriter.get_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy, 'rb')
current = f.read()
# we want str, not bytes
if PY3:
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy, 'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
from setuptools.compat import StringIO, ConfigParser
import struct
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
cfg = ConfigParser.RawConfigParser(
{'version': '', 'target_version': ''})
try:
part = f.read(cfglen)
# Read up to the first null byte.
config = part.split(b'\0', 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
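# Trailing layout of a bdist_wininst .exe as decoded above (illustrative):
#   [exe stub ...][config: cfglen bytes][12-byte footer: tag, cfglen, bmlen][zip archive]
# where 'prepended' is the number of bytes preceding the embedded zip archive.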
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if PY3:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative, self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename, 'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
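    # A non-empty easy-install.pth written above looks like (paths are
    # illustrative):
    #
    #   import sys; sys.__plen = len(sys.path)
    #   ./foo-1.0-py2.7.egg
    #   import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
    #
    # The bracketing import lines splice the listed paths into sys.path at
    # the position tracked by sys.__egginsert rather than leaving them at
    # the end.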
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep == '/' and '/' or os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
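    # Example (illustrative, POSIX): with basedir '/srv/site-packages',
    # make_relative('/srv/site-packages/foo.egg') returns './foo.egg';
    # paths outside basedir are returned unchanged.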
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def auto_chmod(func, arg, exc):
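    # rmtree() onerror handler: on Windows a read-only file makes os.remove
    # fail, so clear the read-only bit and retry; any other failure is
    # re-raised with the failing function and argument appended for context.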
if func is os.remove and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
    original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we can not catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whomever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
Given updater is a callable taking a cache entry key and the original entry
(after already removing the entry from the cache), and expected to update
the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, thus not allowing this function
# to be used only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
# https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
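# A minimal updater sketch for _update_zipimporter_cache (drop_entry is a
# hypothetical name, not part of this module): an updater that always returns
# None removes matching entries, which is also what _uncache() right below
# achieves by passing no updater at all.
#
#   def drop_entry(path, old_entry):
#       return None
#
#   _update_zipimporter_cache(normalized_path, cache, updater=drop_entry)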
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
with io.open(executable, encoding='latin-1') as fp:
magic = fp.read(2)
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
return subprocess.list2cmdline([arg])
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.)
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error as e:
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
warnings.warn("Use JythonCommandSpec", DeprecationWarning, stacklevel=2)
if not JythonCommandSpec.relevant():
return executable
cmd = CommandSpec.best().from_param(executable)
cmd.install_options(options)
return cmd.as_header().lstrip('#!').rstrip('\n')
class CommandSpec(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options = []
split_args = dict()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls if not JythonCommandSpec.relevant() else JythonCommandSpec
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param):
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, list):
return cls(param)
if param is None:
return cls.from_environment()
# otherwise, assume it's a string.
return cls.from_string(param)
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
def install_options(self, script_text):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = match.group(1) or '' if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(items)
return '#!' + cmdline + '\n'
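# Illustrative CommandSpec usage (the interpreter path and script text are
# assumptions for this sketch, not taken from this module):
#
#   cmd = CommandSpec.from_string('/usr/bin/python -E')
#   cmd.install_options('#!python -x\nprint("hi")\n')  # extracts '-x'
#   header = cmd.as_header()  # -> '#!/usr/bin/python -E -x\n'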
# For pbr compat; will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
split_args = dict(posix=False)
class JythonCommandSpec(CommandSpec):
@classmethod
def relevant(cls):
return (
sys.platform.startswith('java')
and
__import__('java').lang.System.getProperty('os.name') != 'Linux'
)
def as_header(self):
"""
Workaround Jython's sys.executable being a .sh (an invalid
shebang line interpreter)
"""
if not is_sh(self[0]):
return super(JythonCommandSpec, self).as_header()
if self.options:
# Can't apply the workaround, leave it broken
log.warn(
"WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
return super(JythonCommandSpec, self).as_header()
items = ['/usr/bin/env'] + self + list(self.options)
return self._render(items)
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
command_spec_class = CommandSpec
@classmethod
def get_script_args(cls, dist, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_args", DeprecationWarning)
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
header = cls.get_script_header("", executable, wininst)
return writer.get_args(dist, header)
@classmethod
def get_script_header(cls, script_text, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_header", DeprecationWarning)
if wininst:
executable = "python.exe"
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def get_writer(cls, force_windows):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return WindowsScriptWriter.best() if force_windows else cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
return WindowsScriptWriter.best() if sys.platform == 'win32' else cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def get_writer(cls):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
warnings.warn("%s not listed in PATHEXT; scripts will not be "
"recognized as executables." % ext, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@staticmethod
def _adjust_header(type_, orig_header):
"""
        Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
clean_header = new_header[2:-1].strip('"')
if sys.platform == 'win32' and not os.path.exists(clean_header):
# the adjusted version doesn't exist, so return the original
return orig_header
return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower() == 'arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
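# For example, get_win_launcher('cli') resolves to the packaged 'cli-64.exe'
# on a 64-bit interpreter and 'cli-32.exe' otherwise, with an '-arm' infix
# inserted first on ARM machines.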
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
| mit |
scottellis/overo-oe | lib/oe/patch.py | 26 | 12532 | import subprocess
import os
import oe.path
import oe.process
import bb.fetch, bb.data
class PatchError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "Patch Error: %s" % self.msg
class PatchSet(object):
defaults = {
"strippath": 1
}
def __init__(self, dir, d):
self.dir = dir
self.d = d
self.patches = []
self._current = None
self.env = {"PATH": d.getVar("PATH", True)}
def current(self):
return self._current
def Clean(self):
"""
Clean out the patch set. Generally includes unapplying all
patches and wiping out all associated metadata.
"""
raise NotImplementedError()
def Import(self, patch, force):
if not patch.get("file"):
if not patch.get("remote"):
raise PatchError("Patch file must be specified in patch import.")
else:
patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
for param in PatchSet.defaults:
if not patch.get(param):
patch[param] = PatchSet.defaults[param]
if patch.get("remote"):
patch["file"] = bb.data.expand(bb.fetch.localpath(patch["remote"], self.d), self.d)
patch["filemd5"] = bb.utils.md5_file(patch["file"])
def Push(self, force):
raise NotImplementedError()
def Pop(self, force):
raise NotImplementedError()
def Refresh(self, remote = None, all = None):
raise NotImplementedError()
class PatchTree(PatchSet):
def __init__(self, dir, d):
PatchSet.__init__(self, dir, d)
def Import(self, patch, force = None):
""""""
PatchSet.Import(self, patch, force)
if self._current is not None:
i = self._current + 1
else:
i = 0
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
shellcmd = ["patch", "-p%s" % patch['strippath'], "-f"]
if reverse:
shellcmd.append('-R')
if not run:
return subprocess.list2cmdline(shellcmd)
patch = open(patch['file'], "r")
return oe.process.run(shellcmd, cwd=self.dir, env=self.env, stdin=patch)
def Push(self, force = False, all = False, run = True):
if all:
for i in self.patches:
if self._current is not None:
self._current = self._current + 1
else:
self._current = 0
self._applypatch(i, force)
else:
if self._current is not None:
self._current = self._current + 1
else:
self._current = 0
return self._applypatch(self.patches[self._current], force)
def Pop(self, force = None, all = None):
if all:
for i in self.patches:
self._applypatch(i, force, True)
else:
self._applypatch(self.patches[self._current], force, True)
def Clean(self):
""""""
class GitApplyTree(PatchTree):
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
def _applypatch(self, patch, force = False, reverse = False, run = True):
shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]
if reverse:
shellcmd.append('-R')
shellcmd.append(patch['file'])
if not run:
return subprocess.list2cmdline(shellcmd)
return oe.process.run(shellcmd, cwd=self.dir, env=self.env)
class QuiltTree(PatchSet):
def _runcmd(self, args, run = True):
quiltrc = bb.data.getVar('QUILTRCFILE', self.d, 1) or '-'
cmdline = ["quilt", "--quiltrc=%s" % quiltrc] + args
if not run:
return subprocess.list2cmdline(cmdline)
oe.process.run(cmdline, cwd=self.dir, env=self.env)
def _quiltpatchpath(self, file):
return os.path.join(self.dir, "patches", os.path.basename(file))
def __init__(self, dir, d):
PatchSet.__init__(self, dir, d)
self.initialized = False
p = os.path.join(self.dir, 'patches')
if not os.path.exists(p):
os.makedirs(p)
def Clean(self):
try:
self._runcmd(["pop", "-a", "-f"])
oe.path.remove(os.path.join(self.dir, "patches","series"))
except Exception:
pass
self.initialized = True
def InitFromDir(self):
# read series -> self.patches
seriespath = os.path.join(self.dir, 'patches', 'series')
if not os.path.exists(self.dir):
raise Exception("Error: %s does not exist." % self.dir)
if os.path.exists(seriespath):
series = file(seriespath, 'r')
for line in series.readlines():
patch = {}
parts = line.strip().split()
patch["quiltfile"] = self._quiltpatchpath(parts[0])
patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
if len(parts) > 1:
patch["strippath"] = parts[1][2:]
self.patches.append(patch)
series.close()
# determine which patches are applied -> self._current
try:
output = oe.process.run(["quilt", "applied"], cwd=self.dir, \
env=self.env)
except oe.process.ExecutionError, exc:
if exc.stdout.strip() != "No patches applied":
raise
output = [val for val in output.split('\n') if not val.startswith('#')]
for patch in self.patches:
if os.path.basename(patch["quiltfile"]) == output[-1]:
self._current = self.patches.index(patch)
self.initialized = True
def Import(self, patch, force = None):
if not self.initialized:
self.InitFromDir()
PatchSet.Import(self, patch, force)
oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]))
f = open(os.path.join(self.dir, "patches","series"), "a");
f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"]+"\n")
f.close()
patch["quiltfile"] = self._quiltpatchpath(patch["file"])
patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
# TODO: determine if the file being imported:
# 1) is already imported, and is the same
# 2) is already imported, but differs
self.patches.insert(self._current or 0, patch)
def Push(self, force = False, all = False, run = True):
# quilt push [-f]
args = ["push"]
if force:
args.append("-f")
if all:
args.append("-a")
if not run:
return self._runcmd(args, run)
self._runcmd(args)
if self._current is not None:
self._current = self._current + 1
else:
self._current = 0
def Pop(self, force = None, all = None):
# quilt pop [-f]
args = ["pop"]
if force:
args.append("-f")
if all:
args.append("-a")
self._runcmd(args)
if self._current == 0:
self._current = None
if self._current is not None:
self._current = self._current - 1
def Refresh(self, **kwargs):
if kwargs.get("remote"):
patch = self.patches[kwargs["patch"]]
if not patch:
raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
(type, host, path, user, pswd, parm) = bb.decodeurl(patch["remote"])
if type == "file":
import shutil
if not patch.get("file") and patch.get("remote"):
patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
shutil.copyfile(patch["quiltfile"], patch["file"])
else:
raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
else:
# quilt refresh
args = ["refresh"]
if kwargs.get("quiltfile"):
args.append(os.path.basename(kwargs["quiltfile"]))
elif kwargs.get("patch"):
args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
self._runcmd(args)
class Resolver(object):
def __init__(self, patchset):
raise NotImplementedError()
def Resolve(self):
raise NotImplementedError()
def Revert(self):
raise NotImplementedError()
def Finalize(self):
raise NotImplementedError()
class NOOPResolver(Resolver):
def __init__(self, patchset):
self.patchset = patchset
def Resolve(self):
olddir = os.path.abspath(os.curdir)
os.chdir(self.patchset.dir)
try:
self.patchset.Push()
except Exception:
import sys
os.chdir(olddir)
raise sys.exc_value
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
def __init__(self, patchset):
self.patchset = patchset
# Force a push in the patchset, then drop to a shell for the user to
# resolve any rejected hunks
def Resolve(self):
olddir = os.path.abspath(os.curdir)
os.chdir(self.patchset.dir)
try:
self.patchset.Push(False)
except oe.process.CmdError:
# Patch application failed
patchcmd = self.patchset.Push(True, False, False)
t = bb.data.getVar('T', self.patchset.d, 1)
if not t:
bb.msg.fatal(bb.msg.domain.Build, "T not set")
bb.mkdirhier(t)
import random
rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
f = open(rcfile, "w")
f.write("echo '*** Manual patch resolution mode ***'\n")
f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
f.write("echo ''\n")
f.write(" ".join(patchcmd) + "\n")
f.write("#" + bb.data.getVar('TERMCMDRUN', self.patchset.d, 1))
f.close()
os.chmod(rcfile, 0775)
os.environ['TERMWINDOWTITLE'] = "Bitbake: Please fix patch rejects manually"
os.environ['TERMRCFILE'] = rcfile
rc = os.system(bb.data.getVar('TERMCMDRUN', self.patchset.d, 1))
if os.WIFEXITED(rc) and os.WEXITSTATUS(rc) != 0:
bb.msg.fatal(bb.msg.domain.Build, ("Cannot proceed with manual patch resolution - '%s' not found. " \
+ "Check TERMCMDRUN variable.") % bb.data.getVar('TERMCMDRUN', self.patchset.d, 1))
# Construct a new PatchSet after the user's changes, compare the
# sets, checking patches for modifications, and doing a remote
# refresh on each.
oldpatchset = self.patchset
self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
for patch in self.patchset.patches:
oldpatch = None
for opatch in oldpatchset.patches:
if opatch["quiltfile"] == patch["quiltfile"]:
oldpatch = opatch
if oldpatch:
patch["remote"] = oldpatch["remote"]
if patch["quiltfile"] == oldpatch["quiltfile"]:
if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
# user change? remote refresh
self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
else:
# User did not fix the problem. Abort.
raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
except Exception:
os.chdir(olddir)
raise
os.chdir(olddir)
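# Illustrative driver sketch (not part of this module; 'workdir' and 'd' stand
# for the work directory and bitbake datastore supplied by the calling task):
#
#   patchset = QuiltTree(workdir, d)
#   patchset.Import({"file": "fix-build.patch"})   # strippath defaults to 1
#   resolver = UserResolver(patchset)
#   resolver.Resolve()  # pushes the patch, dropping to a shell on rejects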
| mit |
leifos/boxes | treasure-houses/asg/models.py | 2 | 3260 | from django.db import models
from django.contrib.auth.models import User
import os
import sys
#from django_countries import CountryField
from registration.signals import *
# Create your models here.
sys.path.append(os.getcwd())
from configuration import UPLOAD_DIR
class UserProfile(models.Model):
user = models.OneToOneField(User)
level = models.IntegerField(default=0,blank=True)
rating = models.IntegerField(default=0,blank=True)
last_time_played = models.DateTimeField(null=True, blank=True)
no_games_played = models.IntegerField(default=0,blank=True)
total_points = models.IntegerField(default=0,blank=True)
total_tokens = models.IntegerField(default=0,blank=True)
no_queries_issued = models.IntegerField(default=0,blank=True)
no_docs_assessed = models.IntegerField(default=0,blank=True)
    def _get_ratio(self):
        "Return the ratio of total points to total tokens."
return round(float(self.total_points) / (float(self.total_tokens+0.000001)),2)
def _get_average_points(self):
return round(float(self.total_points)/float(self.no_games_played+0.000001) ,2)
ratio = property(_get_ratio)
average_points = property(_get_average_points)
def __unicode__(self):
return self.user.username
class GameExperiment(models.Model):
name = models.CharField(max_length=128)
config = models.IntegerField(default=0, unique=True)
icon = models.ImageField(null=True, upload_to=UPLOAD_DIR, blank=True)
desc = models.TextField(null=True, blank=True)
level = models.IntegerField(default=0, blank=True)
times_played = models.IntegerField(default=0, blank=True)
no_queries_issued = models.IntegerField(default=0,blank=True)
no_docs_assessed = models.IntegerField(default=0,blank=True)
total_points = models.IntegerField(default=0,blank=True)
total_tokens = models.IntegerField(default=0,blank=True)
best_so_far = models.IntegerField(default=0,blank=True)
bronze = models.IntegerField(default=10,blank=True)
silver = models.IntegerField(default=20,blank=True)
gold = models.IntegerField(default=30,blank=True)
    def _get_ratio(self):
        "Return the ratio of total points to total tokens."
return round(float(self.total_points) / (float(self.total_tokens+0.000001)),2)
ratio = property(_get_ratio)
def __unicode__(self):
return self.name
class MaxHighScore(models.Model):
user = models.ForeignKey(User)
game_experiment = models.ForeignKey(GameExperiment, null=True)
points = models.IntegerField(default=0)
total_points = models.IntegerField(default=0,blank=True)
total_tokens = models.IntegerField(default=0,blank=True)
times_played = models.IntegerField(default=0, blank=True)
    def _get_ratio(self):
        "Return the ratio of total points to total tokens."
return round(float(self.total_points) / (float(self.total_tokens+0.000001)),2)
ratio = property(_get_ratio)
def __unicode__(self):
return '{0} {1} : {2}'.format(self.user.username, self.game_experiment.name, self.points)
#Need to signal to create a UserProfile when registering a User
def createUserProfile(sender, user, request, **kwargs):
UserProfile.objects.get_or_create(user=user)
user_registered.connect(createUserProfile)
| mit |
tsl143/zamboni | mkt/constants/search.py | 19 | 2850 | # These two dicts are mapping between language codes in zamboni and language
# analyzers in elasticsearch.
#
# Each key value of ANALYZER_MAP is language analyzer supported by
# elasticsearch. See
# http://www.elasticsearch.org/guide/reference/index-modules/analysis/lang-analyzer.html
#
# Each value of ANALYZER_MAP is a list which is supported by the key analyzer.
# All values are picked from AMO_LANGUAGES in settings.py.
#
# The rows commented out are that the language is not supported by
# elasticsearch yet. We should update it when elasticsearch supports new
# analyzer for the language.
SEARCH_ANALYZER_MAP = {
# '': ['af'], # Afrikaans
'arabic': ['ar'],
'bulgarian': ['bg'],
'catalan': ['ca'],
'czech': ['cs'],
'danish': ['da'],
'german': ['de'],
'greek': ['el'],
'english': ['en-us'],
'spanish': ['es'],
'basque': ['eu'],
'persian': ['fa'],
'finnish': ['fi'],
'french': ['fr'],
# '': ['ga-ie'], # Gaelic - Ireland
# '': ['he'], # Hebrew
'hungarian': ['hu'],
'indonesian': ['id'],
'italian': ['it'],
'cjk': ['ja', 'ko'],
# '': ['mn'], # Mongolian
'dutch': ['nl'],
# Polish requires the Elasticsearch plugin:
# https://github.com/elasticsearch/elasticsearch-analysis-stempel
'polish': ['pl'],
'brazilian': ['pt-br'],
'portuguese': ['pt-pt'],
'romanian': ['ro'],
'russian': ['ru'],
# '': ['sk'], # Slovak
# '': ['sl'], # Slovenian
# '': ['sq'], # Albanian
'swedish': ['sv-se'],
# '': ['uk'], # Ukrainian
# '': ['vi'], # Vietnamese
'chinese': ['zh-cn', 'zh-tw'],
}
# This dict is the inverse mapping of SEARCH_ANALYZER_MAP.
SEARCH_LANGUAGE_TO_ANALYZER = {}
for analyzer, languages in SEARCH_ANALYZER_MAP.items():
for language in languages:
SEARCH_LANGUAGE_TO_ANALYZER[language] = analyzer
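# e.g. SEARCH_LANGUAGE_TO_ANALYZER['pt-br'] == 'brazilian', and both 'ja' and
# 'ko' map to 'cjk'.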
# List of analyzers that require a plugin. Depending on settings.ES_USE_PLUGINS
# we may disable or bypass these.
SEARCH_ANALYZER_PLUGINS = [
'polish',
]
# Which stemmer to use for each language.
#
# Note: We also use the keys of this dict for supported stop words, which are
# specified as, e.g., '_english_'.
STEMMER_MAP = {
'arabic': 'arabic',
'basque': 'basque',
'brazilian': 'brazilian',
'bulgarian': 'bulgarian',
'catalan': 'catalan',
'czech': 'czech',
'danish': 'danish',
'dutch': 'dutch',
'english': 'minimal_english',
'finnish': 'light_finish', # Yes, this is misspelled in ES.
'french': 'light_french',
'german': 'light_german',
'greek': 'greek',
'hungarian': 'light_hungarian',
'indonesian': 'indonesian',
'italian': 'light_italian',
'portuguese': 'light_portuguese',
'romanian': 'romanian',
'russian': 'russian',
'spanish': 'light_spanish',
'swedish': 'light_swedish',
}
| bsd-3-clause |
akretion/odoo | addons/pos_restaurant/__manifest__.py | 14 | 1249 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Restaurant',
'version': '1.0',
'category': 'Point of Sale',
'sequence': 6,
'summary': 'Restaurant extensions for the Point of Sale ',
'description': """
This module adds several restaurant features to the Point of Sale:
- Bill Printing: Allows you to print a receipt before the order is paid
- Bill Splitting: Allows you to split an order into different orders
    - Kitchen Order Printing: Allows you to print order updates to kitchen or bar printers
""",
'depends': ['point_of_sale'],
'website': 'https://www.odoo.com/page/point-of-sale-restaurant',
'data': [
'security/ir.model.access.csv',
'views/pos_order_views.xml',
'views/pos_restaurant_views.xml',
'views/pos_config_views.xml',
'views/pos_restaurant_templates.xml',
],
'qweb': [
'static/src/xml/multiprint.xml',
'static/src/xml/splitbill.xml',
'static/src/xml/printbill.xml',
'static/src/xml/notes.xml',
'static/src/xml/floors.xml',
],
'demo': [
'data/pos_restaurant_demo.xml',
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
manazhao/tf_recsys | tensorflow/python/kernel_tests/distributions/bijector_test.py | 75 | 5536 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.platform import test
class BaseBijectorTest(test.TestCase):
"""Tests properties of the Bijector base-class."""
def testIsAbstract(self):
with self.test_session():
with self.assertRaisesRegexp(TypeError,
("Can't instantiate abstract class Bijector "
"with abstract methods __init__")):
bijector.Bijector() # pylint: disable=abstract-class-instantiated
def testDefaults(self):
class _BareBonesBijector(bijector.Bijector):
"""Minimal specification of a `Bijector`."""
def __init__(self):
super(_BareBonesBijector, self).__init__()
with self.test_session() as sess:
bij = _BareBonesBijector()
self.assertEqual(None, bij.event_ndims)
self.assertEqual([], bij.graph_parents)
self.assertEqual(False, bij.is_constant_jacobian)
self.assertEqual(False, bij.validate_args)
self.assertEqual(None, bij.dtype)
self.assertEqual("bare_bones_bijector", bij.name)
for shape in [[], [1, 2], [1, 2, 3]]:
[
forward_event_shape_,
inverse_event_shape_,
] = sess.run([
bij.inverse_event_shape_tensor(shape),
bij.forward_event_shape_tensor(shape),
])
self.assertAllEqual(shape, forward_event_shape_)
self.assertAllEqual(shape, bij.forward_event_shape(shape))
self.assertAllEqual(shape, inverse_event_shape_)
self.assertAllEqual(shape, bij.inverse_event_shape(shape))
for fn in ["forward",
"inverse",
"inverse_log_det_jacobian",
"forward_log_det_jacobian"]:
with self.assertRaisesRegexp(
NotImplementedError, fn + " not implemented"):
getattr(bij, fn)(0)
class IntentionallyMissingError(Exception):
pass
class BrokenBijector(bijector.Bijector):
"""Forward and inverse are not inverses of each other."""
def __init__(self, forward_missing=False, inverse_missing=False):
super(BrokenBijector, self).__init__(
event_ndims=0, validate_args=False, name="broken")
self._forward_missing = forward_missing
self._inverse_missing = inverse_missing
def _forward(self, x):
if self._forward_missing:
raise IntentionallyMissingError
return 2 * x
def _inverse(self, y):
if self._inverse_missing:
raise IntentionallyMissingError
return y / 2.
def _inverse_log_det_jacobian(self, y): # pylint:disable=unused-argument
if self._inverse_missing:
raise IntentionallyMissingError
return -math_ops.log(2.)
def _forward_log_det_jacobian(self, x): # pylint:disable=unused-argument
if self._forward_missing:
raise IntentionallyMissingError
return math_ops.log(2.)
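# BrokenBijector raises from whichever direction is flagged as missing, so the
# caching tests below can show that calling forward(x) caches the (x, y) pair
# well enough for inverse(y) to succeed without ever invoking the broken
# _inverse, and vice versa.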
@six.add_metaclass(abc.ABCMeta)
class BijectorCachingTestBase(object):
@abc.abstractproperty
def broken_bijector_cls(self):
# return a BrokenBijector type Bijector, since this will test the caching.
raise IntentionallyMissingError("Not implemented")
def testCachingOfForwardResults(self):
broken_bijector = self.broken_bijector_cls(inverse_missing=True)
with self.test_session():
x = constant_op.constant(1.1)
# Call forward and forward_log_det_jacobian one-by-one (not together).
y = broken_bijector.forward(x)
_ = broken_bijector.forward_log_det_jacobian(x)
# Now, everything should be cached if the argument is y.
try:
broken_bijector.inverse(y)
broken_bijector.inverse_log_det_jacobian(y)
except IntentionallyMissingError:
raise AssertionError("Tests failed! Cached values not used.")
def testCachingOfInverseResults(self):
broken_bijector = self.broken_bijector_cls(forward_missing=True)
with self.test_session():
y = constant_op.constant(1.1)
# Call inverse and inverse_log_det_jacobian one-by-one (not together).
x = broken_bijector.inverse(y)
_ = broken_bijector.inverse_log_det_jacobian(y)
# Now, everything should be cached if the argument is x.
try:
broken_bijector.forward(x)
broken_bijector.forward_log_det_jacobian(x)
except IntentionallyMissingError:
raise AssertionError("Tests failed! Cached values not used.")
class BijectorCachingTest(BijectorCachingTestBase, test.TestCase):
"""Test caching with BrokenBijector."""
@property
def broken_bijector_cls(self):
return BrokenBijector
if __name__ == "__main__":
test.main()
| apache-2.0 |
mlocs/lineage-trees-clustering | clustering/meta_algorithm.py | 1 | 2148 | # This file is part of the Lineage Tree Clustering project.
# Copyright (C) 2014
# Author: Valeriy Khakhutskyy
#
# Lineage Tree Clustering is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lineage Tree Clustering is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Lineage Tree Clustering project files. If not, see <http://www.gnu.org/licenses/>.
import numpy
from VSH import *
from lagrangian_relaxation import *
from branches_and_bounds import *
# @param D distance matrix
# @param p number of centroids
def findMedian(D,p=-1):
if p == -1:
p = D[1]
D = D[0]
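    # p == -1 signals that D was passed as a single (D, p) tuple; unpacking it
    # here keeps the function usable with map-style callers that can only
    # supply one argument (an inferred rationale; callers are not shown here).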
# consider the trivial case of p = 1
if p == 1:
Dsum = numpy.sum(D, axis=0)
idx = numpy.argmin(Dsum)
k = Dsum[idx]
return (D, p, ([idx], k))
phase1_iterations = 20
phase1_medians = []
phase1_Z_H = []
print "stage 1"
# Stage 1: Run 20 replications of the VS heuristic to obtain an upper bound on the optimal solution Z_H
for i in xrange(phase1_iterations):
(m,Z_H) = VS(D,p)
phase1_medians.append(m)
phase1_Z_H.append(Z_H)
iMax = numpy.argmin(phase1_Z_H)
Z_H = min(phase1_Z_H)
median = phase1_medians[iMax]
print Z_H," ",median
print "stage 2"
# Stage 2: Run the Lagrangian relaxation algorithm with 5 restarts of the procedure with a maximum of 4000 iterations each
if LR2(D,p,Z_H):
# If optimal solution is not found
return (D,p,(median,Z_H))
else:
print "stage 3"
# Stage 3: Run the Branch-and-Bound algorithm with an embedded Lagrangian relaxation scheme
return (D,p,BnB(D,median,p))
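# Illustrative call sketch (matrix values are made up; the unpacking matches
# the p == 1 branch above):
#
#   import numpy
#   D = numpy.random.rand(20, 20)
#   D = (D + D.T) / 2.0            # symmetrise the distances
#   numpy.fill_diagonal(D, 0.0)
#   _, _, (medians, cost) = findMedian(D, p=1)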
| gpl-3.0 |
crobinso/virt-manager | tests/test_inject.py | 2 | 3660 | # Copyright (C) 2013, 2014 Red Hat, Inc.
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import os
import sys
_alldistros = {}
DEVFEDORA_URL = "http://dl.fedoraproject.org/pub/fedora/linux/development/%s/Server/%s/os/"
FEDORA_URL = "http://dl.fedoraproject.org/pub/fedora/linux/releases/%s/Server/%s/os/"
(WARN_RHEL5,
WARN_DEBIAN,
WARN_FEDORA) = range(1, 4)
def prompt():
sys.stdout.write("(press enter to continue)")
sys.stdout.flush()
return sys.stdin.readline()
KSOLD = "tests/data/inject/old-kickstart.ks"
KSNEW = "tests/data/inject/new-kickstart.ks"
PRESEED = "tests/data/inject/preseed.cfg"
class Distro(object):
def __init__(self, name, url, filename, warntype=WARN_FEDORA):
self.name = name
self.url = url
self.warntype = warntype
self.filename = filename
self.kernel = None
self.initrd = None
def _add(*args, **kwargs):
_d = Distro(*args, **kwargs)
_alldistros[_d.name] = _d
_add("centos-5.11", "http://vault.centos.org/5.11/os/x86_64/",
warntype=WARN_RHEL5, filename=KSOLD)
_add("centos-6-latest", "http://ftp.linux.ncsu.edu/pub/CentOS/6/os/x86_64/",
warntype=WARN_RHEL5, filename=KSOLD)
_add("centos-7-latest", "http://ftp.linux.ncsu.edu/pub/CentOS/7/os/x86_64/",
filename=KSNEW)
_add("fedora-29", FEDORA_URL % ("29", "x86_64"), filename=KSNEW)
_add("fedora-30", DEVFEDORA_URL % ("30", "x86_64"), filename=KSNEW)
_add("debian-9",
"http://ftp.us.debian.org/debian/dists/stretch/main/installer-amd64/",
filename=PRESEED, warntype=WARN_DEBIAN)
def _test_distro(distro):
os.system("clear")
print("\n")
if distro.warntype == WARN_RHEL5:
print("RHEL5, RHEL6, Fedora < 17: You'll get an error about a ")
print("bogus bootproto ITREADTHEKICKSTART. This means anaconda ")
print("read our busted kickstart.")
elif distro.warntype == WARN_DEBIAN:
print("Debian: Won't ask any questions, will autoconfig network, "
"then print a big red text box about a bad mirror config.")
elif distro.warntype == WARN_FEDORA:
print("RHEL7, Fedora >= 17: Chokes on the bogus URI in the early ")
print("console screen when fetching the installer squashfs image.")
os.environ.pop("VIRTINST_TEST_SUITE", None)
os.environ["VIRTINST_INITRD_TEST"] = "1"
if distro.warntype == WARN_DEBIAN:
append = "auto=true"
else:
append = "\"ks=file:/%s\"" % os.path.basename(distro.filename)
cmd = ("./virt-install --connect qemu:///system "
"--name __virtinst__test__initrd__ --ram 2048 "
"--transient --destroy-on-exit --disk none "
"--location %s --initrd-inject %s "
"--install kernel_args=%s,kernel_args_overwrite=yes" %
(distro.url, distro.filename, append))
print("\n\n" + cmd)
os.system(cmd)
def _print_intro():
print("""
This is an interactive test suite.
We are going to launch various transient virt-installs, using initrd
injections, that will cause installs to quickly fail. Look for the
failure pattern to confirm that initrd injections are working as expected.
""")
prompt()
def _build_testfunc(dobj, do_setup):
def testfunc():
if do_setup:
_print_intro()
_test_distro(dobj)
return testfunc
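# The _build_testfunc indirection makes each generated test close over its own
# dobj/do_setup pair; defining the test body directly inside the _make_tests
# loop would late-bind every test to the final loop values.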
def _make_tests():
idx = 0
for dname, dobj in _alldistros.items():
idx += 1
name = "testInitrd%.3d_%s" % (idx, dname.replace("-", "_"))
do_setup = idx == 1
testfunc = _build_testfunc(dobj, do_setup)
globals()[name] = testfunc
_make_tests()
| gpl-2.0 |
hackforwesternmass/seednetwork | seednetwork/wsgi.py | 2 | 1434 | """
WSGI config for seednetwork project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "seednetwork.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "seednetwork.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit |
zefciu/django-powerdns-dnssec | powerdns/tests/test_uniqueness_constraints.py | 1 | 2130 | """Tests for keeping uniqueness constraints"""
from powerdns.tests.utils import RecordFactory, RecordTestCase
class TestUniquenessConstraints(RecordTestCase):
def setUp(self):
super(TestUniquenessConstraints, self).setUp()
self.a_record = RecordFactory(
domain=self.domain,
type='A',
name='www.example.com',
content='192.168.1.1',
)
self.cname_record = RecordFactory(
domain=self.domain,
type='CNAME',
name='blog.example.com',
content='www.example.com',
)
def test_nonconflicting_a_record(self):
"""The validation allows an A record when it doesn't conflict with
existing CNAME"""
self.validate(type='A', name='wiki.example.com', content='192.168.1.2')
    def test_noconflict_with_itself(self):
        """A CNAME record can be resaved (it doesn't conflict with itself)."""
self.cname_record.full_clean()
    def test_conflicting_a_record(self):
        """The validation doesn't allow an A record when it conflicts with
existing CNAME"""
self.check_invalid(
type='A',
name='blog.example.com',
content='192.168.1.2',
)
    def test_nonconflicting_cname_record(self):
        """The validation allows a CNAME record when it doesn't conflict with
existing A"""
self.validate(
type='CNAME',
name='wiki.example.com',
content='site.example.com'
)
def test_conflicting_cname_record(self):
"""The validation doesn't allow a CNAME record when it conflicts with
existing A"""
self.check_invalid(
type='CNAME',
name='www.example.com',
content='site.example.com'
)
def test_conflicting_second_cname_record(self):
"""The validation doesn't allow a CNAME record when it conflicts with
existing CNAME"""
self.check_invalid(
type='CNAME',
name='blog.example.com',
content='site.example.com'
)
| bsd-2-clause |
crosswalk-project/chromium-crosswalk-efl | tools/gypv8sh.py | 33 | 2079 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is used by chrome_tests.gypi's js2webui action to maintain the
argument lists and to generate inlinable tests.
"""
import json
import optparse
import os
import subprocess
import sys
import shutil
def main ():
parser = optparse.OptionParser()
parser.set_usage(
"%prog v8_shell mock.js test_api.js js2webui.js "
"testtype inputfile inputrelfile cxxoutfile jsoutfile")
parser.add_option('-v', '--verbose', action='store_true')
parser.add_option('-n', '--impotent', action='store_true',
help="don't execute; just print (as if verbose)")
parser.add_option('--deps_js', action="store",
help=("Path to deps.js for dependency resolution, " +
"optional."))
(opts, args) = parser.parse_args()
if len(args) != 9:
parser.error('all arguments are required.')
(v8_shell, mock_js, test_api, js2webui, test_type,
inputfile, inputrelfile, cxxoutfile, jsoutfile) = args
cmd = [v8_shell]
icudatafile = os.path.join(os.path.dirname(v8_shell), 'icudtl.dat')
if os.path.exists(icudatafile):
cmd.extend(['--icu-data-file=%s' % icudatafile])
arguments = [js2webui, inputfile, inputrelfile, opts.deps_js,
cxxoutfile, test_type]
cmd.extend(['-e', "arguments=" + json.dumps(arguments), mock_js,
test_api, js2webui])
if opts.verbose or opts.impotent:
print cmd
if not opts.impotent:
try:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
out, err = p.communicate()
with open(cxxoutfile, 'wb') as f:
f.write(out)
shutil.copyfile(inputfile, jsoutfile)
except Exception, ex:
if os.path.exists(cxxoutfile):
os.remove(cxxoutfile)
if os.path.exists(jsoutfile):
os.remove(jsoutfile)
raise
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
danlrobertson/servo | tests/wpt/web-platform-tests/mixed-content/generic/expect.py | 26 | 4156 | import json, os, urllib, urlparse
def redirect(url, response):
response.add_required_headers = False
response.writer.write_status(301)
response.writer.write_header("access-control-allow-origin", "*")
response.writer.write_header("location", url)
response.writer.end_headers()
response.writer.write("")
def create_redirect_url(request, swap_scheme = False):
parsed = urlparse.urlsplit(request.url)
destination_netloc = parsed.netloc
scheme = parsed.scheme
if swap_scheme:
scheme = "http" if parsed.scheme == "https" else "https"
hostname = parsed.netloc.split(':')[0]
port = request.server.config["ports"][scheme][0]
destination_netloc = ":".join([hostname, str(port)])
# Remove "redirection" from query to avoid redirect loops.
parsed_query = dict(urlparse.parse_qsl(parsed.query))
assert "redirection" in parsed_query
del parsed_query["redirection"]
destination_url = urlparse.urlunsplit(urlparse.SplitResult(
scheme = scheme,
netloc = destination_netloc,
path = parsed.path,
query = urllib.urlencode(parsed_query),
fragment = None))
return destination_url
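# For example, with swap_scheme=True a request for
# https://host.test:8443/expect.py?redirection=swap-scheme-redirect&key=k
# (a hypothetical URL) redirects to http://host.test:<http port>/expect.py?key=k;
# the 'redirection' key is dropped above precisely to avoid redirect loops.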
def main(request, response):
if "redirection" in request.GET:
redirection = request.GET["redirection"]
if redirection == "no-redirect":
pass
elif redirection == "keep-scheme-redirect":
redirect(create_redirect_url(request, swap_scheme=False), response)
return
elif redirection == "swap-scheme-redirect":
redirect(create_redirect_url(request, swap_scheme=True), response)
return
else:
raise ValueError ("Invalid redirect type: %s" % redirection)
content_type = "text/plain"
response_data = ""
if "action" in request.GET:
action = request.GET["action"]
if "content_type" in request.GET:
content_type = request.GET["content_type"]
key = request.GET["key"]
stash = request.server.stash
path = request.GET.get("path", request.url.split('?'))[0]
if action == "put":
value = request.GET["value"]
stash.take(key=key, path=path)
stash.put(key=key, value=value, path=path)
response_data = json.dumps({"status": "success", "result": key})
elif action == "purge":
value = stash.take(key=key, path=path)
if content_type == "image/png":
response_data = open(os.path.join(request.doc_root,
"images",
"smiley.png"), "rb").read()
elif content_type == "audio/wav":
response_data = open(os.path.join(request.doc_root,
"webaudio", "resources", "sin_440Hz_-6dBFS_1s.wav"), "rb").read()
elif content_type == "video/ogg":
response_data = open(os.path.join(request.doc_root,
"media",
"movie_5.ogv"), "rb").read()
elif content_type == "application/javascript":
response_data = open(os.path.join(request.doc_root,
"mixed-content",
"generic",
"worker.js"), "rb").read()
else:
response_data = "/* purged */"
elif action == "take":
value = stash.take(key=key, path=path)
if value is None:
status = "allowed"
else:
status = "blocked"
response_data = json.dumps({"status": status, "result": value})
response.add_required_headers = False
response.writer.write_status(200)
response.writer.write_header("content-type", content_type)
response.writer.write_header("cache-control", "no-cache; must-revalidate")
response.writer.end_headers()
response.writer.write(response_data)
| mpl-2.0 |
MiniPlayer/log-island | logisland-plugins/logisland-scripting-processors-plugin/src/main/resources/nltk/tag/perceptron.py | 7 | 11238 | # -*- coding: utf-8 -*-
# This module is a port of the Textblob Averaged Perceptron Tagger
# Author: Matthew Honnibal <honnibal+gh@gmail.com>,
# Long Duong <longdt219@gmail.com> (NLTK port)
# URL: <https://github.com/sloria/textblob-aptagger>
# <http://nltk.org/>
# Copyright 2013 Matthew Honnibal
# NLTK modifications Copyright 2015 The NLTK Project
#
# This module is provided under the terms of the MIT License.
from __future__ import absolute_import
from __future__ import print_function, division
import random
from collections import defaultdict
import pickle
import logging
from nltk.tag.api import TaggerI
from nltk.data import find, load
from nltk.compat import python_2_unicode_compatible
PICKLE = "averaged_perceptron_tagger.pickle"
class AveragedPerceptron(object):
'''An averaged perceptron, as implemented by Matthew Honnibal.
See more implementation details here:
http://spacy.io/blog/part-of-speech-POS-tagger-in-python/
'''
def __init__(self):
# Each feature gets its own weight vector, so weights is a dict-of-dicts
self.weights = {}
self.classes = set()
# The accumulated values, for the averaging. These will be keyed by
# feature/clas tuples
self._totals = defaultdict(int)
# The last time the feature was changed, for the averaging. Also
# keyed by feature/clas tuples
# (tstamps is short for timestamps)
self._tstamps = defaultdict(int)
# Number of instances seen
self.i = 0
def predict(self, features):
'''Dot-product the features and current weights and return the best label.'''
scores = defaultdict(float)
for feat, value in features.items():
if feat not in self.weights or value == 0:
continue
weights = self.weights[feat]
for label, weight in weights.items():
scores[label] += value * weight
# Do a secondary alphabetic sort, for stability
return max(self.classes, key=lambda label: (scores[label], label))
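# Illustrative trace of the tie-break above (not in the original source): with
# self.classes == {'NN', 'VB'} and scores == {'NN': 2.0, 'VB': 2.0}, max()
# compares the tuples (2.0, 'NN') and (2.0, 'VB') and returns 'VB', so
# equal-scoring labels resolve the same way on every run.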
def update(self, truth, guess, features):
'''Update the feature weights.'''
def upd_feat(c, f, w, v):
param = (f, c)
self._totals[param] += (self.i - self._tstamps[param]) * w
self._tstamps[param] = self.i
self.weights[f][c] = w + v
self.i += 1
if truth == guess:
return None
for f in features:
weights = self.weights.setdefault(f, {})
upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
def average_weights(self):
'''Average weights from all iterations.'''
for feat, weights in self.weights.items():
new_feat_weights = {}
for clas, weight in weights.items():
param = (feat, clas)
total = self._totals[param]
total += (self.i - self._tstamps[param]) * weight
averaged = round(total / self.i, 3)
if averaged:
new_feat_weights[clas] = averaged
self.weights[feat] = new_feat_weights
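# Worked example of the lazy averaging above, with made-up numbers (not part
# of the original source): if a (feature, class) weight was set to 1.0 when
# self.i was 10, never touched again, and training ends at self.i == 100, the
# loop adds (100 - 10) * 1.0 = 90 to its total, so the averaged weight becomes
# round(90 / 100, 3) == 0.9 -- the weight counts for every instance it was in
# force, without being rewritten on each update.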
def save(self, path):
'''Save the pickled model weights.'''
with open(path, 'wb') as fout:
return pickle.dump(dict(self.weights), fout)
def load(self, path):
'''Load the pickled model weights.'''
self.weights = load(path)
@python_2_unicode_compatible
class PerceptronTagger(TaggerI):
'''
Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.
See more implementation details here:
http://spacy.io/blog/part-of-speech-POS-tagger-in-python/
>>> from nltk.tag.perceptron import PerceptronTagger
Train the model
>>> tagger = PerceptronTagger(load=False)
>>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')],
... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]])
>>> tagger.tag(['today','is','a','beautiful','day'])
[('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')]
Use the pretrained model (the default constructor)
>>> pretrain = PerceptronTagger()
>>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split())
[('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')]
>>> pretrain.tag("The red cat".split())
[('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')]
'''
START = ['-START-', '-START2-']
END = ['-END-', '-END2-']
def __init__(self, load=True):
'''
:param load: Load the pickled model upon instantiation.
'''
self.model = AveragedPerceptron()
self.tagdict = {}
self.classes = set()
if load:
AP_MODEL_LOC = 'file:'+str(find('taggers/averaged_perceptron_tagger/'+PICKLE))
self.load(AP_MODEL_LOC)
def tag(self, tokens):
'''
Tag a tokenized sentence.
:param tokens: list of words making up the sentence
:type tokens: list(str)
'''
prev, prev2 = self.START
output = []
context = self.START + [self.normalize(w) for w in tokens] + self.END
for i, word in enumerate(tokens):
tag = self.tagdict.get(word)
if not tag:
features = self._get_features(i, word, context, prev, prev2)
tag = self.model.predict(features)
output.append((word, tag))
prev2 = prev
prev = tag
return output
def train(self, sentences, save_loc=None, nr_iter=5):
'''Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
controls the number of Perceptron training iterations.
:param sentences: A list of sentences, each a list of (word, tag) tuples.
:param save_loc: If not ``None``, saves a pickled model in this location.
:param nr_iter: Number of training iterations.
'''
self._make_tagdict(sentences)
self.model.classes = self.classes
for iter_ in range(nr_iter):
c = 0
n = 0
for sentence in sentences:
words = [word for word,tag in sentence]
tags = [tag for word,tag in sentence]
prev, prev2 = self.START
context = self.START + [self.normalize(w) for w in words] \
+ self.END
for i, word in enumerate(words):
guess = self.tagdict.get(word)
if not guess:
feats = self._get_features(i, word, context, prev, prev2)
guess = self.model.predict(feats)
self.model.update(tags[i], guess, feats)
prev2 = prev
prev = guess
c += guess == tags[i]
n += 1
random.shuffle(sentences)
logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
self.model.average_weights()
# Pickle as a binary file
if save_loc is not None:
with open(save_loc, 'wb') as fout:
pickle.dump((self.model.weights, self.tagdict, self.classes), fout, -1)
def load(self, loc):
'''
:param loc: Load a pickled model at location.
:type loc: str
'''
self.model.weights, self.tagdict, self.classes = load(loc)
self.model.classes = self.classes
def normalize(self, word):
'''
Normalization used in pre-processing.
- All words are lower cased
- Words containing a hyphen (not as the first character) are represented as !HYPHEN
- Any four-digit number is represented as !YEAR
- Other tokens starting with a digit are represented as !DIGITS
:rtype: str
'''
if '-' in word and word[0] != '-':
return '!HYPHEN'
elif word.isdigit() and len(word) == 4:
return '!YEAR'
elif word[0].isdigit():
return '!DIGITS'
else:
return word.lower()
def _get_features(self, i, word, context, prev, prev2):
'''Map tokens into a feature representation, implemented as a
{hashable: float} dict. If the features change, a new model must be
trained.
'''
def add(name, *args):
features[' '.join((name,) + tuple(args))] += 1
i += len(self.START)
features = defaultdict(int)
# It's useful to have a constant feature, which acts sort of like a prior
add('bias')
add('i suffix', word[-3:])
add('i pref1', word[0])
add('i-1 tag', prev)
add('i-2 tag', prev2)
add('i tag+i-2 tag', prev, prev2)
add('i word', context[i])
add('i-1 tag+i word', prev, context[i])
add('i-1 word', context[i-1])
add('i-1 suffix', context[i-1][-3:])
add('i-2 word', context[i-2])
add('i+1 word', context[i+1])
add('i+1 suffix', context[i+1][-3:])
add('i+2 word', context[i+2])
return features
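# Illustrative sketch of the output (not from the original source): for the
# tokens ['the', 'red', 'cat'] with i pointing at 'red' and prev == 'DT', the
# dict built above contains keys such as 'bias', 'i word red', 'i suffix red',
# 'i pref1 r', 'i-1 tag DT', 'i-1 word the' and 'i+1 word cat', each mapped
# to 1.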
def _make_tagdict(self, sentences):
'''
Make a tag dictionary for single-tag words.
:param sentences: A list of lists of (word, tag) tuples.
'''
counts = defaultdict(lambda: defaultdict(int))
for sentence in sentences:
for word, tag in sentence:
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
self.tagdict[word] = tag
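# Worked example of the two thresholds above, with made-up counts (not in the
# original source): if 'the' occurs 1000 times and is tagged 'DT' in 998 of
# them, then n = 1000 >= 20 and 998 / 1000 = 0.998 >= 0.97, so
# tagdict['the'] = 'DT' and tag() skips the perceptron for it entirely; a word
# seen only 10 times is never added, however unambiguous its tag.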
def _pc(n, d):
return (n / d) * 100
def _load_data_conll_format(filename):
print ('Read from file: ', filename)
with open(filename,'rb') as fin:
sentences = []
sentence = []
for line in fin.readlines():
line = line.strip()
#print line
if len(line) ==0:
sentences.append(sentence)
sentence = []
continue
tokens = line.split('\t')
word = tokens[1]
tag = tokens[4]
sentence.append((word,tag))
return sentences
def _get_pretrain_model():
# Train and test on English part of ConLL data (WSJ part of Penn Treebank)
# Train: section 2-11
# Test : section 23
tagger = PerceptronTagger()
training = _load_data_conll_format('english_ptb_train.conll')
testing = _load_data_conll_format('english_ptb_test.conll')
print ('Size of training and testing (sentence)', len(training), len(testing))
# Train and save the model
tagger.train(training, PICKLE)
print ('Accuracy : ',tagger.evaluate(testing))
if __name__ == '__main__':
#_get_pretrain_model()
pass
| apache-2.0 |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/encodings/koi8_r.py | 272 | 13779 | """ Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-r',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
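# A minimal usage sketch (not part of the generated module): once the codec is
# registered, the tables below give round trips such as
#   'я'.encode('koi8-r') == b'\xd1'
#   b'\xd1'.decode('koi8-r') == 'я'
# since 0xD1 maps to CYRILLIC SMALL LETTER YA in decoding_table.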
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u2580' # 0x8B -> UPPER HALF BLOCK
'\u2584' # 0x8C -> LOWER HALF BLOCK
'\u2588' # 0x8D -> FULL BLOCK
'\u258c' # 0x8E -> LEFT HALF BLOCK
'\u2590' # 0x8F -> RIGHT HALF BLOCK
'\u2591' # 0x90 -> LIGHT SHADE
'\u2592' # 0x91 -> MEDIUM SHADE
'\u2593' # 0x92 -> DARK SHADE
'\u2320' # 0x93 -> TOP HALF INTEGRAL
'\u25a0' # 0x94 -> BLACK SQUARE
'\u2219' # 0x95 -> BULLET OPERATOR
'\u221a' # 0x96 -> SQUARE ROOT
'\u2248' # 0x97 -> ALMOST EQUAL TO
'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
'\xa0' # 0x9A -> NO-BREAK SPACE
'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
'\xb0' # 0x9C -> DEGREE SIGN
'\xb2' # 0x9D -> SUPERSCRIPT TWO
'\xb7' # 0x9E -> MIDDLE DOT
'\xf7' # 0x9F -> DIVISION SIGN
'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa9' # 0xBF -> COPYRIGHT SIGN
'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/PyAMF/doc/tutorials/examples/gateways/appengine/demo/simplejson/encoder.py | 10 | 13260 | """
Implementation of JSONEncoder
"""
import re
try:
from simplejson import _speedups
except ImportError:
_speedups = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return FLOAT_REPR(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
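# Illustrative examples of the contract above (not in the original source):
#   floatstr(float('nan')) -> 'NaN'
#   floatstr(float('inf')) -> 'Infinity'
#   floatstr(1.5) -> '1.5' (i.e. FLOAT_REPR(1.5))
#   floatstr(float('inf'), allow_nan=False) raises ValueError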
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def encode_basestring_ascii(s):
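"""Return an ASCII-only JSON representation of a Python string."""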
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
try:
encode_basestring_ascii = _speedups.encode_basestring_ascii
_need_utf8 = True
except AttributeError:
_need_utf8 = False
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and override the
``.default()`` method so that it returns a serializable object for ``o``
if possible, and otherwise calls the superclass implementation (to raise
``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_need_utf8 and _encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_need_utf8 and _encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo":["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks...
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8' and _need_utf8)):
o = o.decode(_encoding)
return encode_basestring_ascii(o)
# This doesn't pass the iterator directly to ''.join() because it
# sucks at reporting exceptions. It's going to do this internally
# anyway because it uses PySequence_Fast or similar.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
__all__ = ['JSONEncoder']
| apache-2.0 |
gsmaxwell/phase_offset_rx | gnuradio-core/src/python/gnuradio/gr/qa_probe_signal.py | 18 | 1957 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
from gnuradio import gr, gr_unittest
class test_probe_signal (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test_001(self):
value = 12.3
repeats = 100
src_data = [value] * repeats
src = gr.vector_source_f(src_data)
dst = gr.probe_signal_f()
self.tb.connect(src, dst)
self.tb.run()
output = dst.level()
self.assertAlmostEqual(value, output, places=6)
def test_002(self):
vector_length = 10
repeats = 10
value = [0.5+i for i in range(0, vector_length)]
src_data = value * repeats
src = gr.vector_source_f(src_data)
s2v = gr.stream_to_vector(gr.sizeof_float, vector_length)
dst = gr.probe_signal_vf(vector_length)
self.tb.connect(src, s2v, dst)
self.tb.run()
output = dst.level()
self.assertEqual(len(output), vector_length)
self.assertAlmostEqual(value[3], output[3], places=6)
if __name__ == '__main__':
gr_unittest.run(test_probe_signal, "test_probe_signal.xml")
| gpl-3.0 |
wunderlins/learning | python/zodb/lib/linux64/pkg_resources/_vendor/packaging/_compat.py | 901 | 1253 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = str,
else:
string_types = basestring,
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
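# A minimal usage sketch (not part of the original file; abc.ABCMeta is only
# an example metaclass):
#
#   import abc
#
#   class Base(with_metaclass(abc.ABCMeta, object)):
#       pass
#
# On both Python 2 and 3, type(Base) is abc.ABCMeta, without needing the
# Py2-only __metaclass__ attribute or the Py3-only
# `class Base(object, metaclass=abc.ABCMeta)` syntax.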
| gpl-2.0 |
shitizadmirer/unimap.ns-3noc | bindings/python/apidefs/gcc-LP64/ns3_module_common.py | 10 | 139644 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## error-model.h: ns3::ErrorUnit [enumeration]
module.add_enum('ErrorUnit', ['EU_BIT', 'EU_BYTE', 'EU_PKT'])
## spectrum-model.h: ns3::BandInfo [struct]
module.add_class('BandInfo')
## buffer.h: ns3::Buffer [class]
module.add_class('Buffer')
## buffer.h: ns3::Buffer::Iterator [class]
module.add_class('Iterator', outer_class=root_module['ns3::Buffer'])
## packet.h: ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator')
## packet.h: ns3::ByteTagIterator::Item [class]
module.add_class('Item', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h: ns3::ByteTagList [class]
module.add_class('ByteTagList')
## byte-tag-list.h: ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', outer_class=root_module['ns3::ByteTagList::Iterator'])
## data-rate.h: ns3::DataRate [class]
module.add_class('DataRate')
## packet-metadata.h: ns3::PacketMetadata [class]
module.add_class('PacketMetadata')
## packet-metadata.h: ns3::PacketMetadata::Item [struct]
module.add_class('Item', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h: ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'])
## packet-metadata.h: ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', outer_class=root_module['ns3::PacketMetadata'])
## packet.h: ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator')
## packet.h: ns3::PacketTagIterator::Item [class]
module.add_class('Item', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h: ns3::PacketTagList [class]
module.add_class('PacketTagList')
## packet-tag-list.h: ns3::PacketTagList::TagData [struct]
module.add_class('TagData', outer_class=root_module['ns3::PacketTagList'])
## pcap-file.h: ns3::PcapFile [class]
module.add_class('PcapFile')
## sequence-number.h: ns3::SequenceNumber<unsigned int, int> [class]
module.add_class('SequenceNumber32')
## spectrum-type.h: ns3::SpectrumType [class]
module.add_class('SpectrumType')
## spectrum-type.h: ns3::SpectrumTypeFactory [class]
module.add_class('SpectrumTypeFactory')
## tag.h: ns3::Tag [class]
module.add_class('Tag', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h: ns3::TagBuffer [class]
module.add_class('TagBuffer')
## chunk.h: ns3::Chunk [class]
module.add_class('Chunk', parent=root_module['ns3::ObjectBase'])
## header.h: ns3::Header [class]
module.add_class('Header', parent=root_module['ns3::Chunk'])
## packet-burst.h: ns3::PacketBurst [class]
module.add_class('PacketBurst', parent=root_module['ns3::Object'])
## pcap-file-wrapper.h: ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', parent=root_module['ns3::Object'])
## propagation-delay-model.h: ns3::PropagationDelayModel [class]
module.add_class('PropagationDelayModel', parent=root_module['ns3::Object'])
## propagation-loss-model.h: ns3::PropagationLossModel [class]
module.add_class('PropagationLossModel', parent=root_module['ns3::Object'])
## propagation-delay-model.h: ns3::RandomPropagationDelayModel [class]
module.add_class('RandomPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
## propagation-loss-model.h: ns3::RandomPropagationLossModel [class]
module.add_class('RandomPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h: ns3::RangePropagationLossModel [class]
module.add_class('RangePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumConverter> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::SpectrumConverter', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SpectrumConverter>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumModel> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::SpectrumModel', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SpectrumModel>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::SpectrumValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SpectrumValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## spectrum-converter.h: ns3::SpectrumConverter [class]
module.add_class('SpectrumConverter', parent=root_module['ns3::SimpleRefCount< ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumConverter> >'])
## spectrum-model.h: ns3::SpectrumModel [class]
module.add_class('SpectrumModel', parent=root_module['ns3::SimpleRefCount< ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumModel> >'])
## spectrum-propagation-loss-model.h: ns3::SpectrumPropagationLossModel [class]
module.add_class('SpectrumPropagationLossModel', parent=root_module['ns3::Object'])
## spectrum-value.h: ns3::SpectrumValue [class]
module.add_class('SpectrumValue', parent=root_module['ns3::SimpleRefCount< ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumValue> >'])
## propagation-loss-model.h: ns3::ThreeLogDistancePropagationLossModel [class]
module.add_class('ThreeLogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## trailer.h: ns3::Trailer [class]
module.add_class('Trailer', parent=root_module['ns3::Chunk'])
## propagation-loss-model.h: ns3::TwoRayGroundPropagationLossModel [class]
module.add_class('TwoRayGroundPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-delay-model.h: ns3::ConstantSpeedPropagationDelayModel [class]
module.add_class('ConstantSpeedPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
## cost231-propagation-loss-model.h: ns3::Cost231PropagationLossModel [class]
module.add_class('Cost231PropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## cost231-propagation-loss-model.h: ns3::Cost231PropagationLossModel::Environment [enumeration]
module.add_enum('Environment', ['SubUrban', 'MediumCity', 'Metropolitan'], outer_class=root_module['ns3::Cost231PropagationLossModel'])
## data-rate.h: ns3::DataRateChecker [class]
module.add_class('DataRateChecker', parent=root_module['ns3::AttributeChecker'])
## data-rate.h: ns3::DataRateValue [class]
module.add_class('DataRateValue', parent=root_module['ns3::AttributeValue'])
## error-model.h: ns3::ErrorModel [class]
module.add_class('ErrorModel', parent=root_module['ns3::Object'])
## propagation-loss-model.h: ns3::FixedRssLossModel [class]
module.add_class('FixedRssLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h: ns3::FriisPropagationLossModel [class]
module.add_class('FriisPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## friis-spectrum-propagation-loss.h: ns3::FriisSpectrumPropagationLossModel [class]
module.add_class('FriisSpectrumPropagationLossModel', parent=root_module['ns3::SpectrumPropagationLossModel'])
## jakes-propagation-loss-model.h: ns3::JakesPropagationLossModel [class]
module.add_class('JakesPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## error-model.h: ns3::ListErrorModel [class]
module.add_class('ListErrorModel', parent=root_module['ns3::ErrorModel'])
## propagation-loss-model.h: ns3::LogDistancePropagationLossModel [class]
module.add_class('LogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h: ns3::MatrixPropagationLossModel [class]
module.add_class('MatrixPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h: ns3::NakagamiPropagationLossModel [class]
module.add_class('NakagamiPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## nix-vector.h: ns3::NixVector [class]
module.add_class('NixVector', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## output-stream-wrapper.h: ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h: ns3::Packet [class]
module.add_class('Packet', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## error-model.h: ns3::RateErrorModel [class]
module.add_class('RateErrorModel', parent=root_module['ns3::ErrorModel'])
## error-model.h: ns3::ReceiveListErrorModel [class]
module.add_class('ReceiveListErrorModel', parent=root_module['ns3::ErrorModel'])
module.add_container('ns3::SpectrumConverterMap_t', ('unsigned int', 'ns3::SpectrumConverter'), container_type='map')
module.add_container('ns3::Bands', 'ns3::BandInfo', container_type='vector')
typehandlers.add_type_alias('ns3::SequenceNumber< short unsigned int, short int >', 'ns3::SequenceNumber16')
typehandlers.add_type_alias('ns3::SequenceNumber< short unsigned int, short int >*', 'ns3::SequenceNumber16*')
typehandlers.add_type_alias('ns3::SequenceNumber< short unsigned int, short int >&', 'ns3::SequenceNumber16&')
typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >', 'ns3::SequenceNumber32')
typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >*', 'ns3::SequenceNumber32*')
typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >&', 'ns3::SequenceNumber32&')
typehandlers.add_type_alias('std::vector< double, std::allocator< double > >', 'ns3::Values')
typehandlers.add_type_alias('std::vector< double, std::allocator< double > >*', 'ns3::Values*')
typehandlers.add_type_alias('std::vector< double, std::allocator< double > >&', 'ns3::Values&')
typehandlers.add_type_alias('uint32_t', 'ns3::SpectrumModelUid_t')
typehandlers.add_type_alias('uint32_t*', 'ns3::SpectrumModelUid_t*')
typehandlers.add_type_alias('uint32_t&', 'ns3::SpectrumModelUid_t&')
typehandlers.add_type_alias('std::vector< ns3::BandInfo, std::allocator< ns3::BandInfo > >', 'ns3::Bands')
typehandlers.add_type_alias('std::vector< ns3::BandInfo, std::allocator< ns3::BandInfo > >*', 'ns3::Bands*')
typehandlers.add_type_alias('std::vector< ns3::BandInfo, std::allocator< ns3::BandInfo > >&', 'ns3::Bands&')
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3BandInfo_methods(root_module, root_module['ns3::BandInfo'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
register_Ns3SequenceNumber32_methods(root_module, root_module['ns3::SequenceNumber32'])
register_Ns3SpectrumType_methods(root_module, root_module['ns3::SpectrumType'])
register_Ns3SpectrumTypeFactory_methods(root_module, root_module['ns3::SpectrumTypeFactory'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3PacketBurst_methods(root_module, root_module['ns3::PacketBurst'])
register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
register_Ns3PropagationDelayModel_methods(root_module, root_module['ns3::PropagationDelayModel'])
register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
register_Ns3RandomPropagationDelayModel_methods(root_module, root_module['ns3::RandomPropagationDelayModel'])
register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
register_Ns3SpectrumConverter_methods(root_module, root_module['ns3::SpectrumConverter'])
register_Ns3SpectrumModel_methods(root_module, root_module['ns3::SpectrumModel'])
register_Ns3SpectrumPropagationLossModel_methods(root_module, root_module['ns3::SpectrumPropagationLossModel'])
register_Ns3SpectrumValue_methods(root_module, root_module['ns3::SpectrumValue'])
register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, root_module['ns3::ConstantSpeedPropagationDelayModel'])
register_Ns3Cost231PropagationLossModel_methods(root_module, root_module['ns3::Cost231PropagationLossModel'])
register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
register_Ns3ErrorModel_methods(root_module, root_module['ns3::ErrorModel'])
register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
register_Ns3FriisSpectrumPropagationLossModel_methods(root_module, root_module['ns3::FriisSpectrumPropagationLossModel'])
register_Ns3JakesPropagationLossModel_methods(root_module, root_module['ns3::JakesPropagationLossModel'])
register_Ns3ListErrorModel_methods(root_module, root_module['ns3::ListErrorModel'])
register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3RateErrorModel_methods(root_module, root_module['ns3::RateErrorModel'])
register_Ns3ReceiveListErrorModel_methods(root_module, root_module['ns3::ReceiveListErrorModel'])
return
def register_Ns3BandInfo_methods(root_module, cls):
## spectrum-model.h: ns3::BandInfo::BandInfo() [constructor]
cls.add_constructor([])
## spectrum-model.h: ns3::BandInfo::BandInfo(ns3::BandInfo const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BandInfo const &', 'arg0')])
## spectrum-model.h: ns3::BandInfo::fc [variable]
cls.add_instance_attribute('fc', 'double', is_const=False)
## spectrum-model.h: ns3::BandInfo::fh [variable]
cls.add_instance_attribute('fh', 'double', is_const=False)
## spectrum-model.h: ns3::BandInfo::fl [variable]
cls.add_instance_attribute('fl', 'double', is_const=False)
return
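# Illustrative sketch (not part of the generated bindings): the registrations
# above are what let the compiled extension expose the C++ struct to Python
# roughly as below, assuming an ns3 module built from these pybindgen
# definitions:
#
#   bi = ns3.BandInfo()
#   bi.fc = 2.412e9    # attributes come from add_instance_attribute
#   bi.fl = bi.fc - 10e6
#   bi.fh = bi.fc + 10e6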
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h: ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h: ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h: ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h: ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h: bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h: void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h: bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h: ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h: void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h: uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h: ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h: ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h: uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h: ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h: int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h: int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h: uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h: uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h: uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h: void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h: void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h: uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h: ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h: ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h: uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h: uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h: uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h: uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h: bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h: bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h: void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h: void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h: void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h: void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h: void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h: uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h: uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h: uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h: uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h: uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h: uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h: uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h: uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h: uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h: uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h: void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h: void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h: void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h: void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
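# Sketch of iterator-based access, assuming a Buffer that already has at
# least 4 writable bytes (AddAtStart and Buffer.Begin() are assumed to be
# registered earlier in this file):
#
#   it = buf.Begin()
#   it.WriteHtonU32(0xcafebabe)          # write in network byte order
#   it = buf.Begin()
#   print hex(it.ReadNtohU32())          # reads back 0xcafebabe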
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h: ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h: bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h: ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h: ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h: uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h: uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h: void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h: ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h: ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h: ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h: ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h: void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h: void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h: void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h: ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h: void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h: ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h: uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h: bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h: ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3DataRate_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('>=')
## data-rate.h: ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
## data-rate.h: ns3::DataRate::DataRate() [constructor]
cls.add_constructor([])
## data-rate.h: ns3::DataRate::DataRate(uint64_t bps) [constructor]
cls.add_constructor([param('uint64_t', 'bps')])
## data-rate.h: ns3::DataRate::DataRate(std::string rate) [constructor]
cls.add_constructor([param('std::string', 'rate')])
## data-rate.h: double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
cls.add_method('CalculateTxTime',
'double',
[param('uint32_t', 'bytes')],
is_const=True)
## data-rate.h: uint64_t ns3::DataRate::GetBitRate() const [member function]
cls.add_method('GetBitRate',
'uint64_t',
[],
is_const=True)
return
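# Usage sketch for ns3.DataRate, exercising the string constructor and the
# accessors registered above:
#
#   rate = ns3.DataRate("5Mbps")
#   print rate.GetBitRate()              # -> 5000000
#   print rate.CalculateTxTime(1024)     # seconds to send 1024 bytes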
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h: ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h: ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h: void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h: void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h: void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h: void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h: ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h: ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h: uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h: static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h: static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h: uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h: uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h: void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h: void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h: void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h: void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h: uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
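# Note: packet metadata is disabled by default in ns-3, so the static
# Enable()/EnableChecking() methods registered above must be called before
# the first packet is created for BeginItem() iteration to be meaningful:
#
#   ns3.PacketMetadata.Enable()           # record header/trailer history
#   ns3.PacketMetadata.EnableChecking()   # additionally verify consistency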
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h: ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h: ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h: ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h: ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
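    # NB: 'Trimed' in the two attributes below is not a transcription error;
    # it mirrors the member names spelled that way in ns-3's packet-metadata.h.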
## packet-metadata.h: ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h: ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h: ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h: ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h: ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h: ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h: bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h: ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h: ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h: bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h: ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h: ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h: void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h: ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h: ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h: ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h: void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h: ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h: bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h: bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h: void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h: ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h: ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h: ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h: ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h: ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h: ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PcapFile_methods(root_module, cls):
## pcap-file.h: ns3::PcapFile::PcapFile() [constructor]
cls.add_constructor([])
## pcap-file.h: void ns3::PcapFile::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file.h: void ns3::PcapFile::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file.h: static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
cls.add_method('Diff',
'bool',
[param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
is_static=True)
## pcap-file.h: bool ns3::PcapFile::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file.h: bool ns3::PcapFile::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file.h: uint32_t ns3::PcapFile::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
## pcap-file.h: uint32_t ns3::PcapFile::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file.h: uint32_t ns3::PcapFile::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file.h: uint32_t ns3::PcapFile::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file.h: bool ns3::PcapFile::GetSwapMode() [member function]
cls.add_method('GetSwapMode',
'bool',
[])
## pcap-file.h: int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file.h: uint16_t ns3::PcapFile::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file.h: uint16_t ns3::PcapFile::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file.h: void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')])
## pcap-file.h: void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file.h: void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
cls.add_method('Read',
'void',
[param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
## pcap-file.h: void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
## pcap-file.h: void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<ns3::Packet const> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h: void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<ns3::Packet const> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h: ns3::PcapFile::SNAPLEN_DEFAULT [variable]
cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
## pcap-file.h: ns3::PcapFile::ZONE_DEFAULT [variable]
cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
return
def register_Ns3SequenceNumber32_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('ns3::SequenceNumber< unsigned int, int > const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('int', 'right'))
cls.add_inplace_numeric_operator('+=', param('int', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('int', 'right'))
cls.add_inplace_numeric_operator('-=', param('int', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('>=')
## sequence-number.h: ns3::SequenceNumber<unsigned int, int>::SequenceNumber() [constructor]
cls.add_constructor([])
## sequence-number.h: ns3::SequenceNumber<unsigned int, int>::SequenceNumber(unsigned int value) [constructor]
cls.add_constructor([param('unsigned int', 'value')])
## sequence-number.h: ns3::SequenceNumber<unsigned int, int>::SequenceNumber(ns3::SequenceNumber<unsigned int, int> const & value) [copy constructor]
cls.add_constructor([param('ns3::SequenceNumber< unsigned int, int > const &', 'value')])
## sequence-number.h: unsigned int ns3::SequenceNumber<unsigned int, int>::GetValue() const [member function]
cls.add_method('GetValue',
'unsigned int',
[],
is_const=True)
return
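# SequenceNumber32 implements serial-number arithmetic, so the comparison
# operators registered above are intended to stay meaningful across 32-bit
# wrap-around. Sketch:
#
#   a = ns3.SequenceNumber32(4294967295)   # 2**32 - 1
#   b = a + 1                              # wraps to 0
#   print b.GetValue()                     # -> 0
#   print b > a                            # -> True, despite the wrap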
def register_Ns3SpectrumType_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## spectrum-type.h: ns3::SpectrumType::SpectrumType(ns3::SpectrumType const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SpectrumType const &', 'arg0')])
## spectrum-type.h: std::string ns3::SpectrumType::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## spectrum-type.h: uint32_t ns3::SpectrumType::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
return
def register_Ns3SpectrumTypeFactory_methods(root_module, cls):
## spectrum-type.h: ns3::SpectrumTypeFactory::SpectrumTypeFactory() [constructor]
cls.add_constructor([])
## spectrum-type.h: ns3::SpectrumTypeFactory::SpectrumTypeFactory(ns3::SpectrumTypeFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SpectrumTypeFactory const &', 'arg0')])
## spectrum-type.h: static ns3::SpectrumType ns3::SpectrumTypeFactory::Create(std::string name) [member function]
cls.add_method('Create',
'ns3::SpectrumType',
[param('std::string', 'name')],
is_static=True)
## spectrum-type.h: static std::string ns3::SpectrumTypeFactory::GetNameByUid(uint32_t uid) [member function]
cls.add_method('GetNameByUid',
'std::string',
[param('uint32_t', 'uid')],
is_static=True)
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h: ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h: ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h: void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h: uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h: static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h: void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h: void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
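# Deserialize/GetSerializedSize/Print/Serialize are registered as virtual,
# so pybindgen's proxy classes should allow Python subclasses of ns3.Tag.
# A hedged sketch (a real tag also needs a registered TypeId, omitted here;
# the TagBuffer read/write helpers used below are registered next in this
# file):
#
#   class FlowTag(ns3.Tag):
#       def GetSerializedSize(self):
#           return 4
#       def Serialize(self, buf):
#           buf.WriteU32(self.flow_id)
#       def Deserialize(self, buf):
#           self.flow_id = buf.ReadU32()
#       def Print(self, os):
#           pass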
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h: ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h: ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h: void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h: void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h: double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h: uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h: uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h: uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h: uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h: void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h: void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h: void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h: void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h: void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h: void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h: void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h: ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h: ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h: uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h: static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h: void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h: ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h: ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h: uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h: uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h: static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h: void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h: void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3PacketBurst_methods(root_module, cls):
## packet-burst.h: ns3::PacketBurst::PacketBurst(ns3::PacketBurst const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketBurst const &', 'arg0')])
## packet-burst.h: ns3::PacketBurst::PacketBurst() [constructor]
cls.add_constructor([])
## packet-burst.h: void ns3::PacketBurst::AddPacket(ns3::Ptr<ns3::Packet> packet) [member function]
cls.add_method('AddPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet')])
## packet-burst.h: std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >',
[],
is_const=True)
## packet-burst.h: ns3::Ptr<ns3::PacketBurst> ns3::PacketBurst::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::PacketBurst >',
[],
is_const=True)
## packet-burst.h: std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >',
[],
is_const=True)
## packet-burst.h: uint32_t ns3::PacketBurst::GetNPackets() const [member function]
cls.add_method('GetNPackets',
'uint32_t',
[],
is_const=True)
## packet-burst.h: std::list<ns3::Ptr<ns3::Packet>, std::allocator<ns3::Ptr<ns3::Packet> > > ns3::PacketBurst::GetPackets() const [member function]
cls.add_method('GetPackets',
'std::list< ns3::Ptr< ns3::Packet > >',
[],
is_const=True)
## packet-burst.h: uint32_t ns3::PacketBurst::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet-burst.h: static ns3::TypeId ns3::PacketBurst::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## packet-burst.h: void ns3::PacketBurst::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
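# Usage sketch for ns3.PacketBurst (pkt stands for an ns3.Packet smart
# pointer; Packet construction is registered elsewhere in this file):
#
#   burst = ns3.PacketBurst()
#   burst.AddPacket(pkt)
#   print burst.GetNPackets()            # -> 1
#   print burst.GetSize()                # total bytes across all packets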
def register_Ns3PcapFileWrapper_methods(root_module, cls):
## pcap-file-wrapper.h: static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## pcap-file-wrapper.h: ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
cls.add_constructor([])
## pcap-file-wrapper.h: bool ns3::PcapFileWrapper::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h: bool ns3::PcapFileWrapper::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h: void ns3::PcapFileWrapper::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file-wrapper.h: void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file-wrapper.h: void ns3::PcapFileWrapper::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file-wrapper.h: void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
## pcap-file-wrapper.h: void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<ns3::Packet const> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h: void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<ns3::Packet const> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h: void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
## pcap-file-wrapper.h: uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file-wrapper.h: uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file-wrapper.h: uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file-wrapper.h: int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file-wrapper.h: uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file-wrapper.h: uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file-wrapper.h: uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
return
def register_Ns3PropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h: ns3::PropagationDelayModel::PropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h: ns3::PropagationDelayModel::PropagationDelayModel(ns3::PropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h: ns3::Time ns3::PropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## propagation-delay-model.h: static ns3::TypeId ns3::PropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3PropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::PropagationLossModel::PropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function]
cls.add_method('SetNext',
'void',
[param('ns3::Ptr< ns3::PropagationLossModel >', 'next')])
## propagation-loss-model.h: double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('CalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## propagation-loss-model.h: double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
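# PropagationLossModel is a chain-of-responsibility base: CalcRxPower()
# applies this model's private DoCalcRxPower() and then delegates to the
# model installed with SetNext(). Hedged sketch using concrete models
# registered below (mob_a/mob_b stand for ns3.MobilityModel pointers):
#
#   loss = ns3.RangePropagationLossModel()
#   loss.SetNext(ns3.RandomPropagationLossModel())
#   rx_dbm = loss.CalcRxPower(16.0, mob_a, mob_b)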
def register_Ns3RandomPropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h: ns3::RandomPropagationDelayModel::RandomPropagationDelayModel(ns3::RandomPropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomPropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h: ns3::RandomPropagationDelayModel::RandomPropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h: ns3::Time ns3::RandomPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True)
## propagation-delay-model.h: static ns3::TypeId ns3::RandomPropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3RandomPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3RangePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3SpectrumConverter_methods(root_module, cls):
## spectrum-converter.h: ns3::SpectrumConverter::SpectrumConverter(ns3::SpectrumConverter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SpectrumConverter const &', 'arg0')])
## spectrum-converter.h: ns3::SpectrumConverter::SpectrumConverter(ns3::Ptr<ns3::SpectrumModel const> fromSpectrumModel, ns3::Ptr<ns3::SpectrumModel const> toSpectrumModel) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::SpectrumModel const >', 'fromSpectrumModel'), param('ns3::Ptr< ns3::SpectrumModel const >', 'toSpectrumModel')])
## spectrum-converter.h: ns3::SpectrumConverter::SpectrumConverter() [constructor]
cls.add_constructor([])
## spectrum-converter.h: ns3::Ptr<ns3::SpectrumValue> ns3::SpectrumConverter::Convert(ns3::Ptr<ns3::SpectrumValue const> vvf) const [member function]
cls.add_method('Convert',
'ns3::Ptr< ns3::SpectrumValue >',
[param('ns3::Ptr< ns3::SpectrumValue const >', 'vvf')],
is_const=True)
return
def register_Ns3SpectrumModel_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
## spectrum-model.h: ns3::SpectrumModel::SpectrumModel(ns3::SpectrumModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SpectrumModel const &', 'arg0')])
## spectrum-model.h: ns3::SpectrumModel::SpectrumModel(std::vector<double, std::allocator<double> > centerFreqs) [constructor]
cls.add_constructor([param('std::vector< double >', 'centerFreqs')])
## spectrum-model.h: ns3::SpectrumModel::SpectrumModel(ns3::Bands bands) [constructor]
cls.add_constructor([param('ns3::Bands', 'bands')])
## spectrum-model.h: __gnu_cxx::__normal_iterator<const ns3::BandInfo*,std::vector<ns3::BandInfo, std::allocator<ns3::BandInfo> > > ns3::SpectrumModel::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::BandInfo const *, std::vector< ns3::BandInfo > >',
[],
is_const=True)
## spectrum-model.h: __gnu_cxx::__normal_iterator<const ns3::BandInfo*,std::vector<ns3::BandInfo, std::allocator<ns3::BandInfo> > > ns3::SpectrumModel::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::BandInfo const *, std::vector< ns3::BandInfo > >',
[],
is_const=True)
## spectrum-model.h: size_t ns3::SpectrumModel::GetNumBands() const [member function]
cls.add_method('GetNumBands',
'size_t',
[],
is_const=True)
## spectrum-model.h: ns3::SpectrumModelUid_t ns3::SpectrumModel::GetUid() const [member function]
cls.add_method('GetUid',
'ns3::SpectrumModelUid_t',
[],
is_const=True)
return
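# SpectrumModel sketch. Passing a Python list for the std::vector<double>
# constructor is an assumption that relies on pybindgen's container
# conversion being registered for this type:
#
#   model = ns3.SpectrumModel([2.412e9, 2.417e9, 2.422e9])
#   print model.GetNumBands()            # -> 3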
def register_Ns3SpectrumPropagationLossModel_methods(root_module, cls):
## spectrum-propagation-loss-model.h: ns3::SpectrumPropagationLossModel::SpectrumPropagationLossModel(ns3::SpectrumPropagationLossModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SpectrumPropagationLossModel const &', 'arg0')])
## spectrum-propagation-loss-model.h: ns3::SpectrumPropagationLossModel::SpectrumPropagationLossModel() [constructor]
cls.add_constructor([])
## spectrum-propagation-loss-model.h: ns3::Ptr<ns3::SpectrumValue> ns3::SpectrumPropagationLossModel::CalcRxPowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue const> txPsd, ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
cls.add_method('CalcRxPowerSpectralDensity',
'ns3::Ptr< ns3::SpectrumValue >',
[param('ns3::Ptr< ns3::SpectrumValue const >', 'txPsd'), param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
is_const=True)
## spectrum-propagation-loss-model.h: static ns3::TypeId ns3::SpectrumPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## spectrum-propagation-loss-model.h: void ns3::SpectrumPropagationLossModel::SetNext(ns3::Ptr<ns3::SpectrumPropagationLossModel> next) [member function]
cls.add_method('SetNext',
'void',
[param('ns3::Ptr< ns3::SpectrumPropagationLossModel >', 'next')])
## spectrum-propagation-loss-model.h: void ns3::SpectrumPropagationLossModel::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## spectrum-propagation-loss-model.h: ns3::Ptr<ns3::SpectrumValue> ns3::SpectrumPropagationLossModel::DoCalcRxPowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue const> txPsd, ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPowerSpectralDensity',
'ns3::Ptr< ns3::SpectrumValue >',
[param('ns3::Ptr< ns3::SpectrumValue const >', 'txPsd'), param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3SpectrumValue_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_numeric_operator('*', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('double', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('ns3::SpectrumValue const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('double', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('ns3::SpectrumValue const &', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('double', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('ns3::SpectrumValue const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('double', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::SpectrumValue'], root_module['ns3::SpectrumValue'], param('ns3::SpectrumValue const &', 'right'))
cls.add_inplace_numeric_operator('*=', param('ns3::SpectrumValue const &', 'right'))
cls.add_inplace_numeric_operator('*=', param('double', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::SpectrumValue const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('double', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::SpectrumValue const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('double', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::SpectrumValue const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('double', 'right'))
## spectrum-value.h: ns3::SpectrumValue::SpectrumValue(ns3::SpectrumValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SpectrumValue const &', 'arg0')])
## spectrum-value.h: ns3::SpectrumValue::SpectrumValue(ns3::Ptr<ns3::SpectrumModel const> sm) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::SpectrumModel const >', 'sm')])
## spectrum-value.h: ns3::SpectrumValue::SpectrumValue() [constructor]
cls.add_constructor([])
## spectrum-value.h: __gnu_cxx::__normal_iterator<const ns3::BandInfo*,std::vector<ns3::BandInfo, std::allocator<ns3::BandInfo> > > ns3::SpectrumValue::ConstBandsBegin() const [member function]
cls.add_method('ConstBandsBegin',
'__gnu_cxx::__normal_iterator< ns3::BandInfo const *, std::vector< ns3::BandInfo > >',
[],
is_const=True)
## spectrum-value.h: __gnu_cxx::__normal_iterator<const ns3::BandInfo*,std::vector<ns3::BandInfo, std::allocator<ns3::BandInfo> > > ns3::SpectrumValue::ConstBandsEnd() const [member function]
cls.add_method('ConstBandsEnd',
'__gnu_cxx::__normal_iterator< ns3::BandInfo const *, std::vector< ns3::BandInfo > >',
[],
is_const=True)
## spectrum-value.h: __gnu_cxx::__normal_iterator<const double*,std::vector<double, std::allocator<double> > > ns3::SpectrumValue::ConstValuesBegin() const [member function]
cls.add_method('ConstValuesBegin',
'__gnu_cxx::__normal_iterator< double const *, std::vector< double > >',
[],
is_const=True)
## spectrum-value.h: __gnu_cxx::__normal_iterator<const double*,std::vector<double, std::allocator<double> > > ns3::SpectrumValue::ConstValuesEnd() const [member function]
cls.add_method('ConstValuesEnd',
'__gnu_cxx::__normal_iterator< double const *, std::vector< double > >',
[],
is_const=True)
## spectrum-value.h: ns3::Ptr<ns3::SpectrumValue> ns3::SpectrumValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::SpectrumValue >',
[],
is_const=True)
## spectrum-value.h: ns3::Ptr<ns3::SpectrumModel const> ns3::SpectrumValue::GetSpectrumModel() const [member function]
cls.add_method('GetSpectrumModel',
'ns3::Ptr< ns3::SpectrumModel const >',
[],
is_const=True)
## spectrum-value.h: ns3::SpectrumModelUid_t ns3::SpectrumValue::GetSpectrumModelUid() const [member function]
cls.add_method('GetSpectrumModelUid',
'ns3::SpectrumModelUid_t',
[],
is_const=True)
## spectrum-value.h: __gnu_cxx::__normal_iterator<double*,std::vector<double, std::allocator<double> > > ns3::SpectrumValue::ValuesBegin() [member function]
cls.add_method('ValuesBegin',
'__gnu_cxx::__normal_iterator< double *, std::vector< double > >',
[])
## spectrum-value.h: __gnu_cxx::__normal_iterator<double*,std::vector<double, std::allocator<double> > > ns3::SpectrumValue::ValuesEnd() [member function]
cls.add_method('ValuesEnd',
'__gnu_cxx::__normal_iterator< double *, std::vector< double > >',
[])
return
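# The operator registrations above make ns3.SpectrumValue behave like an
# element-wise numeric vector over its SpectrumModel's bands. Sketch,
# assuming `model` converts to the Ptr<SpectrumModel const> parameter:
#
#   psd = ns3.SpectrumValue(model)
#   psd += 1e-9                          # add to every band in place
#   half = psd / 2.0                     # element-wise scaling, new value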
def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h: ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h: ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h: uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h: uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h: static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h: void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h: void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: void ns3::TwoRayGroundPropagationLossModel::SetLambda(double frequency, double speed) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'frequency'), param('double', 'speed')])
## propagation-loss-model.h: void ns3::TwoRayGroundPropagationLossModel::SetLambda(double lambda) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'lambda')])
## propagation-loss-model.h: void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h: void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## propagation-loss-model.h: double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## propagation-loss-model.h: double ns3::TwoRayGroundPropagationLossModel::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## propagation-loss-model.h: double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h: void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function]
cls.add_method('SetHeightAboveZ',
'void',
[param('double', 'heightAboveZ')])
## propagation-loss-model.h: double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
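# Note the two SetLambda overloads registered above: SetLambda(frequency,
# speed) derives the wavelength as speed/frequency, while SetLambda(lambda)
# sets it directly. Sketch:
#
#   m = ns3.TwoRayGroundPropagationLossModel()
#   m.SetLambda(2.4e9, 3e8)              # 2.4 GHz carrier, c = 3e8 m/s
#   m.SetHeightAboveZ(1.5)               # antenna height above the node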
def register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h: ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel(ns3::ConstantSpeedPropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConstantSpeedPropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h: ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h: ns3::Time ns3::ConstantSpeedPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True)
## propagation-delay-model.h: double ns3::ConstantSpeedPropagationDelayModel::GetSpeed() const [member function]
cls.add_method('GetSpeed',
'double',
[],
is_const=True)
## propagation-delay-model.h: static ns3::TypeId ns3::ConstantSpeedPropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-delay-model.h: void ns3::ConstantSpeedPropagationDelayModel::SetSpeed(double speed) [member function]
cls.add_method('SetSpeed',
'void',
[param('double', 'speed')])
return
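# Sketch for the constant-speed delay model (delay = distance / speed;
# mob_a/mob_b again stand for ns3.MobilityModel smart pointers):
#
#   d = ns3.ConstantSpeedPropagationDelayModel()
#   d.SetSpeed(3e8)                      # propagate at the speed of light
#   t = d.GetDelay(mob_a, mob_b)         # returns an ns3.Time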
def register_Ns3Cost231PropagationLossModel_methods(root_module, cls):
## cost231-propagation-loss-model.h: static ns3::TypeId ns3::Cost231PropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## cost231-propagation-loss-model.h: ns3::Cost231PropagationLossModel::Cost231PropagationLossModel() [constructor]
cls.add_constructor([])
## cost231-propagation-loss-model.h: double ns3::Cost231PropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## cost231-propagation-loss-model.h: void ns3::Cost231PropagationLossModel::SetBSAntennaHeight(double height) [member function]
cls.add_method('SetBSAntennaHeight',
'void',
[param('double', 'height')])
## cost231-propagation-loss-model.h: void ns3::Cost231PropagationLossModel::SetSSAntennaHeight(double height) [member function]
cls.add_method('SetSSAntennaHeight',
'void',
[param('double', 'height')])
## cost231-propagation-loss-model.h: void ns3::Cost231PropagationLossModel::SetEnvironment(ns3::Cost231PropagationLossModel::Environment env) [member function]
cls.add_method('SetEnvironment',
'void',
[param('ns3::Cost231PropagationLossModel::Environment', 'env')])
## cost231-propagation-loss-model.h: void ns3::Cost231PropagationLossModel::SetLambda(double lambda) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'lambda')])
## cost231-propagation-loss-model.h: void ns3::Cost231PropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## cost231-propagation-loss-model.h: double ns3::Cost231PropagationLossModel::GetBSAntennaHeight() const [member function]
cls.add_method('GetBSAntennaHeight',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h: double ns3::Cost231PropagationLossModel::GetSSAntennaHeight() const [member function]
cls.add_method('GetSSAntennaHeight',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h: ns3::Cost231PropagationLossModel::Environment ns3::Cost231PropagationLossModel::GetEnvironment() const [member function]
cls.add_method('GetEnvironment',
'ns3::Cost231PropagationLossModel::Environment',
[],
is_const=True)
## cost231-propagation-loss-model.h: double ns3::Cost231PropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h: double ns3::Cost231PropagationLossModel::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h: void ns3::Cost231PropagationLossModel::SetLambda(double frequency, double speed) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'frequency'), param('double', 'speed')])
## cost231-propagation-loss-model.h: double ns3::Cost231PropagationLossModel::GetShadowing() [member function]
cls.add_method('GetShadowing',
'double',
[])
## cost231-propagation-loss-model.h: void ns3::Cost231PropagationLossModel::SetShadowing(double shadowing) [member function]
cls.add_method('SetShadowing',
'void',
[param('double', 'shadowing')])
## cost231-propagation-loss-model.h: double ns3::Cost231PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3DataRateChecker_methods(root_module, cls):
## data-rate.h: ns3::DataRateChecker::DataRateChecker() [constructor]
cls.add_constructor([])
## data-rate.h: ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')])
return
def register_Ns3DataRateValue_methods(root_module, cls):
## data-rate.h: ns3::DataRateValue::DataRateValue() [constructor]
cls.add_constructor([])
## data-rate.h: ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')])
## data-rate.h: ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor]
cls.add_constructor([param('ns3::DataRate const &', 'value')])
## data-rate.h: ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## data-rate.h: bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## data-rate.h: ns3::DataRate ns3::DataRateValue::Get() const [member function]
cls.add_method('Get',
'ns3::DataRate',
[],
is_const=True)
## data-rate.h: std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## data-rate.h: void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::DataRate const &', 'value')])
return
def register_Ns3ErrorModel_methods(root_module, cls):
## error-model.h: ns3::ErrorModel::ErrorModel(ns3::ErrorModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ErrorModel const &', 'arg0')])
## error-model.h: ns3::ErrorModel::ErrorModel() [constructor]
cls.add_constructor([])
## error-model.h: void ns3::ErrorModel::Disable() [member function]
cls.add_method('Disable',
'void',
[])
## error-model.h: void ns3::ErrorModel::Enable() [member function]
cls.add_method('Enable',
'void',
[])
## error-model.h: static ns3::TypeId ns3::ErrorModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## error-model.h: bool ns3::ErrorModel::IsCorrupt(ns3::Ptr<ns3::Packet> pkt) [member function]
cls.add_method('IsCorrupt',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'pkt')])
## error-model.h: bool ns3::ErrorModel::IsEnabled() const [member function]
cls.add_method('IsEnabled',
'bool',
[],
is_const=True)
## error-model.h: void ns3::ErrorModel::Reset() [member function]
cls.add_method('Reset',
'void',
[])
## error-model.h: bool ns3::ErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> arg0) [member function]
cls.add_method('DoCorrupt',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'arg0')],
is_pure_virtual=True, visibility='private', is_virtual=True)
## error-model.h: void ns3::ErrorModel::DoReset() [member function]
cls.add_method('DoReset',
'void',
[],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
def register_Ns3FixedRssLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::FixedRssLossModel::FixedRssLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: void ns3::FixedRssLossModel::SetRss(double rss) [member function]
cls.add_method('SetRss',
'void',
[param('double', 'rss')])
## propagation-loss-model.h: double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: void ns3::FriisPropagationLossModel::SetLambda(double frequency, double speed) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'frequency'), param('double', 'speed')])
## propagation-loss-model.h: void ns3::FriisPropagationLossModel::SetLambda(double lambda) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'lambda')])
## propagation-loss-model.h: void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h: void ns3::FriisPropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## propagation-loss-model.h: double ns3::FriisPropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## propagation-loss-model.h: double ns3::FriisPropagationLossModel::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## propagation-loss-model.h: double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h: double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3FriisSpectrumPropagationLossModel_methods(root_module, cls):
## friis-spectrum-propagation-loss.h: ns3::FriisSpectrumPropagationLossModel::FriisSpectrumPropagationLossModel(ns3::FriisSpectrumPropagationLossModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FriisSpectrumPropagationLossModel const &', 'arg0')])
## friis-spectrum-propagation-loss.h: ns3::FriisSpectrumPropagationLossModel::FriisSpectrumPropagationLossModel() [constructor]
cls.add_constructor([])
## friis-spectrum-propagation-loss.h: double ns3::FriisSpectrumPropagationLossModel::CalculateLoss(double f, double d) const [member function]
cls.add_method('CalculateLoss',
'double',
[param('double', 'f'), param('double', 'd')],
is_const=True)
## friis-spectrum-propagation-loss.h: ns3::Ptr<ns3::SpectrumValue> ns3::FriisSpectrumPropagationLossModel::DoCalcRxPowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue const> txPsd, ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPowerSpectralDensity',
'ns3::Ptr< ns3::SpectrumValue >',
[param('ns3::Ptr< ns3::SpectrumValue const >', 'txPsd'), param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
is_const=True, is_virtual=True)
## friis-spectrum-propagation-loss.h: static ns3::TypeId ns3::FriisSpectrumPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3JakesPropagationLossModel_methods(root_module, cls):
## jakes-propagation-loss-model.h: ns3::JakesPropagationLossModel::JakesPropagationLossModel() [constructor]
cls.add_constructor([])
## jakes-propagation-loss-model.h: uint8_t ns3::JakesPropagationLossModel::GetNOscillators() const [member function]
cls.add_method('GetNOscillators',
'uint8_t',
[],
is_const=True)
## jakes-propagation-loss-model.h: uint8_t ns3::JakesPropagationLossModel::GetNRays() const [member function]
cls.add_method('GetNRays',
'uint8_t',
[],
is_const=True)
## jakes-propagation-loss-model.h: static ns3::TypeId ns3::JakesPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## jakes-propagation-loss-model.h: void ns3::JakesPropagationLossModel::SetNOscillators(uint8_t nOscillators) [member function]
cls.add_method('SetNOscillators',
'void',
[param('uint8_t', 'nOscillators')])
## jakes-propagation-loss-model.h: void ns3::JakesPropagationLossModel::SetNRays(uint8_t nRays) [member function]
cls.add_method('SetNRays',
'void',
[param('uint8_t', 'nRays')])
## jakes-propagation-loss-model.h: double ns3::JakesPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ListErrorModel_methods(root_module, cls):
## error-model.h: ns3::ListErrorModel::ListErrorModel(ns3::ListErrorModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ListErrorModel const &', 'arg0')])
## error-model.h: ns3::ListErrorModel::ListErrorModel() [constructor]
cls.add_constructor([])
## error-model.h: std::list<unsigned int, std::allocator<unsigned int> > ns3::ListErrorModel::GetList() const [member function]
cls.add_method('GetList',
'std::list< unsigned int >',
[],
is_const=True)
## error-model.h: static ns3::TypeId ns3::ListErrorModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## error-model.h: void ns3::ListErrorModel::SetList(std::list<unsigned int, std::allocator<unsigned int> > const & packetlist) [member function]
cls.add_method('SetList',
'void',
[param('std::list< unsigned int > const &', 'packetlist')])
## error-model.h: bool ns3::ListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('DoCorrupt',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'p')],
visibility='private', is_virtual=True)
## error-model.h: void ns3::ListErrorModel::DoReset() [member function]
cls.add_method('DoReset',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function]
cls.add_method('SetPathLossExponent',
'void',
[param('double', 'n')])
## propagation-loss-model.h: double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function]
cls.add_method('GetPathLossExponent',
'double',
[],
is_const=True)
## propagation-loss-model.h: void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function]
cls.add_method('SetReference',
'void',
[param('double', 'referenceDistance'), param('double', 'referenceLoss')])
## propagation-loss-model.h: double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::Node> a, ns3::Ptr<ns3::Node> b, double loss, bool symmetric=true) [member function]
cls.add_method('SetLoss',
'void',
[param('ns3::Ptr< ns3::Node >', 'a'), param('ns3::Ptr< ns3::Node >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
## propagation-loss-model.h: void ns3::MatrixPropagationLossModel::SetDefaultLoss(double arg0) [member function]
cls.add_method('SetDefaultLoss',
'void',
[param('double', 'arg0')])
## propagation-loss-model.h: double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h: static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h: ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h: double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h: ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h: ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h: void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h: uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h: ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h: uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h: uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h: uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h: uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h: uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
## output-stream-wrapper.h: ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
## output-stream-wrapper.h: ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
## output-stream-wrapper.h: std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
cls.add_method('GetStream',
'std::ostream *',
[])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h: ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h: ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h: ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h: ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h: ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h: void ns3::Packet::AddAtEnd(ns3::Ptr<ns3::Packet const> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h: void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h: void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h: void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h: void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h: void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h: ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h: ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h: uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h: void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h: ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h: static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h: static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h: bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h: ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h: ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h: ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h: uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h: uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h: uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h: uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h: uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h: bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h: uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h: void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h: void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h: void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h: void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h: void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h: void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h: void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h: uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h: bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h: uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h: uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h: void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
def register_Ns3RateErrorModel_methods(root_module, cls):
## error-model.h: ns3::RateErrorModel::RateErrorModel(ns3::RateErrorModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RateErrorModel const &', 'arg0')])
## error-model.h: ns3::RateErrorModel::RateErrorModel() [constructor]
cls.add_constructor([])
## error-model.h: double ns3::RateErrorModel::GetRate() const [member function]
cls.add_method('GetRate',
'double',
[],
is_const=True)
## error-model.h: static ns3::TypeId ns3::RateErrorModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## error-model.h: ns3::ErrorUnit ns3::RateErrorModel::GetUnit() const [member function]
cls.add_method('GetUnit',
'ns3::ErrorUnit',
[],
is_const=True)
## error-model.h: void ns3::RateErrorModel::SetRandomVariable(ns3::RandomVariable const & ranvar) [member function]
cls.add_method('SetRandomVariable',
'void',
[param('ns3::RandomVariable const &', 'ranvar')])
## error-model.h: void ns3::RateErrorModel::SetRate(double rate) [member function]
cls.add_method('SetRate',
'void',
[param('double', 'rate')])
## error-model.h: void ns3::RateErrorModel::SetUnit(ns3::ErrorUnit error_unit) [member function]
cls.add_method('SetUnit',
'void',
[param('ns3::ErrorUnit', 'error_unit')])
## error-model.h: bool ns3::RateErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('DoCorrupt',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'p')],
visibility='private', is_virtual=True)
## error-model.h: bool ns3::RateErrorModel::DoCorruptBit(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('DoCorruptBit',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'p')],
visibility='private', is_virtual=True)
## error-model.h: bool ns3::RateErrorModel::DoCorruptByte(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('DoCorruptByte',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'p')],
visibility='private', is_virtual=True)
## error-model.h: bool ns3::RateErrorModel::DoCorruptPkt(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('DoCorruptPkt',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'p')],
visibility='private', is_virtual=True)
## error-model.h: void ns3::RateErrorModel::DoReset() [member function]
cls.add_method('DoReset',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3ReceiveListErrorModel_methods(root_module, cls):
## error-model.h: ns3::ReceiveListErrorModel::ReceiveListErrorModel(ns3::ReceiveListErrorModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ReceiveListErrorModel const &', 'arg0')])
## error-model.h: ns3::ReceiveListErrorModel::ReceiveListErrorModel() [constructor]
cls.add_constructor([])
## error-model.h: std::list<unsigned int, std::allocator<unsigned int> > ns3::ReceiveListErrorModel::GetList() const [member function]
cls.add_method('GetList',
'std::list< unsigned int >',
[],
is_const=True)
## error-model.h: static ns3::TypeId ns3::ReceiveListErrorModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## error-model.h: void ns3::ReceiveListErrorModel::SetList(std::list<unsigned int, std::allocator<unsigned int> > const & packetlist) [member function]
cls.add_method('SetList',
'void',
[param('std::list< unsigned int > const &', 'packetlist')])
## error-model.h: bool ns3::ReceiveListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('DoCorrupt',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'p')],
visibility='private', is_virtual=True)
## error-model.h: void ns3::ReceiveListErrorModel::DoReset() [member function]
cls.add_method('DoReset',
'void',
[],
visibility='private', is_virtual=True)
return
def register_functions(root_module):
module = root_module
## spectrum-value.h: extern ns3::SpectrumValue ns3::Log(ns3::SpectrumValue const & arg) [free function]
module.add_function('Log',
'ns3::SpectrumValue',
[param('ns3::SpectrumValue const &', 'arg')])
## spectrum-value.h: extern ns3::SpectrumValue ns3::Log10(ns3::SpectrumValue const & arg) [free function]
module.add_function('Log10',
'ns3::SpectrumValue',
[param('ns3::SpectrumValue const &', 'arg')])
## spectrum-value.h: extern ns3::SpectrumValue ns3::Log2(ns3::SpectrumValue const & arg) [free function]
module.add_function('Log2',
'ns3::SpectrumValue',
[param('ns3::SpectrumValue const &', 'arg')])
## data-rate.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeDataRateChecker() [free function]
module.add_function('MakeDataRateChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## spectrum-value.h: extern double ns3::Norm(ns3::SpectrumValue const & x) [free function]
module.add_function('Norm',
'double',
[param('ns3::SpectrumValue const &', 'x')])
## spectrum-value.h: extern ns3::SpectrumValue ns3::Pow(ns3::SpectrumValue const & base, double exp) [free function]
module.add_function('Pow',
'ns3::SpectrumValue',
[param('ns3::SpectrumValue const &', 'base'), param('double', 'exp')])
## spectrum-value.h: extern ns3::SpectrumValue ns3::Pow(double base, ns3::SpectrumValue const & exp) [free function]
module.add_function('Pow',
'ns3::SpectrumValue',
[param('double', 'base'), param('ns3::SpectrumValue const &', 'exp')])
## spectrum-value.h: extern double ns3::Prod(ns3::SpectrumValue const & x) [free function]
module.add_function('Prod',
'double',
[param('ns3::SpectrumValue const &', 'x')])
## spectrum-value.h: extern double ns3::Sum(ns3::SpectrumValue const & x) [free function]
module.add_function('Sum',
'double',
[param('ns3::SpectrumValue const &', 'x')])
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
register_functions_ns3_flame(module.get_submodule('flame'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
return
def register_functions_ns3_Config(module, root_module):
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_addressUtils(module, root_module):
return
def register_functions_ns3_aodv(module, root_module):
return
def register_functions_ns3_dot11s(module, root_module):
return
def register_functions_ns3_flame(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def register_functions_ns3_olsr(module, root_module):
return
| gpl-2.0 |
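The registration functions above follow pybindgen's convention: each takes the root module and a class wrapper and declares constructors and methods one at a time, with overloads expressed as repeated add_method calls (see the two SetLambda registrations). A minimal driver sketch follows; the module and class names are illustrative assumptions, not taken from the file above:

import pybindgen

def build_module():
    root_module = pybindgen.Module('ns3')             # top-level binding module
    cls = root_module.add_class('ErrorModel')         # wrap the C++ class
    register_Ns3ErrorModel_methods(root_module, cls)  # attach methods as above
    return root_module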
joopert/home-assistant | homeassistant/components/co2signal/sensor.py | 5 | 3307 | """Support for the CO2signal platform."""
import logging
import CO2Signal
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_TOKEN,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
CONF_COUNTRY_CODE = "country_code"
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by CO2signal"
MSG_LOCATION = (
"Please use either coordinates or the country code. "
"For the coordinates, "
"you need to use both latitude and longitude."
)
CO2_INTENSITY_UNIT = "CO2eq/kWh"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TOKEN): cv.string,
vol.Inclusive(CONF_LATITUDE, "coords", msg=MSG_LOCATION): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coords", msg=MSG_LOCATION): cv.longitude,
vol.Optional(CONF_COUNTRY_CODE): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the CO2signal sensor."""
token = config[CONF_TOKEN]
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
country_code = config.get(CONF_COUNTRY_CODE)
_LOGGER.debug("Setting up the sensor using the %s", country_code)
devs = []
devs.append(CO2Sensor(token, country_code, lat, lon))
add_entities(devs, True)
class CO2Sensor(Entity):
"""Implementation of the CO2Signal sensor."""
def __init__(self, token, country_code, lat, lon):
"""Initialize the sensor."""
self._token = token
self._country_code = country_code
self._latitude = lat
self._longitude = lon
self._data = None
if country_code is not None:
device_name = country_code
else:
device_name = "{lat}/{lon}".format(
lat=round(self._latitude, 2), lon=round(self._longitude, 2)
)
self._friendly_name = f"CO2 intensity - {device_name}"
@property
def name(self):
"""Return the name of the sensor."""
return self._friendly_name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return "mdi:periodic-table-co2"
@property
def state(self):
"""Return the state of the device."""
return self._data
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return CO2_INTENSITY_UNIT
@property
def device_state_attributes(self):
"""Return the state attributes of the last update."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Update data for %s", self._friendly_name)
if self._country_code is not None:
self._data = CO2Signal.get_latest_carbon_intensity(
self._token, country_code=self._country_code
)
else:
self._data = CO2Signal.get_latest_carbon_intensity(
self._token, latitude=self._latitude, longitude=self._longitude
)
self._data = round(self._data, 2)
| apache-2.0 |
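The sensor above delegates all data access to the CO2Signal client library. A standalone sketch using only the calls seen in the component; the token and coordinates are placeholder assumptions:

import CO2Signal

TOKEN = "your-co2signal-api-token"  # placeholder

# By country code, as in the component's first branch:
by_country = CO2Signal.get_latest_carbon_intensity(TOKEN, country_code="FR")

# Or by coordinates, mirroring the second branch:
by_coords = CO2Signal.get_latest_carbon_intensity(
    TOKEN, latitude=48.85, longitude=2.35)
print(round(by_coords, 2), "CO2eq/kWh")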
alaski/nova | nova/tests/unit/test_iptables_network.py | 6 | 13204 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for network code."""
import mock
import six
from nova.network import linux_net
from nova import test
class IptablesManagerTestCase(test.NoDBTestCase):
binary_name = linux_net.get_binary_name()
sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
'*filter',
':INPUT ACCEPT [2223527:305688874]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [2172501:140856656]',
':iptables-top-rule - [0:0]',
':iptables-bottom-rule - [0:0]',
':%s-FORWARD - [0:0]' % (binary_name),
':%s-INPUT - [0:0]' % (binary_name),
':%s-OUTPUT - [0:0]' % (binary_name),
':%s-local - [0:0]' % (binary_name),
':nova-filter-top - [0:0]',
'[0:0] -A FORWARD -j nova-filter-top',
'[0:0] -A OUTPUT -j nova-filter-top',
'[0:0] -A nova-filter-top -j %s-local' % (binary_name),
'[0:0] -A INPUT -j %s-INPUT' % (binary_name),
'[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
'[0:0] -A FORWARD -j %s-FORWARD' % (binary_name),
'[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 53 '
'-j ACCEPT',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 53 '
'-j ACCEPT',
'[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 67 '
'-j ACCEPT',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 '
'-j ACCEPT',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 '
'-j ACCEPT',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT',
'[0:0] -A FORWARD -o virbr0 -j REJECT --reject-with '
'icmp-port-unreachable',
'[0:0] -A FORWARD -i virbr0 -j REJECT --reject-with '
'icmp-port-unreachable',
'COMMIT',
'# Completed on Fri Feb 18 15:17:05 2011']
sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
'*nat',
':PREROUTING ACCEPT [3936:762355]',
':INPUT ACCEPT [2447:225266]',
':OUTPUT ACCEPT [63491:4191863]',
':POSTROUTING ACCEPT [63112:4108641]',
':%s-OUTPUT - [0:0]' % (binary_name),
':%s-POSTROUTING - [0:0]' % (binary_name),
':%s-PREROUTING - [0:0]' % (binary_name),
':%s-float-snat - [0:0]' % (binary_name),
':%s-snat - [0:0]' % (binary_name),
':nova-postrouting-bottom - [0:0]',
'[0:0] -A PREROUTING -j %s-PREROUTING' % (binary_name),
'[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
'[0:0] -A POSTROUTING -j %s-POSTROUTING' % (binary_name),
'[0:0] -A nova-postrouting-bottom '
'-j %s-snat' % (binary_name),
'[0:0] -A %s-snat '
'-j %s-float-snat' % (binary_name, binary_name),
'[0:0] -A POSTROUTING -j nova-postrouting-bottom',
'COMMIT',
'# Completed on Fri Feb 18 15:17:05 2011']
def setUp(self):
super(IptablesManagerTestCase, self).setUp()
self.manager = linux_net.IptablesManager()
def test_duplicate_rules_no_dirty(self):
table = self.manager.ipv4['filter']
table.dirty = False
num_rules = len(table.rules)
table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
self.assertEqual(len(table.rules), num_rules + 1)
self.assertTrue(table.dirty)
table.dirty = False
num_rules = len(table.rules)
table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
self.assertEqual(len(table.rules), num_rules)
self.assertFalse(table.dirty)
def test_clean_tables_no_apply(self):
for table in six.itervalues(self.manager.ipv4):
table.dirty = False
for table in six.itervalues(self.manager.ipv6):
table.dirty = False
with mock.patch.object(self.manager, '_apply') as mock_apply:
self.manager.apply()
self.assertFalse(mock_apply.called)
def test_filter_rules_are_wrapped(self):
current_lines = self.sample_filter
table = self.manager.ipv4['filter']
table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
new_lines = self.manager._modify_rules(current_lines, table, 'filter')
self.assertIn('[0:0] -A %s-FORWARD '
'-s 1.2.3.4/5 -j DROP' % self.binary_name, new_lines)
table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
new_lines = self.manager._modify_rules(current_lines, table, 'filter')
self.assertNotIn('[0:0] -A %s-FORWARD '
'-s 1.2.3.4/5 -j DROP' % self.binary_name, new_lines)
def test_remove_rules_regex(self):
current_lines = self.sample_nat
table = self.manager.ipv4['nat']
table.add_rule('float-snat', '-s 10.0.0.1 -j SNAT --to 10.10.10.10'
' -d 10.0.0.1')
table.add_rule('float-snat', '-s 10.0.0.1 -j SNAT --to 10.10.10.10'
' -o eth0')
table.add_rule('PREROUTING', '-d 10.10.10.10 -j DNAT --to 10.0.0.1')
table.add_rule('OUTPUT', '-d 10.10.10.10 -j DNAT --to 10.0.0.1')
table.add_rule('float-snat', '-s 10.0.0.10 -j SNAT --to 10.10.10.11'
' -d 10.0.0.10')
table.add_rule('float-snat', '-s 10.0.0.10 -j SNAT --to 10.10.10.11'
' -o eth0')
table.add_rule('PREROUTING', '-d 10.10.10.11 -j DNAT --to 10.0.0.10')
table.add_rule('OUTPUT', '-d 10.10.10.11 -j DNAT --to 10.0.0.10')
new_lines = self.manager._modify_rules(current_lines, table, 'nat')
self.assertEqual(len(new_lines) - len(current_lines), 8)
        regex = r'.*\s+%s(/32|\s+|$)'
num_removed = table.remove_rules_regex(regex % '10.10.10.10')
self.assertEqual(num_removed, 4)
new_lines = self.manager._modify_rules(current_lines, table, 'nat')
self.assertEqual(len(new_lines) - len(current_lines), 4)
num_removed = table.remove_rules_regex(regex % '10.10.10.11')
self.assertEqual(num_removed, 4)
new_lines = self.manager._modify_rules(current_lines, table, 'nat')
self.assertEqual(current_lines, new_lines)
def test_nat_rules(self):
current_lines = self.sample_nat
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['nat'],
'nat')
for line in [':%s-OUTPUT - [0:0]' % (self.binary_name),
':%s-float-snat - [0:0]' % (self.binary_name),
':%s-snat - [0:0]' % (self.binary_name),
':%s-PREROUTING - [0:0]' % (self.binary_name),
':%s-POSTROUTING - [0:0]' % (self.binary_name)]:
self.assertIn(line, new_lines, "One of our chains went"
" missing.")
seen_lines = set()
for line in new_lines:
line = line.strip()
self.assertNotIn(line, seen_lines, "Duplicate line: %s" % line)
seen_lines.add(line)
last_postrouting_line = ''
for line in new_lines:
if line.startswith('[0:0] -A POSTROUTING'):
last_postrouting_line = line
self.assertIn('-j nova-postrouting-bottom', last_postrouting_line,
"Last POSTROUTING rule does not jump to "
"nova-postouting-bottom: %s" % last_postrouting_line)
for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
self.assertTrue('[0:0] -A %s -j %s-%s' %
(chain, self.binary_name, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
def test_filter_rules(self):
current_lines = self.sample_filter
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['filter'],
'nat')
for line in [':%s-FORWARD - [0:0]' % (self.binary_name),
':%s-INPUT - [0:0]' % (self.binary_name),
':%s-local - [0:0]' % (self.binary_name),
':%s-OUTPUT - [0:0]' % (self.binary_name)]:
self.assertIn(line, new_lines, "One of our chains went"
" missing.")
seen_lines = set()
for line in new_lines:
line = line.strip()
self.assertNotIn(line, seen_lines, "Duplicate line: %s" % line)
seen_lines.add(line)
for chain in ['FORWARD', 'OUTPUT']:
for line in new_lines:
if line.startswith('[0:0] -A %s' % chain):
self.assertIn('-j nova-filter-top', line,
"First %s rule does not "
"jump to nova-filter-top" % chain)
break
self.assertTrue('[0:0] -A nova-filter-top '
'-j %s-local' % self.binary_name in new_lines,
"nova-filter-top does not jump to wrapped local chain")
for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
self.assertTrue('[0:0] -A %s -j %s-%s' %
(chain, self.binary_name, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
def test_missing_table(self):
current_lines = []
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['filter'],
'filter')
for line in ['*filter',
'COMMIT']:
self.assertIn(line, new_lines, "One of iptables key lines "
"went missing.")
self.assertGreater(len(new_lines), 4, "No iptables rules added")
self.assertTrue("#Generated by nova" == new_lines[0] and
"*filter" == new_lines[1] and
"COMMIT" == new_lines[-2] and
"#Completed by nova" == new_lines[-1],
"iptables rules not generated in the correct order")
def test_iptables_top_order(self):
# Test iptables_top_regex
current_lines = list(self.sample_filter)
current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
self.flags(iptables_top_regex='-j iptables-top-rule')
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['filter'],
'filter')
self.assertEqual(current_lines, new_lines)
def test_iptables_bottom_order(self):
# Test iptables_bottom_regex
current_lines = list(self.sample_filter)
current_lines[26:26] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['filter'],
'filter')
self.assertEqual(current_lines, new_lines)
def test_iptables_preserve_order(self):
# Test both iptables_top_regex and iptables_bottom_regex
current_lines = list(self.sample_filter)
current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
current_lines[27:27] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
self.flags(iptables_top_regex='-j iptables-top-rule')
self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['filter'],
'filter')
self.assertEqual(current_lines, new_lines)
| apache-2.0 |
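The tests above exercise IptablesManager's queue-and-apply pattern: rules are added to wrapped per-table chains, then flushed in a single pass. A hedged usage sketch based only on calls the tests make (a real apply() shells out to iptables-save/restore and needs root):

from nova.network import linux_net

manager = linux_net.IptablesManager()
manager.ipv4['filter'].add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
manager.ipv4['nat'].add_rule('PREROUTING', '-d 10.10.10.10 -j DNAT --to 10.0.0.1')
manager.apply()  # no-op when no table is dirty, as test_clean_tables_no_apply shows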
cogeorg/BlackRhino | examples/Georg2012/networkx/linalg/tests/test_laplaican.py | 13 | 2686 | from nose import SkipTest
import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph
class TestLaplacian(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global numpy
global assert_equal
global assert_almost_equal
try:
import numpy
            from numpy.testing import assert_equal, assert_almost_equal
except ImportError:
raise SkipTest('NumPy not available.')
def setUp(self):
deg=[3,2,2,1,0]
self.G=havel_hakimi_graph(deg)
self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
for (u,v) in self.G.edges_iter() )
self.WG.add_node(4)
self.MG=nx.MultiGraph(self.G)
def test_laplacian(self):
"Graph Laplacian"
NL=numpy.array([[ 3, -1, -1, -1, 0],
[-1, 2, -1, 0, 0],
[-1, -1, 2, 0, 0],
[-1, 0, 0, 1, 0],
[ 0, 0, 0, 0, 0]])
WL=0.5*NL
OL=0.3*NL
assert_equal(nx.laplacian(self.G),NL)
assert_equal(nx.laplacian(self.MG),NL)
assert_equal(nx.laplacian(self.G,nodelist=[0,1]),
numpy.array([[ 1, -1],[-1, 1]]))
assert_equal(nx.laplacian(self.WG),WL)
assert_equal(nx.laplacian(self.WG,weight=None),NL)
assert_equal(nx.laplacian(self.WG,weight='other'),OL)
def test_generalized_laplacian(self):
"Generalized Graph Laplacian"
GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],
[-0.408, 1.00, -0.50, 0.00 , 0.00],
[-0.408, -0.50, 1.00, 0.00, 0.00],
[-0.577, 0.00, 0.00, 1.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00]])
assert_almost_equal(nx.generalized_laplacian(self.G),GL,decimal=3)
def test_normalized_laplacian(self):
"Generalized Graph Laplacian"
GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],
[-0.408, 1.00, -0.50, 0.00 , 0.00],
[-0.408, -0.50, 1.00, 0.00, 0.00],
[-0.577, 0.00, 0.00, 1.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00]])
assert_almost_equal(nx.normalized_laplacian(self.G),GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian(self.MG),GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian(self.WG),GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian(self.WG,weight='other'),GL,decimal=3)
| gpl-3.0 |
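The fixtures above compare three Laplacian variants on one Havel-Hakimi graph. A quick check with the same networkx 1.x calls the tests use:

import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph

G = havel_hakimi_graph([3, 2, 2, 1, 0])
print(nx.laplacian(G))             # combinatorial Laplacian, D - A
print(nx.normalized_laplacian(G))  # the normalized variant checked above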
pas256/troposphere | examples/ElasticsearchDomain.py | 2 | 1680 | # Converted from Elasticsearch Domain example located at:
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#d0e51519
from troposphere import Template, constants
from troposphere.elasticsearch import Domain, EBSOptions, VPCOptions
from troposphere.elasticsearch import ElasticsearchClusterConfig
from troposphere.elasticsearch import SnapshotOptions
templ = Template()
templ.add_description('Elasticsearch Domain example')
es_domain = templ.add_resource(Domain(
'ElasticsearchDomain',
DomainName="ExampleElasticsearchDomain",
ElasticsearchClusterConfig=ElasticsearchClusterConfig(
DedicatedMasterEnabled=True,
InstanceCount=2,
ZoneAwarenessEnabled=True,
InstanceType=constants.ELASTICSEARCH_M3_MEDIUM,
DedicatedMasterType=constants.ELASTICSEARCH_M3_MEDIUM,
DedicatedMasterCount=3
),
EBSOptions=EBSOptions(EBSEnabled=True,
Iops=0,
VolumeSize=20,
VolumeType="gp2"),
SnapshotOptions=SnapshotOptions(AutomatedSnapshotStartHour=0),
AccessPolicies={'Version': '2012-10-17',
'Statement': [{
'Effect': 'Allow',
'Principal': {
'AWS': '*'
},
'Action': 'es:*',
'Resource': '*'
}]},
AdvancedOptions={"rest.action.multi.allow_explicit_index": "true"},
VPCOptions=VPCOptions(
SubnetIds=["subnet-4f2bb123"],
SecurityGroupIds=["sg-04cf048c"]
)
))
print(templ.to_json())
| bsd-2-clause |
Evarin/velib-exp | anim.py | 1 | 4785 | #coding:utf8
from get_data import *
from get_weather import *
from visu import *
import numpy as np
import os
import sqlite3
from collections import OrderedDict
from PIL import Image, ImageDraw, ImageFont
from datetime import datetime
from dateutil import tz
from tzlocal import get_localzone
import sys
import StringIO
import locale
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
# Get stations list
def init_conn(file='data.db'):
conn = sqlite3.connect(file)
c = conn.cursor()
    stations = OrderedDict((i[0], i) for i in c.execute("SELECT id, latitude, longitude FROM stations").fetchall() if np.abs(i[1]) > 0. and np.abs(i[2]) > 0.)
return (c, stations)
# Browse the DB and generate files from it
def generate_all(c, stations, resolution=600000,
destination="output/time/", start_time=0):
corresp = {k: i for i, k in enumerate(stations.keys())}
status = (np.array([0. for v in stations.values()]),
np.array([0. for v in stations.values()]))
positions = (np.array([v[2] for v in stations.values()]),
np.array([-v[1] for v in stations.values()]))
lasttime = start_time
lastday = -1
sunrise = 0
sunset = 0
fond = Image.open('output/fond.png').convert('RGBA')
for (id, ab, fs, t) in c.execute("SELECT station_id, available_bikes, free_stands, updated FROM stationsstats"):
if lasttime == 0:
lasttime = (t / resolution) * resolution
if t > lasttime + resolution:
bounds, map = build_map(positions, status)
dt = datetime.fromtimestamp((lasttime)/1000, tz=tz.tzutc()).astimezone(tz.tzlocal())
if dt.day != lastday:
wt = retrieve_weather(lasttime/1000)
(hweather, (sunrise, sunset)) = extract_weather(wt)
lastday = dt.day
if destination == "stdout":
f = StringIO.StringIO()
else:
f = os.path.join(destination, "{}.jpg".format(lasttime))
for (tw, icon, temp) in hweather:
if tw > lasttime/1000:
break
weather = (icon, temp)
dsrise = (lasttime/1000-sunrise)/60
dsset = (lasttime/1000-sunset)/60
if np.abs(dsrise) < 30:
luminosity = 0.5 + dsrise/60.
elif np.abs(dsset) < 30:
luminosity = 0.5 - dsset/60.
else:
luminosity = 0. if dsrise < 0 or dsset > 0 else 1.
save_fond_map(map, f, fond=fond,
caption=(dt.strftime("%A %d %B %Y").decode('utf-8'), dt.strftime(u"%Hh%M")),
weather=weather,
luminosity=luminosity)
if destination == "stdout":
print(f.getvalue())
f.close()
lasttime = (t / resolution) * resolution
try:
i = corresp[id]
status[0][i] = fs
status[1][i] = ab
except KeyError:
            print("Unknown station", id)
            continue
# Draw the result on a wonderful map and a few other info
weather_icons = {}
def save_fond_map(map, imfile="output/image2.jpg", fond=None, caption=None, weather=None, luminosity=None):
map = map * 255
map[:,:,3] *= 0.7
map = map.astype(np.uint8)
image = Image.fromarray(map)
if fond is None:
fond = Image.open('output/fond.png').convert('RGBA')
if not luminosity is None and luminosity < 1.0:
mm = 0.5 + luminosity/2.
fond = fond.point(lambda p: p*mm)
image = image.resize(fond.size)
image = Image.alpha_composite(fond, image)
w, h = image.size
if not weather is None or not caption is None:
d = ImageDraw.Draw(image)
font = ImageFont.truetype('data/fonts/Raleway-Bold.ttf', 48)
if not caption is None:
(date, time) = caption
ts = d.textsize(date, font)
d.text((w-ts[0]-20, h-ts[1]-20), date, (0,0,0,255), font)
ts = d.textsize(time, font)
d.text((w-ts[0]-20, 20), time, (0,0,0,255), font)
if not weather is None:
(wic, tmp) = weather
d.text((178, 50), u"%d°C" % tmp, (0,0,0,255), font)
global weather_icons
if not wic in weather_icons:
weather_icons[wic] = Image.open('data/icons/{}.png'.format(wic))
icone = weather_icons[wic]
image.paste(icone, box=(94-icone.size[0]/2,84-icone.size[1]/2), mask=icone)
image.save(imfile, 'JPEG')
# Main
if __name__ == '__main__':
(c, stations) = init_conn('/media/evarin/Data/velib.db')
if len(sys.argv) > 1:
destination = sys.argv[1]
else:
destination = "output/time/"
generate_all(c, stations, destination=destination)
exit()
| mit |
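The sunrise/sunset handling in generate_all ramps luminosity linearly over the hour centered on each event. The same logic, factored into a standalone sketch (timestamps in seconds, offsets in minutes):

def luminosity_at(ts, sunrise, sunset):
    dsrise = (ts - sunrise) / 60.0
    dsset = (ts - sunset) / 60.0
    if abs(dsrise) < 30:            # one-hour ramp centered on sunrise
        return 0.5 + dsrise / 60.0
    if abs(dsset) < 30:             # symmetric ramp centered on sunset
        return 0.5 - dsset / 60.0
    return 0.0 if dsrise < 0 or dsset > 0 else 1.0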
vipul-sharma20/oh-mainline | vendor/packages/PyYaml/lib/yaml/resolver.py | 474 | 8972 |
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
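        # A hedged example (illustrative only, not from the original notes):
        #   Resolver.add_path_resolver(u'!top-key', [(MappingNode, True)],
        #                              kind=str)
        # would tag scalar keys of a top-level mapping node as '!top-key'.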
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (basestring, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, basestring):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if (index_check is False or index_check is None) \
and current_index is None:
return
if isinstance(index_check, basestring):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.yaml_implicit_resolvers.get(u'', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
pass
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
|\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:int',
re.compile(ur'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(ur'^(?:<<)$'),
[u'<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
re.compile(ur'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u''])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:timestamp',
re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(ur'^(?:=)$'),
[u'='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:yaml',
re.compile(ur'^(?:!|&|\*)$'),
list(u'!&*'))
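# A small self-check sketch (editor's addition, not part of PyYAML itself):
# shows which tags the implicit resolvers registered above pick for a few
# plain scalars.
if __name__ == '__main__':
    _r = Resolver()
    assert _r.resolve(ScalarNode, u'12', (True, False)) == u'tag:yaml.org,2002:int'
    assert _r.resolve(ScalarNode, u'12.5', (True, False)) == u'tag:yaml.org,2002:float'
    assert _r.resolve(ScalarNode, u'yes', (True, False)) == u'tag:yaml.org,2002:bool'
    assert _r.resolve(ScalarNode, u'~', (True, False)) == u'tag:yaml.org,2002:null'
    assert _r.resolve(ScalarNode, u'spam', (True, False)) == Resolver.DEFAULT_SCALAR_TAG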
| agpl-3.0 |
ojengwa/odoo | addons/point_of_sale/report/pos_details.py | 72 | 9373 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class pos_details(report_sxw.rml_parse):
def _get_invoice(self, inv_id):
res={}
if inv_id:
self.cr.execute("select number from account_invoice as ac where id = %s", (inv_id,))
res = self.cr.fetchone()
return res[0] or 'Draft'
else:
return ''
def _get_all_users(self):
user_obj = self.pool.get('res.users')
return user_obj.search(self.cr, self.uid, [])
def _pos_sales_details(self, form):
pos_obj = self.pool.get('pos.order')
user_obj = self.pool.get('res.users')
data = []
result = {}
user_ids = form['user_ids'] or self._get_all_users()
company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
pos_ids = pos_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('user_id','in',user_ids),('state','in',['done','paid','invoiced']),('company_id','=',company_id)])
for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
for pol in pos.lines:
result = {
'code': pol.product_id.default_code,
'name': pol.product_id.name,
'invoice_id': pos.invoice_id.id,
'price_unit': pol.price_unit,
'qty': pol.qty,
'discount': pol.discount,
'total': (pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)),
'date_order': pos.date_order,
'pos_name': pos.name,
'uom': pol.product_id.uom_id.name
}
data.append(result)
self.total += result['total']
self.qty += result['qty']
self.discount += result['discount']
if data:
return data
else:
return {}
def _get_qty_total_2(self):
return self.qty
def _get_sales_total_2(self):
return self.total
def _get_sum_invoice_2(self, form):
pos_obj = self.pool.get('pos.order')
user_obj = self.pool.get('res.users')
user_ids = form['user_ids'] or self._get_all_users()
company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
pos_ids = pos_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('user_id','in',user_ids),('company_id','=',company_id),('invoice_id','<>',False)])
for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
for pol in pos.lines:
self.total_invoiced += (pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0))
return self.total_invoiced or False
def _paid_total_2(self):
return self.total or 0.0
def _get_sum_dis_2(self):
return self.discount or 0.0
def _get_sum_discount(self, form):
#code for the sum of discount value
pos_obj = self.pool.get('pos.order')
user_obj = self.pool.get('res.users')
user_ids = form['user_ids'] or self._get_all_users()
company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
pos_ids = pos_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('user_id','in',user_ids),('company_id','=',company_id)])
for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
for pol in pos.lines:
self.total_discount += ((pol.price_unit * pol.qty) * (pol.discount / 100))
return self.total_discount or False
def _get_payments(self, form):
statement_line_obj = self.pool.get("account.bank.statement.line")
pos_order_obj = self.pool.get("pos.order")
user_ids = form['user_ids'] or self._get_all_users()
company_id = self.pool['res.users'].browse(self.cr, self.uid, self.uid).company_id.id
pos_ids = pos_order_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('state','in',['paid','invoiced','done']),('user_id','in',user_ids), ('company_id', '=', company_id)])
data={}
if pos_ids:
st_line_ids = statement_line_obj.search(self.cr, self.uid, [('pos_statement_id', 'in', pos_ids)])
if st_line_ids:
st_id = statement_line_obj.browse(self.cr, self.uid, st_line_ids)
a_l=[]
for r in st_id:
a_l.append(r['id'])
self.cr.execute("select aj.name,sum(amount) from account_bank_statement_line as absl,account_bank_statement as abs,account_journal as aj " \
"where absl.statement_id = abs.id and abs.journal_id = aj.id and absl.id IN %s " \
"group by aj.name ",(tuple(a_l),))
data = self.cr.dictfetchall()
return data
else:
return {}
def _total_of_the_day(self, objects):
return self.total or 0.00
def _sum_invoice(self, objects):
return reduce(lambda acc, obj:
acc + obj.invoice_id.amount_total,
[o for o in objects if o.invoice_id and o.invoice_id.number],
0.0)
    def _ellipsis(self, orig_str, maxlen=100, ellipsis='...'):
        maxlen = maxlen - len(ellipsis)
        if maxlen <= 0:
            maxlen = 1
        new_str = orig_str[:maxlen]
        if len(orig_str) > maxlen:
            new_str += ellipsis
        return new_str
def _strip_name(self, name, maxlen=50):
return self._ellipsis(name, maxlen, ' ...')
def _get_tax_amount(self, form):
taxes = {}
account_tax_obj = self.pool.get('account.tax')
user_ids = form['user_ids'] or self._get_all_users()
pos_order_obj = self.pool.get('pos.order')
company_id = self.pool['res.users'].browse(self.cr, self.uid, self.uid).company_id.id
pos_ids = pos_order_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('state','in',['paid','invoiced','done']),('user_id','in',user_ids), ('company_id', '=', company_id)])
for order in pos_order_obj.browse(self.cr, self.uid, pos_ids):
for line in order.lines:
line_taxes = account_tax_obj.compute_all(self.cr, self.uid, line.product_id.taxes_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
for tax in line_taxes['taxes']:
taxes.setdefault(tax['id'], {'name': tax['name'], 'amount':0.0})
taxes[tax['id']]['amount'] += tax['amount']
return taxes.values()
def _get_user_names(self, user_ids):
user_obj = self.pool.get('res.users')
return ', '.join(map(lambda x: x.name, user_obj.browse(self.cr, self.uid, user_ids)))
def __init__(self, cr, uid, name, context):
super(pos_details, self).__init__(cr, uid, name, context=context)
self.total = 0.0
self.qty = 0.0
self.total_invoiced = 0.0
self.discount = 0.0
self.total_discount = 0.0
self.localcontext.update({
'time': time,
'strip_name': self._strip_name,
'getpayments': self._get_payments,
'getsumdisc': self._get_sum_discount,
'gettotaloftheday': self._total_of_the_day,
'gettaxamount': self._get_tax_amount,
'pos_sales_details':self._pos_sales_details,
'getqtytotal2': self._get_qty_total_2,
'getsalestotal2': self._get_sales_total_2,
'getsuminvoice2':self._get_sum_invoice_2,
'getpaidtotal2': self._paid_total_2,
'getinvoice':self._get_invoice,
'get_user_names': self._get_user_names,
})
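    # Editor's note (a sketch, not from the original file): the helpers
    # registered in `localcontext` above are what the RML report template
    # invokes, e.g. [[ strip_name(o.name) ]] or [[ gettotaloftheday(objects) ]].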
class report_pos_details(osv.AbstractModel):
_name = 'report.point_of_sale.report_detailsofsales'
_inherit = 'report.abstract_report'
_template = 'point_of_sale.report_detailsofsales'
_wrapped_report_class = pos_details
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
erkrishna9/odoo | addons/base_report_designer/wizard/base_report_designer_modify.py | 314 | 6128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
import urllib
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class base_report_sxw(osv.osv_memory):
"""Base Report sxw """
_name = 'base.report.sxw'
_columns = {
'report_id': fields.many2one('ir.actions.report.xml', "Report", required=True,domain=[('report_sxw_content','<>',False)],),
}
def get_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
data_obj = self.pool['ir.model.data']
id2 = data_obj._get_id(cr, uid, 'base_report_designer', 'view_base_report_file_sxw')
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.report.file.sxw',
'views': [(id2, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
class base_report_file_sxw(osv.osv_memory):
"""Base Report File sxw """
_name = 'base.report.file.sxw'
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
res = super(base_report_file_sxw, self).default_get(cr, uid, fields, context=context)
report_id1 = self.pool['base.report.sxw'].search(cr,uid,[])
data = self.pool['base.report.sxw'].read(cr, uid, report_id1, context=context)[0]
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if context is None:
context={}
if 'report_id' in fields:
res['report_id'] = data['report_id']
res['file_sxw'] = base64.encodestring(report.report_sxw_content)
return res
_columns = {
'report_id': fields.many2one('ir.actions.report.xml', "Report", readonly=True),
'file_sxw':fields.binary('Your .SXW file',readonly=True),
'file_sxw_upload':fields.binary('Your .SXW file',required=True)
}
def upload_report(self, cr, uid, ids, context=None):
from base_report_designer import openerp_sxw2rml
import StringIO
data=self.read(cr,uid,ids)[0]
sxwval = StringIO.StringIO(base64.decodestring(data['file_sxw_upload']))
fp = tools.file_open('normalized_oo2rml.xsl',subdir='addons/base_report_designer/openerp_sxw2rml')
newrmlcontent = str(openerp_sxw2rml.sxw2rml(sxwval, xsl=fp.read()))
report = self.pool['ir.actions.report.xml'].write(cr, uid, [data['report_id']], {
'report_sxw_content': base64.decodestring(data['file_sxw_upload']),
'report_rml_content': newrmlcontent
})
cr.commit()
data_obj = self.pool['ir.model.data']
id2 = data_obj._get_id(cr, uid, 'base_report_designer', 'view_base_report_file_rml')
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.report.rml.save',
'views': [(id2, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
class base_report_rml_save(osv.osv_memory):
"""Base Report file Save"""
_name = 'base.report.rml.save'
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
res = super(base_report_rml_save, self).default_get(cr, uid, fields, context=context)
report_ids = self.pool['base.report.sxw'].search(cr,uid,[], context=context)
data = self.pool['base.report.file.sxw'].read(cr, uid, report_ids, context=context)[0]
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if 'file_rml' in fields:
res['file_rml'] = base64.encodestring(report.report_rml_content)
return res
_columns = {
'file_rml':fields.binary('Save As'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benoitsteiner/tensorflow-opencl | tensorflow/python/debug/lib/debug_gradients_test.py | 39 | 15165 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for debug_gradients module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_gradients
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class IdentifyGradientTest(test_util.TensorFlowTestCase):
def setUp(self):
self.sess = session.Session()
with self.sess:
self.u = variables.Variable(2.0, name="u")
self.v = variables.Variable(3.0, name="v")
self.w = math_ops.multiply(self.u.value(), self.v.value(), name="w")
def tearDown(self):
ops.reset_default_graph()
debug_gradients.clear_gradient_debuggers()
def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.identify_gradient(self.w)
with self.assertRaisesRegexp(
ValueError, "The graph already contains an op named .*"):
grad_debugger.identify_gradient(self.w)
def testIdentifyGradientWorksOnMultipleLosses(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
y = math_ops.add(self.w, -1.0, name="y")
debug_y = grad_debugger_1.identify_gradient(y)
z1 = math_ops.square(debug_y, name="z1")
debug_y = grad_debugger_2.identify_gradient(y)
z2 = math_ops.sqrt(debug_y, name="z2")
with grad_debugger_1:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
with grad_debugger_2:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, ops.Tensor)
self.assertIsInstance(dz2_dy, ops.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0 ** 2, self.sess.run(z1))
self.assertAllClose(5.0 ** 0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0 ** -0.5), self.sess.run(dz2_dy))
def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger_1.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
    # More than one gradient debugger is registered, and grad_debugger is not used
# as a context manager here, so the gradient w.r.t. self.w will not be
# registered.
gradients_impl.gradients(y, [self.u, self.v])
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_1.gradient_tensor(self.w)
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_2.gradient_tensor(self.w)
def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
grad_debugger = debug_gradients.GradientsDebugger()
with self.assertRaisesRegexp(
TypeError,
r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead "
r"has type .*Operation.*"):
grad_debugger.gradient_tensor(variables.global_variables_initializer())
def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
gradient_descent.GradientDescentOptimizer(0.1).minimize(y)
self.sess.run(variables.global_variables_initializer())
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
    # The construction of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):
y = math_ops.add(self.w, -1.0, name="y")
    # The construction of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsWorksOnRefTensor(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "u:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
self.assertAllClose(
3.0, self.sess.run(grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsWorksOnMultipleTensors(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph,
"(u|w):0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
self.assertEqual(2, len(grad_debugger.gradient_tensors()))
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.assertIsInstance(grad_debugger.gradient_tensor("w:0"), ops.Tensor)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(
1.0, self.sess.run(grad_debugger.gradient_tensor("w:0")))
self.assertAllClose(
3.0, self.sess.run(grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsByXTensorsWorks(self):
y = math_ops.add(self.w, -1.0, name="foo/y")
z = math_ops.square(y, name="foo/z")
    # The construction of the forward graph has completed.
    # But we can still get the gradient tensors by using
    # watch_gradients_by_tensors().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(
self.sess.graph, [self.w, self.u, y]):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.assertEqual(3, len(grad_debugger.gradient_tensors()))
u_grad = grad_debugger.gradient_tensor(self.u)
w_grad = grad_debugger.gradient_tensor(self.w)
y_grad = grad_debugger.gradient_tensor(y)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(10.0, self.sess.run(y_grad))
self.assertAllClose(10.0, self.sess.run(w_grad))
self.assertAllClose(30.0, self.sess.run(u_grad))
def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):
y = math_ops.add(self.w, -1.0, name="y")
z1 = math_ops.square(y, name="z1")
z2 = math_ops.sqrt(y, name="z2")
grad_debugger_1 = debug_gradients.GradientsDebugger()
with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
grad_debugger_2 = debug_gradients.GradientsDebugger()
with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, ops.Tensor)
self.assertIsInstance(dz2_dy, ops.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0 ** 2, self.sess.run(z1))
self.assertAllClose(5.0 ** 0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0 ** -0.5), self.sess.run(dz2_dy))
def testGradientsValuesFromDumpWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
z = math_ops.square(y, name="z")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(
self.sess.graph, [self.w, self.u, y]):
train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
dump_dir = tempfile.mkdtemp()
debug_url = "file://" + dump_dir
debug_utils.watch_graph(
run_options,
self.sess.graph,
debug_urls=debug_url)
run_metadata = config_pb2.RunMetadata()
self.sess.run(train_op, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
dump_dir, partition_graphs=run_metadata.partition_graphs)
dump.set_python_graph(self.sess.graph)
y_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, y, dump)
self.assertEqual(1, len(y_grad_values))
self.assertAllClose(10.0, y_grad_values[0])
w_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.w, dump)
self.assertEqual(1, len(w_grad_values))
self.assertAllClose(10.0, w_grad_values[0])
u_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.u, dump)
self.assertEqual(1, len(u_grad_values))
self.assertAllClose(30.0, u_grad_values[0])
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "
r"x-tensor v:0"):
debug_gradients.gradient_values_from_dump(grad_debugger, self.v, dump)
# Cleanup.
shutil.rmtree(dump_dir)
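def _example_identify_gradient_flow():
  """A minimal sketch (editor's addition, never invoked by the test loader)
  of the GradientsDebugger workflow the cases above exercise."""
  u = variables.Variable(2.0, name="sketch_u")
  w = math_ops.multiply(u.value(), 3.0, name="sketch_w")
  grad_debugger = debug_gradients.GradientsDebugger()
  id_w = grad_debugger.identify_gradient(w)  # wrap the tensor of interest
  y = math_ops.add(id_w, -1.0, name="sketch_y")
  with grad_debugger:  # gradients built inside this context get registered
    gradients_impl.gradients(y, [u])
  # dY/dw is now retrievable by tensor object or by tensor name:
  return grad_debugger.gradient_tensor(w)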
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
SummerLW/Perf-Insight-Report | third_party/gsutil/third_party/pyasn1/test/type/test_constraint.py | 53 | 8746 | from pyasn1.type import constraint, error
from pyasn1.error import PyAsn1Error
from sys import version_info
if version_info[0:2] < (2, 7) or \
version_info[0:2] in ( (3, 0), (3, 1) ):
try:
import unittest2 as unittest
except ImportError:
import unittest
else:
import unittest
class SingleValueConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.SingleValueConstraint(1,2)
self.c2 = constraint.SingleValueConstraint(3,4)
    def testCmp(self): assert self.c1 == self.c1, 'comparison fails'
def testHash(self): assert hash(self.c1) != hash(self.c2), 'hash() fails'
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ContainedSubtypeConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ContainedSubtypeConstraint(
constraint.SingleValueConstraint(12)
)
def testGoodVal(self):
try:
self.c1(12)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueRangeConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ValueRangeConstraint(1,4)
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueSizeConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ValueSizeConstraint(1,2)
def testGoodVal(self):
try:
self.c1('a')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('abc')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class PermittedAlphabetConstraintTestCase(SingleValueConstraintTestCase):
def setUp(self):
self.c1 = constraint.PermittedAlphabetConstraint('A', 'B', 'C')
self.c2 = constraint.PermittedAlphabetConstraint('DEF')
def testGoodVal(self):
try:
self.c1('A')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('E')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsIntersectionTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4)
)
def testCmp1(self):
assert constraint.SingleValueConstraint(4) in self.c1, '__cmp__() fails'
def testCmp2(self):
assert constraint.SingleValueConstraint(5) not in self.c1, \
'__cmp__() fails'
def testCmp3(self):
c = constraint.ConstraintsUnion(constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4)
))
assert self.c1 in c, '__cmp__() fails'
def testCmp4(self):
c = constraint.ConstraintsUnion(
constraint.ConstraintsIntersection(constraint.SingleValueConstraint(5))
)
assert self.c1 not in c, '__cmp__() fails'
def testGoodVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class InnerTypeConstraintTestCase(unittest.TestCase):
def testConst1(self):
c = constraint.InnerTypeConstraint(
constraint.SingleValueConstraint(4)
)
try:
c(4, 32)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
try:
c(5, 32)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testConst2(self):
c = constraint.InnerTypeConstraint(
(0, constraint.SingleValueConstraint(4), 'PRESENT'),
(1, constraint.SingleValueConstraint(4), 'ABSENT')
)
try:
c(4, 0)
except error.ValueConstraintError:
            assert 0, 'constraint check fails'
try:
c(4, 1)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
try:
c(3, 0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints compositions
class ConstraintsIntersectionRangeTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 9),
constraint.ValueRangeConstraint(2, 5)
)
def testGoodVal(self):
try:
self.c1(3)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsUnionTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsUnion(
constraint.SingleValueConstraint(5),
constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
try:
self.c1(2)
self.c1(5)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsExclusionTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsExclusion(
constraint.ValueRangeConstraint(2, 4)
)
def testGoodVal(self):
try:
self.c1(6)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(2)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints derivations
class DirectDerivationTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.SingleValueConstraint(5)
self.c2 = constraint.ConstraintsUnion(
self.c1, constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2) , 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1) , 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1) , 'isSubTypeOf failed'
class IndirectDerivationTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 30)
)
self.c2 = constraint.ConstraintsIntersection(
self.c1, constraint.ValueRangeConstraint(1, 20)
)
self.c2 = constraint.ConstraintsIntersection(
self.c2, constraint.ValueRangeConstraint(1, 10)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2) , 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1) , 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1) , 'isSubTypeOf failed'
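# A small illustrative sketch (editor's addition): the subtyping rule the
# derivation cases above verify -- composing extra constraints onto a base
# yields a subtype of that base.
def _subtyping_example():
    base = constraint.ConstraintsIntersection(
        constraint.ValueRangeConstraint(1, 10))
    derived = constraint.ConstraintsIntersection(
        base, constraint.SingleValueConstraint(5))
    assert base.isSuperTypeOf(derived)
    assert derived.isSubTypeOf(base)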
if __name__ == '__main__': unittest.main()
# how to apply size constraints to constructed types?
| bsd-3-clause |
SauloAislan/ironic | ironic/tests/unit/drivers/modules/test_agent_base_vendor.py | 1 | 60834 | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import types
import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.drivers import utils as driver_utils
from ironic import objects
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
CONF = cfg.CONF
INSTANCE_INFO = db_utils.get_test_agent_instance_info()
DRIVER_INFO = db_utils.get_test_agent_driver_info()
DRIVER_INTERNAL_INFO = db_utils.get_test_agent_driver_internal_info()
class AgentDeployMixinBaseTest(db_base.DbTestCase):
def setUp(self):
super(AgentDeployMixinBaseTest, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_agent")
self.deploy = agent_base_vendor.AgentDeployMixin()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
class HeartbeatMixinTest(AgentDeployMixinBaseTest):
def setUp(self):
super(HeartbeatMixinTest, self).setUp()
self.deploy = agent_base_vendor.HeartbeatMixin()
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'deploy_has_started', autospec=True)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin, 'deploy_is_done',
autospec=True)
@mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True)
def test_heartbeat_deploy_done_fails(self, log_mock, done_mock,
failed_mock, deploy_started_mock):
deploy_started_mock.return_value = True
done_mock.side_effect = Exception('LlamaException')
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
failed_mock.assert_called_once_with(
task, mock.ANY, collect_logs=True)
log_mock.assert_called_once_with(
'Asynchronous exception for node '
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy '
'is done. Exception: LlamaException')
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'deploy_has_started', autospec=True)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin, 'deploy_is_done',
autospec=True)
@mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True)
def test_heartbeat_deploy_done_raises_with_event(self, log_mock, done_mock,
failed_mock,
deploy_started_mock):
deploy_started_mock.return_value = True
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
def driver_failure(*args, **kwargs):
# simulate driver failure that both advances the FSM
# and raises an exception
task.node.provision_state = states.DEPLOYFAIL
raise Exception('LlamaException')
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
done_mock.side_effect = driver_failure
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
            # With task.node.provision_state set to DEPLOYFAIL inside
            # driver_failure, heartbeat should no longer call
            # deploy_utils.set_failed_state.
self.assertFalse(failed_mock.called)
log_mock.assert_called_once_with(
'Asynchronous exception for node '
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy '
'is done. Exception: LlamaException')
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'refresh_clean_steps', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
def test_heartbeat_resume_clean(self, mock_notify, mock_set_steps,
mock_refresh, mock_touch):
self.node.clean_step = {}
self.node.provision_state = states.CLEANWAIT
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
mock_touch.assert_called_once_with(mock.ANY)
mock_refresh.assert_called_once_with(mock.ANY, task)
mock_notify.assert_called_once_with(task)
mock_set_steps.assert_called_once_with(task)
@mock.patch.object(manager_utils, 'cleaning_error_handler')
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'refresh_clean_steps', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
def test_heartbeat_resume_clean_fails(self, mock_notify, mock_set_steps,
mock_refresh, mock_touch,
mock_handler):
mocks = [mock_refresh, mock_set_steps, mock_notify]
self.node.clean_step = {}
self.node.provision_state = states.CLEANWAIT
self.node.save()
for i in range(len(mocks)):
before_failed_mocks = mocks[:i]
failed_mock = mocks[i]
after_failed_mocks = mocks[i + 1:]
failed_mock.side_effect = Exception()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
mock_touch.assert_called_once_with(mock.ANY)
mock_handler.assert_called_once_with(task, mock.ANY)
for called in before_failed_mocks + [failed_mock]:
self.assertTrue(called.called)
for not_called in after_failed_mocks:
self.assertFalse(not_called.called)
# Reset mocks for the next interaction
for m in mocks + [mock_touch, mock_handler]:
m.reset_mock()
failed_mock.side_effect = None
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'continue_cleaning', autospec=True)
def test_heartbeat_continue_cleaning(self, mock_continue, mock_touch):
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'foo',
'reboot_requested': False
}
self.node.provision_state = states.CLEANWAIT
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
mock_touch.assert_called_once_with(mock.ANY)
mock_continue.assert_called_once_with(mock.ANY, task)
@mock.patch.object(manager_utils, 'cleaning_error_handler')
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'continue_cleaning', autospec=True)
def test_heartbeat_continue_cleaning_fails(self, mock_continue,
mock_handler):
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'foo',
'reboot_requested': False
}
mock_continue.side_effect = Exception()
self.node.provision_state = states.CLEANWAIT
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
mock_continue.assert_called_once_with(mock.ANY, task)
mock_handler.assert_called_once_with(task, mock.ANY)
@mock.patch.object(agent_base_vendor.HeartbeatMixin, 'continue_deploy',
autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'reboot_to_instance', autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
def test_heartbeat_no_agent_last_heartbeat(self, ncrc_mock, rti_mock,
cd_mock):
"""node.driver_internal_info doesn't have 'agent_last_heartbeat'."""
node = self.node
node.maintenance = True
node.provision_state = states.AVAILABLE
driver_internal_info = {'agent_last_heartbeat': 'time'}
node.driver_internal_info = driver_internal_info
node.save()
with task_manager.acquire(
self.context, node['uuid'], shared=False) as task:
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
self.assertEqual(0, ncrc_mock.call_count)
self.assertEqual(0, rti_mock.call_count)
self.assertEqual(0, cd_mock.call_count)
node.refresh()
self.assertNotIn('agent_last_heartbeat', node.driver_internal_info)
@mock.patch.object(agent_base_vendor.HeartbeatMixin, 'continue_deploy',
autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'reboot_to_instance', autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
def test_heartbeat_noops_maintenance_mode(self, ncrc_mock, rti_mock,
cd_mock):
"""Ensures that heartbeat() no-ops for a maintenance node."""
self.node.maintenance = True
for state in (states.AVAILABLE, states.DEPLOYWAIT, states.DEPLOYING,
states.CLEANING):
self.node.provision_state = state
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
self.assertEqual(0, ncrc_mock.call_count)
self.assertEqual(0, rti_mock.call_count)
self.assertEqual(0, cd_mock.call_count)
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.HeartbeatMixin,
'deploy_has_started', autospec=True)
def test_heartbeat_touch_provisioning(self, mock_deploy_started,
mock_touch):
mock_deploy_started.return_value = True
self.node.provision_state = states.DEPLOYWAIT
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.deploy.heartbeat(task, 'http://127.0.0.1:8080')
mock_touch.assert_called_once_with(mock.ANY)
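# A condensed sketch (editor's addition) of the routing the heartbeat tests
# above pin down -- HeartbeatMixin.heartbeat() dispatches on node state:
#
#   maintenance mode                   -> no-op
#   CLEANWAIT, clean_step empty        -> refresh_clean_steps +
#                                         set_node_cleaning_steps + resume
#   CLEANWAIT, clean_step in progress  -> continue_cleaning
#   DEPLOYWAIT, deploy started         -> deploy_is_done -> reboot_to_instance
#   DEPLOYWAIT, deploy not started     -> continue_deploy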
class AgentDeployMixinTest(AgentDeployMixinBaseTest):
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
def test_reboot_and_finish_deploy(
self, power_off_mock, get_power_state_mock,
node_power_action_mock, mock_collect):
cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent')
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.side_effect = [states.POWER_ON,
states.POWER_OFF]
self.deploy.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(2, get_power_state_mock.call_count)
node_power_action_mock.assert_called_once_with(
task, states.POWER_ON)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
mock_collect.assert_called_once_with(task.node)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'remove_provisioning_network', spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'configure_tenant_networks', spec_set=True, autospec=True)
def test_reboot_and_finish_deploy_soft_poweroff_doesnt_complete(
self, configure_tenant_net_mock, remove_provisioning_net_mock,
power_off_mock, get_power_state_mock,
node_power_action_mock, mock_collect):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.return_value = states.POWER_ON
self.deploy.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
node_power_action_mock.assert_has_calls([
mock.call(task, states.POWER_OFF),
mock.call(task, states.POWER_ON)])
remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
task)
configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'remove_provisioning_network', spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'configure_tenant_networks', spec_set=True, autospec=True)
def test_reboot_and_finish_deploy_soft_poweroff_fails(
self, configure_tenant_net_mock, remove_provisioning_net_mock,
power_off_mock, node_power_action_mock, mock_collect):
power_off_mock.side_effect = RuntimeError("boom")
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.deploy.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
node_power_action_mock.assert_has_calls([
mock.call(task, states.POWER_OFF),
mock.call(task, states.POWER_ON)])
remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
task)
configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'remove_provisioning_network', spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
'configure_tenant_networks', spec_set=True, autospec=True)
def test_reboot_and_finish_deploy_get_power_state_fails(
self, configure_tenant_net_mock, remove_provisioning_net_mock,
power_off_mock, get_power_state_mock, node_power_action_mock,
mock_collect):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.side_effect = RuntimeError("boom")
self.deploy.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
node_power_action_mock.assert_has_calls([
mock.call(task, states.POWER_OFF),
mock.call(task, states.POWER_ON)])
remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
task)
configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch('ironic.drivers.modules.network.neutron.NeutronNetwork.'
'remove_provisioning_network', spec_set=True, autospec=True)
@mock.patch('ironic.drivers.modules.network.neutron.NeutronNetwork.'
'configure_tenant_networks', spec_set=True, autospec=True)
def test_reboot_and_finish_deploy_configure_tenant_network_exception(
self, configure_tenant_net_mock, remove_provisioning_net_mock,
power_off_mock, get_power_state_mock, node_power_action_mock,
mock_collect):
self.node.network_interface = 'neutron'
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
configure_tenant_net_mock.side_effect = exception.NetworkError(
"boom")
self.assertRaises(exception.InstanceDeployFailure,
self.deploy.reboot_and_finish_deploy, task)
self.assertEqual(7, get_power_state_mock.call_count)
remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
task)
configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
mock_collect.assert_called_once_with(task.node)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
def test_reboot_and_finish_deploy_power_action_fails(
self, power_off_mock, get_power_state_mock,
node_power_action_mock, mock_collect):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.return_value = states.POWER_ON
node_power_action_mock.side_effect = RuntimeError("boom")
self.assertRaises(exception.InstanceDeployFailure,
self.deploy.reboot_and_finish_deploy,
task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
node_power_action_mock.assert_has_calls([
mock.call(task, states.POWER_OFF)])
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
mock_collect.assert_called_once_with(task.node)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'sync',
spec=types.FunctionType)
def test_reboot_and_finish_deploy_power_action_oob_power_off(
self, sync_mock, node_power_action_mock, mock_collect):
# Enable force power off
driver_info = self.node.driver_info
driver_info['deploy_forces_oob_reboot'] = True
self.node.driver_info = driver_info
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.deploy.reboot_and_finish_deploy(task)
sync_mock.assert_called_once_with(task.node)
node_power_action_mock.assert_has_calls([
mock.call(task, states.POWER_OFF),
mock.call(task, states.POWER_ON),
])
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(agent_base_vendor.LOG, 'warning', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'sync',
spec=types.FunctionType)
def test_reboot_and_finish_deploy_power_action_oob_power_off_failed(
self, sync_mock, node_power_action_mock, log_mock, mock_collect):
# Enable force power off
driver_info = self.node.driver_info
driver_info['deploy_forces_oob_reboot'] = True
self.node.driver_info = driver_info
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
sync_mock.return_value = {'faultstring': 'Unknown command: blah'}
self.deploy.reboot_and_finish_deploy(task)
sync_mock.assert_called_once_with(task.node)
node_power_action_mock.assert_has_calls([
mock.call(task, states.POWER_OFF),
mock.call(task, states.POWER_ON),
])
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
log_error = ('The version of the IPA ramdisk used in the '
'deployment do not support the command "sync"')
log_mock.assert_called_once_with(
'Failed to flush the file system prior to hard rebooting the '
'node %(node)s. Error: %(error)s',
{'node': task.node.uuid, 'error': log_error})
self.assertFalse(mock_collect.called)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
def test_configure_local_boot(self, try_set_boot_device_mock,
install_bootloader_mock):
install_bootloader_mock.return_value = {
'command_status': 'SUCCESS', 'command_error': None}
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.deploy.configure_local_boot(task, root_uuid='some-root-uuid')
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
def test_configure_local_boot_uefi(self, try_set_boot_device_mock,
install_bootloader_mock):
install_bootloader_mock.return_value = {
'command_status': 'SUCCESS', 'command_error': None}
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.deploy.configure_local_boot(
task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_whole_disk_image(
self, install_bootloader_mock, try_set_boot_device_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.configure_local_boot(task)
self.assertFalse(install_bootloader_mock.called)
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_no_root_uuid(
self, install_bootloader_mock, try_set_boot_device_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.deploy.configure_local_boot(task)
self.assertFalse(install_bootloader_mock.called)
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
@mock.patch.object(agent_client.AgentClient, 'collect_system_logs',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_boot_loader_install_fail(
self, install_bootloader_mock, collect_logs_mock):
install_bootloader_mock.return_value = {
'command_status': 'FAILED', 'command_error': 'boom'}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InstanceDeployFailure,
self.deploy.configure_local_boot,
task, root_uuid='some-root-uuid')
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
collect_logs_mock.assert_called_once_with(mock.ANY, task.node)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(agent_client.AgentClient, 'collect_system_logs',
autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_set_boot_device_fail(
self, install_bootloader_mock, try_set_boot_device_mock,
collect_logs_mock):
install_bootloader_mock.return_value = {
'command_status': 'SUCCESS', 'command_error': None}
try_set_boot_device_mock.side_effect = RuntimeError('error')
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InstanceDeployFailure,
self.deploy.configure_local_boot,
task, root_uuid='some-root-uuid')
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
collect_logs_mock.assert_called_once_with(mock.ANY, task.node)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
@mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
@mock.patch.object(agent_base_vendor.AgentDeployMixin,
'configure_local_boot', autospec=True)
def test_prepare_instance_to_boot_netboot(self, configure_mock,
boot_option_mock,
prepare_instance_mock,
failed_state_mock):
boot_option_mock.return_value = 'netboot'
prepare_instance_mock.return_value = None
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
root_uuid = 'root_uuid'
efi_system_part_uuid = 'efi_sys_uuid'
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.prepare_instance_to_boot(task, root_uuid,
efi_system_part_uuid)
self.assertFalse(configure_mock.called)
boot_option_mock.assert_called_once_with(task.node)
prepare_instance_mock.assert_called_once_with(task.driver.boot,
task)
self.assertFalse(failed_state_mock.called)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
@mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
@mock.patch.object(agent_base_vendor.AgentDeployMixin,
'configure_local_boot', autospec=True)
def test_prepare_instance_to_boot_localboot(self, configure_mock,
boot_option_mock,
prepare_instance_mock,
failed_state_mock):
boot_option_mock.return_value = 'local'
prepare_instance_mock.return_value = None
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
root_uuid = 'root_uuid'
efi_system_part_uuid = 'efi_sys_uuid'
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.prepare_instance_to_boot(task, root_uuid,
efi_system_part_uuid)
configure_mock.assert_called_once_with(
self.deploy, task,
root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid)
boot_option_mock.assert_called_once_with(task.node)
prepare_instance_mock.assert_called_once_with(task.driver.boot,
task)
self.assertFalse(failed_state_mock.called)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
@mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
@mock.patch.object(agent_base_vendor.AgentDeployMixin,
'configure_local_boot', autospec=True)
def test_prepare_instance_to_boot_configure_fails(self, configure_mock,
boot_option_mock,
prepare_mock,
failed_state_mock):
boot_option_mock.return_value = 'local'
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
root_uuid = 'root_uuid'
efi_system_part_uuid = 'efi_sys_uuid'
reason = 'reason'
configure_mock.side_effect = (
exception.InstanceDeployFailure(reason=reason))
prepare_mock.side_effect = (
exception.InstanceDeployFailure(reason=reason))
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.assertRaises(exception.InstanceDeployFailure,
self.deploy.prepare_instance_to_boot, task,
root_uuid, efi_system_part_uuid)
configure_mock.assert_called_once_with(
self.deploy, task,
root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid)
boot_option_mock.assert_called_once_with(task.node)
self.assertFalse(prepare_mock.called)
self.assertFalse(failed_state_mock.called)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning(self, status_mock, notify_mock):
# Test a successful execute clean step on the agent
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'erase_devices',
'reboot_requested': False
}
self.node.save()
status_mock.return_value = [{
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {
'clean_step': self.node.clean_step
}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
notify_mock.assert_called_once_with(task)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
def test__cleaning_reboot(self, mock_reboot, mock_prepare, mock_build_opt):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
agent_base_vendor._cleaning_reboot(task)
self.assertTrue(mock_build_opt.called)
self.assertTrue(mock_prepare.called)
mock_reboot.assert_called_once_with(task, states.REBOOT)
self.assertTrue(task.node.driver_internal_info['cleaning_reboot'])
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
def test__cleaning_reboot_fail(self, mock_reboot, mock_handler,
mock_prepare, mock_build_opt):
mock_reboot.side_effect = RuntimeError("broken")
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
agent_base_vendor._cleaning_reboot(task)
mock_reboot.assert_called_once_with(task, states.REBOOT)
mock_handler.assert_called_once_with(task, mock.ANY)
self.assertNotIn('cleaning_reboot',
task.node.driver_internal_info)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_reboot(
self, status_mock, reboot_mock, mock_prepare, mock_build_opt):
# Test a successful execute clean step on the agent, with reboot
self.node.clean_step = {
'priority': 42,
'interface': 'deploy',
'step': 'reboot_me_afterwards',
'reboot_requested': True
}
self.node.save()
status_mock.return_value = [{
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {
'clean_step': self.node.clean_step
}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
reboot_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_after_reboot(self, status_mock, notify_mock):
# Test a successful execute clean step on the agent, with reboot
self.node.clean_step = {
'priority': 42,
'interface': 'deploy',
'step': 'reboot_me_afterwards',
'reboot_requested': True
}
driver_internal_info = self.node.driver_internal_info
driver_internal_info['cleaning_reboot'] = True
self.node.driver_internal_info = driver_internal_info
self.node.save()
# Represents a freshly booted agent with no commands
status_mock.return_value = []
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
notify_mock.assert_called_once_with(task)
self.assertNotIn('cleaning_reboot',
task.node.driver_internal_info)
@mock.patch.object(agent_base_vendor,
'_get_post_clean_step_hook', autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_with_hook(
self, status_mock, notify_mock, get_hook_mock):
self.node.clean_step = {
'priority': 10,
'interface': 'raid',
'step': 'create_configuration',
}
self.node.save()
command_status = {
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {'clean_step': self.node.clean_step}}
status_mock.return_value = [command_status]
hook_mock = mock.MagicMock(spec=types.FunctionType, __name__='foo')
get_hook_mock.return_value = hook_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.deploy.continue_cleaning(task)
get_hook_mock.assert_called_once_with(task.node)
hook_mock.assert_called_once_with(task, command_status)
notify_mock.assert_called_once_with(task)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_base_vendor,
'_get_post_clean_step_hook', autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_with_hook_fails(
self, status_mock, error_handler_mock, get_hook_mock,
notify_mock):
self.node.clean_step = {
'priority': 10,
'interface': 'raid',
'step': 'create_configuration',
}
self.node.save()
command_status = {
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {'clean_step': self.node.clean_step}}
status_mock.return_value = [command_status]
hook_mock = mock.MagicMock(spec=types.FunctionType, __name__='foo')
hook_mock.side_effect = RuntimeError('error')
get_hook_mock.return_value = hook_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.deploy.continue_cleaning(task)
get_hook_mock.assert_called_once_with(task.node)
hook_mock.assert_called_once_with(task, command_status)
error_handler_mock.assert_called_once_with(task, mock.ANY)
self.assertFalse(notify_mock.called)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_old_command(self, status_mock, notify_mock):
        # Test when a second execute_clean_step command is sent to the agent,
        # but the new step hasn't started yet.
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'erase_devices',
'reboot_requested': False
}
self.node.save()
status_mock.return_value = [{
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {
'priority': 20,
'interface': 'deploy',
'step': 'update_firmware',
'reboot_requested': False
}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
self.assertFalse(notify_mock.called)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_running(self, status_mock, notify_mock):
# Test that no action is taken while a clean step is executing
status_mock.return_value = [{
'command_status': 'RUNNING',
'command_name': 'execute_clean_step',
'command_result': None
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
self.assertFalse(notify_mock.called)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_fail(self, status_mock, error_mock):
        # Test that a failure puts the node in CLEANFAIL
status_mock.return_value = [{
'command_status': 'FAILED',
'command_name': 'execute_clean_step',
'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_base_vendor.AgentDeployMixin,
'refresh_clean_steps', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def _test_continue_cleaning_clean_version_mismatch(
self, status_mock, refresh_steps_mock, notify_mock, steps_mock,
manual=False):
status_mock.return_value = [{
'command_status': 'CLEAN_VERSION_MISMATCH',
'command_name': 'execute_clean_step',
}]
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
self.node.provision_state = states.CLEANWAIT
self.node.target_provision_state = tgt_prov_state
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.deploy.continue_cleaning(task)
notify_mock.assert_called_once_with(task)
refresh_steps_mock.assert_called_once_with(mock.ANY, task)
if manual:
self.assertFalse(
task.node.driver_internal_info['skip_current_clean_step'])
self.assertFalse(steps_mock.called)
else:
steps_mock.assert_called_once_with(task)
self.assertNotIn('skip_current_clean_step',
task.node.driver_internal_info)
def test_continue_cleaning_automated_clean_version_mismatch(self):
self._test_continue_cleaning_clean_version_mismatch()
def test_continue_cleaning_manual_clean_version_mismatch(self):
self._test_continue_cleaning_clean_version_mismatch(manual=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(agent_base_vendor.AgentDeployMixin,
'refresh_clean_steps', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_clean_version_mismatch_fail(
self, status_mock, refresh_steps_mock, notify_mock, steps_mock,
error_mock, manual=False):
status_mock.return_value = [{
'command_status': 'CLEAN_VERSION_MISMATCH',
'command_name': 'execute_clean_step',
'command_result': {'hardware_manager_version': {'Generic': '1'}}
}]
refresh_steps_mock.side_effect = exception.NodeCleaningFailure("boo")
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
self.node.provision_state = states.CLEANWAIT
self.node.target_provision_state = tgt_prov_state
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.deploy.continue_cleaning(task)
status_mock.assert_called_once_with(mock.ANY, task.node)
refresh_steps_mock.assert_called_once_with(mock.ANY, task)
error_mock.assert_called_once_with(task, mock.ANY)
self.assertFalse(notify_mock.called)
self.assertFalse(steps_mock.called)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_unknown(self, status_mock, error_mock):
# Test that unknown commands are treated as failures
status_mock.return_value = [{
'command_status': 'UNKNOWN',
'command_name': 'execute_clean_step',
'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.deploy.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
def _test_clean_step_hook(self, hook_dict_mock):
"""Helper method for unit tests related to clean step hooks.
This is a helper method for other unit tests related to
clean step hooks. It acceps a mock 'hook_dict_mock' which is
a MagicMock and sets it up to function as a mock dictionary.
After that, it defines a dummy hook_method for two clean steps
raid.create_configuration and raid.delete_configuration.
:param hook_dict_mock: An instance of mock.MagicMock() which
is the mocked value of agent_base_vendor.POST_CLEAN_STEP_HOOKS
:returns: a tuple, where the first item is the hook method created
by this method and second item is the backend dictionary for
the mocked hook_dict_mock
"""
hook_dict = {}
def get(key, default):
return hook_dict.get(key, default)
def getitem(self, key):
return hook_dict[key]
def setdefault(key, default):
if key not in hook_dict:
hook_dict[key] = default
return hook_dict[key]
hook_dict_mock.get = get
hook_dict_mock.__getitem__ = getitem
hook_dict_mock.setdefault = setdefault
some_function_mock = mock.MagicMock()
@agent_base_vendor.post_clean_step_hook(
interface='raid', step='delete_configuration')
@agent_base_vendor.post_clean_step_hook(
interface='raid', step='create_configuration')
def hook_method():
some_function_mock('some-arguments')
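        # After decoration, hook_dict has this shape (illustrative):
        #   {'raid': {'create_configuration': hook_method,
        #             'delete_configuration': hook_method}}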
return hook_method, hook_dict
@mock.patch.object(agent_base_vendor, 'POST_CLEAN_STEP_HOOKS',
spec_set=dict)
def test_post_clean_step_hook(self, hook_dict_mock):
# This unit test makes sure that hook methods are registered
# properly and entries are made in
# agent_base_vendor.POST_CLEAN_STEP_HOOKS
hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock)
self.assertEqual(hook_method,
hook_dict['raid']['create_configuration'])
self.assertEqual(hook_method,
hook_dict['raid']['delete_configuration'])
@mock.patch.object(agent_base_vendor, 'POST_CLEAN_STEP_HOOKS',
spec_set=dict)
def test__get_post_clean_step_hook(self, hook_dict_mock):
        # Check that agent_base_vendor._get_post_clean_step_hook returns the
        # hook registered for the node's current clean step.
hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock)
self.node.clean_step = {'step': 'create_configuration',
'interface': 'raid'}
self.node.save()
hook_returned = agent_base_vendor._get_post_clean_step_hook(self.node)
self.assertEqual(hook_method, hook_returned)
@mock.patch.object(agent_base_vendor, 'POST_CLEAN_STEP_HOOKS',
spec_set=dict)
def test__get_post_clean_step_hook_no_hook_registered(
self, hook_dict_mock):
# Make sure agent_base_vendor._get_post_clean_step_hook returns
# None when no clean step hook is registered for the clean step.
hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock)
self.node.clean_step = {'step': 'some-clean-step',
'interface': 'some-other-interface'}
self.node.save()
hook_returned = agent_base_vendor._get_post_clean_step_hook(self.node)
self.assertIsNone(hook_returned)
class TestRefreshCleanSteps(AgentDeployMixinBaseTest):
def setUp(self):
super(TestRefreshCleanSteps, self).setUp()
self.node.driver_internal_info['agent_url'] = 'http://127.0.0.1:9999'
self.ports = [object_utils.create_test_port(self.context,
node_id=self.node.id)]
self.clean_steps = {
'hardware_manager_version': '1',
'clean_steps': {
'GenericHardwareManager': [
{'interface': 'deploy',
'step': 'erase_devices',
'priority': 20},
],
'SpecificHardwareManager': [
{'interface': 'deploy',
'step': 'update_firmware',
'priority': 30},
{'interface': 'raid',
'step': 'create_configuration',
'priority': 10},
]
}
}
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
def test_refresh_clean_steps(self, client_mock):
client_mock.return_value = {
'command_result': self.clean_steps}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.deploy.refresh_clean_steps(task)
client_mock.assert_called_once_with(mock.ANY, task.node,
task.ports)
self.assertEqual('1', task.node.driver_internal_info[
'hardware_manager_version'])
self.assertIn('agent_cached_clean_steps_refreshed',
task.node.driver_internal_info)
steps = task.node.driver_internal_info['agent_cached_clean_steps']
# Since steps are returned in dicts, they have non-deterministic
# ordering
self.assertEqual(2, len(steps))
self.assertIn(self.clean_steps['clean_steps'][
'GenericHardwareManager'][0], steps['deploy'])
self.assertIn(self.clean_steps['clean_steps'][
'SpecificHardwareManager'][0], steps['deploy'])
self.assertEqual([self.clean_steps['clean_steps'][
'SpecificHardwareManager'][1]], steps['raid'])
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
def test_refresh_clean_steps_missing_steps(self, client_mock):
del self.clean_steps['clean_steps']
client_mock.return_value = {
'command_result': self.clean_steps}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaisesRegex(exception.NodeCleaningFailure,
'invalid result',
self.deploy.refresh_clean_steps,
task)
client_mock.assert_called_once_with(mock.ANY, task.node,
task.ports)
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
def test_refresh_clean_steps_missing_interface(self, client_mock):
step = self.clean_steps['clean_steps']['SpecificHardwareManager'][1]
del step['interface']
client_mock.return_value = {
'command_result': self.clean_steps}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaisesRegex(exception.NodeCleaningFailure,
'invalid clean step',
self.deploy.refresh_clean_steps,
task)
client_mock.assert_called_once_with(mock.ANY, task.node,
task.ports)
| apache-2.0 |
shanemcd/ansible | lib/ansible/modules/network/a10/a10_server_axapi3.py | 27 | 9017 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Mischa Peters <mpeters@a10networks.com>
# (c) 2016, Eric Chou <ericc@a10networks.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_server_axapi3
version_added: 2.3
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
description:
- Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv3.
author: "Eric Chou (@ericchou) based on previous work by Mischa Peters (@mischapeters)"
extends_documentation_fragment: a10
options:
server_name:
description:
- The SLB (Server Load Balancer) server name.
required: true
aliases: ['server']
server_ip:
description:
- The SLB (Server Load Balancer) server IPv4 address.
required: true
aliases: ['ip', 'address']
server_status:
description:
- The SLB (Server Load Balancer) virtual server status.
required: false
default: enable
aliases: ['action']
choices: ['enable', 'disable']
server_ports:
description:
      - A list of ports to create for the server. Each list item should be a dictionary which specifies the
        C(port-number:) and C(protocol:), and may optionally specify C(action:).
required: false
default: null
operation:
description:
- Create, Update or Remove SLB server. For create and update operation, we use the IP address and server
name specified in the POST message. For delete operation, we use the server name in the request URI.
required: false
default: create
    choices: ['create', 'update', 'delete']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
RETURN = '''
#
'''
EXAMPLES = '''
# Create a new server
- a10_server_axapi3:
host: a10.mydomain.com
username: myadmin
password: mypassword
server: test
server_ip: 1.1.1.100
validate_certs: false
server_status: enable
write_config: yes
operation: create
server_ports:
- port-number: 8080
protocol: tcp
action: enable
- port-number: 8443
protocol: TCP
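# Remove an existing server (sketch; reuses the credentials from above)
- a10_server_axapi3:
    host: a10.mydomain.com
    username: myadmin
    password: mypassword
    server: test
    server_ip: 1.1.1.100
    operation: delete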
'''
import json
from ansible.module_utils.a10 import axapi_call_v3, a10_argument_spec, axapi_authenticate_v3, axapi_failure
from ansible.module_utils.a10 import AXAPI_PORT_PROTOCOLS
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
VALID_PORT_FIELDS = ['port-number', 'protocol', 'action']
def validate_ports(module, ports):
for item in ports:
for key in item:
if key not in VALID_PORT_FIELDS:
module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
# validate the port number is present and an integer
if 'port-number' in item:
try:
item['port-number'] = int(item['port-number'])
            except (TypeError, ValueError):
module.fail_json(msg="port-number entries in the port definitions must be integers")
else:
module.fail_json(msg="port definitions must define the port-number field")
# validate the port protocol is present, no need to convert to the internal API integer value in v3
if 'protocol' in item:
protocol = item['protocol']
if not protocol:
module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
else:
item['protocol'] = protocol
else:
module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))
# 'status' is 'action' in AXAPIv3
# no need to convert the status, a.k.a action, to the internal API integer value in v3
        # action is either 'enable' or 'disable'
if 'action' in item:
action = item['action']
if action not in ['enable', 'disable']:
module.fail_json(msg="server action must be enable or disable")
else:
item['action'] = 'enable'
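# Example of a `server_ports` value that passes validate_ports() (illustrative):
#   [{'port-number': 8080, 'protocol': 'tcp', 'action': 'enable'},
#    {'port-number': 8443, 'protocol': 'tcp'}]  # 'action' defaults to 'enable'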
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
operation=dict(type='str', default='create', choices=['create', 'update', 'delete']),
server_name=dict(type='str', aliases=['server'], required=True),
server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
server_status=dict(type='str', default='enable', aliases=['action'], choices=['enable', 'disable']),
server_ports=dict(type='list', aliases=['port'], default=[]),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
operation = module.params['operation']
write_config = module.params['write_config']
slb_server = module.params['server_name']
slb_server_ip = module.params['server_ip']
slb_server_status = module.params['server_status']
slb_server_ports = module.params['server_ports']
axapi_base_url = 'https://{}/axapi/v3/'.format(host)
axapi_auth_url = axapi_base_url + 'auth/'
signature = axapi_authenticate_v3(module, axapi_auth_url, username, password)
# validate the ports data structure
validate_ports(module, slb_server_ports)
json_post = {
"server-list": [
{
"name": slb_server,
"host": slb_server_ip
}
]
}
# add optional module parameters
if slb_server_ports:
json_post['server-list'][0]['port-list'] = slb_server_ports
if slb_server_status:
json_post['server-list'][0]['action'] = slb_server_status
slb_server_data = axapi_call_v3(module, axapi_base_url+'slb/server/', method='GET', body='', signature=signature)
# for empty slb server list
if axapi_failure(slb_server_data):
slb_server_exists = False
else:
slb_server_list = [server['name'] for server in slb_server_data['server-list']]
if slb_server in slb_server_list:
slb_server_exists = True
else:
slb_server_exists = False
changed = False
if operation == 'create':
if slb_server_exists is False:
result = axapi_call_v3(module, axapi_base_url+'slb/server/', method='POST', body=json.dumps(json_post), signature=signature)
if axapi_failure(result):
module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
changed = True
else:
module.fail_json(msg="server already exists, use state='update' instead")
changed = False
# if we changed things, get the full info regarding result
if changed:
result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='GET', body='', signature=signature)
else:
result = slb_server_data
elif operation == 'delete':
if slb_server_exists:
result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='DELETE', body='', signature=signature)
if axapi_failure(result):
module.fail_json(msg="failed to delete server: %s" % result['response']['err']['msg'])
changed = True
else:
result = dict(msg="the server was not present")
elif operation == 'update':
if slb_server_exists:
result = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='PUT', body=json.dumps(json_post), signature=signature)
if axapi_failure(result):
module.fail_json(msg="failed to update server: %s" % result['response']['err']['msg'])
changed = True
else:
result = dict(msg="the server was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call_v3(module, axapi_base_url+'write/memory/', method='POST', body='', signature=signature)
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out gracefully and exit
axapi_call_v3(module, axapi_base_url + 'logoff/', method='POST', body='', signature=signature)
module.exit_json(changed=changed, content=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ujenmr/ansible | lib/ansible/modules/windows/win_rds_cap.py | 10 | 4121 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Subileau (@ksubileau)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_rds_cap
short_description: Manage Connection Authorization Policies (CAP) on a Remote Desktop Gateway server
description:
- Creates, removes and configures a Remote Desktop connection authorization policy (RD CAP).
- A RD CAP allows you to specify the users who can connect to a Remote Desktop Gateway server.
version_added: "2.8"
author:
- Kevin Subileau (@ksubileau)
options:
name:
description:
- Name of the connection authorization policy.
required: yes
state:
description:
- The state of connection authorization policy.
- If C(absent) will ensure the policy is removed.
- If C(present) will ensure the policy is configured and exists.
- If C(enabled) will ensure the policy is configured, exists and enabled.
- If C(disabled) will ensure the policy is configured, exists, but disabled.
choices: [ absent, present, enabled, disabled ]
default: present
auth_method:
description:
- Specifies how the RD Gateway server authenticates users.
- When a new CAP is created, the default value is C(password).
choices: [ password, smartcard, both, none ]
order:
description:
- Evaluation order of the policy.
- The CAP in which I(order) is set to a value of '1' is evaluated first.
- By default, a newly created CAP will take the first position.
      - If the given value exceeds the total number of existing policies,
        the policy will take the last position, but the evaluation order
        will be capped to this number.
type: int
session_timeout:
description:
- The maximum time, in minutes, that a session can be idle.
- A value of zero disables session timeout.
type: int
session_timeout_action:
description:
- The action the server takes when a session times out.
- 'C(disconnect): disconnect the session.'
- 'C(reauth): silently reauthenticate and reauthorize the session.'
choices: [ disconnect, reauth ]
default: disconnect
idle_timeout:
description:
- Specifies the time interval, in minutes, after which an idle session is disconnected.
- A value of zero disables idle timeout.
type: int
allow_only_sdrts_servers:
description:
- Specifies whether connections are allowed only to Remote Desktop Session Host servers that
enforce Remote Desktop Gateway redirection policy.
type: bool
user_groups:
description:
      - A list of user groups that are allowed to connect to the Remote Desktop Gateway server.
- Required when a new CAP is created.
type: list
computer_groups:
description:
      - A list of computer groups that are allowed to connect to the Remote Desktop Gateway server.
type: list
redirect_clipboard:
description:
- Allow clipboard redirection.
type: bool
redirect_drives:
description:
- Allow disk drive redirection.
type: bool
redirect_printers:
description:
- Allow printers redirection.
type: bool
redirect_serial:
description:
- Allow serial port redirection.
type: bool
redirect_pnp:
description:
- Allow Plug and Play devices redirection.
type: bool
requirements:
- Windows Server 2008R2 (6.1) or higher.
- The Windows Feature "RDS-Gateway" must be enabled.
'''
EXAMPLES = r'''
- name: Create a new RDS CAP with a 30 minutes timeout and clipboard redirection enabled
win_rds_cap:
name: My CAP
user_groups:
- BUILTIN\users
session_timeout: 30
session_timeout_action: disconnect
allow_only_sdrts_servers: true
redirect_clipboard: true
redirect_drives: false
redirect_printers: false
redirect_serial: false
redirect_pnp: false
state: enabled
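# Remove the policy (sketch)
- win_rds_cap:
    name: My CAP
    state: absent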
'''
RETURN = r'''
'''
| gpl-3.0 |
angelapper/edx-platform | openedx/core/djangoapps/catalog/management/commands/cache_programs.py | 16 | 4355 | import logging
import sys
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.management import BaseCommand
from openedx.core.djangoapps.catalog.cache import (
PROGRAM_CACHE_KEY_TPL,
SITE_PROGRAM_UUIDS_CACHE_KEY_TPL
)
from openedx.core.djangoapps.catalog.models import CatalogIntegration
from openedx.core.djangoapps.catalog.utils import create_catalog_api_client
logger = logging.getLogger(__name__)
User = get_user_model() # pylint: disable=invalid-name
class Command(BaseCommand):
"""Management command used to cache program data.
This command requests every available program from the discovery
service, writing each to its own cache entry with an indefinite expiration.
It is meant to be run on a scheduled basis and should be the only code
updating these cache entries.
"""
help = "Rebuild the LMS' cache of program data."
def handle(self, *args, **options):
failure = False
logger.info('populate-multitenant-programs switch is ON')
catalog_integration = CatalogIntegration.current()
username = catalog_integration.service_username
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
logger.error(
'Failed to create API client. Service user {username} does not exist.'.format(username=username)
)
raise
programs = {}
for site in Site.objects.all():
site_config = getattr(site, 'configuration', None)
if site_config is None or not site_config.get_value('COURSE_CATALOG_API_URL'):
logger.info('Skipping site {domain}. No configuration.'.format(domain=site.domain))
cache.set(SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=site.domain), [], None)
continue
client = create_catalog_api_client(user, site=site)
uuids, program_uuids_failed = self.get_site_program_uuids(client, site)
new_programs, program_details_failed = self.fetch_program_details(client, uuids)
if program_uuids_failed or program_details_failed:
failure = True
programs.update(new_programs)
logger.info('Caching UUIDs for {total} programs for site {site_name}.'.format(
total=len(uuids),
site_name=site.domain,
))
cache.set(SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=site.domain), uuids, None)
successful = len(programs)
logger.info('Caching details for {successful} programs.'.format(successful=successful))
cache.set_many(programs, None)
if failure:
# This will fail a Jenkins job running this command, letting site
# operators know that there was a problem.
sys.exit(1)
def get_site_program_uuids(self, client, site):
failure = False
uuids = []
try:
querystring = {
'exclude_utm': 1,
'status': ('active', 'retired'),
'uuids_only': 1,
}
logger.info('Requesting program UUIDs for {domain}.'.format(domain=site.domain))
uuids = client.programs.get(**querystring)
except: # pylint: disable=bare-except
logger.error('Failed to retrieve program UUIDs for site: {domain}.'.format(domain=site.domain))
failure = True
logger.info('Received {total} UUIDs for site {domain}'.format(
total=len(uuids),
domain=site.domain
))
return uuids, failure
def fetch_program_details(self, client, uuids):
programs = {}
failure = False
for uuid in uuids:
try:
cache_key = PROGRAM_CACHE_KEY_TPL.format(uuid=uuid)
logger.info('Requesting details for program {uuid}.'.format(uuid=uuid))
program = client.programs(uuid).get(exclude_utm=1)
programs[cache_key] = program
except: # pylint: disable=bare-except
logger.exception('Failed to retrieve details for program {uuid}.'.format(uuid=uuid))
failure = True
continue
return programs, failure
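# Typical invocation (sketch; the exact settings module depends on the deployment):
#   ./manage.py lms cache_programs --settings=production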
| agpl-3.0 |
avasenin/mzbench | lib/multipart.py | 8 | 2679 | """Encode multipart form data to upload files via POST.
http://code.activestate.com/recipes/578668-encode-multipart-form-data-for-uploading-files-via/"""
from __future__ import print_function
import mimetypes
import random
import string
_BOUNDARY_CHARS = string.digits + string.ascii_letters
def encode_multipart(fields, files, boundary=None):
r"""Encode dict of form fields and dict of files as multipart/form-data.
Return tuple of (body_string, headers_dict). Each value in files is a dict
with required keys 'filename' and 'content', and optional 'mimetype' (if
not specified, tries to guess mime type or uses 'application/octet-stream').
>>> body, headers = encode_multipart({'FIELD': 'VALUE'},
... {'FILE': {'filename': 'F.TXT', 'content': 'CONTENT'}},
... boundary='BOUNDARY')
>>> print('\n'.join(repr(l) for l in body.split('\r\n')))
'--BOUNDARY'
'Content-Disposition: form-data; name="FIELD"'
''
'VALUE'
'--BOUNDARY'
'Content-Disposition: form-data; name="FILE"; filename="F.TXT"'
'Content-Type: text/plain'
''
'CONTENT'
'--BOUNDARY--'
''
>>> print(sorted(headers.items()))
[('Content-Length', '193'), ('Content-Type', 'multipart/form-data; boundary=BOUNDARY')]
>>> len(body)
193
"""
def escape_quote(s):
return s.replace('"', '\\"')
if boundary is None:
boundary = ''.join(random.choice(_BOUNDARY_CHARS) for i in range(30))
lines = []
for name, value in fields.items():
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"'.format(escape_quote(name)),
'',
str(value),
))
for name, value in files:
filename = value['filename']
if 'mimetype' in value:
mimetype = value['mimetype']
else:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(
escape_quote(name), escape_quote(filename)),
'Content-Type: {0}'.format(mimetype),
'',
value['content'],
))
lines.extend((
'--{0}--'.format(boundary),
'',
))
body = '\r\n'.join(lines)
headers = {
'Content-Type': 'multipart/form-data; boundary={0}'.format(boundary),
'Content-Length': str(len(body)),
}
return (body, headers)
if __name__ == '__main__':
import doctest
doctest.testmod()
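    # Demo (sketch): the field and file names below are illustrative only.
    demo_body, demo_headers = encode_multipart(
        {'user': 'alice'},
        [('upload', {'filename': 'hello.txt', 'content': 'hi'})])
    print(demo_headers['Content-Type'])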
| bsd-3-clause |
auduny/home-assistant | homeassistant/components/history_graph/__init__.py | 7 | 2051 | """Support for the graphs card in the UI."""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_ENTITIES, CONF_NAME, ATTR_ENTITY_ID
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'history_graph'
CONF_HOURS_TO_SHOW = 'hours_to_show'
CONF_REFRESH = 'refresh'
ATTR_HOURS_TO_SHOW = CONF_HOURS_TO_SHOW
ATTR_REFRESH = CONF_REFRESH
GRAPH_SCHEMA = vol.Schema({
vol.Required(CONF_ENTITIES): cv.entity_ids,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_HOURS_TO_SHOW, default=24): vol.Range(min=1),
vol.Optional(CONF_REFRESH, default=0): vol.Range(min=0),
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: cv.schema_with_slug_keys(GRAPH_SCHEMA),
}, extra=vol.ALLOW_EXTRA)
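# Example configuration.yaml entry (sketch; entity ids are illustrative):
#   history_graph:
#     thermostat:
#       name: Thermostat
#       entities:
#         - sensor.outside_temperature
#       hours_to_show: 48
#       refresh: 60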
async def async_setup(hass, config):
"""Load graph configurations."""
component = EntityComponent(
_LOGGER, DOMAIN, hass)
graphs = []
for object_id, cfg in config[DOMAIN].items():
name = cfg.get(CONF_NAME, object_id)
graph = HistoryGraphEntity(name, cfg)
graphs.append(graph)
await component.async_add_entities(graphs)
return True
class HistoryGraphEntity(Entity):
"""Representation of a graph entity."""
def __init__(self, name, cfg):
"""Initialize the graph."""
self._name = name
self._hours = cfg[CONF_HOURS_TO_SHOW]
self._refresh = cfg[CONF_REFRESH]
self._entities = cfg[CONF_ENTITIES]
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def state_attributes(self):
"""Return the state attributes."""
attrs = {
ATTR_HOURS_TO_SHOW: self._hours,
ATTR_REFRESH: self._refresh,
ATTR_ENTITY_ID: self._entities,
}
return attrs
| apache-2.0 |
Lh4cKg/MITMf | core/servers/IMAP.py | 24 | 2557 | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import core.responder.settings as settings
import threading
from core.responder.utils import *
from SocketServer import BaseRequestHandler, ThreadingMixIn, TCPServer
from core.responder.packets import IMAPGreeting, IMAPCapability, IMAPCapabilityEnd
class IMAP:
def start(self):
try:
if OsInterfaceIsSupported():
server = ThreadingTCPServer((settings.Config.Bind_To, 143), IMAP4)
else:
server = ThreadingTCPServer(('', 143), IMAP4)
t = threading.Thread(name='IMAP', target=server.serve_forever)
t.setDaemon(True)
t.start()
except Exception as e:
print "Error starting IMAP server: {}".format(e)
print_exc()
class ThreadingTCPServer(ThreadingMixIn, TCPServer):
allow_reuse_address = 1
def server_bind(self):
if OsInterfaceIsSupported():
try:
self.socket.setsockopt(socket.SOL_SOCKET, 25, settings.Config.Bind_To+'\0')
            except Exception:
pass
TCPServer.server_bind(self)
# IMAP4 Server class
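# Protocol sketch (client lines illustrative): after the greeting, a client
# may send '<tag> CAPABILITY' (answered with the capability packets) and then
# '<tag> LOGIN <user> <password>'; the LOGIN credentials are saved via SaveToDb.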
class IMAP4(BaseRequestHandler):
def handle(self):
try:
self.request.send(str(IMAPGreeting()))
data = self.request.recv(1024)
if data[5:15] == "CAPABILITY":
RequestTag = data[0:4]
self.request.send(str(IMAPCapability()))
self.request.send(str(IMAPCapabilityEnd(Tag=RequestTag)))
data = self.request.recv(1024)
if data[5:10] == "LOGIN":
                Credentials = data[10:].strip().split()  # e.g. ['user', 'password']
SaveToDb({
'module': 'IMAP',
'type': 'Cleartext',
'client': self.client_address[0],
'user': Credentials[0],
'cleartext': Credentials[1],
'fullhash': Credentials[0]+":"+Credentials[1],
})
## FIXME: Close connection properly
## self.request.send(str(ditchthisconnection()))
## data = self.request.recv(1024)
except Exception:
            pass
| gpl-3.0 |
ramondelafuente/ansible | test/integration/setup_gce.py | 163 | 1391 | '''
Create GCE resources for use in integration tests.
Takes a prefix as a command-line argument and creates two persistent disks named
${prefix}-base and ${prefix}-extra and a snapshot of the base disk named
${prefix}-snapshot. prefix will be forced to lowercase, to ensure the names are
legal GCE resource names.
'''
import sys
import optparse
import gce_credentials
def parse_args():
parser = optparse.OptionParser(
usage="%s [options] <prefix>" % (sys.argv[0],), description=__doc__)
gce_credentials.add_credentials_options(parser)
parser.add_option("--prefix",
action="store", dest="prefix",
help="String used to prefix GCE resource names (default: %default)")
(opts, args) = parser.parse_args()
gce_credentials.check_required(opts, parser)
if not args:
parser.error("Missing required argument: name prefix")
return (opts, args)
if __name__ == '__main__':
(opts, args) = parse_args()
gce = gce_credentials.get_gce_driver(opts)
prefix = args[0].lower()
try:
base_volume = gce.create_volume(
size=10, name=prefix+'-base', location='us-central1-a')
gce.create_volume_snapshot(base_volume, name=prefix+'-snapshot')
gce.create_volume(
size=10, name=prefix+'-extra', location='us-central1-a')
    except KeyboardInterrupt:
print("\nExiting on user command.")
| gpl-3.0 |
Udayraj123/dashboard_IITG | Binder/discussions/views.py | 1 | 3712 | from django.shortcuts import render, get_object_or_404, redirect,HttpResponse
from django.http import HttpResponseRedirect
from .models import post
from .forms import post_form
from django.db.models import Q
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from comment.forms import CommentForm
from comment.models import comment
def index(request):
return render(request, "discussions/practise.html")
def post_detail(request, id):
queryset = get_object_or_404(post, id=id)
writer = queryset.writer
# content_type = ContentType.objects.get_for_model(post)
initial_data = {
"content_type": queryset.get_content_type,
"object_id": queryset.id
}
form = CommentForm(request.POST or None, initial=initial_data)
if request.method == "POST":
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
if form.is_valid():
# print(form.cleaned_data)
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get("object_id")
content_data = form.cleaned_data.get("content")
new_comment, created = comment.objects.get_or_create(
user=request.user,
content_type=content_type,
object_id=obj_id,
content=content_data
)
comments = queryset.comments
context = {
"obj": queryset,
"comments": comments,
"comment_form": form,
"writer": writer
}
return render(request, "discussions/post_detail.html", context)
def create_post(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
form = post_form(request.POST or None)
message=""
if form.is_valid():
instance = form.save(commit=False)
instance.writer = request.user
instance.save()
message = "Successfully added the post!"
context = {
"form": form,
"message":message
}
return render(request, "discussions/create_post.html", context)
def edit_post(request, id):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
instance = get_object_or_404(post, id=id)
form = post_form(request.POST or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
# messages.success(request, "Successfully created")
return HttpResponseRedirect(instance.get_url_to_list())
# else:
# messages.error(request, "Unsuccessful ")
context = {
"form": form
}
return render(request, "discussions/create_post.html", context)
# def search_post_list(request):
def post_list(request):
posts = post.objects.all().order_by("-timestamp")
search_query = request.GET.get("q")
if search_query:
posts = posts.filter(Q(title__icontains=search_query) | Q(content__icontains=search_query) | Q(goingat__icontains = search_query) | Q(comingback__icontains = search_query) )
context = {
"post": posts,
}
return render(request, "discussions/post_list.html", context)
def post_delete(request, id):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
    var = get_object_or_404(post, id=id)
var.delete()
return redirect("posts:list")
def profile(request):
return render(request, 'discussions/profile.html')
| mit |
eshasharma/mase | python101/code/homophone.py | 14 | 1749 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from pronounce import read_dictionary
def make_word_dict():
"""Read the words in words.txt and return a dictionary
that contains the words as keys"""
d = dict()
fin = open('words.txt')
for line in fin:
word = line.strip().lower()
d[word] = word
return d
def homophones(a, b, phonetic):
"""Checks if words two can be pronounced the same way.
If either word is not in the pronouncing dictionary, return False
a, b: strings
phonetic: map from words to pronunciation codes
"""
if a not in phonetic or b not in phonetic:
return False
return phonetic[a] == phonetic[b]
def check_word(word, word_dict, phonetic):
"""Checks to see if the word has the following property:
removing the first letter yields a word with the same
pronunciation, and removing the second letter yields a word
with the same pronunciation.
word: string
word_dict: dictionary with words as keys
phonetic: map from words to pronunciation codes
"""
word1 = word[1:]
if word1 not in word_dict:
return False
if not homophones(word, word1, phonetic):
return False
word2 = word[0] + word[2:]
if word2 not in word_dict:
return False
if not homophones(word, word2, phonetic):
return False
return True
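# One known solution (using the CMU pronouncing dictionary loaded by
# pronounce.py): 'scent' -> 'cent' (drop the first letter) and 'sent'
# (drop the second letter), all pronounced the same.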
if __name__ == '__main__':
phonetic = read_dictionary()
word_dict = make_word_dict()
for word in word_dict:
if check_word(word, word_dict, phonetic):
print word, word[1:], word[0] + word[2:]
| unlicense |
benthomasson/ansible | lib/ansible/modules/windows/win_find.py | 22 | 10385 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_find
version_added: "2.3"
short_description: return a list of files based on specific criteria
description:
- Return a list of files based on specified criteria.
- Multiple criteria are AND'd together.
- For non-Windows targets, use the M(find) module instead.
options:
age:
description:
- Select files or folders whose age is equal to or greater than
the specified time. Use a negative age to find files equal to or
less than the specified time. You can choose seconds, minutes,
hours, days or weeks by specifying the first letter of any of
those words (e.g., "2s", "10d", "1w").
required: false
age_stamp:
description:
- Choose the file property against which we compare C(age). The
default attribute we compare with is the last modification time.
required: false
default: mtime
choices: ['atime', 'mtime', 'ctime']
checksum_algorithm:
description:
- Algorithm to determine the checksum of a file. Will throw an error
if the host is unable to use specified algorithm.
required: false
default: sha1
choices: ['md5', 'sha1', 'sha256', 'sha384', 'sha512']
file_type:
description: Type of file to search for
required: false
default: file
choices: ['file', 'directory']
follow:
description:
- Set this to true to follow symlinks in the path. This needs to
be used in conjunction with C(recurse).
required: false
default: false
choices: ['true', 'false']
get_checksum:
description:
- Whether to return a checksum of the file in the return info (default sha1),
use C(checksum_algorithm) to change from the default.
required: false
default: true
choices: ['true', 'false']
hidden:
description: Set this to include hidden files or folders
required: false
default: false
choices: ['true', 'false']
paths:
description:
- List of paths of directories to search for files or folders in.
This can be supplied as a single path or a list of paths.
required: true
patterns:
description:
- One or more (powershell or regex) patterns to compare filenames
with. The type of pattern matching is controlled by C(use_regex)
option. The patterns restrict the list of files or folders to be
returned based on the filenames. For a file to be matched it
only has to match one pattern in the list provided.
required: false
recurse:
description:
- Will recursively descend into the directory looking for files
or folders
required: false
default: false
choices: ['true', 'false']
size:
description:
- Select files or folders whose size is equal to or greater than
the specified size. Use a negative value to find files equal to
or less than the specified size. You can specify the size with
a suffix of the byte type i.e. kilo = k, mega = m... Size is not
evaluated for symbolic links.
required: false
use_regex:
description:
- Will set patterns to run as a regex check if true
required: false
default: false
choices: ['true', 'false']
notes:
- For non-Windows targets, use the M(find) module instead.
author: "Jordan Borean (@jborean93)"
'''
EXAMPLES = r'''
# Find files in path
- win_find:
paths: D:\temp
# Find hidden files in path
- win_find:
paths: D:\temp
hidden: True
# Find files in multiple paths
- win_find:
paths: ['C:\temp', 'D:\temp']
# Find files in directory while searching recursively
- win_find:
paths: D:\temp
recurse: True
# Find files in directory while following symlinks
- win_find:
paths: D:\temp
recurse: True
follow: True
# Find files with .log and .out extension using powershell wildcards
- win_find:
paths: D:\temp
patterns: ['*.log', '*.out']
# Find files in path based on regex pattern
- win_find:
paths: D:\temp
patterns: 'out_\d{8}-\d{6}.log'
use_regex: True
# Find files older than 1 day
- win_find:
paths: D:\temp
age: 86400
# Find files older than 1 day based on create time
- win_find:
paths: D:\temp
age: 86400
age_stamp: ctime
# Find files older than 1 day with unit syntax
- win_find:
paths: D:\temp
age: 1d
# Find files newer than 1 hour
- win_find:
paths: D:\temp
age: -3600
# Find files newer than 1 hour with unit syntax
- win_find:
paths: D:\temp
age: -1h
# Find files larger than 1MB
- win_find:
paths: D:\temp
size: 1048576
# Find files larger than 1GB with unit syntax
- win_find:
paths: D:\temp
size: 1g
# Find files smaller than 1MB
- win_find:
paths: D:\temp
size: -1048576
# Find files smaller than 1GB with unit syntax
- win_find:
paths: D:\temp
size: -1g
# Find folders/symlinks in multiple paths
- win_find:
paths: ['C:\temp', 'D:\temp']
file_type: directory
# Find files and return SHA256 checksum of files found
- win_find:
paths: C:\temp
get_checksum: True
checksum_algorithm: sha256
# Find files and do not return the checksum
- win_find:
paths: C:\temp
get_checksum: False
'''
RETURN = r'''
changed:
description: Whether anything was changed
returned: always
type: boolean
sample: True
examined:
description: The number of files/folders that were checked
returned: always
type: int
sample: 10
matched:
description: The number of files/folders that match the criteria
returned: always
type: int
sample: 2
files:
description: Information on the files/folders that match the criteria, returned as a list of dictionary elements, one for each file matched
returned: success
type: complex
contains:
attributes:
description: attributes of the file at path in raw form
returned: success, path exists
type: string
sample: "Archive, Hidden"
checksum:
description: The checksum of a file based on checksum_algorithm specified
returned: success, path exists, path is a file, get_checksum == True
type: string
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
creationtime:
description: the creation time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
extension:
description: the extension of the file at path
returned: success, path exists, path is a file
type: string
sample: ".ps1"
isarchive:
description: if the path is ready for archiving or not
returned: success, path exists
type: boolean
sample: True
isdir:
description: if the path is a directory or not
returned: success, path exists
type: boolean
sample: True
ishidden:
description: if the path is hidden or not
returned: success, path exists
type: boolean
sample: True
islnk:
description: if the path is a symbolic link or junction or not
returned: success, path exists
type: boolean
sample: True
isreadonly:
description: if the path is read only or not
returned: success, path exists
type: boolean
sample: True
isshared:
description: if the path is shared or not
returned: success, path exists
type: boolean
sample: True
lastaccesstime:
description: the last access time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lastwritetime:
description: the last modification time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lnk_source:
description: the target of the symbolic link, will return null if not a link or the link is broken
returned: success, path exists, path is a symbolic link
type: string
sample: C:\temp
owner:
description: the owner of the file
returned: success, path exists
type: string
sample: BUILTIN\Administrators
path:
description: the full absolute path to the file
returned: success, path exists
type: string
sample: C:\temp\test.ps1
sharename:
description: the name of share if folder is shared
returned: success, path exists, path is a directory and isshared == True
type: string
sample: file-share
size:
description: the size in bytes of a file or folder
returned: success, path exists, path is not a link
type: int
sample: 1024
'''
| gpl-3.0 |
sjlehtin/django | tests/model_fields/test_uuid.py | 67 | 6991 | import json
import uuid
from django.core import exceptions, serializers
from django.db import IntegrityError, models
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from .models import (
NullableUUIDModel, PrimaryKeyUUIDModel, RelatedToUUIDModel, UUIDGrandchild,
UUIDModel,
)
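# For reference, minimal model definitions consistent with these tests --
# a sketch only; the real definitions live in model_fields/models.py:
#
#     class UUIDModel(models.Model):
#         field = models.UUIDField()
#
#     class NullableUUIDModel(models.Model):
#         field = models.UUIDField(blank=True, null=True)
#
#     class PrimaryKeyUUIDModel(models.Model):
#         id = models.UUIDField(primary_key=True, default=uuid.uuid4)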
class TestSaveLoad(TestCase):
def test_uuid_instance(self):
instance = UUIDModel.objects.create(field=uuid.uuid4())
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, instance.field)
def test_str_instance_no_hyphens(self):
UUIDModel.objects.create(field='550e8400e29b41d4a716446655440000')
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_str_instance_hyphens(self):
UUIDModel.objects.create(field='550e8400-e29b-41d4-a716-446655440000')
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_str_instance_bad_hyphens(self):
UUIDModel.objects.create(field='550e84-00-e29b-41d4-a716-4-466-55440000')
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_null_handling(self):
NullableUUIDModel.objects.create(field=None)
loaded = NullableUUIDModel.objects.get()
self.assertIsNone(loaded.field)
def test_pk_validated(self):
with self.assertRaisesMessage(exceptions.ValidationError, 'is not a valid UUID'):
PrimaryKeyUUIDModel.objects.get(pk={})
with self.assertRaisesMessage(exceptions.ValidationError, 'is not a valid UUID'):
PrimaryKeyUUIDModel.objects.get(pk=[])
def test_wrong_value(self):
with self.assertRaisesMessage(exceptions.ValidationError, 'is not a valid UUID'):
UUIDModel.objects.get(field='not-a-uuid')
with self.assertRaisesMessage(exceptions.ValidationError, 'is not a valid UUID'):
UUIDModel.objects.create(field='not-a-uuid')
class TestMethods(SimpleTestCase):
def test_deconstruct(self):
field = models.UUIDField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs, {})
def test_to_python(self):
self.assertIsNone(models.UUIDField().to_python(None))
class TestQuerying(TestCase):
def setUp(self):
self.objs = [
NullableUUIDModel.objects.create(field=uuid.uuid4()),
NullableUUIDModel.objects.create(field='550e8400e29b41d4a716446655440000'),
NullableUUIDModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__exact='550e8400e29b41d4a716446655440000'),
[self.objs[1]]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__isnull=True),
[self.objs[2]]
)
class TestSerialization(SimpleTestCase):
test_data = (
'[{"fields": {"field": "550e8400-e29b-41d4-a716-446655440000"}, '
'"model": "model_fields.uuidmodel", "pk": null}]'
)
nullable_test_data = (
'[{"fields": {"field": null}, '
'"model": "model_fields.nullableuuidmodel", "pk": null}]'
)
def test_dumping(self):
instance = UUIDModel(field=uuid.UUID('550e8400e29b41d4a716446655440000'))
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, uuid.UUID('550e8400-e29b-41d4-a716-446655440000'))
def test_nullable_loading(self):
instance = list(serializers.deserialize('json', self.nullable_test_data))[0].object
self.assertIsNone(instance.field)
class TestValidation(SimpleTestCase):
def test_invalid_uuid(self):
field = models.UUIDField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('550e8400', None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(cm.exception.message % cm.exception.params, "'550e8400' is not a valid UUID.")
def test_uuid_instance_ok(self):
field = models.UUIDField()
field.clean(uuid.uuid4(), None) # no error
class TestAsPrimaryKey(TestCase):
def test_creation(self):
PrimaryKeyUUIDModel.objects.create()
loaded = PrimaryKeyUUIDModel.objects.get()
self.assertIsInstance(loaded.pk, uuid.UUID)
def test_uuid_pk_on_save(self):
saved = PrimaryKeyUUIDModel.objects.create(id=None)
loaded = PrimaryKeyUUIDModel.objects.get()
self.assertIsNotNone(loaded.id)
self.assertEqual(loaded.id, saved.id)
def test_uuid_pk_on_bulk_create(self):
u1 = PrimaryKeyUUIDModel()
u2 = PrimaryKeyUUIDModel(id=None)
PrimaryKeyUUIDModel.objects.bulk_create([u1, u2])
# The two objects were correctly created.
u1_found = PrimaryKeyUUIDModel.objects.filter(id=u1.id).exists()
u2_found = PrimaryKeyUUIDModel.objects.exclude(id=u1.id).exists()
self.assertTrue(u1_found)
self.assertTrue(u2_found)
self.assertEqual(PrimaryKeyUUIDModel.objects.count(), 2)
def test_underlying_field(self):
pk_model = PrimaryKeyUUIDModel.objects.create()
RelatedToUUIDModel.objects.create(uuid_fk=pk_model)
related = RelatedToUUIDModel.objects.get()
self.assertEqual(related.uuid_fk.pk, related.uuid_fk_id)
def test_update_with_related_model_instance(self):
# regression for #24611
u1 = PrimaryKeyUUIDModel.objects.create()
u2 = PrimaryKeyUUIDModel.objects.create()
r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
RelatedToUUIDModel.objects.update(uuid_fk=u2)
r.refresh_from_db()
self.assertEqual(r.uuid_fk, u2)
def test_update_with_related_model_id(self):
u1 = PrimaryKeyUUIDModel.objects.create()
u2 = PrimaryKeyUUIDModel.objects.create()
r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
RelatedToUUIDModel.objects.update(uuid_fk=u2.pk)
r.refresh_from_db()
self.assertEqual(r.uuid_fk, u2)
def test_two_level_foreign_keys(self):
# exercises ForeignKey.get_db_prep_value()
UUIDGrandchild().save()
class TestAsPrimaryKeyTransactionTests(TransactionTestCase):
# Need a TransactionTestCase to avoid deferring FK constraint checking.
available_apps = ['model_fields']
@skipUnlessDBFeature('supports_foreign_keys')
def test_unsaved_fk(self):
u1 = PrimaryKeyUUIDModel()
with self.assertRaises(IntegrityError):
RelatedToUUIDModel.objects.create(uuid_fk=u1)
| bsd-3-clause |
RomainBrault/scikit-learn | examples/plot_isotonic_regression.py | 55 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
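# For intuition, isotonic regression minimizes sum((y - y_hat)**2) subject
# to y_hat being non-decreasing. Below is a minimal pool-adjacent-violators
# (PAV) sketch for the unweighted case; it illustrates the idea and is an
# assumption about the algorithm, not scikit-learn's actual implementation.
def pav(values):
    # Each block holds [sum, count]; merge neighbouring blocks while their
    # means violate the non-decreasing order, then expand blocks back out.
    blocks = []
    for v in values:
        blocks.append([float(v), 1])
        while (len(blocks) > 1 and
               blocks[-2][0] / blocks[-2][1] > blocks[-1][0] / blocks[-1][1]):
            s, c = blocks.pop()
            blocks[-1][0] += s
            blocks[-1][1] += c
    fitted = []
    for s, c in blocks:
        fitted.extend([s / c] * c)
    return fitted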
| bsd-3-clause |
matachi/subdownloader | modules/mmpython/__init__.py | 1 | 5445 | #!/usr/bin/python
#if 0
# -----------------------------------------------------------------------
# $Id: __init__.py 382 2004-10-15 09:02:11Z dischi $
# -----------------------------------------------------------------------
# $Log$
# Revision 1.35 2004/10/15 09:02:11 dischi
# add ac3 parser
#
# Revision 1.34 2004/05/20 15:55:08 dischi
# add xml file detection
#
# Revision 1.33 2004/05/02 08:28:20 dischi
# dvd iso support
#
# Revision 1.32 2004/04/18 09:11:36 dischi
# improved lsdvd support
#
# Revision 1.31 2004/04/17 18:38:54 dischi
# add lsdvd parser to avoid problems with our own
#
# Revision 1.30 2004/01/31 12:24:39 dischi
# add basic matroska info
#
# Revision 1.29 2004/01/27 20:27:52 dischi
# remove cache, it does not belong in mmpython
#
# Revision 1.28 2004/01/03 17:44:04 dischi
# catch OSError in case the file is removed file scanning
#
# Revision 1.27 2003/11/24 20:30:17 dischi
# fix again, dvd may fail, but datadir may not
#
# Revision 1.26 2003/11/24 20:29:26 dischi
# resort to let dvd work again
#
# Revision 1.25 2003/11/07 13:58:52 dischi
# extra check for dvd
#
# Revision 1.24 2003/09/22 16:24:58 the_krow
# o added flac
# o try-except block around ioctl since it is not avaiable in all OS
#
# Revision 1.23 2003/09/14 13:50:42 dischi
# make it possible to scan extention based only
#
# Revision 1.22 2003/09/10 18:41:44 dischi
# add USE_NETWORK, maybe there is no network connection
#
# Revision 1.20 2003/08/26 13:16:41 outlyer
# Enabled m4a support
#
# Revision 1.19 2003/07/10 11:17:35 the_krow
# ogminfo is used to parse ogg files
#
# Revision 1.18 2003/07/01 21:07:42 dischi
# switch back to eyed3info
#
# Revision 1.17 2003/06/30 13:17:18 the_krow
# o Refactored mediainfo into factory, synchronizedobject
# o Parsers now register directly at mmpython not at mmpython.mediainfo
# o use mmpython.Factory() instead of mmpython.mediainfo.get_singleton()
# o Bugfix in PNG parser
# o Renamed disc.AudioInfo into disc.AudioDiscInfo
# o Renamed disc.DataInfo into disc.DataDiscInfo
#
# -----------------------------------------------------------------------
# MMPython - Media Metadata for Python
# Copyright (C) 2003 Thomas Schueppel, Dirk Meyer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
#endif
# Do this stuff before importing the info instances since they
# depend on this function
import factory
from synchronizedobject import SynchronizedObject
_factory = SynchronizedObject(factory.Factory())
#def Factory():
# global _factory
#
# print _factory
# # One-time init
# if _factory == None:
# _factory = SynchronizedObject(factory.Factory())
#
# return _factory
def registertype(mimetype,extensions,type,c):
#f = _factory
_factory.register(mimetype,extensions,type,c)
def gettype(mimetype,extensions):
#f = _factory
return _factory.get(mimetype,extensions)
USE_NETWORK = 1
def parse(filename, ext_only = 0):
"""
Parse the given media file and return the resulting metadata object.
"""
return _factory.create(filename, ext_only)
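# Typical usage sketch (the path is illustrative, and the attributes
# available depend on which parser matched the file):
#
#     import mmpython
#     info = mmpython.parse('/tmp/movie.avi')
#     if info:
#         print info.mime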
# Okay Regular imports and code follow
import sys
import os
import mediainfo
import video.riffinfo
import video.mpeginfo
import video.asfinfo
import video.movinfo
import video.vcdinfo
import video.realinfo
import video.ogminfo
import video.mkvinfo
import misc.xmlinfo
registertype( 'video/asf', ('asf','wmv','wma'), mediainfo.TYPE_AV, video.asfinfo.AsfInfo )
registertype( 'application/mkv', ('mkv', 'mka',), mediainfo.TYPE_AV, video.mkvinfo.MkvInfo )
registertype( 'video/quicktime', ('mov', 'qt'), mediainfo.TYPE_AV, video.movinfo.MovInfo )
registertype( 'video/mpeg', ('mpeg','mpg','mp4', 'ts'), mediainfo.TYPE_AV, video.mpeginfo.MpegInfo )
registertype( 'application/ogg', ('ogm', 'ogg',), mediainfo.TYPE_AV, video.ogminfo.OgmInfo )
registertype( 'video/real', ('rm', 'ra', 'ram'), mediainfo.TYPE_AV, video.realinfo.RealInfo )
registertype( 'video/avi', ('avi',), mediainfo.TYPE_AV, video.riffinfo.RiffInfo )
registertype( 'video/vcd', ('cue',), mediainfo.TYPE_AV, video.vcdinfo.VCDInfo )
#import audio.ogginfo
#import audio.pcminfo
#import audio.m4ainfo
#import audio.ac3info
#import image.jpginfo
#import image.pnginfo
#import image.tiffinfo
#import image.ImageInfo
# import some disc modules (may fail)
#try:
# import disc.discinfo
# import disc.vcdinfo
# import disc.audioinfo
#except ImportError:
# pass
#
# find the best working DVD module
#try:
# import disc.lsdvd
#except ImportError:
# pass
#
#try:
# import disc.dvdinfo
#except ImportError:
# pass
#
# use fallback disc module
#try:
# import disc.datainfo
#except ImportError:
# pass
#import audio.eyed3info
#import audio.mp3info
#import audio.webradioinfo
#import audio.flacinfo
| gpl-3.0 |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/celery/app/builtins.py | 1 | 6719 | # -*- coding: utf-8 -*-
"""Built-in Tasks.
The built-in tasks are always available in all app instances.
"""
from __future__ import absolute_import, unicode_literals
from celery._state import connect_on_app_finalize
from celery.utils.log import get_logger
__all__ = ()
logger = get_logger(__name__)
@connect_on_app_finalize
def add_backend_cleanup_task(app):
"""Task used to clean up expired results.
If the configured backend requires periodic cleanup this task is also
automatically configured to run every day at 4am (requires
:program:`celery beat` to be running).
"""
@app.task(name='celery.backend_cleanup', shared=False, lazy=False)
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup
@connect_on_app_finalize
def add_accumulate_task(app):
"""Task used by Task.replace when replacing task with group."""
@app.task(bind=True, name='celery.accumulate', shared=False, lazy=False)
def accumulate(self, *args, **kwargs):
index = kwargs.get('index')
return args[index] if index is not None else args
return accumulate
@connect_on_app_finalize
def add_unlock_chord_task(app):
"""Task used by result backends without native chord support.
Will joins chord by creating a task chain polling the header
for completion.
"""
from celery.canvas import maybe_signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple
@app.task(name='celery.chord_unlock', max_retries=None, shared=False,
default_retry_delay=1, ignore_result=True, lazy=False, bind=True)
def unlock_chord(self, group_id, callback, interval=None,
max_retries=None, result=None,
Result=app.AsyncResult, GroupResult=app.GroupResult,
result_from_tuple=result_from_tuple, **kwargs):
if interval is None:
interval = self.default_retry_delay
# check if the task group is ready, and if so apply the callback.
callback = maybe_signature(callback, app)
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
app=app,
)
j = deps.join_native if deps.supports_native_join else deps.join
try:
ready = deps.ready()
except Exception as exc:
raise self.retry(
exc=exc, countdown=interval, max_retries=max_retries,
)
else:
if not ready:
raise self.retry(countdown=interval, max_retries=max_retries)
callback = maybe_signature(callback, app=app)
try:
with allow_join_result():
ret = j(
timeout=app.conf.result_chord_join_timeout,
propagate=True,
)
except Exception as exc: # pylint: disable=broad-except
try:
culprit = next(deps._failed_join_report())
reason = 'Dependency {0.id} raised {1!r}'.format(culprit, exc)
except StopIteration:
reason = repr(exc)
logger.exception('Chord %r raised: %r', group_id, exc)
app.backend.chord_error_from_stack(callback, ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc: # pylint: disable=broad-except
logger.exception('Chord %r raised: %r', group_id, exc)
app.backend.chord_error_from_stack(
callback,
exc=ChordError('Callback error: {0!r}'.format(exc)),
)
return unlock_chord
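# Usage sketch for the fallback implemented above (assumes a configured
# Celery app with `add` and `tsum` tasks defined):
#
#     from celery import chord
#     chord([add.s(i, i) for i in range(10)])(tsum.s())
#
# On result backends without native chord support, `celery.chord_unlock`
# keeps retrying until the header group is ready, then calls the body
# signature with the joined results.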
@connect_on_app_finalize
def add_map_task(app):
from celery.canvas import signature
@app.task(name='celery.map', shared=False, lazy=False)
def xmap(task, it):
task = signature(task, app=app).type
return [task(item) for item in it]
return xmap
@connect_on_app_finalize
def add_starmap_task(app):
from celery.canvas import signature
@app.task(name='celery.starmap', shared=False, lazy=False)
def xstarmap(task, it):
task = signature(task, app=app).type
return [task(*item) for item in it]
return xstarmap
@connect_on_app_finalize
def add_chunk_task(app):
from celery.canvas import chunks as _chunks
@app.task(name='celery.chunks', shared=False, lazy=False)
def chunks(task, it, n):
return _chunks.apply_chunks(task, it, n)
return chunks
@connect_on_app_finalize
def add_group_task(app):
"""No longer used, but here for backwards compatibility."""
from celery.canvas import maybe_signature
from celery.result import result_from_tuple
@app.task(name='celery.group', bind=True, shared=False, lazy=False)
def group(self, tasks, result, group_id, partial_args, add_to_parent=True):
app = self.app
result = result_from_tuple(result, app)
# any partial args are added to all tasks in the group
taskit = (maybe_signature(task, app=app).clone(partial_args)
for i, task in enumerate(tasks))
with app.producer_or_acquire() as producer:
[stask.apply_async(group_id=group_id, producer=producer,
add_to_parent=False) for stask in taskit]
parent = app.current_worker_task
if add_to_parent and parent:
parent.add_trail(result)
return result
return group
@connect_on_app_finalize
def add_chain_task(app):
"""No longer used, but here for backwards compatibility."""
@app.task(name='celery.chain', shared=False, lazy=False)
def chain(*args, **kwargs):
raise NotImplementedError('chain is not a real task')
return chain
@connect_on_app_finalize
def add_chord_task(app):
"""No longer used, but here for backwards compatibility."""
from celery import group, chord as _chord
from celery.canvas import maybe_signature
@app.task(name='celery.chord', bind=True, ignore_result=False,
shared=False, lazy=False)
def chord(self, header, body, partial_args=(), interval=None,
countdown=1, max_retries=None, eager=False, **kwargs):
app = self.app
# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([
maybe_signature(s, app=app) for s in tasks
], app=self.app)
body = maybe_signature(body, app=app)
ch = _chord(header, body)
return ch.run(header, body, partial_args, app, interval,
countdown, max_retries, **kwargs)
return chord
| apache-2.0 |
an420/123 | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
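# Note: ImageMagick's -delay argument is in ticks of 1/100 second by
# default, so the FRAMERATE of 10.0 above comes to roughly ten frames
# per second.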
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
endlessm/chromium-browser | third_party/libxml/src/genUnicode.py | 18 | 12997 | #!/usr/bin/python -u
#
# Original script modified in November 2003 to take advantage of
# the character-validation range routines, and updated to the
# current Unicode information (Version 4.0.1)
#
# NOTE: there is an 'alias' facility for blocks which are not present in
# the current release, but are needed for ABI compatibility. This
# must be accomplished MANUALLY! Please see the comments below under
# 'blockAliases'
#
import sys
import string
import time
webpage = "http://www.unicode.org/Public/4.0-Update1/UCD-4.0.1.html"
sources = "Blocks-4.0.1.txt UnicodeData-4.0.1.txt"
#
# blockAliases is a small hack - it is used for mapping block names which
# were used in the 3.1 release, but are missing or changed in the current
# release. The format is "OldBlockName:NewBlockName1[,NewBlockName2[,...]]"
blockAliases = []
blockAliases.append("CombiningMarksforSymbols:CombiningDiacriticalMarksforSymbols")
blockAliases.append("Greek:GreekandCoptic")
blockAliases.append("PrivateUse:PrivateUseArea,SupplementaryPrivateUseArea-A," +
"SupplementaryPrivateUseArea-B")
# minTableSize gives the minimum number of ranges which must be present
# before a range table is produced. If there are less than this
# number, inline comparisons are generated
minTableSize = 8
(blockfile, catfile) = string.split(sources)
#
# Now process the "blocks" file, reducing it to a dictionary
# indexed by blockname, containing a tuple with the applicable
# block range
#
BlockNames = {}
try:
blocks = open(blockfile, "r")
except:
print "Missing %s, aborting ..." % blockfile
sys.exit(1)
for line in blocks.readlines():
if line[0] == '#':
continue
line = string.strip(line)
if line == '':
continue
try:
fields = string.split(line, ';')
range = string.strip(fields[0])
(start, end) = string.split(range, "..")
name = string.strip(fields[1])
name = string.replace(name, ' ', '')
except:
print "Failed to process line: %s" % (line)
continue
start = "0x" + start
end = "0x" + end
try:
BlockNames[name].append((start, end))
except:
BlockNames[name] = [(start, end)]
blocks.close()
print "Parsed %d blocks descriptions" % (len(BlockNames.keys()))
for block in blockAliases:
alias = string.split(block,':')
alist = string.split(alias[1],',')
for comp in alist:
if BlockNames.has_key(comp):
if alias[0] not in BlockNames:
BlockNames[alias[0]] = []
for r in BlockNames[comp]:
BlockNames[alias[0]].append(r)
else:
print "Alias %s: %s not in Blocks" % (alias[0], comp)
continue
#
# Next process the Categories file. This is more complex, since
# the file is in code sequence, and we need to invert it. We use
# a dictionary with index category-name, with each entry containing
# all the ranges (codepoints) of that category. Note that category
# names comprise two parts - the general category, and the "subclass"
# within that category. Therefore, both "general category" (which is
# the first character of the 2-character category-name) and the full
# (2-character) name are entered into this dictionary.
#
try:
data = open(catfile, "r")
except:
print "Missing %s, aborting ..." % catfile
sys.exit(1)
nbchar = 0;
Categories = {}
for line in data.readlines():
if line[0] == '#':
continue
line = string.strip(line)
if line == '':
continue
try:
fields = string.split(line, ';')
point = string.strip(fields[0])
value = 0
while point != '':
value = value * 16
if point[0] >= '0' and point[0] <= '9':
value = value + ord(point[0]) - ord('0')
elif point[0] >= 'A' and point[0] <= 'F':
value = value + 10 + ord(point[0]) - ord('A')
elif point[0] >= 'a' and point[0] <= 'f':
value = value + 10 + ord(point[0]) - ord('a')
point = point[1:]
name = fields[2]
except:
print "Failed to process line: %s" % (line)
continue
nbchar = nbchar + 1
# update entry for "full name"
try:
Categories[name].append(value)
except:
try:
Categories[name] = [value]
except:
print "Failed to process line: %s" % (line)
# update "general category" name
try:
Categories[name[0]].append(value)
except:
try:
Categories[name[0]] = [value]
except:
print "Failed to process line: %s" % (line)
data.close()
print "Parsed %d char generating %d categories" % (nbchar, len(Categories.keys()))
#
# The data is now all read. Time to process it into a more useful form.
#
# reduce the number list into ranges
for cat in Categories.keys():
list = Categories[cat]
start = -1
prev = -1
end = -1
ranges = []
for val in list:
if start == -1:
start = val
prev = val
continue
elif val == prev + 1:
prev = val
continue
elif prev == start:
ranges.append((prev, prev))
start = val
prev = val
continue
else:
ranges.append((start, prev))
start = val
prev = val
continue
if prev == start:
ranges.append((prev, prev))
else:
ranges.append((start, prev))
Categories[cat] = ranges
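# For example, the sorted code points [0x41, 0x42, 0x43, 0x61] collapse
# to the ranges [(0x41, 0x43), (0x61, 0x61)].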
#
# Assure all data is in alphabetic order, since we will be doing binary
# searches on the tables.
#
bkeys = BlockNames.keys()
bkeys.sort()
ckeys = Categories.keys()
ckeys.sort()
#
# Generate the resulting files
#
try:
header = open("include/libxml/xmlunicode.h", "w")
except:
print "Failed to open include/libxml/xmlunicode.h"
sys.exit(1)
try:
output = open("xmlunicode.c", "w")
except:
print "Failed to open xmlunicode.c"
sys.exit(1)
date = time.asctime(time.localtime(time.time()))
header.write(
"""/*
* Summary: Unicode character APIs
* Description: API for the Unicode character APIs
*
* This file is automatically generated from the
* UCS description files of the Unicode Character Database
* %s
* using the genUnicode.py Python script.
*
* Generation date: %s
* Sources: %s
* Author: Daniel Veillard
*/
#ifndef __XML_UNICODE_H__
#define __XML_UNICODE_H__
#include <libxml/xmlversion.h>
#ifdef LIBXML_UNICODE_ENABLED
#ifdef __cplusplus
extern "C" {
#endif
""" % (webpage, date, sources));
output.write(
"""/*
* xmlunicode.c: this module implements the Unicode character APIs
*
* This file is automatically generated from the
* UCS description files of the Unicode Character Database
* %s
* using the genUnicode.py Python script.
*
* Generation date: %s
* Sources: %s
* Daniel Veillard <veillard@redhat.com>
*/
#define IN_LIBXML
#include "libxml.h"
#ifdef LIBXML_UNICODE_ENABLED
#include <string.h>
#include <libxml/xmlversion.h>
#include <libxml/xmlunicode.h>
#include <libxml/chvalid.h>
typedef int (xmlIntFunc)(int); /* just to keep one's mind untwisted */
typedef struct {
const char *rangename;
xmlIntFunc *func;
} xmlUnicodeRange;
typedef struct {
const xmlUnicodeRange *table;
int numentries;
} xmlUnicodeNameTable;
static xmlIntFunc *xmlUnicodeLookup(xmlUnicodeNameTable *tptr, const char *tname);
static const xmlUnicodeRange xmlUnicodeBlocks[] = {
""" % (webpage, date, sources));
flag = 0
for block in bkeys:
name = string.replace(block, '-', '')
if flag:
output.write(',\n')
else:
flag = 1
output.write(' {"%s", xmlUCSIs%s}' % (block, name))
output.write('};\n\n')
output.write('static xmlUnicodeRange xmlUnicodeCats[] = {\n')
flag = 0;
for name in ckeys:
if flag:
output.write(',\n')
else:
flag = 1
output.write(' {"%s", xmlUCSIsCat%s}' % (name, name))
output.write('};\n\n')
#
# For any categories with more than minTableSize ranges we generate
# a range table suitable for xmlCharInRange
#
for name in ckeys:
if len(Categories[name]) > minTableSize:
numshort = 0
numlong = 0
ranges = Categories[name]
sptr = "NULL"
lptr = "NULL"
for range in ranges:
(low, high) = range
if high < 0x10000:
if numshort == 0:
pline = "static const xmlChSRange xml%sS[] = {" % name
sptr = "xml%sS" % name
else:
pline += ", "
numshort += 1
else:
if numlong == 0:
if numshort > 0:
output.write(pline + " };\n")
pline = "static const xmlChLRange xml%sL[] = {" % name
lptr = "xml%sL" % name
else:
pline += ", "
numlong += 1
if len(pline) > 60:
output.write(pline + "\n")
pline = " "
pline += "{%s, %s}" % (hex(low), hex(high))
output.write(pline + " };\nstatic xmlChRangeGroup xml%sG = {%s,%s,%s,%s};\n\n"
% (name, numshort, numlong, sptr, lptr))
output.write(
"""static xmlUnicodeNameTable xmlUnicodeBlockTbl = {xmlUnicodeBlocks, %s};
static xmlUnicodeNameTable xmlUnicodeCatTbl = {xmlUnicodeCats, %s};
/**
* xmlUnicodeLookup:
* @tptr: pointer to the name table
* @name: name to be found
*
* binary table lookup for user-supplied name
*
* Returns pointer to range function if found, otherwise NULL
*/
static xmlIntFunc
*xmlUnicodeLookup(xmlUnicodeNameTable *tptr, const char *tname) {
int low, high, mid, cmp;
xmlUnicodeRange *sptr;
if ((tptr == NULL) || (tname == NULL)) return(NULL);
low = 0;
high = tptr->numentries - 1;
sptr = tptr->table;
while (low <= high) {
mid = (low + high) / 2;
if ((cmp=strcmp(tname, sptr[mid].rangename)) == 0)
return (sptr[mid].func);
if (cmp < 0)
high = mid - 1;
else
low = mid + 1;
}
return (NULL);
}
""" % (len(BlockNames), len(Categories)) )
for block in bkeys:
name = string.replace(block, '-', '')
header.write("XMLPUBFUN int XMLCALL xmlUCSIs%s\t(int code);\n" % name)
output.write("/**\n * xmlUCSIs%s:\n * @code: UCS code point\n" % (name))
output.write(" *\n * Check whether the character is part of %s UCS Block\n"%
(block))
output.write(" *\n * Returns 1 if true 0 otherwise\n */\n");
output.write("int\nxmlUCSIs%s(int code) {\n return(" % name)
flag = 0
for (start, end) in BlockNames[block]:
if flag:
output.write(" ||\n ")
else:
flag = 1
output.write("((code >= %s) && (code <= %s))" % (start, end))
output.write(");\n}\n\n")
header.write("\nXMLPUBFUN int XMLCALL xmlUCSIsBlock\t(int code, const char *block);\n\n")
output.write(
"""/**
* xmlUCSIsBlock:
* @code: UCS code point
* @block: UCS block name
*
* Check whether the character is part of the UCS Block
*
* Returns 1 if true, 0 if false and -1 on unknown block
*/
int
xmlUCSIsBlock(int code, const char *block) {
xmlIntFunc *func;
func = xmlUnicodeLookup(&xmlUnicodeBlockTbl, block);
if (func == NULL)
return (-1);
return (func(code));
}
""")
for name in ckeys:
ranges = Categories[name]
header.write("XMLPUBFUN int XMLCALL xmlUCSIsCat%s\t(int code);\n" % name)
output.write("/**\n * xmlUCSIsCat%s:\n * @code: UCS code point\n" % (name))
output.write(" *\n * Check whether the character is part of %s UCS Category\n"%
(name))
output.write(" *\n * Returns 1 if true 0 otherwise\n */\n");
output.write("int\nxmlUCSIsCat%s(int code) {\n" % name)
if len(Categories[name]) > minTableSize:
output.write(" return(xmlCharInRange((unsigned int)code, &xml%sG)"
% name)
else:
start = 1
for range in ranges:
(begin, end) = range;
if start:
output.write(" return(");
start = 0
else:
output.write(" ||\n ");
if (begin == end):
output.write("(code == %s)" % (hex(begin)))
else:
output.write("((code >= %s) && (code <= %s))" % (
hex(begin), hex(end)))
output.write(");\n}\n\n")
header.write("\nXMLPUBFUN int XMLCALL xmlUCSIsCat\t(int code, const char *cat);\n")
output.write(
"""/**
* xmlUCSIsCat:
* @code: UCS code point
* @cat: UCS Category name
*
* Check whether the character is part of the UCS Category
*
* Returns 1 if true, 0 if false and -1 on unknown category
*/
int
xmlUCSIsCat(int code, const char *cat) {
xmlIntFunc *func;
func = xmlUnicodeLookup(&xmlUnicodeCatTbl, cat);
if (func == NULL)
return (-1);
return (func(code));
}
#define bottom_xmlunicode
#include "elfgcchack.h"
#endif /* LIBXML_UNICODE_ENABLED */
""")
header.write("""
#ifdef __cplusplus
}
#endif
#endif /* LIBXML_UNICODE_ENABLED */
#endif /* __XML_UNICODE_H__ */
""");
header.close()
output.close()
| bsd-3-clause |
valtandor/easybuild-framework | test/framework/yeb.py | 4 | 6661 | # #
# Copyright 2015-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for .yeb easyconfig format
@author: Caroline De Brouwer (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import os
import sys
from test.framework.utilities import EnhancedTestCase, init_config
from unittest import TestLoader, main
import easybuild.tools.build_log
from easybuild.framework.easyconfig.easyconfig import ActiveMNS, EasyConfig
from easybuild.framework.easyconfig.format.yeb import is_yeb_format
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import module_classes
from easybuild.tools.filetools import read_file, write_file
try:
import yaml
except ImportError:
pass
class YebTest(EnhancedTestCase):
""" Testcase for run module """
def setUp(self):
"""Test setup."""
super(YebTest, self).setUp()
self.orig_experimental = easybuild.tools.build_log.EXPERIMENTAL
easybuild.tools.build_log.EXPERIMENTAL = True
def tearDown(self):
"""Test cleanup."""
super(YebTest, self).tearDown()
easybuild.tools.build_log.EXPERIMENTAL = self.orig_experimental
def test_parse_yeb(self):
"""Test parsing of .yeb easyconfigs."""
if 'yaml' not in sys.modules:
print "Skipping test_parse_yeb (no PyYAML available)"
return
testdir = os.path.dirname(os.path.abspath(__file__))
test_easyconfigs = os.path.join(testdir, 'easyconfigs')
test_yeb_easyconfigs = os.path.join(testdir, 'easyconfigs', 'yeb')
# test parsing
test_files = [
'bzip2-1.0.6-GCC-4.9.2',
'gzip-1.6-GCC-4.9.2',
'goolf-1.4.10',
'ictce-4.1.13',
'SQLite-3.8.10.2-goolf-1.4.10',
'Python-2.7.10-ictce-4.1.13',
'CrayCCE-5.1.29',
]
for filename in test_files:
ec_yeb = EasyConfig(os.path.join(test_yeb_easyconfigs, '%s.yeb' % filename))
# compare with parsed result of .eb easyconfig
ec_eb = EasyConfig(os.path.join(test_easyconfigs, '%s.eb' % filename))
no_match = False
for key in sorted(ec_yeb.asdict()):
eb_val = ec_eb[key]
yeb_val = ec_yeb[key]
if key == 'description':
# multi-line string is always terminated with '\n' in YAML, so strip it off
yeb_val = yeb_val.strip()
self.assertEqual(yeb_val, eb_val)
def test_is_yeb_format(self):
""" Test is_yeb_format function """
testdir = os.path.dirname(os.path.abspath(__file__))
test_yeb = os.path.join(testdir, 'easyconfigs', 'yeb', 'bzip2-1.0.6-GCC-4.9.2.yeb')
raw_yeb = read_file(test_yeb)
self.assertTrue(is_yeb_format(test_yeb, None))
self.assertTrue(is_yeb_format(None, raw_yeb))
test_eb = os.path.join(testdir, 'easyconfigs', 'gzip-1.4.eb')
raw_eb = read_file(test_eb)
self.assertFalse(is_yeb_format(test_eb, None))
self.assertFalse(is_yeb_format(None, raw_eb))
def test_join(self):
""" Test yaml_join function """
# skip test if yaml module was not loaded
if 'yaml' not in sys.modules:
print "Skipping test_join (no PyYAML available)"
return
stream = [
"variables:",
" - &f foo",
" - &b bar",
"",
"fb1: !join [foo, bar]",
"fb2: !join [*f, bar]",
"fb3: !join [*f, *b]",
]
# import here for testing yaml_join separately
from easybuild.framework.easyconfig.format.yeb import yaml_join
loaded = yaml.load('\n'.join(stream))
for key in ['fb1', 'fb2', 'fb3']:
self.assertEqual(loaded.get(key), 'foobar')
def test_bad_toolchain_format(self):
""" Test alternate toolchain format name,version """
if 'yaml' not in sys.modules:
print "Skipping test_parse_yeb (no PyYAML available)"
return
# only test bad cases - the right ones are tested with the test files in test_parse_yeb
testdir = os.path.dirname(os.path.abspath(__file__))
test_easyconfigs = os.path.join(testdir, 'easyconfigs', 'yeb')
expected = r'Can not convert list .* to name and version dict. Expected 2 elements'
self.assertErrorRegex(EasyBuildError, expected, EasyConfig, os.path.join(test_easyconfigs, 'bzip-bad-toolchain.yeb'))
def test_external_module_toolchain(self):
"""Test specifying external (build) dependencies in yaml format."""
if 'yaml' not in sys.modules:
print "Skipping test_external_module_toolchain (no PyYAML available)"
return
ecpath = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'yeb', 'CrayCCE-5.1.29.yeb')
metadata = {
'name': ['foo', 'bar'],
'version': ['1.2.3', '3.2.1'],
'prefix': '/foo/bar',
}
build_options = {
'external_modules_metadata': {'fftw/3.3.4.0': metadata},
'valid_module_classes': module_classes(),
}
init_config(build_options=build_options)
easybuild.tools.build_log.EXPERIMENTAL = True
ec = EasyConfig(ecpath)
self.assertEqual(ec.dependencies()[1]['full_mod_name'], 'fftw/3.3.4.0')
self.assertEqual(ec.dependencies()[1]['external_module_metadata'], metadata)
def suite():
""" returns all the testcases in this module """
return TestLoader().loadTestsFromTestCase(YebTest)
if __name__ == '__main__':
main()
| gpl-2.0 |
ufieeehw/IEEE2016 | ros/ieee2016_simulator/scripts/point_cloud_gen.py | 1 | 2635 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import PointCloud,ChannelFloat32
from geometry_msgs.msg import Point32
import random
def generate_simulated_points(stage):
x_mod, y_mod, z_mod = 0,0,0
if stage == 'A':
x_mod, y_mod, z_mod = 85,0,-10
elif stage == 'B':
x_mod, y_mod, z_mod = 9,0,0
elif stage == 'C':
x_mod, y_mod, z_mod = -67,0,-6
colors = ["blue","blue","blue","blue","red","red","red","red","yellow","yellow","yellow","yellow","green","green","green","green"]
coordinates = [(0,215.3,25),(0,215.3,28.81),(6.35,215.3,25),(6.35,215.3,28.81),(6.35*2,215.3,25),(6.35*2,215.3,28.81),(6.35*3,215.3,25),(6.35*3,215.3,28.81),(6.35*4,215.3,25),(6.35*4,215.3,28.81),(6.35*5,215.3,25),(6.35*5,215.3,28.81),(6.35*6,215.3,25),(6.35*6,215.3,28.81),(6.35*7,215.3,25),(6.35*7,215.3,28.81)]
found_points = []
for p in range(16):
color = random.randint(0,len(colors)-1)
coor = random.randint(0,len(coordinates)-1)
found_points.append([colors[color],coordinates[coor]])
del colors[color]
del coordinates[coor]
print found_points
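# The del calls above make this a draw without replacement: every
# coordinate slot is used exactly once, and the pool of four blocks per
# colour is consumed exactly once.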
points = []
channels = [[],[],[]]
for p in found_points:
if p[0] == "blue":
channels[0].append(0) #R
channels[1].append(0) #G
channels[2].append(1) #B
elif p[0] == "red":
channels[0].append(1) #R
channels[1].append(0) #G
channels[2].append(0) #B
elif p[0] == "green":
channels[0].append(0) #R
channels[1].append(1) #G
channels[2].append(0) #B
elif p[0] == "yellow":
channels[0].append(1) #R
channels[1].append(1) #G
channels[2].append(0) #B
points.append(Point32(
x=p[1][0]/100.0 + random.uniform(-.005, .005) + x_mod,
y=p[1][1]/100.0 + random.uniform(-.005, .005) + y_mod,
z=p[1][2]/100.0 + random.uniform(-.005, .005) + z_mod
)
)
rospy.init_node('temp')
point_pub = rospy.Publisher("/camera/block_point_cloud", PointCloud, queue_size=1)
rgb_channels = [ChannelFloat32(name="r", values=channels[0]),
ChannelFloat32(name="g", values=channels[1]),
ChannelFloat32(name="b", values=channels[2])]
print "Publishing..."
point_pub.publish(PointCloud(
header=Header(
stamp=rospy.Time.now(),
frame_id="map"
),
points=points,
channels=rgb_channels
)
) | mit |
Arundhatii/erpnext | erpnext/hr/doctype/leave_allocation/leave_allocation.py | 33 | 5531 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, date_diff, formatdate
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
from erpnext.hr.doctype.leave_application.leave_application import get_approved_leaves_for_period
class OverlapError(frappe.ValidationError): pass
class BackDatedAllocationError(frappe.ValidationError): pass
class OverAllocationError(frappe.ValidationError): pass
class LessAllocationError(frappe.ValidationError): pass
class ValueMultiplierError(frappe.ValidationError): pass
class LeaveAllocation(Document):
def validate(self):
self.validate_period()
self.validate_new_leaves_allocated_value()
self.validate_allocation_overlap()
self.validate_back_dated_allocation()
self.set_total_leaves_allocated()
self.validate_total_leaves_allocated()
self.validate_lwp()
set_employee_name(self)
def on_update_after_submit(self):
self.validate_new_leaves_allocated_value()
self.set_total_leaves_allocated()
frappe.db.set(self,'carry_forwarded_leaves', flt(self.carry_forwarded_leaves))
frappe.db.set(self,'total_leaves_allocated',flt(self.total_leaves_allocated))
self.validate_against_leave_applications()
def validate_period(self):
if date_diff(self.to_date, self.from_date) <= 0:
frappe.throw(_("To date cannot be before from date"))
def validate_lwp(self):
if frappe.db.get_value("Leave Type", self.leave_type, "is_lwp"):
frappe.throw(_("Leave Type {0} cannot be allocated since it is leave without pay").format(self.leave_type))
def validate_new_leaves_allocated_value(self):
"""validate that leave allocation is in multiples of 0.5"""
if flt(self.new_leaves_allocated) % 0.5:
frappe.throw(_("Leaves must be allocated in multiples of 0.5"), ValueMultiplierError)
def validate_allocation_overlap(self):
leave_allocation = frappe.db.sql("""
select name from `tabLeave Allocation`
where employee=%s and leave_type=%s and docstatus=1
and to_date >= %s and from_date <= %s""",
(self.employee, self.leave_type, self.from_date, self.to_date))
if leave_allocation:
frappe.msgprint(_("{0} already allocated for Employee {1} for period {2} to {3}")
.format(self.leave_type, self.employee, formatdate(self.from_date), formatdate(self.to_date)))
frappe.throw(_('Reference') + ': <a href="#Form/Leave Allocation/{0}">{0}</a>'
.format(leave_allocation[0][0]), OverlapError)
def validate_back_dated_allocation(self):
future_allocation = frappe.db.sql("""select name, from_date from `tabLeave Allocation`
where employee=%s and leave_type=%s and docstatus=1 and from_date > %s
and carry_forward=1""", (self.employee, self.leave_type, self.to_date), as_dict=1)
if future_allocation:
frappe.throw(_("Leave cannot be allocated before {0}, as leave balance has already been carry-forwarded in the future leave allocation record {1}")
.format(formatdate(future_allocation[0].from_date), future_allocation[0].name),
BackDatedAllocationError)
def set_total_leaves_allocated(self):
self.carry_forwarded_leaves = get_carry_forwarded_leaves(self.employee,
self.leave_type, self.from_date, self.carry_forward)
self.total_leaves_allocated = flt(self.carry_forwarded_leaves) + flt(self.new_leaves_allocated)
if not self.total_leaves_allocated:
frappe.throw(_("Total leaves allocated is mandatory"))
def validate_total_leaves_allocated(self):
# Adding a day to include To Date in the difference
date_difference = date_diff(self.to_date, self.from_date) + 1
if date_difference < self.total_leaves_allocated:
frappe.throw(_("Total allocated leaves are more than days in the period"), OverAllocationError)
def validate_against_leave_applications(self):
leaves_taken = get_approved_leaves_for_period(self.employee, self.leave_type,
self.from_date, self.to_date)
if flt(leaves_taken) > flt(self.total_leaves_allocated):
if frappe.db.get_value("Leave Type", self.leave_type, "allow_negative"):
frappe.msgprint(_("Note: Total allocated leaves {0} shouldn't be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken))
else:
frappe.throw(_("Total allocated leaves {0} cannot be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken), LessAllocationError)
@frappe.whitelist()
def get_carry_forwarded_leaves(employee, leave_type, date, carry_forward=None):
carry_forwarded_leaves = 0
if carry_forward:
validate_carry_forward(leave_type)
previous_allocation = frappe.db.sql("""
select name, from_date, to_date, total_leaves_allocated
from `tabLeave Allocation`
where employee=%s and leave_type=%s and docstatus=1 and to_date < %s
order by to_date desc limit 1
""", (employee, leave_type, date), as_dict=1)
if previous_allocation:
leaves_taken = get_approved_leaves_for_period(employee, leave_type,
previous_allocation[0].from_date, previous_allocation[0].to_date)
carry_forwarded_leaves = flt(previous_allocation[0].total_leaves_allocated) - flt(leaves_taken)
return carry_forwarded_leaves
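# Worked example: if the previous allocation granted 20 leaves for its
# period and 15 of them were approved within that window, 20 - 15 = 5
# leaves are carried forward into the new allocation.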
def validate_carry_forward(leave_type):
if not frappe.db.get_value("Leave Type", leave_type, "is_carry_forward"):
frappe.throw(_("Leave Type {0} cannot be carry-forwarded").format(leave_type))
| gpl-3.0 |
iniverno/RnR-LLC | jgraph/data_size.py | 6 | 1682 | #!/s/std/bin/python
import sys, string, os, glob, re, mfgraph
def gen_data_size(benchmarks):
configs = ["1p-MOSI_bcast_opt", "2p-MOSI_bcast_opt", "4p-MOSI_bcast_opt", "8p-MOSI_bcast_opt", "16p-MOSI_bcast_opt"]
parameters = ["C GET_INSTR", "C GETS", "C GETX"]
stacks = []
for benchmark in benchmarks:
bars = []
for config in configs:
print " %s %s" % (benchmark, config)
filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
for filename in filenames:
numbers = []
for parameter in parameters:
lines = mfgraph.grep(filename, parameter);
line = string.split(lines[0])
map(string.strip, line)
num = string.split(line[2], "%")
num = (64L*(long(num[0])))/(1024.0*1024.0)
numbers.append(num)
numbers = mfgraph.stack_bars(numbers)
number = reduce(lambda x,y:x+y, numbers)
config_label = string.split(config, "-")[0]
bars.append([config_label] + [number])
stacks.append([benchmark] + bars)
# labels = []
# for label in parameters:
# labels.append(string.split(label)[1])
return [mfgraph.stacked_bar_graph(stacks,
# bar_segment_labels = labels,
title = "Memory touched",
ylabel = "Mbytes",
patterns = ["solid"],
xsize = 8.5,
)]
| gpl-2.0 |
iut-ibk/DynaMind-ToolBox | DynaMind-Extensions/unit-tests/gtest-1.6.0/test/gtest_list_tests_unittest.py | 1068 | 5415 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with various command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output: the expected output after running the command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
msg = ('when %s is %s, the output of "%s" is "%s".' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))
if expected_output is not None:
self.assert_(output == expected_output, msg)
else:
self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_FILTER_FOO,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
jhawkesworth/ansible | test/units/modules/network/checkpoint/test_checkpoint_host.py | 60 | 3694 | # Copyright (c) 2018 Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleFailJson, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.checkpoint import checkpoint_host
OBJECT = {'name': 'foo', 'ipv4-address': '192.168.0.15'}
CREATE_PAYLOAD = {'name': 'foo', 'ip_address': '192.168.0.15'}
UPDATE_PAYLOAD = {'name': 'foo', 'ip_address': '192.168.0.16'}
DELETE_PAYLOAD = {'name': 'foo', 'state': 'absent'}
class TestCheckpointHost(object):
module = checkpoint_host
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.modules.network.checkpoint.checkpoint_host.Connection')
return connection_class_mock.return_value
@pytest.fixture
def get_host_200(self, mocker):
mock_function = mocker.patch('ansible.modules.network.checkpoint.checkpoint_host.get_host')
mock_function.return_value = (200, OBJECT)
return mock_function.return_value
@pytest.fixture
def get_host_404(self, mocker):
mock_function = mocker.patch('ansible.modules.network.checkpoint.checkpoint_host.get_host')
mock_function.return_value = (404, 'Object not found')
return mock_function.return_value
def test_create(self, get_host_404, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(CREATE_PAYLOAD)
assert result['changed']
assert 'checkpoint_hosts' in result
def test_create_idempotent(self, get_host_200, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(CREATE_PAYLOAD)
assert not result['changed']
def test_update(self, get_host_200, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(UPDATE_PAYLOAD)
assert result['changed']
def test_delete(self, get_host_200, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(DELETE_PAYLOAD)
assert result['changed']
def test_delete_idempotent(self, get_host_404, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(DELETE_PAYLOAD)
assert not result['changed']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
def _run_module_with_fail_json(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleFailJson) as exc:
self.module.main()
result = exc.value.args[0]
return result
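# Illustrative sketch (not part of the original file; mirrors what
# units.modules.utils is assumed to provide): exit_json/fail_json are patched
# to raise exceptions so a test can capture the module result synchronously.
class _ExitJsonSketch(Exception):
    """Stands in for AnsibleExitJson in this sketch."""

def _exit_json_sketch(*args, **kwargs):
    kwargs.setdefault('changed', False)
    raise _ExitJsonSketch(kwargs)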
| gpl-3.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/boto/ses/__init__.py | 14 | 2004 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Harry Marr http://hmarr.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from connection import SESConnection
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the SES service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
return get_regions('ses', connection_cls=SESConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ses.connection.SESConnection`.
    :type region_name: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.ses.connection.SESConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
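def _example_send_email(region_name='us-east-1'):
    """Illustrative sketch, not part of the original module: resolve a region
    and send a message. The addresses below are placeholders."""
    conn = connect_to_region(region_name)
    if conn is None:
        raise ValueError('Unknown SES region: %s' % region_name)
    return conn.send_email('sender@example.com', 'Subject line',
                           'Body text', ['recipient@example.com'])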
| gpl-2.0 |
asser/django | tests/invalid_models_tests/test_models.py | 7 | 23848 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.conf import settings
from django.core.checks import Error
from django.db import connections, models
from django.test import SimpleTestCase
from django.test.utils import isolate_apps, override_settings
def get_max_column_name_length():
allowed_len = None
db_alias = None
for db in settings.DATABASES.keys():
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
return (allowed_len, db_alias)
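# Illustrative sketch (not part of the original file): the helper above keeps
# the *strictest* limit across the configured databases; e.g. with limits of
# {'default': 64, 'oracle': 30} it settles on (30, 'oracle').
def _strictest_limit(limits):
    allowed_len = None
    db_alias = None
    for alias, max_len in limits.items():
        if allowed_len is None or max_len < allowed_len:
            allowed_len = max_len
            db_alias = alias
    return (allowed_len, db_alias)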
@isolate_apps('invalid_models_tests')
class IndexTogetherTests(SimpleTestCase):
def test_non_iterable(self):
class Model(models.Model):
class Meta:
index_together = 42
errors = Model.check()
expected = [
Error(
"'index_together' must be a list or tuple.",
obj=Model,
id='models.E008',
),
]
self.assertEqual(errors, expected)
def test_non_list(self):
class Model(models.Model):
class Meta:
index_together = 'not-a-list'
errors = Model.check()
expected = [
Error(
"'index_together' must be a list or tuple.",
obj=Model,
id='models.E008',
),
]
self.assertEqual(errors, expected)
def test_list_containing_non_iterable(self):
class Model(models.Model):
class Meta:
index_together = [('a', 'b'), 42]
errors = Model.check()
expected = [
Error(
"All 'index_together' elements must be lists or tuples.",
obj=Model,
id='models.E009',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
index_together = [
["missing_field"],
]
errors = Model.check()
expected = [
Error(
"'index_together' refers to the non-existent field 'missing_field'.",
obj=Model,
id='models.E012',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_non_local_field(self):
class Foo(models.Model):
field1 = models.IntegerField()
class Bar(Foo):
field2 = models.IntegerField()
class Meta:
index_together = [
["field2", "field1"],
]
errors = Bar.check()
expected = [
Error(
"'index_together' refers to field 'field1' which is not "
"local to model 'Bar'.",
hint=("This issue may be caused by multi-table inheritance."),
obj=Bar,
id='models.E016',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_m2m_field(self):
class Model(models.Model):
m2m = models.ManyToManyField('self')
class Meta:
index_together = [
["m2m"],
]
errors = Model.check()
expected = [
Error(
"'index_together' refers to a ManyToManyField 'm2m', but "
"ManyToManyFields are not permitted in 'index_together'.",
obj=Model,
id='models.E013',
),
]
self.assertEqual(errors, expected)
# unique_together tests are very similar to index_together tests.
@isolate_apps('invalid_models_tests')
class UniqueTogetherTests(SimpleTestCase):
def test_non_iterable(self):
class Model(models.Model):
class Meta:
unique_together = 42
errors = Model.check()
expected = [
Error(
"'unique_together' must be a list or tuple.",
obj=Model,
id='models.E010',
),
]
self.assertEqual(errors, expected)
def test_list_containing_non_iterable(self):
class Model(models.Model):
one = models.IntegerField()
two = models.IntegerField()
class Meta:
unique_together = [('a', 'b'), 42]
errors = Model.check()
expected = [
Error(
"All 'unique_together' elements must be lists or tuples.",
obj=Model,
id='models.E011',
),
]
self.assertEqual(errors, expected)
def test_non_list(self):
class Model(models.Model):
class Meta:
unique_together = 'not-a-list'
errors = Model.check()
expected = [
Error(
"'unique_together' must be a list or tuple.",
obj=Model,
id='models.E010',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
one = models.IntegerField()
two = models.IntegerField()
class Meta:
# unique_together can be a simple tuple
unique_together = ('one', 'two')
errors = Model.check()
self.assertEqual(errors, [])
def test_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
unique_together = [
["missing_field"],
]
errors = Model.check()
expected = [
Error(
"'unique_together' refers to the non-existent field 'missing_field'.",
obj=Model,
id='models.E012',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_m2m(self):
class Model(models.Model):
m2m = models.ManyToManyField('self')
class Meta:
unique_together = [
["m2m"],
]
errors = Model.check()
expected = [
Error(
"'unique_together' refers to a ManyToManyField 'm2m', but "
"ManyToManyFields are not permitted in 'unique_together'.",
obj=Model,
id='models.E013',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class FieldNamesTests(SimpleTestCase):
def test_ending_with_underscore(self):
class Model(models.Model):
field_ = models.CharField(max_length=10)
m2m_ = models.ManyToManyField('self')
errors = Model.check()
expected = [
Error(
'Field names must not end with an underscore.',
obj=Model._meta.get_field('field_'),
id='fields.E001',
),
Error(
'Field names must not end with an underscore.',
obj=Model._meta.get_field('m2m_'),
id='fields.E001',
),
]
self.assertEqual(errors, expected)
max_column_name_length, column_limit_db_alias = get_max_column_name_length()
@unittest.skipIf(max_column_name_length is None,
"The database doesn't have a column name length limit.")
def test_M2M_long_column_name(self):
"""
        #13711 -- Model check for long M2M column names when the database has
column name length limits.
"""
allowed_len, db_alias = get_max_column_name_length()
# A model with very long name which will be used to set relations to.
class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model):
title = models.CharField(max_length=11)
# Main model for which checks will be performed.
class ModelWithLongField(models.Model):
m2m_field = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn1"
)
m2m_field2 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn2", through='m2msimple'
)
m2m_field3 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn3",
through='m2mcomplex'
)
fk = models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
related_name="rn4",
)
# Models used for setting `through` in M2M field.
class m2msimple(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
class m2mcomplex(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
long_field_name = 'a' * (self.max_column_name_length + 1)
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
).contribute_to_class(m2msimple, long_field_name)
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
db_column=long_field_name
).contribute_to_class(m2mcomplex, long_field_name)
errors = ModelWithLongField.check()
        # First error because of the M2M field set on the model with a long name.
m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
if self.max_column_name_length > len(m2m_long_name):
# Some databases support names longer than the test name.
expected = []
else:
expected = [
Error(
'Autogenerated column name too long for M2M field "%s". '
'Maximum length is "%s" for database "%s".'
% (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
hint=("Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."),
obj=ModelWithLongField,
id='models.E019',
)
]
        # Second error because the FK specified in the `through` model
        # `m2msimple` has an auto-generated name longer than allowed. There
        # will be no check errors in the other M2M because it specifies
        # db_column for the FK in the `through` model even if the actual name
        # is longer than the limits of the database.
expected.append(
Error(
'Autogenerated column name too long for M2M field "%s_id". '
'Maximum length is "%s" for database "%s".'
% (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
hint=("Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."),
obj=ModelWithLongField,
id='models.E019',
)
)
self.assertEqual(errors, expected)
@unittest.skipIf(max_column_name_length is None,
"The database doesn't have a column name length limit.")
def test_local_field_long_column_name(self):
"""
#13711 -- Model check for long column names
        when the database does not support long names.
"""
allowed_len, db_alias = get_max_column_name_length()
class ModelWithLongField(models.Model):
title = models.CharField(max_length=11)
long_field_name = 'a' * (self.max_column_name_length + 1)
long_field_name2 = 'b' * (self.max_column_name_length + 1)
models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)
errors = ModelWithLongField.check()
# Error because of the field with long name added to the model
# without specifying db_column
expected = [
Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
hint="Set the column name manually using 'db_column'.",
obj=ModelWithLongField,
id='models.E018',
)
]
self.assertEqual(errors, expected)
def test_including_separator(self):
class Model(models.Model):
some__field = models.IntegerField()
errors = Model.check()
expected = [
Error(
'Field names must not contain "__".',
obj=Model._meta.get_field('some__field'),
id='fields.E002',
)
]
self.assertEqual(errors, expected)
def test_pk(self):
class Model(models.Model):
pk = models.IntegerField()
errors = Model.check()
expected = [
Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=Model._meta.get_field('pk'),
id='fields.E003',
)
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ShadowingFieldsTests(SimpleTestCase):
def test_field_name_clash_with_child_accessor(self):
class Parent(models.Model):
pass
class Child(Parent):
child = models.CharField(max_length=100)
errors = Child.check()
expected = [
Error(
"The field 'child' clashes with the field "
"'child' from model 'invalid_models_tests.parent'.",
obj=Child._meta.get_field('child'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
def test_multiinheritance_clash(self):
class Mother(models.Model):
clash = models.IntegerField()
class Father(models.Model):
clash = models.IntegerField()
class Child(Mother, Father):
            # Here we have two clashes: id (the automatic field) and clash, because
# both parents define these fields.
pass
errors = Child.check()
expected = [
Error(
"The field 'id' from parent model "
"'invalid_models_tests.mother' clashes with the field 'id' "
"from parent model 'invalid_models_tests.father'.",
obj=Child,
id='models.E005',
),
Error(
"The field 'clash' from parent model "
"'invalid_models_tests.mother' clashes with the field 'clash' "
"from parent model 'invalid_models_tests.father'.",
obj=Child,
id='models.E005',
)
]
self.assertEqual(errors, expected)
def test_inheritance_clash(self):
class Parent(models.Model):
f_id = models.IntegerField()
class Target(models.Model):
# This field doesn't result in a clash.
f_id = models.IntegerField()
class Child(Parent):
# This field clashes with parent "f_id" field.
f = models.ForeignKey(Target, models.CASCADE)
errors = Child.check()
expected = [
Error(
"The field 'f' clashes with the field 'f_id' "
"from model 'invalid_models_tests.parent'.",
obj=Child._meta.get_field('f'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
def test_multigeneration_inheritance(self):
class GrandParent(models.Model):
clash = models.IntegerField()
class Parent(GrandParent):
pass
class Child(Parent):
pass
class GrandChild(Child):
clash = models.IntegerField()
errors = GrandChild.check()
expected = [
Error(
"The field 'clash' clashes with the field 'clash' "
"from model 'invalid_models_tests.grandparent'.",
obj=GrandChild._meta.get_field('clash'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
def test_id_clash(self):
class Target(models.Model):
pass
class Model(models.Model):
fk = models.ForeignKey(Target, models.CASCADE)
fk_id = models.IntegerField()
errors = Model.check()
expected = [
Error(
"The field 'fk_id' clashes with the field 'fk' from model "
"'invalid_models_tests.model'.",
obj=Model._meta.get_field('fk_id'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class OtherModelTests(SimpleTestCase):
def test_unique_primary_key(self):
invalid_id = models.IntegerField(primary_key=False)
class Model(models.Model):
id = invalid_id
errors = Model.check()
expected = [
Error(
"'id' can only be used as a field name if the field also sets "
"'primary_key=True'.",
obj=Model,
id='models.E004',
),
]
self.assertEqual(errors, expected)
def test_ordering_non_iterable(self):
class Model(models.Model):
class Meta:
ordering = "missing_field"
errors = Model.check()
expected = [
Error(
"'ordering' must be a tuple or list "
"(even if you want to order by only one field).",
obj=Model,
id='models.E014',
),
]
self.assertEqual(errors, expected)
def test_just_ordering_no_errors(self):
class Model(models.Model):
order = models.PositiveIntegerField()
class Meta:
ordering = ['order']
self.assertEqual(Model.check(), [])
def test_just_order_with_respect_to_no_errors(self):
class Question(models.Model):
pass
class Answer(models.Model):
question = models.ForeignKey(Question, models.CASCADE)
class Meta:
order_with_respect_to = 'question'
self.assertEqual(Answer.check(), [])
def test_ordering_with_order_with_respect_to(self):
class Question(models.Model):
pass
class Answer(models.Model):
question = models.ForeignKey(Question, models.CASCADE)
order = models.IntegerField()
class Meta:
order_with_respect_to = 'question'
ordering = ['order']
errors = Answer.check()
expected = [
Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=Answer,
id='models.E021',
),
]
self.assertEqual(errors, expected)
def test_non_valid(self):
class RelationModel(models.Model):
pass
class Model(models.Model):
relation = models.ManyToManyField(RelationModel)
class Meta:
ordering = ['relation']
errors = Model.check()
expected = [
Error(
"'ordering' refers to the non-existent field 'relation'.",
obj=Model,
id='models.E015',
),
]
self.assertEqual(errors, expected)
def test_ordering_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
ordering = ("missing_field",)
errors = Model.check()
expected = [
Error(
"'ordering' refers to the non-existent field 'missing_field'.",
obj=Model,
id='models.E015',
)
]
self.assertEqual(errors, expected)
def test_ordering_pointing_to_missing_foreignkey_field(self):
# refs #22711
class Model(models.Model):
missing_fk_field = models.IntegerField()
class Meta:
ordering = ("missing_fk_field_id",)
errors = Model.check()
expected = [
Error(
"'ordering' refers to the non-existent field 'missing_fk_field_id'.",
obj=Model,
id='models.E015',
)
]
self.assertEqual(errors, expected)
def test_ordering_pointing_to_existing_foreignkey_field(self):
# refs #22711
class Parent(models.Model):
pass
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE)
class Meta:
ordering = ("parent_id",)
self.assertFalse(Child.check())
@override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')
def test_swappable_missing_app_name(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'
errors = Model.check()
expected = [
Error(
"'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.",
id='models.E001',
),
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')
def test_swappable_missing_app(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'
errors = Model.check()
expected = [
Error(
"'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', "
'which has not been installed, or is abstract.',
id='models.E002',
),
]
self.assertEqual(errors, expected)
def test_two_m2m_through_same_relationship(self):
class Person(models.Model):
pass
class Group(models.Model):
primary = models.ManyToManyField(Person,
through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership",
related_name="secondary")
class Membership(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
errors = Group.check()
expected = [
Error(
"The model has two many-to-many relations through "
"the intermediate model 'invalid_models_tests.Membership'.",
obj=Group,
id='models.E003',
)
]
self.assertEqual(errors, expected)
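# Illustrative sketch (not part of the original file): the assertions above
# compare full Error objects; when only the identifiers matter, a helper like
# this condenses Model.check() down to its check ids.
def _check_ids(model_cls):
    """Return the check ids raised for a model, e.g. ['models.E003']."""
    return [error.id for error in model_cls.check()]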
| bsd-3-clause |
mae/tech-hive | node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Studio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
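# Illustrative sketch (not part of the original file): callers typically
# chain the SetupScript() command with 'set' and parse the dumped
# environment, roughly along these lines (Windows-only):
def _CaptureSetupEnvironment(vs_version, target_arch):
  args = vs_version.SetupScript(target_arch)
  args.extend(('&&', 'set'))
  popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
  return popen.communicate()[0]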
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
  While ideally we might use the win32 module, we would like gyp to stay
  Python-neutral; cygwin's Python, for instance, lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
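# Illustrative sketch (not part of the original file): a typical lookup built
# on the helper above, trying the native and Wow6432Node hives the same way
# the detection code below does.
def _GetVCDirSketch(version):
  for key in (r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
              r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7'):
    path = _RegistryGetValue(key, version)
    if path:
      return path
  return None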
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
  Setup is based on the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version
  is passed in that doesn't match a key in the versions dict below, a
  KeyError is raised.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-10 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
  Where (e) marks the Express edition of MSVS and is blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
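# Illustrative usage sketch (not part of the original file):
def _ToolchainSummary(version='auto'):
  """Resolves a Visual Studio version and reports the pieces generators use."""
  vs = SelectVisualStudioVersion(version)
  return {
      'name': vs.ShortName(),
      'solution_version': vs.SolutionVersion(),
      'project_extension': vs.ProjectExtension(),
  }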
| mit |
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/deteccao-de-faces-com-python-e-opencv/Lib/site-packages/setuptools/_vendor/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
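# Illustrative sketch (not part of the original file): as a non-data
# descriptor, _LazyDescr resolves on first access, caches the value on the
# instance, and is bypassed by plain attribute lookup afterwards.
class _LazyDemo(object):
    class _Once(object):
        def __get__(self, obj, tp):
            value = 42                      # stand-in for _resolve()
            setattr(obj, "answer", value)   # cache on the instance
            return value
    answer = _Once()
# _LazyDemo().answer == 42; the descriptor only runs on the first access.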
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
    This class implements a PEP 302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
        Required if is_package is implemented."""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
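# Illustrative sketch (not part of the original file): as a PEP 302 finder,
# the importer only claims module names pre-registered via _add_module().
def _is_registered_six_module(fullname):
    return _importer.find_module(fullname) is not None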
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
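# Illustrative usage sketch (not part of the original file): registering a
# temporary move for pickle.dump and undoing it again.
def _register_dump_move_demo():
    add_move(MovedAttribute("dump", "pickle", "pickle"))
    try:
        return moves.dump           # resolves lazily on first access
    finally:
        remove_move("dump")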
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
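# Illustrative sketch (not part of the original file): the wrappers above let
# 2/3-agnostic code stream over a dict without materializing .items().
def _invert(d):
    """Return {value: key} built from an iterator over d's items."""
    return dict((v, k) for k, v in iteritems(d))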
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
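# Illustrative sketch (not part of six): both helpers yield a class whose
# metaclass is Meta on Python 2 and 3 alike; Meta/Base/Decorated are
# hypothetical names.
def _example_metaclass_usage():
    class Meta(type):
        pass
    class Base(with_metaclass(Meta, object)):
        pass
    @add_metaclass(Meta)
    class Decorated(object):
        pass
    return type(Base) is Meta and type(Decorated) is Meta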
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-3.0 |
BizzCloud/PosBox | addons/account/project/report/analytic_balance.py | 358 | 7060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_analytic_balance(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_analytic_balance, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'get_objects': self._get_objects,
'lines_g': self._lines_g,
'move_sum': self._move_sum,
'sum_all': self._sum_all,
'sum_balance': self._sum_balance,
'move_sum_balance': self._move_sum_balance,
})
self.acc_ids = []
self.read_data = []
self.empty_acc = False
        self.acc_data_dict = {}  # maps an account id to the ids of its descendant accounts
        self.acc_sum_list = []  # maintains a list of all account ids
def get_children(self, ids):
read_data = self.pool.get('account.analytic.account').read(self.cr, self.uid, ids,['child_ids','code','complete_name','balance'])
for data in read_data:
if (data['id'] not in self.acc_ids):
                include_empty = True
                if (not self.empty_acc) and data['balance'] == 0.00:
                    include_empty = False
                if include_empty:
self.acc_ids.append(data['id'])
self.read_data.append(data)
if data['child_ids']:
self.get_children(data['child_ids'])
return True
def _get_objects(self, empty_acc):
if self.read_data:
return self.read_data
self.empty_acc = empty_acc
self.read_data = []
self.get_children(self.ids)
return self.read_data
def _lines_g(self, account_id, date1, date2):
account_analytic_obj = self.pool.get('account.analytic.account')
ids = account_analytic_obj.search(self.cr, self.uid,
[('parent_id', 'child_of', [account_id])])
self.cr.execute("SELECT aa.name AS name, aa.code AS code, \
sum(aal.amount) AS balance, sum(aal.unit_amount) AS quantity \
FROM account_analytic_line AS aal, account_account AS aa \
WHERE (aal.general_account_id=aa.id) \
AND (aal.account_id IN %s)\
AND (date>=%s) AND (date<=%s) AND aa.active \
GROUP BY aal.general_account_id, aa.name, aa.code, aal.code \
ORDER BY aal.code", (tuple(ids), date1, date2))
res = self.cr.dictfetchall()
for r in res:
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
                r['balance'] = 0.0
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _move_sum(self, account_id, date1, date2, option):
if account_id not in self.acc_data_dict:
account_analytic_obj = self.pool.get('account.analytic.account')
ids = account_analytic_obj.search(self.cr, self.uid,[('parent_id', 'child_of', [account_id])])
self.acc_data_dict[account_id] = ids
else:
ids = self.acc_data_dict[account_id]
query_params = (tuple(ids), date1, date2)
if option == "credit":
self.cr.execute("SELECT COALESCE(-sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0",query_params)
elif option == "debit":
self.cr.execute("SELECT COALESCE(sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s\
AND date>=%s AND date<=%s AND amount>0",query_params)
elif option == "quantity":
self.cr.execute("SELECT COALESCE(sum(unit_amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s\
AND date>=%s AND date<=%s",query_params)
return self.cr.fetchone()[0] or 0.0
def _move_sum_balance(self, account_id, date1, date2):
debit = self._move_sum(account_id, date1, date2, 'debit')
credit = self._move_sum(account_id, date1, date2, 'credit')
return (debit-credit)
def _sum_all(self, accounts, date1, date2, option):
account_analytic_obj = self.pool.get('account.analytic.account')
ids = map(lambda x: x['id'], accounts)
if not ids:
return 0.0
if not self.acc_sum_list:
ids2 = account_analytic_obj.search(self.cr, self.uid,[('parent_id', 'child_of', ids)])
self.acc_sum_list = ids2
else:
ids2 = self.acc_sum_list
query_params = (tuple(ids2), date1, date2)
if option == "debit":
self.cr.execute("SELECT COALESCE(sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0",query_params)
elif option == "credit":
self.cr.execute("SELECT COALESCE(-sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0",query_params)
elif option == "quantity":
self.cr.execute("SELECT COALESCE(sum(unit_amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s",query_params)
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_all(accounts, date1, date2, 'debit') or 0.0
credit = self._sum_all(accounts, date1, date2, 'credit') or 0.0
return (debit-credit)
class report_analyticbalance(osv.AbstractModel):
_name = 'report.account.report_analyticbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_analyticbalance'
_wrapped_report_class = account_analytic_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sstone/bitcoin | test/functional/test_framework/address.py | 30 | 5316 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode Bitcoin addresses.
- base58 P2PKH and P2SH addresses.
- bech32 segwit v0 P2WPKH and P2WSH addresses."""
import enum
import unittest
from .script import hash256, hash160, sha256, CScript, OP_0
from .segwit_addr import encode_segwit_address
from .util import assert_equal, hex_str_to_bytes
ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97'
# Coins sent to this address can be spent with a witness stack of just OP_TRUE
ADDRESS_BCRT1_P2WSH_OP_TRUE = 'bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85'
class AddressType(enum.Enum):
bech32 = 'bech32'
p2sh_segwit = 'p2sh-segwit'
legacy = 'legacy' # P2PKH
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
checksum = hash256(hex_str_to_bytes(str)).hex()
str += checksum[:8]
value = int('0x' + str, 0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
def base58_to_byte(s):
"""Converts a base58-encoded string to its data and version.
Throws if the base58 checksum is invalid."""
if not s:
return b''
n = 0
for c in s:
n *= 58
assert c in chars
digit = chars.index(c)
n += digit
res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
pad = 0
for c in s:
if c == chars[0]:
pad += 1
else:
break
res = b'\x00' * pad + res
# Assert if the checksum is invalid
assert_equal(hash256(res[:-4])[:4], res[-4:])
return res[1:-4], int(res[0])
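# Illustrative round-trip sketch (not part of the framework): a zeroed
# 20-byte hash, using the regtest P2PKH version byte 111 assumed from
# keyhash_to_p2pkh below.
def _example_base58_roundtrip():
    payload = bytes(20)
    addr = byte_to_base58(payload, 111)
    assert base58_to_byte(addr) == (payload, 111)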
def keyhash_to_p2pkh(hash, main=False):
assert len(hash) == 20
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main=False):
assert len(hash) == 20
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main=False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main=False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main=False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main=False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return encode_segwit_address("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main=False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main=False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main=False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
class TestFrameworkScript(unittest.TestCase):
def test_base58encodedecode(self):
def check_base58(data, version):
self.assertEqual(base58_to_byte(byte_to_base58(data, version)), (data, version))
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
| mit |
ArcherSys/ArcherSys | Lib/site-packages/pip/compat/ordereddict.py | 141 | 4110 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# flake8: noqa
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
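# Illustrative sketch (not part of the backport): insertion order is kept,
# so keys() reflects the order in which items were added.
def _example_ordereddict_usage():
    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    assert d.keys() == ['b', 'a']
    d.popitem()  # removes the most recently added item, ('a', 2)
    assert d.keys() == ['b']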
| mit |
michhar/flask-webapp-aml | env1/Lib/site-packages/setuptools/package_index.py | 95 | 39490 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
import itertools
from functools import wraps
try:
from urllib.parse import splituser
except ImportError:
from urllib2 import splituser
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
PYPI_MD5 = re.compile(
'<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
"""Return (base,pyversion) or (None,None) for possible .exe name"""
lower = name.lower()
base, py_ver, plat = None, None, None
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:-10]
plat = 'win32'
elif lower.startswith('.win32-py',-16):
py_ver = name[-7:-4]
base = name[:-16]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:-14]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py',-20):
py_ver = name[-7:-4]
base = name[:-20]
plat = 'win-amd64'
return base,py_ver,plat
def egg_info_for_url(url):
parts = urllib.parse.urlparse(url)
scheme, server, path, parameters, query, fragment = parts
base = urllib.parse.unquote(path.split('/')[-1])
if server=='sourceforge.net' and base=='download': # XXX Yuck
base = urllib.parse.unquote(path.split('/')[-2])
if '#' in base: base, fragment = base.split('#',1)
return base,fragment
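# Illustrative sketch (hypothetical URL): the trailing filename and the
# fragment of a download link are split apart, e.g.:
def _example_egg_info_for_url():
    base, frag = egg_info_for_url(
        'https://example.com/dist/Foo-1.0.tar.gz#md5=0123456789abcdef')
    return base == 'Foo-1.0.tar.gz' and frag == 'md5=0123456789abcdef'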
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata): yield dist
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
for dist in interpret_distro_name(
url, match.group(1), metadata, precedence = CHECKOUT_DIST
):
yield dist
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] # no extension matched
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
)
def interpret_distro_name(
location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
# Because some packages are ambiguous as to name/versions split
# e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
parts = basename.split('-')
if not py_version and any(re.match('py\d\.\d$', p) for p in parts[2:]):
# it is a bdist_dumb, not an sdist -- bail out
return
for p in range(1,len(parts)+1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence = precedence,
platform = platform
)
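# Illustrative only: for a basename like 'adns-python-1.1.0' the loop above
# yields candidate (name, version) splits ('adns', 'python-1.1.0'),
# ('adns-python', '1.1.0') and ('adns-python-1.1.0', '').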
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in six.moves.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_values(func):
"""
Wrap a function returning an iterable such that the resulting iterable
only ever yields unique items.
"""
@wraps(func)
def wrapper(*args, **kwargs):
return unique_everseen(func(*args, **kwargs))
return wrapper
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
user_agent = "Python-urllib/%s setuptools/%s" % (
sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
"""
A null content checker that defines the interface for checking content
"""
def feed(self, block):
"""
Feed a block of data to the hash.
"""
return
def is_valid(self):
"""
Check the hash. Return False if validation fails.
"""
return True
def report(self, reporter, template):
"""
Call reporter with information about the checker (hash name)
substituted into the template.
"""
return
class HashChecker(ContentChecker):
pattern = re.compile(
r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
r'(?P<expected>[a-f0-9]+)'
)
def __init__(self, hash_name, expected):
self.hash_name = hash_name
self.hash = hashlib.new(hash_name)
self.expected = expected
@classmethod
def from_url(cls, url):
"Construct a (possibly null) ContentChecker from a URL"
fragment = urllib.parse.urlparse(url)[-1]
if not fragment:
return ContentChecker()
match = cls.pattern.search(fragment)
if not match:
return ContentChecker()
return cls(**match.groupdict())
def feed(self, block):
self.hash.update(block)
def is_valid(self):
return self.hash.hexdigest() == self.expected
def report(self, reporter, template):
msg = template % self.hash_name
return reporter(msg)
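# Illustrative sketch (hypothetical URL; the digest is the well-known md5 of
# empty input): a '#md5=...' fragment selects an md5 HashChecker, while a
# fragment-less URL gets the null ContentChecker.
def _example_hash_checker():
    checker = HashChecker.from_url(
        'https://example.com/Foo-1.0.tar.gz#md5=d41d8cd98f00b204e9800998ecf8427e')
    checker.feed(b'')
    return checker.is_valid()  # True: md5 of empty input matches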
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.python.org/simple", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self,*args,**kw)
self.index_url = index_url + "/"[:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate,hosts))).match
self.to_scan = []
if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
self.opener = ssl_support.opener_for(ca_bundle)
else: self.opener = urllib.request.urlopen
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, urllib.error.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path,item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
if (s and s.group(1).lower()=='file') or self.allows(urllib.parse.urlparse(url)[1]):
return True
msg = ("\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/1dg9ijs for details).\n")
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
dirs = filter(os.path.isdir, search_path)
egg_links = (
(path, entry)
for path in dirs
for entry in os.listdir(path)
if entry.endswith('.egg-link')
)
list(itertools.starmap(self.scan_egg_link, egg_links))
def scan_egg_link(self, path, entry):
with open(os.path.join(path, entry)) as raw_lines:
# filter non-empty lines
lines = list(filter(None, map(str.strip, raw_lines)))
if len(lines) != 2:
# format is not recognized; punt
return
egg_path, setup_path = lines
for dist in find_distributions(os.path.join(path, egg_path)):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self,url,page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
if len(parts)==2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(),{})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url+='#egg=%s-%s' % (pkg,ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg: self.warn(msg,*args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name+'/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name+'/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key,())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement,installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?" % (
checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec,Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found,fragment,tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
try:
spec = Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" %
(spec,)
)
return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
def fetch_distribution(
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence==DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn("Skipping development or system egg: %s",dist)
skipped[dist] = 1
continue
if dist in req and (dist.precedence<=SOURCE_DIST or not source):
return dist
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if local_index is not None:
dist = dist or find(requirement, local_index)
if dist is None:
if self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
else:
self.info("Best match: %s", dist)
return dist.clone(location=self.download(dist.location, tmpdir))
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists)==1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename=dst
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment,dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp, info = None, None
try:
checker = HashChecker.from_url(url)
fp = self.open_url(strip_fragment(url))
if isinstance(fp, urllib.error.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code,fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
sizes = get_all_headers(headers, 'Content-Length')
size = max(map(int, sizes))
self.reporthook(url, filename, blocknum, bs, size)
with open(filename,'wb') as tfp:
while True:
block = fp.read(bs)
if block:
checker.feed(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
self.check_hash(checker, filename, tfp)
return headers
finally:
if fp: fp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url, self.opener)
except (ValueError, http_client.InvalidURL) as v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib.error.HTTPError as v:
return v
except urllib.error.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except http_client.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
)
except http_client.HTTPException as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..','.').replace('\\','_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir,name)
# Download the file
#
if scheme=='svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme=='git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme=='file':
return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type','').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at "+url)
def _download_svn(self, url, filename):
url = url.split('#',1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/',1)
auth, host = splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':',1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username="+auth
netloc = host
parts = scheme, netloc, url, p, q, f
url = urllib.parse.urlunparse(parts)
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
scheme = scheme.split('+', 1)[-1]
        # discard any fragment that ended up in the path
path = path.split('#',1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def _download_git(self, url, filename):
filename = filename.split('#',1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("(cd %s && git checkout --quiet %s)" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#',1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("(cd %s && hg up -C -r %s >&-)" % (
filename,
rev,
))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
if not isinstance(c, int):
return c
if c>255: return six.unichr(c)
return chr(c)
def decode_entity(match):
what = match.group(1)
if what.startswith('#x'):
what = int(what[2:], 16)
elif what.startswith('#'):
what = int(what[1:])
else:
what = six.moves.html_entities.name2codepoint.get(what, match.group(0))
return uchr(what)
def htmldecode(text):
"""Decode HTML entities in the given text."""
return entity_sub(decode_entity, text)
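# Illustrative: htmldecode('&lt;b&gt;&#65;&amp;') == '<b>A&'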
def socket_timeout(timeout=15):
def _socket_timeout(func):
def _socket_timeout(*args, **kwargs):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return func(*args, **kwargs)
finally:
socket.setdefaulttimeout(old_timeout)
return _socket_timeout
return _socket_timeout
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = urllib.parse.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
"""
A username/password pair. Use like a namedtuple.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def __iter__(self):
yield self.username
yield self.password
def __str__(self):
return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(configparser.RawConfigParser):
def __init__(self):
"""
Load from ~/.pypirc
"""
defaults = dict.fromkeys(['username', 'password', 'repository'], '')
configparser.RawConfigParser.__init__(self, defaults)
rc = os.path.join(os.path.expanduser('~'), '.pypirc')
if os.path.exists(rc):
self.read(rc)
@property
def creds_by_repository(self):
sections_with_repositories = [
section for section in self.sections()
if self.get(section, 'repository').strip()
]
return dict(map(self._get_repo_cred, sections_with_repositories))
def _get_repo_cred(self, section):
repo = self.get(section, 'repository').strip()
return repo, Credential(
self.get(section, 'username').strip(),
self.get(section, 'password').strip(),
)
def find_credential(self, url):
"""
If the URL indicated appears to be a repository defined in this
config, return the credential for that repository.
"""
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
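# Illustrative ~/.pypirc section that find_credential would match; the
# values below are placeholders only:
#
#   [example-repo]
#   repository = https://pypi.example.org/simple
#   username = alice
#   password = secret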
def open_with_auth(url, opener=urllib.request.urlopen):
"""Open a urllib2 request, handling HTTP authentication"""
scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)
# Double scheme does not raise on Mac OS X as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise http_client.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, host = splituser(netloc)
else:
auth = None
if not auth:
cred = PyPIConfig().find_credential(url)
if cred:
auth = str(cred)
info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)' % info)
if auth:
auth = "Basic " + _encode_auth(auth)
parts = scheme, host, path, params, query, frag
new_url = urllib.parse.urlunparse(parts)
request = urllib.request.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib.request.Request(url)
request.add_header('User-Agent', user_agent)
fp = opener(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2==scheme and h2==host:
parts = s2, netloc, path2, param2, query2, frag2
fp.url = urllib.parse.urlunparse(parts)
return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f += '/'
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = ("<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream)
| mit |
jamesr66a/cryptkernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some event type classes, which can be
# used by other scripts for analyzing perf samples.
#
# Currently just a few classes are defined as examples: PerfEvent is
# the base class for all perf event samples, PebsEvent is a HW-based
# Intel x86 PEBS event, and users can add more SW/HW event classes
# based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than
# by the size of the raw buffer: a raw PEBS event with load latency
# data is 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
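# Illustrative sketch (synthetic buffer, not real perf data): the buffer
# length alone selects the event class, per the note above.
def _example_create_event():
    fake_pebs = struct.pack('QQQQQQQQQQ', *range(10)) + '\x00' * 64  # 144 bytes
    ev = create_event('cycles', 'bash', '/lib/libc.so', 'main', fake_pebs)
    return isinstance(ev, PebsEvent)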
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
| gpl-2.0 |
blooparksystems/odoo | addons/website_sale/tests/test_website_sale_pricelist.py | 6 | 2627 | from openerp.tests.common import TransactionCase
from mock import patch
class TestWebsitePriceList(TransactionCase):
    # Mock needed because request.session doesn't exist during tests
def _get_pricelist_available(self, cr, uid, show_visible=False, context=None):
return self.get_pl(context.get('show'), context.get('current_pl'), context.get('country'))
def setUp(self):
super(TestWebsitePriceList, self).setUp()
self.website = self.registry('website').browse(self.cr, self.uid, 1)
self.patcher = patch('openerp.addons.website_sale.models.sale_order.website.get_pricelist_available', wraps=self._get_pricelist_available)
self.mock_get_pricelist_available = self.patcher.start()
def get_pl(self, show, current_pl, country):
pls = self.website._get_pl(
country,
show,
self.website.pricelist_id.id,
current_pl,
self.website.website_pricelist_ids
)
return pls
def test_get_pricelist_available_show(self):
show = True
current_pl = False
country_list = {
False: 2,
'BE': 2,
'IT': 1,
'US': 1,
'AF': 2
}
for country, result in country_list.items():
pls = self.get_pl(show, current_pl, country)
self.assertEquals(len(pls), result)
def test_get_pricelist_available_not_show(self):
show = False
current_pl = False
country_list = {
False: 3,
'BE': 3,
'IT': 1,
'US': 1,
'AF': 3
}
for country, result in country_list.items():
pls = self.get_pl(show, current_pl, country)
self.assertEquals(len(pls), result)
def test_get_pricelist_available_promocode(self):
christmas_pl = self.registry('ir.model.data').xmlid_to_res_id(self.cr, self.uid, 'website_sale.list_christmas')
context = {
'show': True,
'current_pl': christmas_pl,
}
country_list = {
False: True,
'BE': True,
'IT': False,
'US': False,
}
for country, result in country_list.items():
context['country'] = country
available = self.website.with_context(context).is_pricelist_available(christmas_pl)
if result:
self.assertTrue(available)
else:
self.assertFalse(available)
def tearDown(self):
self.patcher.stop()
super(TestWebsitePriceList, self).tearDown()
| gpl-3.0 |
kakkyz81/buntan | src/thrift/server/TNonblockingServer.py | 83 | 10950 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is receiving and sending requests
only from the main thread.
It also makes the thread pool work in terms of tasks, not connections.
"""
import threading
import socket
import Queue
import select
import struct
import logging
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logging.exception("Exception while processing request")
callback(False, '')
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"Decorator which locks self.lock."
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"Decorator close object on socket.error."
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self.close()
return read
class Connection:
"""Basic class is represented connection.
It can be in state:
WAIT_LEN --- connection is reading request len.
WAIT_MESSAGE --- connection is reading request.
WAIT_PROCESS --- connection has just read whole request and
waits for call ready routine.
SEND_ANSWER --- connection is sending answer string (including length
of answer).
CLOSED --- socket was closed and connection should be deleted.
"""
def __init__(self, new_socket, wake_up):
self.socket = new_socket
self.socket.setblocking(False)
self.status = WAIT_LEN
self.len = 0
self.message = ''
self.lock = threading.Lock()
self.wake_up = wake_up
def _read_len(self):
"""Reads length of request.
It's a really paranoid routine and it may be replaced by
self.socket.recv(4)."""
read = self.socket.recv(4 - len(self.message))
if len(read) == 0:
# if we read 0 bytes and self.message is empty, it means the client
# closed the connection
if len(self.message) != 0:
logging.error("can't read frame size from socket")
self.close()
return
self.message += read
if len(self.message) == 4:
self.len, = struct.unpack('!i', self.message)
if self.len < 0:
logging.error("negative frame size, it seems client"\
" doesn't use FramedTransport")
self.close()
elif self.len == 0:
logging.error("empty frame, it's really strange")
self.close()
else:
self.message = ''
self.status = WAIT_MESSAGE
@socket_exception
def read(self):
"""Reads data from stream and switch state."""
assert self.status in (WAIT_LEN, WAIT_MESSAGE)
if self.status == WAIT_LEN:
self._read_len()
# go back to the main loop here for simplicity instead of
# falling through, even though there is a good chance that
# the message is already available
elif self.status == WAIT_MESSAGE:
read = self.socket.recv(self.len - len(self.message))
if len(read) == 0:
logging.error("can't read frame from socket (get %d of %d bytes)" %
(len(self.message), self.len))
self.close()
return
self.message += read
if len(self.message) == self.len:
self.status = WAIT_PROCESS
@socket_exception
def write(self):
"""Writes data from socket and switch state."""
assert self.status == SEND_ANSWER
sent = self.socket.send(self.message)
if sent == len(self.message):
self.status = WAIT_LEN
self.message = ''
self.len = 0
else:
self.message = self.message[sent:]
@locked
def ready(self, all_ok, message):
"""Callback function for switching state and waking up main thread.
This function is the only function which can be called asynchronously.
The ready can switch Connection to three states:
WAIT_LEN if request was oneway.
SEND_ANSWER if request was processed in normal way.
CLOSED if request throws unexpected exception.
It also wakes up the main thread.
"""
assert self.status == WAIT_PROCESS
if not all_ok:
self.close()
self.wake_up()
return
self.len = 0  # reset the frame length (as an int) for the next request
if len(message) == 0:
# it was a oneway request, do not write answer
self.message = ''
self.status = WAIT_LEN
else:
self.message = struct.pack('!i', len(message)) + message
self.status = SEND_ANSWER
self.wake_up()
@locked
def is_writeable(self):
"Returns True if connection should be added to write list of select."
return self.status == SEND_ANSWER
# it's not necessary, but...
@locked
def is_readable(self):
"Returns True if connection should be added to read list of select."
return self.status in (WAIT_LEN, WAIT_MESSAGE)
@locked
def is_closed(self):
"Returns True if connection is closed."
return self.status == CLOSED
def fileno(self):
"Returns the file descriptor of the associated socket."
return self.socket.fileno()
def close(self):
"Closes connection"
self.status = CLOSED
self.socket.close()
class TNonblockingServer:
"""Non-blocking server."""
def __init__(self, processor, lsocket, inputProtocolFactory=None,
outputProtocolFactory=None, threads=10):
self.processor = processor
self.socket = lsocket
self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
self.out_protocol = outputProtocolFactory or self.in_protocol
self.threads = int(threads)
self.clients = {}
self.tasks = Queue.Queue()
self._read, self._write = socket.socketpair()
self.prepared = False
def setNumThreads(self, num):
"""Set the number of worker threads that should be created."""
# implement ThreadPool interface
assert not self.prepared, "You can't change number of threads for working server"
self.threads = num
def prepare(self):
"""Prepares server for serve requests."""
self.socket.listen()
for _ in xrange(self.threads):
thread = Worker(self.tasks)
thread.setDaemon(True)
thread.start()
self.prepared = True
def wake_up(self):
"""Wake up main thread.
The server usually waits in a select call, and we need a way to
interrupt it. The simplest way is to use a socketpair: select always
waits to read from the first socket of the pair, so we can wake it up
by writing anything to the second socket."""
self._write.send('1')
def _select(self):
"""Does select on open connections."""
readable = [self.socket.handle.fileno(), self._read.fileno()]
writable = []
for i, connection in self.clients.items():
if connection.is_readable():
readable.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
return select.select(readable, writable, readable)
def handle(self):
"""Handle requests.
WARNING! You must call prepare BEFORE calling handle.
"""
assert self.prepared, "You have to call prepare before handle"
rset, wset, xset = self._select()
for readable in rset:
if readable == self._read.fileno():
# don't care about the data; just need to clear the readable flag
self._read.recv(1024)
elif readable == self.socket.handle.fileno():
client = self.socket.accept().handle
self.clients[client.fileno()] = Connection(client, self.wake_up)
else:
connection = self.clients[readable]
connection.read()
if connection.status == WAIT_PROCESS:
itransport = TTransport.TMemoryBuffer(connection.message)
otransport = TTransport.TMemoryBuffer()
iprot = self.in_protocol.getProtocol(itransport)
oprot = self.out_protocol.getProtocol(otransport)
self.tasks.put([self.processor, iprot, oprot,
otransport, connection.ready])
for writeable in wset:
self.clients[writeable].write()
for oob in xset:
self.clients[oob].close()
del self.clients[oob]
def close(self):
"""Closes the server."""
for _ in xrange(self.threads):
self.tasks.put([None, None, None, None, None])
self.socket.close()
self.prepared = False
def serve(self):
"""Serve forever."""
self.prepare()
while True:
self.handle()
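# --- Usage sketch (not part of the original module) -------------------------
# A hedged, minimal example of wiring up TNonblockingServer. ``MyService``
# and ``MyHandler`` are hypothetical names standing in for the classes
# generated from your own .thrift IDL; only TNonblockingServer and
# TSocket.TServerSocket come from this library.
#
#     from thrift.transport import TSocket
#     from myservice import MyService              # hypothetical generated module
#
#     processor = MyService.Processor(MyHandler()) # hypothetical handler
#     transport = TSocket.TServerSocket(port=9090)
#     server = TNonblockingServer(processor, transport, threads=4)
#     server.serve()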
| mit |
alfredoavanzosc/odoo-addons | base_user_signature_logo/__openerp__.py | 4 | 1555 | # coding: utf-8
###########################################################################
# Module Written for OpenERP, Open Source Management Solution
#
# Copyright (c) 2010 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: Luis Torres (luis_t@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Add signature logo in user",
"version": "8.0.1.6.0",
"author": "Vauxoo",
"category": "Vauxoo",
"website": "http://www.vauxoo.com/",
"license": "AGPL-3",
"depends": [
"base",
],
"data": [
"views/res_users_view.xml",
],
"installable": True,
}
| agpl-3.0 |
supercollider/supercollider | editors/sced/scedwin/py/__init__.py | 44 | 1659 | # sced (SuperCollider mode for gedit)
#
# Copyright 2012 Jakob Leben
# Copyright 2009 Artem Popov and other contributors (see AUTHORS)
#
# sced is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gedit
import gtk
from WindowHelper import WindowHelper
import Settings
from ConfigurationDialog import ConfigurationDialog
class ScedPlugin(gedit.Plugin):
def __init__(self):
gedit.Plugin.__init__(self)
#self.settings = Settings()
self.__instances = {}
self.__settings = None
def activate(self, window):
self.__instances[window] = WindowHelper(self, window)
def deactivate(self, window):
self.__instances[window].deactivate()
del self.__instances[window]
def update_ui(self, window):
self.__instances[window].update_ui()
def create_configure_dialog(self):
dialog = ConfigurationDialog(self)
return dialog
def settings(self):
if self.__settings is None:
self.__settings = Settings.load()
return self.__settings
| gpl-3.0 |
marcuskelly/recover | Lib/site-packages/alembic/util/pyfiles.py | 25 | 3158 | import sys
import os
import re
from .compat import load_module_py, load_module_pyc
from mako.template import Template
from mako import exceptions
import tempfile
from .exc import CommandError
def template_to_file(template_file, dest, output_encoding, **kw):
template = Template(filename=template_file)
try:
output = template.render_unicode(**kw).encode(output_encoding)
except:
with tempfile.NamedTemporaryFile(suffix='.txt', delete=False) as ntf:
ntf.write(
exceptions.text_error_template().
render_unicode().encode(output_encoding))
fname = ntf.name
raise CommandError(
"Template rendering failed; see %s for a "
"template-oriented traceback." % fname)
else:
with open(dest, 'wb') as f:
f.write(output)
def coerce_resource_to_filename(fname):
"""Interpret a filename as either a filesystem location or as a package
resource.
Names that are not absolute paths and contain a colon
are interpreted as resources and coerced to a file location.
"""
if not os.path.isabs(fname) and ":" in fname:
import pkg_resources
fname = pkg_resources.resource_filename(*fname.split(':'))
return fname
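# For illustration (not part of the original module):
#
#   coerce_resource_to_filename('/tmp/script.py.mako')
#   # -> '/tmp/script.py.mako' (absolute path, returned unchanged)
#
#   coerce_resource_to_filename('alembic:templates')
#   # -> the on-disk path of the ``templates`` directory inside the
#   #    installed ``alembic`` package, resolved via pkg_resources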
def simple_pyc_file_from_path(path):
"""Given a python source path, return the so-called
"sourceless" .pyc or .pyo path.
This is just a .pyc or .pyo file where the .py file would be.
Even with PEP-3147, which normally puts .pyc/.pyo files in __pycache__,
this use case remains supported as a so-called "sourceless module import".
"""
if sys.flags.optimize:
return path + "o" # e.g. .pyo
else:
return path + "c" # e.g. .pyc
def pyc_file_from_path(path):
"""Given a python source path, locate the .pyc.
See http://www.python.org/dev/peps/pep-3147/
#detecting-pep-3147-availability
http://www.python.org/dev/peps/pep-3147/#file-extension-checks
"""
import imp
has3147 = hasattr(imp, 'get_tag')
if has3147:
return imp.cache_from_source(path)
else:
return simple_pyc_file_from_path(path)
def edit(path):
"""Given a source path, run the EDITOR for it"""
import editor
try:
editor.edit(path)
except Exception as exc:
raise CommandError('Error executing editor (%s)' % (exc,))
def load_python_file(dir_, filename):
"""Load a file from the given path as a Python module."""
module_id = re.sub(r'\W', "_", filename)
path = os.path.join(dir_, filename)
_, ext = os.path.splitext(filename)
if ext == ".py":
if os.path.exists(path):
module = load_module_py(module_id, path)
elif os.path.exists(simple_pyc_file_from_path(path)):
# look for sourceless load
module = load_module_pyc(
module_id, simple_pyc_file_from_path(path))
else:
raise ImportError("Can't find Python file %s" % path)
elif ext in (".pyc", ".pyo"):
module = load_module_pyc(module_id, path)
del sys.modules[module_id]
return module
| bsd-2-clause |
nicobustillos/odoo | openerp/osv/orm.py | 126 | 6167 | import simplejson
from lxml import etree
from ..exceptions import except_orm
from ..models import (
MetaModel,
BaseModel,
Model, TransientModel, AbstractModel,
MAGIC_COLUMNS,
LOG_ACCESS_COLUMNS,
)
# extra definitions for backward compatibility
browse_record_list = BaseModel
class browse_record(object):
""" Pseudo-class for testing record instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and len(inst) <= 1
class browse_null(object):
""" Pseudo-class for testing null instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and not inst
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
for state, modifs in (field.get("states",{})).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
| agpl-3.0 |
mrquim/mrquimrepo | plugin.video.salts/scrapers/hdmoviefree_scraper.py | 7 | 5729 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urlparse
import re
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
BASE_URL = 'https://www.hdmoviefree.org'
SERVER_URL = '/ajax/loadsv/%s'
EP_URL = '/ajax/loadep/%s'
XHR = {'X-Requested-With': 'XMLHttpRequest'}
Q_MAP = {'HD1080': QUALITIES.HD1080, 'HD720': QUALITIES.HD720, 'SD480': QUALITIES.HIGH, 'CAMRIP': QUALITIES.LOW}
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'HDMovieFree'
def get_sources(self, video):
source_url = self.get_url(video)
sources = []
if not source_url or source_url == FORCE_NO_MATCH: return sources
page_url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=8)
for attrs, _content in dom_parser2.parse_dom(html, 'img', req=['data-id', 'data-name']):
film_id, data_name = attrs['data-id'], attrs['data-name']
data = {'id': film_id, 'n': data_name}
server_url = scraper_utils.urljoin(self.base_url, SERVER_URL)
server_url = server_url % (film_id)
headers = {'Referer': page_url}
headers.update(XHR)
html = self._http_get(server_url, data=data, headers=headers, cache_limit=.5)
for attrs, _content in dom_parser2.parse_dom(html, 'a', req='data-id'):
data = {'epid': attrs['data-id']}
ep_url = scraper_utils.urljoin(self.base_url, EP_URL)
ep_url = ep_url % (attrs['data-id'])
headers = {'Referer': page_url}
headers.update(XHR)
html = self._http_get(ep_url, data=data, headers=headers, cache_limit=.5)
js_data = scraper_utils.parse_json(html, ep_url)
try:
links = [r.attrs['src'] for r in dom_parser2.parse_dom(js_data['link']['embed'], 'iframe', req='src')]
except:
try: links = js_data['link']['l']
except: links = []
try: heights = js_data['link']['q']
except: heights = []
for stream_url, height in map(None, links, heights):
match = re.search('movie_url=(.*)', stream_url)
if match:
stream_url = match.group(1)
host = scraper_utils.get_direct_hostname(self, stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
direct = True
else:
host = urlparse.urlparse(stream_url).hostname
if height:
quality = scraper_utils.height_get_quality(height)
else:
quality = QUALITIES.HD720
direct = False
source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
sources.append(source)
return sources
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/search/%s.html')
search_url = search_url % (scraper_utils.to_slug(title))
html = self._http_get(search_url, cache_limit=8)
for _attrs, item in dom_parser2.parse_dom(html, 'div', {'class': 'slideposter'}):
match_url = dom_parser2.parse_dom(item, 'a', req='href')
match_title_year = dom_parser2.parse_dom(item, 'img', req='alt')
if match_url and match_title_year:
match_url = match_url[0].attrs['href']
match_title_year = match_title_year[0].attrs['alt']
match_title, match_year = scraper_utils.extra_year(match_title_year)
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
results.append(result)
return results
def __to_slug(self, title):
slug = title.lower()
slug = re.sub('[^A-Za-z0-9 -]', ' ', slug)
slug = re.sub(r'\s\s+', ' ', slug)
slug = re.sub(' ', '-', slug)
return slug
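# For illustration (not part of the original module): with the rules above,
# __to_slug('Iron Man') -> 'iron-man' and
# __to_slug('Mad Max: Fury Road') -> 'mad-max-fury-road'.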
| gpl-2.0 |
ebrelsford/django-moderation | src/moderation/helpers.py | 1 | 1143 | from moderation.register import RegistrationError
def automoderate(instance, user):
'''
Auto-moderates the given model instance for the given user. Returns the moderation status:
0 - Rejected
1 - Approved
'''
try:
status = instance.moderated_object.automoderate(user)
except AttributeError:
msg = u"%s has been registered with Moderation." % instance.__class__
raise RegistrationError(msg)
return status
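# Hedged usage sketch (not part of the original module); ``Article`` is a
# hypothetical model that has been registered with moderation:
#
#     from moderation.helpers import automoderate
#
#     article = Article.objects.get(pk=1)
#     status = automoderate(article, request.user)  # 0 - rejected, 1 - approved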
def import_moderator(app):
'''
Import moderator module and register all models it contains with moderation
'''
from django.utils.importlib import import_module
import imp
import sys
try:
app_path = import_module(app).__path__
except AttributeError:
return None
try:
imp.find_module('moderator', app_path)
except ImportError:
return None
module = import_module("%s.moderator" % app)
return module
def auto_discover():
'''
Auto-register all apps that have a moderator module with moderation
'''
from django.conf import settings
for app in settings.INSTALLED_APPS:
import_moderator(app)
| bsd-3-clause |
resin-io/linux-sunxi | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# to provide end users/developers a flexible way to analyze events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If perf.data has a large number of samples, the insert operations will
# be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to a RAM-based FS to speed up
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Create several tables at the start: pebs_ll is for PEBS data with
# load latency info, while gen_events is for general events.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create the event object and insert it into the database so that users
# can do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event count may be very large, we can't show the histogram
# on a linear scale, so we use a log2 scale instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
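# For illustration (not part of the original script): num2sym(1) -> '#',
# num2sym(2) and num2sym(3) -> '##', num2sym(8) -> '####'; each doubling
# of the count adds one more '#'.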
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_split_op.py | 5 | 1451 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
class TestSplitOp(OpTest):
def setUp(self):
self._set_op_type()
axis = 1
x = np.random.random((4, 5, 6)).astype('float32')
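        # np.split at indices [2, 3] along axis 1 of a (4, 5, 6) array yields
        # chunks of shape (4, 2, 6), (4, 1, 6) and (4, 2, 6), matching the
        # 'sections' attribute [2, 1, 2] below.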
out = np.split(x, [2, 3], axis)
self.inputs = {'X': x}
self.attrs = {'axis': axis, 'sections': [2, 1, 2]}
self.outputs = {'Out': [('out%d' % i, out[i]) \
for i in range(len(out))]}
def _set_op_type(self):
self.op_type = "split"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], ['out0', 'out1', 'out2'])
class TestSplitByrefOp(OpTest):
def _set_op_type(self):
self.op_type = "split_byref"
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jonathanverner/brython | www/src/Lib/test/test_frozen.py | 32 | 2877 | # Test the frozen module defined in frozen.c.
from test.support import captured_stdout, run_unittest
import unittest
import sys
class FrozenTests(unittest.TestCase):
module_attrs = frozenset(['__builtins__', '__cached__', '__doc__',
'__loader__', '__name__',
'__package__'])
package_attrs = frozenset(list(module_attrs) + ['__path__'])
def test_frozen(self):
with captured_stdout() as stdout:
try:
import __hello__
except ImportError as x:
self.fail("import __hello__ failed:" + str(x))
self.assertEqual(__hello__.initialized, True)
expect = set(self.module_attrs)
expect.add('initialized')
self.assertEqual(set(dir(__hello__)), expect)
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
with captured_stdout() as stdout:
try:
import __phello__
except ImportError as x:
self.fail("import __phello__ failed:" + str(x))
self.assertEqual(__phello__.initialized, True)
expect = set(self.package_attrs)
expect.add('initialized')
if not "__phello__.spam" in sys.modules:
self.assertEqual(set(dir(__phello__)), expect)
else:
expect.add('spam')
self.assertEqual(set(dir(__phello__)), expect)
self.assertEqual(__phello__.__path__, [__phello__.__name__])
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
with captured_stdout() as stdout:
try:
import __phello__.spam
except ImportError as x:
self.fail("import __phello__.spam failed:" + str(x))
self.assertEqual(__phello__.spam.initialized, True)
spam_expect = set(self.module_attrs)
spam_expect.add('initialized')
self.assertEqual(set(dir(__phello__.spam)), spam_expect)
phello_expect = set(self.package_attrs)
phello_expect.add('initialized')
phello_expect.add('spam')
self.assertEqual(set(dir(__phello__)), phello_expect)
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
try:
import __phello__.foo
except ImportError:
pass
else:
self.fail("import __phello__.foo should have failed")
try:
import __phello__.foo
except ImportError:
pass
else:
self.fail("import __phello__.foo should have failed")
del sys.modules['__hello__']
del sys.modules['__phello__']
del sys.modules['__phello__.spam']
def test_main():
run_unittest(FrozenTests)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
juiceblender/cassandra | bin/cqlsh.py | 14 | 101911 | #!/bin/sh
# -*- mode: Python -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion < 0x03000000))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import with_statement
import cmd
import codecs
import ConfigParser
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser
from StringIO import StringIO
from contextlib import contextmanager
from glob import glob
from uuid import UUID
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
sys.exit("\nCQL Shell supports only Python 2.7\n")
# see CASSANDRA-10428
if platform.python_implementation().startswith('Jython'):
sys.exit("\nCQL Shell does not run on Jython\n")
UTF8 = 'utf-8'
CP65001 = 'cp65001' # Win utf-8 variant
description = "CQL Shell for Apache Cassandra"
version = "5.0.1"
readline = None
try:
# check if tty first, cause readline doesn't check, and only cares
# about $TERM. we don't want the funky escape code stuff to be
# output if not a tty.
if sys.stdin.isatty():
import readline
except ImportError:
pass
CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'
CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.2.html'
# pick the CQL.html to use for HELP, preferring a local copy
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
# default location of local CQL.html
CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
# fallback to package file
CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
# fallback to online version
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
if len(webbrowser._tryorder) == 0:
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
# only on Linux (some OS with xdg-open)
webbrowser._tryorder.remove('xdg-open')
webbrowser._tryorder.append('xdg-open')
# use bundled lib for python-cql if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'
# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
if myplatform == 'Linux':
ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
if os.environ.get('CQLSH_NO_BUNDLED', ''):
ZIPLIB_DIRS = ()
def find_zip(libprefix):
for ziplibdir in ZIPLIB_DIRS:
zips = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
if zips:
return max(zips) # probably the highest version, if multiple
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-')
for lib in third_parties:
lib_zip = find_zip(lib)
if lib_zip:
sys.path.insert(0, lib_zip)
warnings.filterwarnings("ignore", r".*blist.*")
try:
import cassandra
except ImportError, e:
sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
'You might try "pip install cassandra-driver".\n\n'
'Python: %s\n'
'Module load path: %r\n\n'
'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqltypes import cql_typename
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
RED, WHITE, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
format_by_type, formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_SSL = False
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_DOUBLE_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
DEFAULT_COMPLETEKEY = '\t'
else:
DEFAULT_COMPLETEKEY = 'tab'
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
parser = optparse.OptionParser(description=description, epilog=epilog,
usage="Usage: %prog [options] [host [port]]",
version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
help='Show additional debugging information')
parser.add_option("--encoding", help="Specify a non-default encoding for output." +
" (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=None,
help='Specify a particular CQL version, '
'by default the highest version supported by the server will be used.'
' Examples: "3.0.3", "3.1.0"')
parser.add_option("--protocol-version", type="int", default=None,
help='Specify a specific protocol version; otherwise the client will default and downgrade as necessary')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
help='Force tty mode (command prompt).')
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
if hasattr(options, 'cqlshrc'):
CONFIG_FILE = options.cqlshrc
if not os.path.exists(CONFIG_FILE):
print '\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR)
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
try:
os.mkdir(HISTORY_DIR)
except OSError:
print '\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
if os.path.exists(CONFIG_FILE):
print '\nWarning: cqlshrc config files were found at both the old location (%s) and \
the new location (%s), the old config file will not be migrated to the new \
location, and the new location will be used for now. You should manually \
consolidate the config files at the new location and remove the old file.' \
% (OLD_CONFIG_FILE, CONFIG_FILE)
else:
os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
CQL_ERRORS = (
cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
cassandra.cluster.NoHostAvailable,
cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)
debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES')
class NoKeyspaceError(Exception):
pass
class KeyspaceNotFound(Exception):
pass
class ColumnFamilyNotFound(Exception):
pass
class IndexNotFound(Exception):
pass
class MaterializedViewNotFound(Exception):
pass
class ObjectNotFound(Exception):
pass
class VersionNotSupported(Exception):
pass
class UserTypeNotFound(Exception):
pass
class FunctionNotFound(Exception):
pass
class AggregateNotFound(Exception):
pass
class DecodeError(Exception):
verb = 'decode'
def __init__(self, thebytes, err, colname=None):
self.thebytes = thebytes
self.err = err
self.colname = colname
def __str__(self):
return str(self.thebytes)
def message(self):
what = 'value %r' % (self.thebytes,)
if self.colname is not None:
what = 'value %r (for column %r)' % (self.thebytes, self.colname)
return 'Failed to %s %s : %s' \
% (self.verb, what, self.err)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.message())
class FormatError(DecodeError):
verb = 'format'
def full_cql_version(ver):
while ver.count('.') < 2:
ver += '.0'
ver_parts = ver.split('-', 1) + ['']
vertuple = tuple(map(int, ver_parts[0].split('.')) + [ver_parts[1]])
return ver, vertuple
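# For illustration (not part of the original module):
#   full_cql_version('3.4')        -> ('3.4.0', (3, 4, 0, ''))
#   full_cql_version('3.4.5-beta') -> ('3.4.5-beta', (3, 4, 5, 'beta'))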
def format_value(val, cqltype, encoding, addcolor=False, date_time_format=None,
float_precision=None, colormap=None, nullval=None):
if isinstance(val, DecodeError):
if addcolor:
return colorme(repr(val.thebytes), colormap, 'error')
else:
return FormattedValue(repr(val.thebytes))
return format_by_type(val, cqltype=cqltype, encoding=encoding, colormap=colormap,
addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
if file is None:
file = sys.stderr
try:
file.write(warnings.formatwarning(message, category, filename, lineno, line=''))
except IOError:
pass
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
class DateOverFlowWarning(RuntimeWarning):
pass
# Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = int64_unpack(byts)
try:
return datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. "
"Timestamps are displayed in milliseconds from epoch."))
return timestamp_ms
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
if hasattr(cassandra, 'deserializers'):
del cassandra.deserializers.DesDateType
# Return cassandra.cqltypes.EMPTY instead of None for empty values
cassandra.cqltypes.CassandraType.support_empty_values = True
class FrozenType(cassandra.cqltypes._ParameterizedType):
"""
Needed until the bundled python driver adds FrozenType.
"""
typename = "frozen"
num_subtypes = 1
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
return subtype.from_binary(byts)
@classmethod
def serialize_safe(cls, val, protocol_version):
subtype, = cls.subtypes
return subtype.to_binary(val, protocol_version)
class Shell(cmd.Cmd):
custom_prompt = os.getenv('CQLSH_PROMPT', '')
if custom_prompt != '':
custom_prompt += "\n"
default_prompt = custom_prompt + "cqlsh> "
continue_prompt = " ... "
keyspace_prompt = custom_prompt + "cqlsh:%s> "
keyspace_continue_prompt = "%s ... "
show_line_nums = False
debug = False
stop = False
last_hist = None
shunted_query_out = None
use_paging = True
# TODO remove after virtual tables are added to connection metadata
virtual_keyspaces = None
default_page_size = 100
def __init__(self, hostname, port, color=False,
username=None, password=None, encoding=None, stdin=None, tty=True,
completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
cqlver=None, keyspace=None,
tracing_enabled=False, expand_enabled=False,
display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
display_date_format=DEFAULT_DATE_FORMAT,
display_float_precision=DEFAULT_FLOAT_PRECISION,
display_double_precision=DEFAULT_DOUBLE_PRECISION,
display_timezone=None,
max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
ssl=False,
single_statement=None,
request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
protocol_version=None,
connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS,
allow_server_port_discovery=False):
cmd.Cmd.__init__(self, completekey=completekey)
self.hostname = hostname
self.port = port
self.auth_provider = None
if username:
if not password:
password = getpass.getpass()
self.auth_provider = PlainTextAuthProvider(username=username, password=password)
self.username = username
self.keyspace = keyspace
self.ssl = ssl
self.tracing_enabled = tracing_enabled
self.page_size = self.default_page_size
self.expand_enabled = expand_enabled
self.allow_server_port_discovery = allow_server_port_discovery
if use_conn:
self.conn = use_conn
else:
kwargs = {}
if protocol_version is not None:
kwargs['protocol_version'] = protocol_version
self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
auth_provider=self.auth_provider,
ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=connect_timeout,
connect_timeout=connect_timeout,
allow_server_port_discovery=allow_server_port_discovery,
**kwargs)
self.owns_connection = not use_conn
if keyspace:
self.session = self.conn.connect(keyspace)
else:
self.session = self.conn.connect()
if browser == "":
browser = None
self.browser = browser
self.color = color
self.display_nanotime_format = display_nanotime_format
self.display_timestamp_format = display_timestamp_format
self.display_date_format = display_date_format
self.display_float_precision = display_float_precision
self.display_double_precision = display_double_precision
self.display_timezone = display_timezone
self.session.default_timeout = request_timeout
self.session.row_factory = ordered_dict_factory
self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
self.get_connection_versions()
self.set_expanded_cql_version(self.connection_versions['cql'])
self.current_keyspace = keyspace
self.max_trace_wait = max_trace_wait
self.session.max_trace_wait = max_trace_wait
self.tty = tty
self.encoding = encoding
self.check_windows_encoding()
self.output_codec = codecs.lookup(encoding)
self.statement = StringIO()
self.lineno = 1
self.in_comment = False
self.prompt = ''
if stdin is None:
stdin = sys.stdin
if tty:
self.reset_prompt()
self.report_connection()
print 'Use HELP for help.'
else:
self.show_line_nums = True
self.stdin = stdin
self.query_out = sys.stdout
self.consistency_level = cassandra.ConsistencyLevel.ONE
self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL
self.empty_lines = 0
self.statement_error = False
self.single_statement = single_statement
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
def check_windows_encoding(self):
if is_win and os.name == 'nt' and self.tty and \
self.is_using_utf8 and sys.stdout.encoding != CP65001:
self.printerr("\nWARNING: console codepage must be set to cp65001 "
"to support {} encoding on Windows platforms.\n"
"If you experience encoding problems, change your console"
" codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
def myformat_value(self, val, cqltype=None, **kwargs):
if isinstance(val, DecodeError):
self.decoding_errors.append(val)
try:
dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
timezone=self.display_timezone)
precision = self.display_double_precision if cqltype is not None and cqltype.type_name == 'double' \
else self.display_float_precision
return format_value(val, cqltype=cqltype, encoding=self.output_codec.name,
addcolor=self.color, date_time_format=dtformats,
float_precision=precision, **kwargs)
except Exception, e:
err = FormatError(val, e)
self.decoding_errors.append(err)
return format_value(err, cqltype=cqltype, encoding=self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
elif name in table_meta.columns and table_meta.columns[name].is_static:
column_colors.default_factory = lambda: WHITE
return self.myformat_value(name, colormap=column_colors)
def report_connection(self):
self.show_host()
self.show_version()
def show_host(self):
print "Connected to %s at %s:%d." % \
(self.applycolor(self.get_cluster_name(), BLUE),
self.hostname,
self.port)
def show_version(self):
vers = self.connection_versions.copy()
vers['shver'] = version
# system.Versions['cql'] apparently does not reflect changes with
# set_cql_version.
vers['cql'] = self.cql_version
print "[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers
def show_session(self, sessionid, partial_session=False):
print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'protocol': result['native_protocol_version'],
'cql': result['cql_version'],
}
self.connection_versions = vers
def get_keyspace_names(self):
# TODO remove after virtual tables are added to connection metadata
if self.virtual_keyspaces is None:
self.init_virtual_keyspaces_meta()
return map(str, self.conn.metadata.keyspaces.keys() + self.virtual_keyspaces.keys())
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).tables.keys())
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).views.keys())
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).indexes.keys())
def get_column_names(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
layout = self.get_table_meta(ksname, cfname)
return [unicode(col) for col in layout.columns]
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return self.get_keyspace_meta(ksname).user_types.keys()
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
return zip(user_type.field_names, user_type.field_types)
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).functions.values())
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).aggregates.values())
def get_cluster_name(self):
return self.conn.metadata.cluster_name
def get_partitioner(self):
return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ksname]
# TODO remove after virtual tables are added to connection metadata
if self.virtual_keyspaces is None:
self.init_virtual_keyspaces_meta()
if ksname in self.virtual_keyspaces:
return self.virtual_keyspaces[ksname]
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
# TODO remove after virtual tables are added to connection metadata
def init_virtual_keyspaces_meta(self):
self.virtual_keyspaces = {}
for vkeyspace in self.fetch_virtual_keyspaces():
self.virtual_keyspaces[vkeyspace.name] = vkeyspace
# TODO remove after virtual tables are added to connection metadata
def fetch_virtual_keyspaces(self):
keyspaces = []
result = self.session.execute('SELECT keyspace_name FROM system_virtual_schema.keyspaces;')
for row in result:
name = row['keyspace_name']
keyspace = KeyspaceMetadata(name, False, None, None)
tables = self.fetch_virtual_tables(name)
for table in tables:
keyspace.tables[table.name] = table
keyspaces.append(keyspace)
return keyspaces
# TODO remove after virtual tables are added to connection metadata
def fetch_virtual_tables(self, keyspace_name):
tables = []
result = self.session.execute("SELECT * FROM system_virtual_schema.tables WHERE keyspace_name = '{}';".format(keyspace_name))
for row in result:
name = row['table_name']
table = TableMetadata(keyspace_name, name)
self.fetch_virtual_columns(table)
tables.append(table)
return tables
# TODO remove after virtual tables are added to connection metadata
def fetch_virtual_columns(self, table):
result = self.session.execute("SELECT * FROM system_virtual_schema.columns WHERE keyspace_name = '{}' AND table_name = '{}';".format(table.keyspace_name, table.name))
partition_key_columns = []
clustering_columns = []
for row in result:
name = row['column_name']
cql_type = row['type']
kind = row['kind']
position = row['position']
is_static = kind == 'static'
is_reversed = row['clustering_order'] == 'desc'
column = ColumnMetadata(table, name, cql_type, is_static, is_reversed)
table.columns[column.name] = column
if kind == 'partition_key':
partition_key_columns.append((position, column))
elif kind == 'clustering':
clustering_columns.append((position, column))
partition_key_columns.sort(key=lambda t: t[0])
clustering_columns.sort(key=lambda t: t[0])
table.partition_key = map(lambda t: t[1], partition_key_columns)
table.clustering_key = map(lambda t: t[1], clustering_columns)
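# Illustrative sketch (not part of the original file): the fetch_virtual_*
# helpers above assemble an ordinary metadata tree, so virtual keyspaces can
# be walked the same way as regular ones:
#
#   >>> for vks in shell.fetch_virtual_keyspaces():
#   ...     for vt in vks.tables.values():
#   ...         print vks.name, vt.name, [c.name for c in vt.partition_key]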
def get_keyspaces(self):
return self.conn.metadata.keyspaces.values()
def get_ring(self, ks):
self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
return self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
# may be using external auth implementation so internal tables
# aren't actually defined in schema. In this case, we'll fake
# them up
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
return table_meta
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index %r not found" % idxname)
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view %r not found" % viewname)
return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
if name is None:
if ks and ks in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ks]
elif self.current_keyspace is None:
raise ObjectNotFound("%r not found in keyspaces" % (ks))
else:
name = ks
ks = self.current_keyspace
if ks is None:
ks = self.current_keyspace
ksmeta = self.get_keyspace_meta(ks)
if name in ksmeta.tables:
return ksmeta.tables[name]
elif name in ksmeta.indexes:
return ksmeta.indexes[name]
elif name in ksmeta.views:
return ksmeta.views[name]
raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in self.get_keyspace_meta(ksname).tables.values()
for trigger in table.triggers.values()]
def reset_statement(self):
self.reset_prompt()
self.statement.truncate(0)
self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt % self.current_keyspace, True)
def set_continue_prompt(self):
if self.empty_lines >= 3:
self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
self.empty_lines = 0
return
if self.current_keyspace is None:
self.set_prompt(self.continue_prompt)
else:
spaces = ' ' * len(str(self.current_keyspace))
self.set_prompt(self.keyspace_continue_prompt % spaces)
self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
@contextmanager
def prepare_loop(self):
readline = None
if self.tty and self.completekey:
try:
import readline
except ImportError:
if is_win:
print "WARNING: pyreadline dependency missing. Install to enable tab completion."
pass
else:
old_completer = readline.get_completer()
readline.set_completer(self.complete)
if readline.__doc__ is not None and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind -e")
readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
readline.parse_and_bind("bind ^R em-inc-search-prev")
else:
readline.parse_and_bind(self.completekey + ": complete")
try:
yield
finally:
if readline is not None:
readline.set_completer(old_completer)
def get_input_line(self, prompt=''):
if self.tty:
try:
self.lastcmd = raw_input(prompt).decode(self.encoding)
except UnicodeDecodeError:
self.lastcmd = ''
traceback.print_exc()
self.check_windows_encoding()
line = self.lastcmd + '\n'
else:
self.lastcmd = self.stdin.readline()
line = self.lastcmd
if not len(line):
raise EOFError
self.lineno += 1
return line
def use_stdin_reader(self, until='', prompt=''):
until += '\n'
while True:
try:
newline = self.get_input_line(prompt=prompt)
except EOFError:
return
if newline == until:
return
yield newline
def cmdloop(self):
"""
Adapted from cmd.Cmd's version, because there is literally no way with
cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
input and an actual EOF.
"""
with self.prepare_loop():
while not self.stop:
try:
if self.single_statement:
line = self.single_statement
self.stop = True
else:
line = self.get_input_line(self.prompt)
self.statement.write(line)
if self.onecmd(self.statement.getvalue()):
self.reset_statement()
except EOFError:
self.handle_eof()
except CQL_ERRORS, cqlerr:
self.printerr(cqlerr.message.decode(encoding='utf-8'))
except KeyboardInterrupt:
self.reset_statement()
print
def onecmd(self, statementtext):
"""
Returns true if the statement is complete and was handled (meaning it
can be reset).
"""
try:
statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
except pylexotron.LexingError, e:
if self.show_line_nums:
self.printerr('Invalid syntax at char %d' % (e.charnum,))
else:
self.printerr('Invalid syntax at line %d, char %d'
% (e.linenum, e.charnum))
statementline = statementtext.split('\n')[e.linenum - 1]
self.printerr(' %s' % statementline)
self.printerr(' %s^' % (' ' * e.charnum))
return True
while statements and not statements[-1]:
statements = statements[:-1]
if not statements:
return True
if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
self.set_continue_prompt()
return
for st in statements:
try:
self.handle_statement(st, statementtext)
except Exception, e:
if self.debug:
traceback.print_exc()
else:
self.printerr(e)
return True
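# Illustrative trace (hypothetical session, not part of the original file):
# onecmd() returns True only once the lexer sees an unescaped ';' endtoken,
# so partial statements keep accumulating in self.statement:
#
#   cqlsh> SELECT *       <- no endtoken: set_continue_prompt(), returns None
#      ... FROM ks.t;     <- endtoken: handle_statement(), returns True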
def handle_eof(self):
if self.tty:
print
statement = self.statement.getvalue()
if statement.strip():
if not self.onecmd(statement):
self.printerr('Incomplete statement at end of file')
self.do_exit()
def handle_statement(self, tokens, srcstr):
# Concat multi-line statements and insert into history
if readline is not None:
nl_count = srcstr.count("\n")
new_hist = srcstr.replace("\n", " ").rstrip()
if nl_count > 1 and self.last_hist != new_hist:
readline.add_history(new_hist.encode(self.encoding))
self.last_hist = new_hist
cmdword = tokens[0][1]
if cmdword == '?':
cmdword = 'help'
custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
if custom_handler:
parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
startsymbol='cqlshCommand')
if parsed and not parsed.remainder:
# successful complete parse
return custom_handler(parsed)
else:
return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# hey, maybe they know about some new syntax we don't. type
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
if success:
if ksname[0] == '"' and ksname[-1] == '"':
self.current_keyspace = self.cql_unprotect_name(ksname)
else:
self.current_keyspace = ksname.lower()
def do_select(self, parsed):
tracing_was_enabled = self.tracing_enabled
ksname = parsed.get_binding('ksname')
stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
self.tracing_enabled = self.tracing_enabled and not stop_tracing
statement = parsed.extract_orig()
self.perform_statement(statement)
self.tracing_enabled = tracing_was_enabled
def perform_statement(self, statement):
stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
success, future = self.perform_simple_statement(stmt)
if future:
if future.warnings:
self.print_warnings(future.warnings)
if self.tracing_enabled:
try:
for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
print_trace(self, trace)
except TraceUnavailable:
msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
self.writeresult(msg, color=RED)
for trace_id in future.get_query_trace_ids():
self.show_session(trace_id, partial_session=True)
except Exception, err:
self.printerr("Unable to fetch query trace: %s" % (str(err),))
return success
def parse_for_select_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
try:
return self.get_table_meta(ks, name)
except ColumnFamilyNotFound:
try:
return self.get_view_meta(ks, name)
except MaterializedViewNotFound:
raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def parse_for_update_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
return self.get_table_meta(ks, cf)
def perform_simple_statement(self, statement):
if not statement:
return False, None
future = self.session.execute_async(statement, trace=self.tracing_enabled)
result = None
try:
result = future.result()
except CQL_ERRORS, err:
self.printerr(unicode(err.__class__.__name__) + u": " + err.message.decode(encoding='utf-8'))
except Exception:
import traceback
self.printerr(traceback.format_exc())
# Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
if not future.is_schema_agreed:
try:
self.conn.refresh_schema_metadata(5) # will throw exception if there is a schema mismatch
except Exception:
self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
"nodes in system.local and system.peers.")
self.conn.refresh_schema_metadata(-1)
if result is None:
return False, None
if statement.query_string[:6].lower() == 'select':
self.print_result(result, self.parse_for_select_meta(statement.query_string))
elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
self.print_result(result, self.get_table_meta('system_auth', 'roles'))
elif statement.query_string.lower().startswith("list"):
self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
elif result:
# CAS INSERT/UPDATE
self.writeresult("")
self.print_static_result(result, self.parse_for_update_meta(statement.query_string))
self.flush_output()
return True, future
def print_result(self, result, table_meta):
self.decoding_errors = []
self.writeresult("")
if result.has_more_pages and self.tty:
num_rows = 0
while True:
if result.current_rows:
num_rows += len(result.current_rows)
self.print_static_result(result, table_meta)
if result.has_more_pages:
if self.shunted_query_out is None:
# Only pause when not capturing.
raw_input("---MORE---")
result.fetch_next_page()
else:
break
else:
num_rows = len(result.current_rows)
self.print_static_result(result, table_meta)
self.writeresult("(%d rows)" % num_rows)
if self.decoding_errors:
for err in self.decoding_errors[:2]:
self.writeresult(err.message(), color=RED)
if len(self.decoding_errors) > 2:
self.writeresult('%d more decoding errors suppressed.'
% (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, result, table_meta):
if not result.column_names and not table_meta:
return
column_names = result.column_names or table_meta.columns.keys()
formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
if not result.current_rows:
# print header only
self.print_formatted_result(formatted_names, None)
return
cql_types = []
if result.column_types:
ks_name = table_meta.keyspace_name if table_meta else self.current_keyspace
ks_meta = self.conn.metadata.keyspaces.get(ks_name, None)
cql_types = [CqlType(cql_typename(t), ks_meta) for t in result.column_types]
formatted_values = [map(self.myformat_value, [row[c] for c in column_names], cql_types) for row in result.current_rows]
if self.expand_enabled:
self.print_formatted_result_vertically(formatted_names, formatted_values)
else:
self.print_formatted_result(formatted_names, formatted_values)
def print_formatted_result(self, formatted_names, formatted_values):
# determine column widths
widths = [n.displaywidth for n in formatted_names]
if formatted_values is not None:
for fmtrow in formatted_values:
for num, col in enumerate(fmtrow):
widths[num] = max(widths[num], col.displaywidth)
# print header
header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
self.writeresult(' ' + header.rstrip())
self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))
# stop if there are no rows
if formatted_values is None:
self.writeresult("")
return
# print row data
for row in formatted_values:
line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
self.writeresult(' ' + line)
self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values):
max_col_width = max([n.displaywidth for n in formatted_names])
max_val_width = max([n.displaywidth for row in formatted_values for n in row])
# for each row returned, list all the column-value pairs
for row_id, row in enumerate(formatted_values):
self.writeresult("@ Row %d" % (row_id + 1))
self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
for field_id, field in enumerate(row):
column = formatted_names[field_id].ljust(max_col_width, color=self.color)
value = field.ljust(field.displaywidth, color=self.color)
self.writeresult(' ' + " | ".join([column, value]))
self.writeresult('')
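# Illustrative sketch (hypothetical row, not part of the original file) of
# the vertical layout produced above when EXPAND is ON:
#
#   @ Row 1
#   ------+---------
#    id   | 42
#    name | 'alice'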
def print_warnings(self, warnings):
if warnings is None or len(warnings) == 0:
return
self.writeresult('')
self.writeresult('Warnings :')
for warning in warnings:
self.writeresult(warning)
self.writeresult('')
def emptyline(self):
pass
def parseline(self, line):
# this shouldn't be needed
raise NotImplementedError
def complete(self, text, state):
if readline is None:
return
if state == 0:
try:
self.completion_matches = self.find_completions(text)
except Exception:
if debug_completion:
import traceback
traceback.print_exc()
else:
raise
try:
return self.completion_matches[state]
except IndexError:
return None
def find_completions(self, text):
curline = readline.get_line_buffer()
prevlines = self.statement.getvalue()
wholestmt = prevlines + curline
begidx = readline.get_begidx() + len(prevlines)
stuff_to_complete = wholestmt[:begidx]
return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
debug=debug_completion, startsymbol='cqlshCommand')
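# Illustrative sketch (not part of the original file): find_completions()
# rebuilds the whole statement seen so far, so tab completion works across
# continuation lines. With self.statement holding 'SELECT * FROM\n' and the
# user typing 'sys<TAB>', stuff_to_complete is 'SELECT * FROM\n', text is
# 'sys', and cql_complete() can offer e.g. system, system_auth, ...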
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "%s@%s" % (self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
def print_recreate_keyspace(self, ksdef, out):
out.write(ksdef.export_as_string())
out.write("\n")
def print_recreate_columnfamily(self, ksname, cfname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given table.
Writes output to the given out stream.
"""
out.write(self.get_table_meta(ksname, cfname).export_as_string())
out.write("\n")
def print_recreate_index(self, ksname, idxname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given index.
Writes output to the given out stream.
"""
out.write(self.get_index_meta(ksname, idxname).export_as_string())
out.write("\n")
def print_recreate_materialized_view(self, ksname, viewname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given materialized view.
Writes output to the given out stream.
"""
out.write(self.get_view_meta(ksname, viewname).export_as_string())
out.write("\n")
def print_recreate_object(self, ks, name, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given object (ks, table or index).
Writes output to the given out stream.
"""
out.write(self.get_object_meta(ks, name).export_as_string())
out.write("\n")
def describe_keyspaces(self):
print
cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names()))
print
def describe_keyspace(self, ksname):
print
self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
print
def describe_columnfamily(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
print
def describe_index(self, ksname, idxname):
print
self.print_recreate_index(ksname, idxname, sys.stdout)
print
def describe_materialized_view(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
print
def describe_object(self, ks, name):
print
self.print_recreate_object(ks, name, sys.stdout)
print
def describe_columnfamilies(self, ksname):
print
if ksname is None:
for k in self.get_keyspaces():
name = protect_name(k.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
print
else:
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
print
def describe_functions(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
self._columnize_unicode(ksmeta.functions.keys())
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(ksmeta.functions.keys())
def describe_function(self, ksname, functionname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
functions = filter(lambda f: f.name == functionname, ksmeta.functions.values())
if len(functions) == 0:
raise FunctionNotFound("User defined function %r not found" % functionname)
print "\n\n".join(func.export_as_string() for func in functions)
print
def describe_aggregates(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
self._columnize_unicode(ksmeta.aggregates.keys())
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(ksmeta.aggregates.keys())
def describe_aggregate(self, ksname, aggregatename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
aggregates = filter(lambda f: f.name == aggregatename, ksmeta.aggregates.values())
if len(aggregates) == 0:
raise FunctionNotFound("User defined aggregate %r not found" % aggregatename)
print "\n\n".join(aggr.export_as_string() for aggr in aggregates)
print
def describe_usertypes(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
self._columnize_unicode(ksmeta.user_types.keys(), quote=True)
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(ksmeta.user_types.keys(), quote=True)
def describe_usertype(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
try:
usertype = ksmeta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
print usertype.export_as_string()
def _columnize_unicode(self, name_list, quote=False):
"""
Used when columnizing identifiers that may contain unicode
"""
names = [n.encode('utf-8') for n in name_list]
if quote:
names = protect_names(names)
cmd.Cmd.columnize(self, names)
print
def describe_cluster(self):
print '\nCluster: %s' % self.get_cluster_name()
p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
print 'Partitioner: %s\n' % p
# TODO: snitch?
# snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
# print 'Snitch: %s\n' % snitch
if self.current_keyspace is not None and self.current_keyspace != 'system':
print "Range ownership:"
ring = self.get_ring(self.current_keyspace)
for entry in ring.items():
print ' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]]))
print
def describe_schema(self, include_system=False):
print
for k in self.get_keyspaces():
if include_system or k.name not in cql3handling.SYSTEM_KEYSPACES:
self.print_recreate_keyspace(k, sys.stdout)
print
def do_describe(self, parsed):
"""
DESCRIBE [cqlsh only]
(DESC may be used as a shorthand.)
Outputs information about the connected Cassandra cluster, or about
the data objects stored in the cluster. Use in one of the following ways:
DESCRIBE KEYSPACES
Output the names of all keyspaces.
DESCRIBE KEYSPACE [<keyspacename>]
Output CQL commands that could be used to recreate the given keyspace,
and the objects in it (such as tables, types, functions, etc.).
In some cases, as the CQL interface matures, there will be some metadata
about a keyspace that is not representable with CQL. That metadata will not be shown.
The '<keyspacename>' argument may be omitted, in which case the current
keyspace will be described.
DESCRIBE TABLES
Output the names of all tables in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TABLE [<keyspace>.]<tablename>
Output CQL commands that could be used to recreate the given table.
In some cases, as above, there may be table metadata which is not
representable and which will not be shown.
DESCRIBE INDEX <indexname>
Output the CQL command that could be used to recreate the given index.
In some cases, there may be index metadata which is not representable
and which will not be shown.
DESCRIBE MATERIALIZED VIEW <viewname>
Output the CQL command that could be used to recreate the given materialized view.
In some cases, there may be materialized view metadata which is not representable
and which will not be shown.
DESCRIBE CLUSTER
Output information about the connected Cassandra cluster, such as the
cluster name, and the partitioner and snitch in use. When you are
connected to a non-system keyspace, also shows endpoint-range
ownership information for the Cassandra ring.
DESCRIBE [FULL] SCHEMA
Output CQL commands that could be used to recreate the entire (non-system) schema.
Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.
DESCRIBE TYPES
Output the names of all user-defined-types in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TYPE [<keyspace>.]<type>
Output the CQL command that could be used to recreate the given user-defined-type.
DESCRIBE FUNCTIONS
Output the names of all user-defined-functions in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE FUNCTION [<keyspace>.]<function>
Output the CQL command that could be used to recreate the given user-defined-function.
DESCRIBE AGGREGATES
Output the names of all user-defined-aggregates in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE AGGREGATE [<keyspace>.]<aggregate>
Output the CQL command that could be used to recreate the given user-defined-aggregate.
DESCRIBE <objname>
Output CQL commands that could be used to recreate the entire object schema,
where object can be either a keyspace or a table or an index or a materialized
view (in this order).
"""
what = parsed.matched[1][1].lower()
if what == 'functions':
self.describe_functions(self.current_keyspace)
elif what == 'function':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
self.describe_function(ksname, functionname)
elif what == 'aggregates':
self.describe_aggregates(self.current_keyspace)
elif what == 'aggregate':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
self.describe_aggregate(ksname, aggregatename)
elif what == 'keyspaces':
self.describe_keyspaces()
elif what == 'keyspace':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
if not ksname:
ksname = self.current_keyspace
if ksname is None:
self.printerr('Not in any keyspace.')
return
self.describe_keyspace(ksname)
elif what in ('columnfamily', 'table'):
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
self.describe_columnfamily(ks, cf)
elif what == 'index':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
self.describe_index(ks, idx)
elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
self.describe_materialized_view(ks, mv)
elif what in ('columnfamilies', 'tables'):
self.describe_columnfamilies(self.current_keyspace)
elif what == 'types':
self.describe_usertypes(self.current_keyspace)
elif what == 'type':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
ut = self.cql_unprotect_name(parsed.get_binding('utname'))
self.describe_usertype(ks, ut)
elif what == 'cluster':
self.describe_cluster()
elif what == 'schema':
self.describe_schema(False)
elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
self.describe_schema(True)
elif what:
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname'))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
self.describe_object(ks, name)
do_desc = do_describe
def do_copy(self, parsed):
r"""
COPY [cqlsh only]
COPY x FROM: Imports CSV data into a Cassandra table
COPY x TO: Exports data from a Cassandra table in CSV format.
COPY <table_name> [ ( column [, ...] ) ]
FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
[ WITH <option>='value' [AND ...] ];
File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.
COPY <table_name> [ ( column [, ...] ) ]
TO ( '<filename>' | STDOUT )
[ WITH <option>='value' [AND ...] ];
Available common COPY options and defaults:
DELIMITER=',' - character that appears between records
QUOTE='"' - quoting character to be used to quote fields
ESCAPE='\' - character to appear before the QUOTE char when quoted
HEADER=false - whether to ignore the first line
NULL='' - string that represents a null value
DATETIMEFORMAT='%Y-%m-%d %H:%M:%S%z' - timestamp strftime format;
defaults to the time_format value in cqlshrc
MAXATTEMPTS=5 - the maximum number of attempts per batch or range
REPORTFREQUENCY=0.25 - the frequency with which we display status updates in seconds
DECIMALSEP='.' - the separator for decimal values
THOUSANDSSEP='' - the separator for thousands digit groups
BOOLSTYLE='True,False' - the representation for booleans, case insensitive, specify true followed by false,
for example yes,no or 1,0
NUMPROCESSES=n - the number of worker processes, by default the number of cores minus one
capped at 16
CONFIGFILE='' - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
documentation) where you can specify WITH options under the following optional
sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
[copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
name. Options are read from these sections, in the order specified
above, and command line options always override options in configuration files.
Depending on the COPY direction, only the relevant copy-from or copy-to sections
are used. If no configfile is specified then .cqlshrc is searched instead.
RATEFILE='' - an optional file where to print the output statistics
Available COPY FROM options and defaults:
CHUNKSIZE=5000 - the size of chunks passed to worker processes
INGESTRATE=100000 - an approximate ingest rate in rows per second
MINBATCHSIZE=10 - the minimum size of an import batch
MAXBATCHSIZE=20 - the maximum size of an import batch
MAXROWS=-1 - the maximum number of rows, -1 means no maximum
SKIPROWS=0 - the number of rows to skip
SKIPCOLS='' - a comma separated list of column names to skip
MAXPARSEERRORS=-1 - the maximum global number of parsing errors, -1 means no maximum
MAXINSERTERRORS=1000 - the maximum global number of insert errors, -1 means no maximum
ERRFILE='' - a file where to store all rows that could not be imported, by default this is
import_ks_table.err where <ks> is your keyspace and <table> is your table name.
PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
False if you don't mind shifting data parsing to the cluster. The cluster will also
have to compile every batch statement. For large and oversized clusters
this will result in a faster import but for smaller clusters it may generate
timeouts.
TTL=3600 - the time to live in seconds, by default data will not expire
Available COPY TO options and defaults:
ENCODING='utf8' - encoding for CSV output
PAGESIZE='1000' - the page size for fetching results
PAGETIMEOUT=10 - the page timeout in seconds for fetching results
BEGINTOKEN='' - the minimum token string to consider when exporting data
ENDTOKEN='' - the maximum token string to consider when exporting data
MAXREQUESTS=6 - the maximum number of requests each worker process can work on in parallel
MAXOUTPUTSIZE='-1' - the maximum size of the output file measured in number of lines,
beyond this maximum the output file will be split into segments,
-1 means unlimited.
FLOATPRECISION=5 - the number of digits displayed after the decimal point for cql float values
DOUBLEPRECISION=12 - the number of digits displayed after the decimal point for cql double values
When entering CSV data on STDIN, you can use the sequence "\."
on a line by itself to end the data input.
"""
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
if ks is None:
ks = self.current_keyspace
if ks is None:
raise NoKeyspaceError("Not in any keyspace.")
table = self.cql_unprotect_name(parsed.get_binding('cfname'))
columns = parsed.get_binding('colnames', None)
if columns is not None:
columns = map(self.cql_unprotect_name, columns)
else:
# default to all known columns
columns = self.get_column_names(ks, table)
fname = parsed.get_binding('fname', None)
if fname is not None:
fname = self.cql_unprotect_value(fname)
copyoptnames = map(str.lower, parsed.get_binding('optnames', ()))
copyoptvals = map(self.cql_unprotect_value, parsed.get_binding('optvals', ()))
opts = dict(zip(copyoptnames, copyoptvals))
direction = parsed.get_binding('dir').upper()
if direction == 'FROM':
task = ImportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
elif direction == 'TO':
task = ExportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
else:
raise SyntaxError("Unknown direction %s" % direction)
task.run()
def do_show(self, parsed):
"""
SHOW [cqlsh only]
Displays information about the current cqlsh session. Can be called in
the following ways:
SHOW VERSION
Shows the version and build of the connected Cassandra instance, as
well as the version of the CQL spec that the connected Cassandra
instance understands.
SHOW HOST
Shows where cqlsh is currently connected.
SHOW SESSION <sessionid>
Pretty-prints the requested tracing session.
"""
showwhat = parsed.get_binding('what').lower()
if showwhat == 'version':
self.get_connection_versions()
self.show_version()
elif showwhat == 'host':
self.show_host()
elif showwhat.startswith('session'):
session_id = parsed.get_binding('sessionid').lower()
self.show_session(UUID(session_id))
else:
self.printerr('Wait, how do I show %r?' % (showwhat,))
def do_source(self, parsed):
"""
SOURCE [cqlsh only]
Executes a file containing CQL statements. Gives the output for each
statement in turn, if any, or any errors that occur along the way.
Errors do NOT abort execution of the CQL source file.
Usage:
SOURCE '<file>';
That is, the path to the file to be executed must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
See also the --file option to cqlsh.
"""
fname = parsed.get_binding('fname')
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
encoding, bom_size = get_file_encoding_bomsize(fname)
f = codecs.open(fname, 'r', encoding)
f.seek(bom_size)
except IOError, e:
self.printerr('Could not open %r: %s' % (fname, e))
return
username = self.auth_provider.username if self.auth_provider else None
password = self.auth_provider.password if self.auth_provider else None
subshell = Shell(self.hostname, self.port, color=self.color,
username=username, password=password,
encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
cqlver=self.cql_version, keyspace=self.current_keyspace,
tracing_enabled=self.tracing_enabled,
display_nanotime_format=self.display_nanotime_format,
display_timestamp_format=self.display_timestamp_format,
display_date_format=self.display_date_format,
display_float_precision=self.display_float_precision,
display_double_precision=self.display_double_precision,
display_timezone=self.display_timezone,
max_trace_wait=self.max_trace_wait, ssl=self.ssl,
request_timeout=self.session.default_timeout,
connect_timeout=self.conn.connect_timeout,
allow_server_port_discovery=self.allow_server_port_discovery)
subshell.cmdloop()
f.close()
def do_capture(self, parsed):
"""
CAPTURE [cqlsh only]
Begins capturing command output and appending it to a specified file.
Output will not be shown at the console while it is captured.
Usage:
CAPTURE '<file>';
CAPTURE OFF;
CAPTURE;
That is, the path to the file to be appended to must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
Only query result output is captured. Errors and output from cqlsh-only
commands will still be shown in the cqlsh session.
To stop capturing output and show it in the cqlsh session again, use
CAPTURE OFF.
To inspect the current capture configuration, use CAPTURE with no
arguments.
"""
fname = parsed.get_binding('fname')
if fname is None:
if self.shunted_query_out is not None:
print "Currently capturing query output to %r." % (self.query_out.name,)
else:
print "Currently not capturing query output."
return
if fname.upper() == 'OFF':
if self.shunted_query_out is None:
self.printerr('Not currently capturing output.')
return
self.query_out.close()
self.query_out = self.shunted_query_out
self.color = self.shunted_color
self.shunted_query_out = None
del self.shunted_color
return
if self.shunted_query_out is not None:
self.printerr('Already capturing output to %s. Use CAPTURE OFF'
' to disable.' % (self.query_out.name,))
return
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
f = open(fname, 'a')
except IOError, e:
self.printerr('Could not open %r for append: %s' % (fname, e))
return
self.shunted_query_out = self.query_out
self.shunted_color = self.color
self.query_out = f
self.color = False
print 'Now capturing query output to %r.' % (fname,)
def do_tracing(self, parsed):
"""
TRACING [cqlsh]
Enables or disables request tracing.
TRACING ON
Enables tracing for all further requests.
TRACING OFF
Disables tracing.
TRACING
TRACING with no arguments shows the current tracing status.
"""
self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
"""
EXPAND [cqlsh]
Enables or disables expanded (vertical) output.
EXPAND ON
Enables expanded (vertical) output.
EXPAND OFF
Disables expanded (vertical) output.
EXPAND
EXPAND with no arguments shows the current value of expand setting.
"""
self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
"""
CONSISTENCY [cqlsh only]
Overrides default consistency level (default level is ONE).
CONSISTENCY <level>
Sets consistency level for future requests.
Valid consistency levels:
ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
SERIAL and LOCAL_SERIAL may be used only for SELECTs; they will be rejected for updates.
CONSISTENCY
CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level])
return
self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Consistency level set to %s.' % (level.upper(),)
def do_serial(self, parsed):
"""
SERIAL CONSISTENCY [cqlsh only]
Overrides serial consistency level (default level is SERIAL).
SERIAL CONSISTENCY <level>
Sets consistency level for future conditional updates.
Valid consistency levels:
SERIAL, LOCAL_SERIAL.
SERIAL CONSISTENCY
SERIAL CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level])
return
self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Serial consistency level set to %s.' % (level.upper(),)
def do_login(self, parsed):
"""
LOGIN [cqlsh only]
Changes login information without requiring restart.
LOGIN <username> (<password>)
Login using the specified username. If the password is specified, it will be
used; otherwise, you will be prompted to enter it.
"""
username = parsed.get_binding('username')
password = parsed.get_binding('password')
if password is None:
password = getpass.getpass()
else:
password = password[1:-1]
auth_provider = PlainTextAuthProvider(username=username, password=password)
conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
protocol_version=self.conn.protocol_version,
auth_provider=auth_provider,
ssl_options=self.conn.ssl_options,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=self.conn.connect_timeout,
connect_timeout=self.conn.connect_timeout)
if self.current_keyspace:
session = conn.connect(self.current_keyspace)
else:
session = conn.connect()
# Copy session properties
session.default_timeout = self.session.default_timeout
session.row_factory = self.session.row_factory
session.default_consistency_level = self.session.default_consistency_level
session.max_trace_wait = self.session.max_trace_wait
# Update after we've connected in case we fail to authenticate
self.conn = conn
self.auth_provider = auth_provider
self.username = username
self.session = session
def do_exit(self, parsed=None):
"""
EXIT/QUIT [cqlsh only]
Exits cqlsh.
"""
self.stop = True
if self.owns_connection:
self.conn.shutdown()
do_quit = do_exit
def do_clear(self, parsed):
"""
CLEAR/CLS [cqlsh only]
Clears the console.
"""
import subprocess
subprocess.call(['clear', 'cls'][is_win], shell=True)
do_cls = do_clear
def do_debug(self, parsed):
import pdb
pdb.set_trace()
def get_help_topics(self):
topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__]
for hide_from_help in ('quit',):
topics.remove(hide_from_help)
return topics
def columnize(self, slist, *a, **kw):
return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw)
def do_help(self, parsed):
"""
HELP [cqlsh only]
Gives information about cqlsh commands. To see available topics,
enter "HELP" without any arguments. To see help on a topic,
use "HELP <topic>".
"""
topics = parsed.get_binding('topic', ())
if not topics:
shell_topics = [t.upper() for t in self.get_help_topics()]
self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
self.print_topics("CQL help topics:", cql_topics, 15, 80)
return
for t in topics:
if t.lower() in self.get_help_topics():
doc = getattr(self, 'do_' + t.lower()).__doc__
self.stdout.write(doc + "\n")
elif t.lower() in cqldocs.get_help_topics():
urlpart = cqldocs.get_help_topic(t)
if urlpart is not None:
url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
if len(webbrowser._tryorder) == 0:
self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
elif self.browser is not None:
webbrowser.get(self.browser).open_new_tab(url)
else:
webbrowser.open_new_tab(url)
else:
self.printerr("*** No help on %s" % (t,))
def do_unicode(self, parsed):
"""
Textual input/output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
def do_paging(self, parsed):
"""
PAGING [cqlsh]
Enables or disables query paging.
PAGING ON
Enables query paging for all further queries.
PAGING OFF
Disables paging.
PAGING
PAGING with no arguments shows the current query paging status.
"""
(self.use_paging, requested_page_size) = SwitchCommandWithValue(
"PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
if self.use_paging and requested_page_size is not None:
self.page_size = requested_page_size
if self.use_paging:
print("Page size: {}".format(self.page_size))
else:
self.page_size = self.default_page_size
def applycolor(self, text, color=None):
if not color or not self.color:
return text
return color + text + ANSI_RESET
def writeresult(self, text, color=None, newline=True, out=None):
if out is None:
out = self.query_out
# convert Exceptions, etc to text
if not isinstance(text, (unicode, str)):
text = unicode(text)
if isinstance(text, unicode):
text = text.encode(self.encoding)
to_write = self.applycolor(text, color) + ('\n' if newline else '')
out.write(to_write)
def flush_output(self):
self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
self.statement_error = True
if shownum is None:
shownum = self.show_line_nums
if shownum:
text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
self.writeresult(text, color, newline=newline, out=sys.stderr)
class SwitchCommand(object):
command = None
description = None
def __init__(self, command, desc):
self.command = command
self.description = desc
def execute(self, state, parsed, printerr):
switch = parsed.get_binding('switch')
if switch is None:
if state:
print "%s is currently enabled. Use %s OFF to disable" \
% (self.description, self.command)
else:
print "%s is currently disabled. Use %s ON to enable." \
% (self.description, self.command)
return state
if switch.upper() == 'ON':
if state:
printerr('%s is already enabled. Use %s OFF to disable.'
% (self.description, self.command))
return state
print 'Now %s is enabled' % (self.description,)
return True
if switch.upper() == 'OFF':
if not state:
printerr('%s is not enabled.' % (self.description,))
return state
print 'Disabled %s.' % (self.description,)
return False
class SwitchCommandWithValue(SwitchCommand):
"""The same as SwitchCommand except it also accepts a value in place of ON.
This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
eg: PAGING 50 returns (True, 50)
PAGING OFF returns (False, None)
PAGING ON returns (True, None)
If the given value cannot be converted with value_type, PASSED_VALUE is returned as None.
"""
def __init__(self, command, desc, value_type=int):
SwitchCommand.__init__(self, command, desc)
self.value_type = value_type
def execute(self, state, parsed, printerr):
binary_switch_value = SwitchCommand.execute(self, state, parsed, printerr)
switch = parsed.get_binding('switch')
try:
value = self.value_type(switch)
binary_switch_value = True
except (ValueError, TypeError):
value = None
return (binary_switch_value, value)
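# Illustrative usage sketch (not part of the original file); 'parsed' stands
# in for a pylexotron parse result, of which only get_binding('switch') is
# consulted:
#
#   >>> sw = SwitchCommandWithValue("PAGING", "Query paging", value_type=int)
#   >>> sw.execute(False, parsed, printerr)   # switch == '50'  -> (True, 50)
#   >>> sw.execute(True, parsed, printerr)    # switch == 'OFF' -> (False, None)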
def option_with_default(cparser_getter, section, option, default=None):
try:
return cparser_getter(section, option)
except ConfigParser.Error:
return default
def raw_option_with_default(configs, section, option, default=None):
"""
Same (almost) as option_with_default() but won't do any string interpolation.
Useful for config values that include '%' symbol, e.g. time format string.
"""
try:
return configs.get(section, option, raw=True)
except ConfigParser.Error:
return default
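# Illustrative sketch (hypothetical cqlshrc content, not part of the original
# file) of why raw=True matters above:
#
#   [ui]
#   time_format = %Y-%m-%d %H:%M:%S%z
#
# Read via raw_option_with_default(); configs.get() without raw=True would
# attempt '%'-interpolation on the strftime placeholders and raise
# InterpolationSyntaxError.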
def should_use_color():
if not sys.stdout.isatty():
return False
if os.environ.get('TERM', '') in ('dumb', ''):
return False
try:
import subprocess
p = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
if int(stdout.strip()) < 8:
return False
except (OSError, ImportError, ValueError):
# oh well, we tried. at least we know there's a $TERM and it's
# not "dumb".
pass
return True
def read_options(cmdlineargs, environment):
configs = ConfigParser.SafeConfigParser()
configs.read(CONFIG_FILE)
rawconfigs = ConfigParser.RawConfigParser()
rawconfigs.read(CONFIG_FILE)
optvalues = optparse.Values()
optvalues.username = option_with_default(configs.get, 'authentication', 'username')
optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
DEFAULT_COMPLETEKEY)
optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
DEFAULT_TIMESTAMP_FORMAT)
optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
DEFAULT_NANOTIME_FORMAT)
optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
DEFAULT_DATE_FORMAT)
optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
DEFAULT_FLOAT_PRECISION)
optvalues.double_precision = option_with_default(configs.getint, 'ui', 'double_precision',
DEFAULT_DOUBLE_PRECISION)
optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
DEFAULT_MAX_TRACE_WAIT)
optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
optvalues.debug = False
optvalues.file = None
optvalues.ssl = option_with_default(configs.getboolean, 'connection', 'ssl', DEFAULT_SSL)
optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
optvalues.protocol_version = option_with_default(configs.getint, 'protocol', 'version', None)
optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', None)
optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
optvalues.execute = None
optvalues.allow_server_port_discovery = option_with_default(configs.getboolean, 'connection', 'allow_server_port_discovery', False)
(options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)
try:
options.connect_timeout = int(options.connect_timeout)
except ValueError:
parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS
try:
options.request_timeout = int(options.request_timeout)
except ValueError:
parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS
hostname = environment.get('CQLSH_HOST', hostname)
port = environment.get('CQLSH_PORT', port)
if len(arguments) > 0:
hostname = arguments[0]
if len(arguments) > 1:
port = arguments[1]
if options.file or options.execute:
options.tty = False
if options.execute and not options.execute.endswith(';'):
options.execute += ';'
if optvalues.color in (True, False):
options.color = optvalues.color
else:
if options.file is not None:
options.color = False
else:
options.color = should_use_color()
if options.cqlversion is not None:
options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
if cqlvertup[0] < 3:
parser.error('%r is not a supported CQL version.' % options.cqlversion)
options.cqlmodule = cql3handling
try:
port = int(port)
except ValueError:
parser.error('%r is not a valid port number.' % port)
return options, hostname, port
def setup_cqlruleset(cqlmodule):
global cqlruleset
cqlruleset = cqlmodule.CqlRuleSet
cqlruleset.append_rules(cqlshhandling.cqlsh_extra_syntax_rules)
for rulename, termname, func in cqlshhandling.cqlsh_syntax_completers:
cqlruleset.completer_for(rulename, termname)(func)
cqlruleset.commands_end_with_newline.update(cqlshhandling.my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
global cqldocs
cqldocs = cqlmodule.cqldocs
def init_history():
if readline is not None:
try:
readline.read_history_file(HISTORY)
except IOError:
pass
delims = readline.get_completer_delims()
delims = delims.replace("'", "")
delims += '.'
readline.set_completer_delims(delims)
def save_history():
if readline is not None:
try:
readline.write_history_file(HISTORY)
except IOError:
pass
def main(options, hostname, port):
setup_cqlruleset(options.cqlmodule)
setup_cqldocs(options.cqlmodule)
init_history()
csv.field_size_limit(options.field_size_limit)
if options.file is None:
stdin = None
else:
try:
encoding, bom_size = get_file_encoding_bomsize(options.file)
stdin = codecs.open(options.file, 'r', encoding)
stdin.seek(bom_size)
except IOError, e:
sys.exit("Can't open %r: %s" % (options.file, e))
if options.debug:
sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))
sys.stderr.write("Using ssl: %s\n" % (options.ssl,))
# create timezone based on settings, environment or auto-detection
timezone = None
if options.timezone or 'TZ' in os.environ:
try:
import pytz
if options.timezone:
try:
timezone = pytz.timezone(options.timezone)
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
if 'TZ' in os.environ:
try:
timezone = pytz.timezone(os.environ['TZ'])
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
except ImportError:
sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")
# try auto-detect timezone if tzlocal is installed
if not timezone:
try:
from tzlocal import get_localzone
timezone = get_localzone()
except ImportError:
# we silently ignore and fallback to UTC unless a custom timestamp format (which likely
# does contain a TZ part) was specified
if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, but local timezone could not be detected.\n" +
"Either install Python 'tzlocal' module for auto-detection or specify client timezone in your cqlshrc.\n\n")
try:
shell = Shell(hostname,
port,
color=options.color,
username=options.username,
password=options.password,
stdin=stdin,
tty=options.tty,
completekey=options.completekey,
browser=options.browser,
protocol_version=options.protocol_version,
cqlver=options.cqlversion,
keyspace=options.keyspace,
display_timestamp_format=options.time_format,
display_nanotime_format=options.nanotime_format,
display_date_format=options.date_format,
display_float_precision=options.float_precision,
display_double_precision=options.double_precision,
display_timezone=timezone,
max_trace_wait=options.max_trace_wait,
ssl=options.ssl,
single_statement=options.execute,
request_timeout=options.request_timeout,
connect_timeout=options.connect_timeout,
encoding=options.encoding,
allow_server_port_discovery=options.allow_server_port_discovery)
except KeyboardInterrupt:
sys.exit('Connection aborted.')
except CQL_ERRORS, e:
sys.exit('Connection error: %s' % (e,))
except VersionNotSupported, e:
sys.exit('Unsupported CQL version: %s' % (e,))
if options.debug:
shell.debug = True
shell.cmdloop()
save_history()
batch_mode = options.file or options.execute
if batch_mode and shell.statement_error:
sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()
if __name__ == '__main__':
main(*read_options(sys.argv[1:], os.environ))
# vim: set ft=python et ts=4 sw=4 :
| apache-2.0 |
oandrew/home-assistant | tests/components/light/test_mqtt_template.py | 4 | 15790 | """The tests for the MQTT Template light platform.
Configuration example with all features:
light:
platform: mqtt_template
name: mqtt_template_light_1
state_topic: 'home/rgb1'
command_topic: 'home/rgb1/set'
command_on_template: >
on,{{ brightness|d }},{{ red|d }}-{{ green|d }}-{{ blue|d }}
command_off_template: 'off'
state_template: '{{ value.split(",")[0] }}'
brightness_template: '{{ value.split(",")[1] }}'
red_template: '{{ value.split(",")[2].split("-")[0] }}'
green_template: '{{ value.split(",")[2].split("-")[1] }}'
blue_template: '{{ value.split(",")[2].split("-")[2] }}'
If your light doesn't support the brightness feature, omit `brightness_template`.
If your light doesn't support the RGB feature, omit the `(red|green|blue)_template` entries.
"""
import unittest
from homeassistant.bootstrap import setup_component
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ASSUMED_STATE
import homeassistant.components.light as light
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message,
assert_setup_component)
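# Worked example (added; payload values are hypothetical): with the templates
# shown in the module docstring above, an incoming MQTT payload of
# 'on,128,255-0-64' on home/rgb1 parses as
#   state_template      -> 'on'
#   brightness_template -> '128'
#   red/green/blue      -> '255' / '0' / '64'
# and turning the light on with brightness 128 and RGB (255, 0, 64) publishes
# 'on,128,255-0-64' to home/rgb1/set via command_on_template.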
class TestLightMQTTTemplate(unittest.TestCase):
"""Test the MQTT Template light."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_publish = mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_fails(self): \
# pylint: disable=invalid-name
"""Test that setup fails with missing required configuration items."""
self.hass.config.components = ['mqtt']
with assert_setup_component(0):
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {
'platform': 'mqtt_template',
'name': 'test',
}
})
self.assertIsNone(self.hass.states.get('light.test'))
def test_state_change_via_topic(self): \
# pylint: disable=invalid-name
"""Test state change via topic."""
self.hass.config.components = ['mqtt']
with assert_setup_component(1):
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {
'platform': 'mqtt_template',
'name': 'test',
'state_topic': 'test_light_rgb',
'command_topic': 'test_light_rgb/set',
'command_on_template': 'on,'
'{{ brightness|d }},'
'{{ red|d }}-'
'{{ green|d }}-'
'{{ blue|d }}',
'command_off_template': 'off',
'state_template': '{{ value.split(",")[0] }}'
}
})
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
self.assertIsNone(state.attributes.get('rgb_color'))
self.assertIsNone(state.attributes.get('brightness'))
self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
fire_mqtt_message(self.hass, 'test_light_rgb', 'on')
self.hass.block_till_done()
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
self.assertIsNone(state.attributes.get('rgb_color'))
self.assertIsNone(state.attributes.get('brightness'))
def test_state_brightness_color_change_via_topic(self): \
# pylint: disable=invalid-name
"""Test state, brightness and color change via topic."""
self.hass.config.components = ['mqtt']
with assert_setup_component(1):
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {
'platform': 'mqtt_template',
'name': 'test',
'state_topic': 'test_light_rgb',
'command_topic': 'test_light_rgb/set',
'command_on_template': 'on,'
'{{ brightness|d }},'
'{{ red|d }}-'
'{{ green|d }}-'
'{{ blue|d }}',
'command_off_template': 'off',
'state_template': '{{ value.split(",")[0] }}',
'brightness_template': '{{ value.split(",")[1] }}',
'red_template': '{{ value.split(",")[2].'
'split("-")[0] }}',
'green_template': '{{ value.split(",")[2].'
'split("-")[1] }}',
'blue_template': '{{ value.split(",")[2].'
'split("-")[2] }}'
}
})
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
self.assertIsNone(state.attributes.get('rgb_color'))
self.assertIsNone(state.attributes.get('brightness'))
self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
# turn on the light, full white
fire_mqtt_message(self.hass, 'test_light_rgb', 'on,255,255-255-255')
self.hass.block_till_done()
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
self.assertEqual(255, state.attributes.get('brightness'))
# turn the light off
fire_mqtt_message(self.hass, 'test_light_rgb', 'off')
self.hass.block_till_done()
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
# lower the brightness
fire_mqtt_message(self.hass, 'test_light_rgb', 'on,100')
self.hass.block_till_done()
light_state = self.hass.states.get('light.test')
self.hass.block_till_done()
self.assertEqual(100, light_state.attributes['brightness'])
# change the color
fire_mqtt_message(self.hass, 'test_light_rgb', 'on,,41-42-43')
self.hass.block_till_done()
light_state = self.hass.states.get('light.test')
self.assertEqual([41, 42, 43], light_state.attributes.get('rgb_color'))
def test_optimistic(self): \
# pylint: disable=invalid-name
"""Test optimistic mode."""
self.hass.config.components = ['mqtt']
with assert_setup_component(1):
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {
'platform': 'mqtt_template',
'name': 'test',
'command_topic': 'test_light_rgb/set',
'command_on_template': 'on,'
'{{ brightness|d }},'
'{{ red|d }}-'
'{{ green|d }}-'
'{{ blue|d }}',
'command_off_template': 'off',
'qos': 2
}
})
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE))
# turn on the light
light.turn_on(self.hass, 'light.test')
self.hass.block_till_done()
self.assertEqual(('test_light_rgb/set', 'on,,--', 2, False),
self.mock_publish.mock_calls[-1][1])
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
# turn the light off
light.turn_off(self.hass, 'light.test')
self.hass.block_till_done()
self.assertEqual(('test_light_rgb/set', 'off', 2, False),
self.mock_publish.mock_calls[-1][1])
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
# turn on the light with brightness and color
light.turn_on(self.hass, 'light.test', brightness=50,
rgb_color=[75, 75, 75])
self.hass.block_till_done()
self.assertEqual('test_light_rgb/set',
self.mock_publish.mock_calls[-1][1][0])
self.assertEqual(2, self.mock_publish.mock_calls[-1][1][2])
self.assertEqual(False, self.mock_publish.mock_calls[-1][1][3])
# check the payload
payload = self.mock_publish.mock_calls[-1][1][1]
self.assertEqual('on,50,75-75-75', payload)
# check the state
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
self.assertEqual((75, 75, 75), state.attributes['rgb_color'])
self.assertEqual(50, state.attributes['brightness'])
def test_flash(self): \
# pylint: disable=invalid-name
"""Test flash."""
self.hass.config.components = ['mqtt']
with assert_setup_component(1):
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {
'platform': 'mqtt_template',
'name': 'test',
'command_topic': 'test_light_rgb/set',
'command_on_template': 'on,{{ flash }}',
'command_off_template': 'off',
'qos': 0
}
})
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
# short flash
light.turn_on(self.hass, 'light.test', flash='short')
self.hass.block_till_done()
self.assertEqual('test_light_rgb/set',
self.mock_publish.mock_calls[-1][1][0])
self.assertEqual(0, self.mock_publish.mock_calls[-1][1][2])
self.assertEqual(False, self.mock_publish.mock_calls[-1][1][3])
# check the payload
payload = self.mock_publish.mock_calls[-1][1][1]
self.assertEqual('on,short', payload)
# long flash
light.turn_on(self.hass, 'light.test', flash='long')
self.hass.block_till_done()
self.assertEqual('test_light_rgb/set',
self.mock_publish.mock_calls[-1][1][0])
self.assertEqual(0, self.mock_publish.mock_calls[-1][1][2])
self.assertEqual(False, self.mock_publish.mock_calls[-1][1][3])
# check the payload
payload = self.mock_publish.mock_calls[-1][1][1]
self.assertEqual('on,long', payload)
def test_transition(self):
"""Test for transition time being sent when included."""
self.hass.config.components = ['mqtt']
with assert_setup_component(1):
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {
'platform': 'mqtt_template',
'name': 'test',
'command_topic': 'test_light_rgb/set',
'command_on_template': 'on,{{ transition }}',
'command_off_template': 'off,{{ transition|d }}'
}
})
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
# transition on
light.turn_on(self.hass, 'light.test', transition=10)
self.hass.block_till_done()
self.assertEqual('test_light_rgb/set',
self.mock_publish.mock_calls[-1][1][0])
self.assertEqual(0, self.mock_publish.mock_calls[-1][1][2])
self.assertEqual(False, self.mock_publish.mock_calls[-1][1][3])
# check the payload
payload = self.mock_publish.mock_calls[-1][1][1]
self.assertEqual('on,10', payload)
# transition off
light.turn_off(self.hass, 'light.test', transition=4)
self.hass.block_till_done()
self.assertEqual('test_light_rgb/set',
self.mock_publish.mock_calls[-1][1][0])
self.assertEqual(0, self.mock_publish.mock_calls[-1][1][2])
self.assertEqual(False, self.mock_publish.mock_calls[-1][1][3])
# check the payload
payload = self.mock_publish.mock_calls[-1][1][1]
self.assertEqual('off,4', payload)
def test_invalid_values(self): \
# pylint: disable=invalid-name
"""Test that invalid values are ignored."""
self.hass.config.components = ['mqtt']
with assert_setup_component(1):
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {
'platform': 'mqtt_template',
'name': 'test',
'state_topic': 'test_light_rgb',
'command_topic': 'test_light_rgb/set',
'command_on_template': 'on,'
'{{ brightness|d }},'
'{{ red|d }}-'
'{{ green|d }}-'
'{{ blue|d }}',
'command_off_template': 'off',
'state_template': '{{ value.split(",")[0] }}',
'brightness_template': '{{ value.split(",")[1] }}',
'red_template': '{{ value.split(",")[2].'
'split("-")[0] }}',
'green_template': '{{ value.split(",")[2].'
'split("-")[1] }}',
'blue_template': '{{ value.split(",")[2].'
'split("-")[2] }}'
}
})
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
self.assertIsNone(state.attributes.get('rgb_color'))
self.assertIsNone(state.attributes.get('brightness'))
self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
# turn on the light, full white
fire_mqtt_message(self.hass, 'test_light_rgb', 'on,255,255-255-255')
self.hass.block_till_done()
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
self.assertEqual(255, state.attributes.get('brightness'))
self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
# bad state value
fire_mqtt_message(self.hass, 'test_light_rgb', 'offf')
self.hass.block_till_done()
# state should not have changed
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
# bad brightness values
fire_mqtt_message(self.hass, 'test_light_rgb', 'on,off,255-255-255')
self.hass.block_till_done()
# brightness should not have changed
state = self.hass.states.get('light.test')
self.assertEqual(255, state.attributes.get('brightness'))
# bad color values
fire_mqtt_message(self.hass, 'test_light_rgb', 'on,255,a-b-c')
self.hass.block_till_done()
# color should not have changed
state = self.hass.states.get('light.test')
self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
| mit |
kool79/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/prepared.py | 623 | 1032 | from ctypes import c_char
from django.contrib.gis.geos.libgeos import GEOM_PTR, PREPGEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# Prepared geometry constructor and destructors.
geos_prepare = GEOSFunc('GEOSPrepare')
geos_prepare.argtypes = [GEOM_PTR]
geos_prepare.restype = PREPGEOM_PTR
prepared_destroy = GEOSFunc('GEOSPreparedGeom_destroy')
prepared_destroy.argtypes = [PREPGEOM_PTR]
prepared_destroy.restype = None
# Prepared geometry binary predicate support.
def prepared_predicate(func):
func.argtypes = [PREPGEOM_PTR, GEOM_PTR]
func.restype = c_char
func.errcheck = check_predicate
return func
prepared_contains = prepared_predicate(GEOSFunc('GEOSPreparedContains'))
prepared_contains_properly = prepared_predicate(GEOSFunc('GEOSPreparedContainsProperly'))
prepared_covers = prepared_predicate(GEOSFunc('GEOSPreparedCovers'))
prepared_intersects = prepared_predicate(GEOSFunc('GEOSPreparedIntersects'))
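# Minimal usage sketch (added; not part of the original module). These ctypes
# wrappers are normally driven by GEOSGeometry internals; given two raw
# GEOM_PTR handles g1 and g2 (hypothetical here), the intended call pattern is:
#   prep = geos_prepare(g1)            # build the prepared geometry once
#   hit = prepared_contains(prep, g2)  # cheap, repeatable predicate test
#   prepared_destroy(prep)             # prepared geometries must be freed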
| apache-2.0 |
TwolDE2/enigma2 | RecordTimer.py | 1 | 56128 | from boxbranding import getMachineBrand, getMachineName
import xml.etree.cElementTree
from datetime import datetime
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
import os
from enigma import eEPGCache, getBestPlayableServiceReference, eStreamServer, eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner, eServiceCenter
from Components.config import config
from Components import Harddisk
from Components.UsageConfig import defaultMoviePath, calcFrontendPriorityIntval
from Components.TimerSanityCheck import TimerSanityCheck
import Components.RecordingConfig
Components.RecordingConfig.InitRecordingConfig()
from Screens.MessageBox import MessageBox
import Screens.Standby
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import NavigationInstance
from ServiceReference import ServiceReference
from enigma import pNavigation, eDVBFrontend
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
wasRecTimerWakeup = False
InfoBar = False
#//import later (no error message on system start)
#try:
# from Screens.InfoBar import InfoBar
#except Exception, e:
# print "[RecordTimer] import from 'Screens.InfoBar import InfoBar' failed:", e
# InfoBar = False
#//
#+++
debug = False
#+++
#reset wakeup state after ending timer
def resetTimerWakeup():
global wasRecTimerWakeup
if os.path.exists("/tmp/was_rectimer_wakeup"):
os.remove("/tmp/was_rectimer_wakeup")
if debug: print "[RECORDTIMER] reset wakeup state"
wasRecTimerWakeup = False
# parses an event and returns a (begin, end, name, description, eit)-tuple.
# begin and end are corrected by the configured recording margins
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return begin, end, name, description, eit
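# Worked example (added): with config.recording.margin_before = 3 and
# margin_after = 5 (minutes), an EPG event running 20:00-21:00 yields a timer
# spanning 19:57-21:05, i.e. begin is moved 180s earlier and end 300s later.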
class AFTEREVENT:
def __init__(self):
pass
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
DEFAULT = int(config.recording.default_afterevent.value)
class TIMERTYPE:
def __init__(self):
pass
JUSTPLAY = config.recording.default_timertype.value == "zap"
ALWAYS_ZAP = config.recording.default_timertype.value == "zap+record"
def findSafeRecordPath(dirname):
if not dirname:
return None
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if not os.path.ismount(mountpoint):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
# type 1 = digital television service
# type 4 = nvod reference service (NYI)
# type 17 = MPEG-2 HD digital television service
# type 22 = advanced codec SD digital television
# type 24 = advanced codec SD NVOD reference service (NYI)
# type 25 = advanced codec HD digital television
# type 27 = advanced codec HD NVOD reference service (NYI)
# type 2 = digital radio sound service
# type 10 = advanced codec digital radio sound service
service_types_tv = '1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 22) || (type == 25) || (type == 134) || (type == 195)'
service_types_radio = '1:7:2:0:0:0:0:0:0:0:(type == 2) || (type == 10)'
def getBqRootStr(ref):
ref = ref.toString()
if ref.startswith('1:0:2:'): # we need that also?:----> or ref.startswith('1:0:10:'):
service_types = service_types_radio
if config.usage.multibouquet.value:
bqrootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
else:
bqrootstr = '%s FROM BOUQUET "userbouquet.favourites.radio" ORDER BY bouquet'% service_types
else:
service_types = service_types_tv
if config.usage.multibouquet.value:
bqrootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
else:
bqrootstr = '%s FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet'% service_types
return bqrootstr
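# Worked example (added): for a radio service reference (prefix '1:0:2:') with
# config.usage.multibouquet enabled, getBqRootStr() returns
# '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet';
# for a TV service it returns the corresponding "bouquets.tv" root instead.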
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = TIMERTYPE.JUSTPLAY, afterEvent = AFTEREVENT.DEFAULT, checkOldTimers = False, dirname = None, tags = None, descramble = 'notset', record_ecm = 'notset', rename_repeat = True, isAutoTimer = False, always_zap = TIMERTYPE.ALWAYS_ZAP, MountPath = None):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref and serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.MountPath = None
self.messageString = ""
self.messageStringShow = False
self.messageBoxAnswerPending = False
self.justTriedFreeingTuner = False
if descramble == 'notset' and record_ecm == 'notset':
if config.recording.ecm_data.value == 'descrambled+ecm':
self.descramble = True
self.record_ecm = True
elif config.recording.ecm_data.value == 'scrambled+ecm':
self.descramble = False
self.record_ecm = True
elif config.recording.ecm_data.value == 'normal':
self.descramble = True
self.record_ecm = False
else:
self.descramble = descramble
self.record_ecm = record_ecm
config.usage.frontend_priority_intval.setValue(calcFrontendPriorityIntval(config.usage.frontend_priority, config.usage.frontend_priority_multiselect, config.usage.frontend_priority_strictly))
config.usage.recording_frontend_priority_intval.setValue(calcFrontendPriorityIntval(config.usage.recording_frontend_priority, config.usage.recording_frontend_priority_multiselect, config.usage.recording_frontend_priority_strictly))
self.needChangePriorityFrontend = config.usage.recording_frontend_priority_intval.value != "-2" and config.usage.recording_frontend_priority_intval.value != config.usage.frontend_priority_intval.value
self.change_frontend = False
self.rename_repeat = rename_repeat
self.isAutoTimer = isAutoTimer
self.wasInStandby = False
#workaround for vmc crash - only a dummy entry!!!
self.justremind = False
'''
File "/usr/lib/enigma2/python/Plugins/Extensions/VMC/VMC_Classes.py", line 3704, in TimerChange
"Filename") and not timer.justplay and not timer.justremind and timer.state == TimerEntry.StateEnded:
AttributeError: 'RecordTimerEntry' object has no attribute 'justremind'
'''
###
self.log_entries = []
self.check_justplay()
self.resetState()
def __repr__(self):
if not self.disabled:
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s, isAutoTimer=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay, self.isAutoTimer)
else:
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s, isAutoTimer=%s, Disabled)" % (self.name, ctime(self.begin), self.service_ref, self.justplay, self.isAutoTimer)
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def freespace(self):
self.MountPath = None
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return False
self.MountPath = dirname
mountwriteable = os.access(dirname, os.W_OK)
if not mountwriteable:
self.log(0, ("Mount '%s' is not writeable." % dirname))
return False
s = os.statvfs(dirname)
if (s.f_bavail * s.f_bsize) / 1000000 < 1024: # require roughly 1 GB of free space
self.log(0, _("Not enough free space to record"))
return False
else:
if debug:
self.log(0, "Found enough free space to record")
return True
def calculateFilename(self):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
# print "begin_date: ", begin_date
# print "service_name: ", service_name
# print "name:", self.name
# print "description: ", self.description
#
filename = begin_date + " - " + service_name
if self.name:
if config.recording.filename_composition.value == "veryveryshort":
filename = self.name
elif config.recording.filename_composition.value == "veryshort":
filename = self.name + " - " + begin_date
elif config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + self.name
elif config.recording.filename_composition.value == "shortwithtime":
filename = strftime("%Y%m%d %H%M", localtime(self.begin)) + " - " + self.name
elif config.recording.filename_composition.value == "long":
filename += " - " + self.name + " - " + self.description
else:
filename += " - " + self.name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
self.Filename = Directories.getRecordingFilename(filename, self.MountPath)
if debug:
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
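# Worked example (added; values hypothetical): for a timer named "News" on
# service "BBC One" beginning 2017-01-02 20:15, calculateFilename() yields
#   veryveryshort -> "News"
#   veryshort     -> "News - 20170102 2015"
#   short         -> "20170102 - News"
#   shortwithtime -> "20170102 2015 - News"
#   long          -> "20170102 2015 - BBC One - News - <description>"
#   default       -> "20170102 2015 - BBC One - News"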
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.setRecordingPreferredTuner()
try:
#not all images support recording type indicators
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref,False,pNavigation.isRealRecording)
except:
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
self.setRecordingPreferredTuner(setdefault=True)
return False
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime = self.begin + (self.end - self.begin) / 2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
self.description = evt.getShortDescription()
if self.description == "":
self.description = evt.getExtendedDescription()
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + self.record_service.getFilenameExtension(), self.begin, self.end, event_id, self.name.replace("\n", ""), self.description.replace("\n", ""), ' '.join(self.tags), bool(self.descramble), bool(self.record_ecm))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
# we must calc new start time before stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
# the next start time in evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
self.setRecordingPreferredTuner(setdefault=True)
return False
return True
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
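# Worked example (added): successive do_backoff() calls produce retry delays
# of 5, 10, 20, 40, 80 and then a capped 100 seconds - plain exponential
# backoff with an upper bound.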
def activate(self):
global wasRecTimerWakeup, InfoBar
if not InfoBar:
try:
from Screens.InfoBar import InfoBar
except Exception, e:
print "[RecordTimer] import from 'Screens.InfoBar import InfoBar' failed:", e
if os.path.exists("/tmp/was_rectimer_wakeup") and not wasRecTimerWakeup:
wasRecTimerWakeup = int(open("/tmp/was_rectimer_wakeup", "r").read()) and True or False
next_state = self.state + 1
if debug:
self.log(5, "activating state %d" % next_state)
# print "[TIMER] activate called",time(),next_state,self.first_try_prepare,' pending ',self.messageBoxAnswerPending,' justTried ',self.justTriedFreeingTuner,' show ',self.messageStringShow,self.messageString #TODO remove
if next_state == self.StatePrepared:
if self.messageBoxAnswerPending:
self.start_prepare = time() + 1 # call again in 1 second
return False
if self.justTriedFreeingTuner:
self.start_prepare = time() + 5 # tryPrepare in 5 seconds
self.justTriedFreeingTuner = False
return False
if not self.justplay and not self.freespace():
message = _("Write error while recording. Disk full?\n%s") % self.name
messageboxtyp = MessageBox.TYPE_ERROR
timeout = 5
id = "DiskFullMessage"
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessage(message, messageboxtyp, timeout)
else:
Notifications.AddPopup(message, messageboxtyp, timeout = timeout, id = id)
self.failed = True
self.next_activation = time()
self.end = time() + 5
self.backoff = 0
return True
if self.always_zap:
Screens.Standby.TVinStandby.skipHdmiCecNow('zapandrecordtimer')
if Screens.Standby.inStandby:
self.wasInStandby = True
#eActionMap.getInstance().bindAction('', -maxint - 1, self.keypress)
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
self.log(5, "wakeup and zap to recording service")
else:
cur_zap_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_zap_ref and not cur_zap_ref.getPath():# we do not zap away if it is no live service
self.setRecordingPreferredTuner()
self.failureCB(True)
self.log(5, "zap to recording service")
if self.tryPrepare():
if debug:
self.log(6, "prepare ok, waiting for begin")
if self.messageStringShow:
message = _("In order to record a timer, a tuner was freed successfully:\n\n") + self.messageString
messageboxtyp = MessageBox.TYPE_INFO
timeout = 20
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessage(message, messageboxtyp, timeout)
else:
Notifications.AddNotification(MessageBox, message, messageboxtyp, timeout = timeout)
# create file to "reserve" the filename
# because another recording at the same time on another service can try to record the same event
# i.e. cable / sat.. then the second recording needs an own extension... when we create the file
# here then calculateFilename is happy
if not self.justplay:
open(self.Filename + self.record_service.getFilenameExtension(), "w").close()
# give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle()
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if eStreamServer.getInstance().getConnectedClients():
eStreamServer.getInstance().stopStream()
return False
if self.first_try_prepare == 0:
# (0) try to make a tuner available by disabling PIP
self.first_try_prepare += 1
if not InfoBar: from Screens.InfoBar import InfoBar
from Screens.InfoBarGenerics import InfoBarPiP
from Components.ServiceEventTracker import InfoBarCount
InfoBarInstance = InfoBarCount == 1 and InfoBar.instance
if InfoBarInstance and InfoBarPiP.pipShown(InfoBarInstance) == True:
if config.recording.ask_to_abort_pip.value == "ask":
self.log(8, "asking user to disable PIP")
self.messageBoxAnswerPending = True
callback = self.failureCB_pip
message = _("A timer failed to record!\nDisable PIP and try again?\n")
messageboxtyp = MessageBox.TYPE_YESNO
timeout = 20
default = True
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
else:
Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
elif config.recording.ask_to_abort_pip.value in ("abort_no_msg", "abort_msg"):
self.log(8, "disable PIP without asking")
self.setRecordingPreferredTuner()
self.failureCB_pip(True)
return False
else:
self.log(8, "currently no PIP active... so we don't need to stop it")
if self.first_try_prepare == 1:
# (1) try to make a tuner available by aborting pseudo recordings
self.first_try_prepare += 1
self.backoff = 0
if len(NavigationInstance.instance.getRecordings(False,pNavigation.isPseudoRecording)) > 0:
if config.recording.ask_to_abort_pseudo_rec.value == "ask":
self.log(8, "asking user to abort pseudo recordings")
self.messageBoxAnswerPending = True
callback = self.failureCB_pseudo_rec
message = _("A timer failed to record!\nAbort pseudo recordings (e.g. EPG refresh) and try again?\n")
messageboxtyp = MessageBox.TYPE_YESNO
timeout = 20
default = True
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
else:
Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
elif config.recording.ask_to_abort_pseudo_rec.value in ("abort_no_msg", "abort_msg"):
self.log(8, "abort pseudo recordings without asking")
self.setRecordingPreferredTuner()
self.failureCB_pseudo_rec(True)
return False
else:
self.log(8, "currently no pseudo recordings active... so we don't need to stop it")
if self.first_try_prepare == 2:
# (2) try to make a tuner available by aborting streaming
self.first_try_prepare += 1
self.backoff = 0
if len(NavigationInstance.instance.getRecordings(False,pNavigation.isStreaming)) > 0:
if config.recording.ask_to_abort_streaming.value == "ask":
self.log(8, "asking user to abort streaming")
self.messageBoxAnswerPending = True
callback = self.failureCB_streaming
message = _("A timer failed to record!\nAbort streaming and try again?\n")
messageboxtyp = MessageBox.TYPE_YESNO
timeout = 20
default = True
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
else:
Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
elif config.recording.ask_to_abort_streaming.value in ("abort_no_msg", "abort_msg"):
self.log(8, "abort streaming without asking")
self.setRecordingPreferredTuner()
self.failureCB_streaming(True)
return False
else:
self.log(8, "currently no streaming active... so we don't need to stop it")
if self.first_try_prepare == 3:
# (3) try to make a tuner available by switching live TV to the recording service
self.first_try_prepare += 1
self.backoff = 0
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if Screens.Standby.inStandby:
self.setRecordingPreferredTuner()
self.failureCB(True)
elif not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
self.messageBoxAnswerPending = True
callback = self.failureCB
message = _("A timer failed to record!\nDisable TV and try again?\n")
messageboxtyp = MessageBox.TYPE_YESNO
timeout = 20
default = True
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
else:
Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
else: # zap without asking
self.log(9, "zap without asking")
self.setRecordingPreferredTuner()
self.failureCB(True)
return False
elif cur_ref:
self.log(8, "currently running service is not a live service... so stopping it makes no sense")
else:
self.log(8, "currently no service running... so we don't need to stop it")
if self.first_try_prepare == 4:
# (4) freeing a tuner failed
self.first_try_prepare += 1
self.log(8, "freeing a tuner failed")
if self.messageString:
Notifications.AddNotification(MessageBox, _("No tuner is available for recording a timer!\n\nThe following methods of freeing a tuner were tried without success:\n\n") + self.messageString, type=MessageBox.TYPE_INFO, timeout=20)
else:
Notifications.AddNotification(MessageBox, _("No tuner is available for recording a timer!\n"), type=MessageBox.TYPE_INFO, timeout=20)
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.failed:
return True
if self.justplay:
Screens.Standby.TVinStandby.skipHdmiCecNow('zaptimer')
if Screens.Standby.inStandby:
self.wasInStandby = True
#eActionMap.getInstance().bindAction('', -maxint - 1, self.keypress)
self.log(11, "wakeup and zap")
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
self.log(11, _("zapping"))
found = False
notFound = False
NavigationInstance.instance.isMovieplayerActive()
from Screens.ChannelSelection import ChannelSelection
ChannelSelectionInstance = ChannelSelection.instance
if ChannelSelectionInstance:
bqrootstr = getBqRootStr(self.service_ref.ref)
rootstr = ''
serviceHandler = eServiceCenter.getInstance()
rootbouquet = eServiceReference(bqrootstr)
bouquet = eServiceReference(bqrootstr)
bouquetlist = serviceHandler.list(bouquet)
# we need a way out of the loop if the channel is not in any bouquet
bouquetcount = 0
bouquets = []
if not bouquetlist is None:
while True:
bouquet = bouquetlist.getNext()
# can this be simplified? there should be a cleaner way to
# detect that we have cycled through all bouquets
if bouquets == []:
bouquets.append(bouquet)
else:
for x in bouquets:
if x != bouquet:
bouquets.append(bouquet)
else:
bouquetcount += 1
if bouquetcount >= 5:
notFound = True
break
if bouquet.flags & eServiceReference.isDirectory:
ChannelSelectionInstance.clearPath()
ChannelSelectionInstance.setRoot(bouquet)
servicelist = serviceHandler.list(bouquet)
if not servicelist is None:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
if self.service_ref.ref == serviceIterator:
found = True # as in failureCB() below; without this flag the bouquet path is never saved
break
serviceIterator = servicelist.getNext()
if self.service_ref.ref == serviceIterator:
found = True
break
if found:
ChannelSelectionInstance.enterPath(rootbouquet)
ChannelSelectionInstance.enterPath(bouquet)
ChannelSelectionInstance.saveRoot()
ChannelSelectionInstance.saveChannel(self.service_ref.ref)
if found:
ChannelSelectionInstance.addToHistory(self.service_ref.ref)
if notFound:
# service was not found in any bouquet:
# fall back to zapping via the full service list
self.switchToAll()
else:
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, _("start recording"))
record_res = self.record_service.start()
self.setRecordingPreferredTuner(setdefault=True)
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
return True
elif next_state == self.StateEnded or next_state == self.StateFailed:
old_end = self.end
if self.setAutoincreaseEnd():
self.log(12, "autoincrease recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
if self.justplay:
self.log(12, _("end zapping"))
else:
self.log(12, _("stop recording"))
if not self.justplay:
if self.record_service:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
NavigationInstance.instance.RecordTimer.saveTimer()
box_instandby = Screens.Standby.inStandby
tv_notactive = Screens.Standby.TVinStandby.getTVstate('notactive')
isRecordTime = abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - time()) <= 900 or NavigationInstance.instance.RecordTimer.getStillRecording()
if debug: print "[RECORDTIMER] box_instandby=%s" % box_instandby, "tv_notactive=%s" % tv_notactive, "wasRecTimerWakeup=%s" % wasRecTimerWakeup, "self.wasInStandby=%s" % self.wasInStandby, "self.afterEvent=%s" % self.afterEvent, "isRecordTime=%s" % isRecordTime
timeout = 180
default = True
messageboxtyp = MessageBox.TYPE_YESNO
if self.afterEvent == AFTEREVENT.STANDBY or (self.afterEvent == AFTEREVENT.AUTO and self.wasInStandby and (not wasRecTimerWakeup or (wasRecTimerWakeup and isRecordTime))):
if not box_instandby and not tv_notactive:# not already in standby
callback = self.sendStandbyNotification
message = _("A finished record timer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName())
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
else:
Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
elif not box_instandby:
self.sendStandbyNotification(True)
if isRecordTime or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - time()) <= 900:
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or (wasRecTimerWakeup and self.afterEvent == AFTEREVENT.AUTO and self.wasInStandby) or (self.afterEvent == AFTEREVENT.AUTO and wasRecTimerWakeup):
print '[Timer] Recording in progress or due within the next 15 mins, not returning to deep standby'
self.wasInStandby = False
return True
elif abs(NavigationInstance.instance.PowerTimer.getNextPowerManagerTime() - time()) <= 900 or NavigationInstance.instance.PowerTimer.isProcessing(exceptTimer = 0) or not NavigationInstance.instance.PowerTimer.isAutoDeepstandbyEnabled():
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or (wasRecTimerWakeup and self.afterEvent == AFTEREVENT.AUTO and self.wasInStandby) or (self.afterEvent == AFTEREVENT.AUTO and wasRecTimerWakeup):
print '[Timer] PowerTimer due within the next 15 mins or currently active, not returning to deep standby'
self.wasInStandby = False
resetTimerWakeup()
return True
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or (wasRecTimerWakeup and self.afterEvent == AFTEREVENT.AUTO and self.wasInStandby):
if not Screens.Standby.inTryQuitMainloop: # no shutdown messagebox is open
if not box_instandby and not tv_notactive: # not already in standby
callback = self.sendTryQuitMainloopNotification
message = _("A finished record timer wants to shut down\nyour %s %s. Shutdown now?") % (getMachineBrand(), getMachineName())
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarMessageWithCallback(callback, message, messageboxtyp, timeout, default)
else:
Notifications.AddNotificationWithCallback(callback, MessageBox, message, messageboxtyp, timeout = timeout, default = default)
else:
print "[RecordTimer] quitMainloop #1"
quitMainloop(1)
elif self.afterEvent == AFTEREVENT.AUTO and wasRecTimerWakeup:
if not Screens.Standby.inTryQuitMainloop: # no shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
print "[RecordTimer] quitMainloop #2"
quitMainloop(1)
self.wasInStandby = False
resetTimerWakeup()
return True
def keypress(self, key=None, flag=1):
if flag and self.wasInStandby:
self.wasInStandby = False
eActionMap.getInstance().unbindAction('', self.keypress)
def setAutoincreaseEnd(self, entry = None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
new_end = entry.begin -30
dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None and len(simulTimerList) > 1:
new_end = simulTimerList[1].begin
new_end -= 30 # allow 30 seconds for prepare
if new_end <= time():
return False
self.end = new_end
return True
def setRecordingPreferredTuner(self, setdefault=False):
if self.needChangePriorityFrontend:
elem = None
if not self.change_frontend and not setdefault:
elem = config.usage.recording_frontend_priority_intval.value
self.change_frontend = True
elif self.change_frontend and setdefault:
elem = config.usage.frontend_priority_intval.value
self.change_frontend = False
if elem is not None:
setPreferredTuner(int(elem))
def sendStandbyNotification(self, answer):
if answer:
session = Screens.Standby.Standby
option = None
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarSession(session, option)
else:
Notifications.AddNotification(session)
def sendTryQuitMainloopNotification(self, answer):
if answer:
session = Screens.Standby.TryQuitMainloop
option = 1
if InfoBar and InfoBar.instance:
InfoBar.instance.openInfoBarSession(session, option)
else:
Notifications.AddNotification(session, option)
def getNextActivation(self, getNextStbPowerOn = False):
self.isStillRecording = False
next_state = self.state + 1
if getNextStbPowerOn:
if next_state == 3:
self.isStillRecording = True
next_day = 0
count_day = 0
wd_timer = datetime.fromtimestamp(self.begin).isoweekday()*-1
wd_repeated = bin(128+int(self.repeated))
for s in range(wd_timer-1,-8,-1):
count_day +=1
if int(wd_repeated[s]):
next_day = s
break
if next_day == 0:
for s in range(-1,wd_timer-1,-1):
count_day +=1
if int(wd_repeated[s]):
next_day = s
break
#return self.begin + 86400 * count_day
return self.start_prepare + 86400 * count_day
elif next_state == 2:
return self.begin
elif next_state == 1:
return self.start_prepare
else:
return -1
if self.state == self.StateEnded or self.state == self.StateFailed:
if self.end > time():
self.isStillRecording = True
return self.end
if next_state == self.StateEnded or next_state == self.StateFailed:
if self.end > time():
self.isStillRecording = True
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end}[next_state]
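# Worked example for the repeat-day lookup above (added): 'repeated' is a
# weekday bitmask with bit 0 = Monday ... bit 6 = Sunday. For a Wednesday-only
# timer (repeated = 0b0000100) whose begin falls on a Monday,
# bin(128 + 4) == '0b10000100'; scanning backwards from the day after the
# timer's weekday hits the Wednesday bit at count_day == 2, so the next
# power-on time is start_prepare + 2 * 86400.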
def failureCB_pip(self, answer):
if answer:
self.log(13, "ok, disable PIP")
global InfoBar
if not InfoBar: from Screens.InfoBar import InfoBar
from Screens.InfoBarGenerics import InfoBarPiP
from Components.ServiceEventTracker import InfoBarCount
InfoBarInstance = InfoBarCount == 1 and InfoBar.instance
if InfoBarInstance:
InfoBarPiP.showPiP(InfoBarInstance)
self.messageString += _("Disabled PIP.\n")
else:
self.log(14, "tried to disable PIP, suddenly found no InfoBar.instance")
self.messageString += _("Tried to disable PIP, suddenly found no InfoBar.instance.\n")
if config.recording.ask_to_abort_pip.value in ("ask", "abort_msg"):
self.messageStringShow = True
self.justTriedFreeingTuner = True
else:
self.log(14, "user didn't want to disable PIP, try other methods of freeing a tuner")
self.messageBoxAnswerPending = False
def failureCB_pseudo_rec(self, answer):
if answer:
self.log(13, "ok, abort pseudo recordings")
for rec in NavigationInstance.instance.getRecordings(False,pNavigation.isPseudoRecording):
NavigationInstance.instance.stopRecordService(rec)
self.messageString += _("Aborted a pseudo recording.\n")
if config.recording.ask_to_abort_pseudo_rec.value in ("ask", "abort_msg"):
self.messageStringShow = True
self.justTriedFreeingTuner = True
else:
self.log(14, "user didn't want to abort pseudo recordings, try other methods of freeing a tuner")
self.messageBoxAnswerPending = False
def failureCB_streaming(self, answer):
if answer:
self.log(13, "ok, abort streaming")
for rec in NavigationInstance.instance.getRecordings(False,pNavigation.isStreaming):
NavigationInstance.instance.stopRecordService(rec)
self.messageString += _("Aborted a streaming service.\n")
if config.recording.ask_to_abort_streaming.value in ("ask", "abort_msg"):
self.messageStringShow = True
self.justTriedFreeingTuner = True
else:
self.log(14, "user didn't want to abort streaming, try other methods of freeing a tuner")
self.messageBoxAnswerPending = False
def failureCB(self, answer):
if answer:
self.log(13, "ok, zapped away")
self.messageString += _("The TV was switched to the recording service!\n")
self.messageStringShow = True
found = False
notFound = False
#NavigationInstance.instance.stopUserServices()
from Screens.ChannelSelection import ChannelSelection
ChannelSelectionInstance = ChannelSelection.instance
if ChannelSelectionInstance:
bqrootstr = getBqRootStr(self.service_ref.ref)
rootstr = ''
serviceHandler = eServiceCenter.getInstance()
rootbouquet = eServiceReference(bqrootstr)
bouquet = eServiceReference(bqrootstr)
bouquetlist = serviceHandler.list(bouquet)
# we need a way out of the loop if the channel is not in any bouquet
bouquetcount = 0
bouquets = []
if not bouquetlist is None:
while True:
bouquet = bouquetlist.getNext()
# can this be simplified? there should be a cleaner way to
# detect that we have cycled through all bouquets
if bouquets == []:
bouquets.append(bouquet)
else:
for x in bouquets:
if x != bouquet:
bouquets.append(bouquet)
else:
bouquetcount += 1
if bouquetcount >= 5:
notFound = True
break
if bouquet.flags & eServiceReference.isDirectory:
ChannelSelectionInstance.clearPath()
ChannelSelectionInstance.setRoot(bouquet)
servicelist = serviceHandler.list(bouquet)
if not servicelist is None:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
if self.service_ref.ref == serviceIterator:
found = True
break
serviceIterator = servicelist.getNext()
if self.service_ref.ref == serviceIterator:
found = True
break
if found:
ChannelSelectionInstance.enterPath(rootbouquet)
ChannelSelectionInstance.enterPath(bouquet)
ChannelSelectionInstance.saveRoot()
ChannelSelectionInstance.saveChannel(self.service_ref.ref)
if found:
ChannelSelectionInstance.addToHistory(self.service_ref.ref)
if notFound:
# service was not found in any bouquet:
# fall back to zapping via the full service list
self.switchToAll()
else:
NavigationInstance.instance.playService(self.service_ref.ref)
self.justTriedFreeingTuner = True
else:
self.log(14, "user didn't want to zap away, record will probably fail")
self.messageBoxAnswerPending = False
def switchToAll(self):
refStr = self.service_ref.ref.toString()
global InfoBar
if not InfoBar: from Screens.InfoBar import InfoBar
if refStr.startswith('1:0:2:'):
if InfoBar.instance.servicelist.mode != 1:
InfoBar.instance.servicelist.setModeRadio()
InfoBar.instance.servicelist.radioTV = 1
InfoBar.instance.servicelist.clearPath()
rootbouquet = eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet')
bouquet = eServiceReference('%s ORDER BY name'% service_types_radio)
else:
if InfoBar.instance.servicelist.mode != 0:
InfoBar.instance.servicelist.setModeTV()
InfoBar.instance.servicelist.radioTV = 0
InfoBar.instance.servicelist.clearPath()
rootbouquet = eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet')
bouquet = eServiceReference('%s ORDER BY name'% service_types_tv)
if InfoBar.instance.servicelist.bouquet_root != rootbouquet:
InfoBar.instance.servicelist.bouquet_root = rootbouquet
InfoBar.instance.servicelist.enterPath(bouquet)
InfoBar.instance.servicelist.setCurrentSelection(self.service_ref.ref)
InfoBar.instance.servicelist.zap(enable_pipzap = True)
InfoBar.instance.servicelist.correctChannelNumber()
InfoBar.instance.servicelist.startRoot = bouquet
InfoBar.instance.servicelist.addToHistory(self.service_ref.ref)
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) > 60 and int(old_prepare) != int(self.start_prepare):
self.log(15, _("record time changed, start prepare is now: %s") % ctime(self.start_prepare))
def check_justplay(self):
if self.justplay:
self.always_zap = False
def gotRecordEvent(self, record, event):
# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
if self.__record_service.__deref__() != record.__deref__():
return
# self.log(16, "record event %d" % event)
if event == iRecordableService.evRecordWriteError:
print "WRITE ERROR on recording, disk full?"
# show notification. the 'id' will make sure that it will be
# displayed only once, even if more timers are failing at the
# same time. (which is very likely in case of disk fullness)
Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
# ok, the recording has been stopped. we need to properly note
# that in our state, with also keeping the possibility to re-try.
# TODO: this has to be done.
elif event == iRecordableService.evStart:
text = _("A recording has been started:\n%s") % self.name
notify = config.usage.show_message_when_recording_starts.value and not Screens.Standby.inStandby
if self.dirnameHadToFallback:
text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
notify = True
if notify:
Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 3)
elif event == iRecordableService.evRecordAborted:
NavigationInstance.instance.RecordTimer.removeEntry(self)
elif event == iRecordableService.evGstRecordEnded:
if self.repeated:
self.processRepeated(findRunningEvent = False)
NavigationInstance.instance.RecordTimer.doActivate(self)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
if self.__record_service is not None:
# print "[remove callback]"
NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
self.__record_service = service
if self.__record_service is not None:
# print "[add callback]"
NavigationInstance.instance.record_event.append(self.gotRecordEvent)
record_service = property(lambda self: self.__record_service, setRecordService)
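# Note (added): assigning to record_service therefore transparently
# re-registers gotRecordEvent on NavigationInstance's record_event list -
# the old service's callback is removed and the new one appended.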
def createTimer(xml):
begin = int(xml.get("begin"))
end = int(xml.get("end"))
serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
description = xml.get("description").encode("utf-8")
repeated = xml.get("repeated").encode("utf-8")
disabled = long(xml.get("disabled") or "0")
justplay = long(xml.get("justplay") or "0")
always_zap = long(xml.get("always_zap") or "0")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"auto": AFTEREVENT.AUTO
}[afterevent]
eit = xml.get("eit")
if eit and eit != "None":
eit = long(eit)
else:
eit = None
location = xml.get("location")
if location and location != "None":
location = location.encode("utf-8")
else:
location = None
tags = xml.get("tags")
if tags and tags != "None":
tags = tags.encode("utf-8").split(' ')
else:
tags = None
descramble = int(xml.get("descramble") or "1")
record_ecm = int(xml.get("record_ecm") or "0")
isAutoTimer = int(xml.get("isAutoTimer") or "0")
name = xml.get("name").encode("utf-8")
#filename = xml.get("filename").encode("utf-8")
entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags, descramble = descramble, record_ecm = record_ecm, isAutoTimer = isAutoTimer, always_zap = always_zap)
entry.repeated = int(repeated)
for l in xml.findall("log"):
time = int(l.get("time"))
code = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((time, code, msg))
return entry
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
try:
self.timer_list.remove(w)
except:
print '[RecordTimer]: Remove list failed'
# did this timer reach the last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
w.first_try_prepare = 0 # changed from a bool to a counter, not renamed for compatibility with openWebif
w.messageBoxAnswerPending = False
w.justTriedFreeingTuner = False
w.messageString = "" # incremental MessageBox string
w.messageStringShow = False
self.addTimerEntry(w)
else:
# check for disabled timers, if time has passed set to completed.
self.cleanupDisabled()
# remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
insort(self.processed_timers, w)
self.stateChanged(w)
def isRecTimerWakeup(self):
global wasRecTimerWakeup
if os.path.exists("/tmp/was_rectimer_wakeup"):
wasRecTimerWakeup = int(open("/tmp/was_rectimer_wakeup", "r").read()) and True or False
else:
wasRecTimerWakeup = False
return wasRecTimerWakeup
def isRecording(self):
isRunning = False
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
isRunning = True
break
return isRunning
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
file = open(self.Filename, 'r')
doc = xml.etree.cElementTree.parse(file)
file.close()
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
# display a message when at least one timer overlaps another one
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
checkit = False # at the moment it is enough when the message is displayed once
def saveTimer(self):
list = ['<?xml version="1.0" ?>\n', '<timers>\n']
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname is not None:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags is not None:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append(' always_zap="' + str(int(timer.always_zap)) + '"')
list.append(' descramble="' + str(int(timer.descramble)) + '"')
list.append(' record_ecm="' + str(int(timer.record_ecm)) + '"')
list.append(' isAutoTimer="' + str(int(timer.isAutoTimer)) + '"')
list.append('>\n')
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
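		# write atomically: dump everything to timers.xml.writing, flush and
		# fsync it, then rename over the real file so a crash or power loss
		# can never leave a half-written timers.xml behind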
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self):
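		# return the begin time of the next pending zap-only timer,
		# or -1 when no such timer exists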
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now:
continue
return timer.begin
return -1
def getStillRecording(self):
isStillRecording = False
now = time()
for timer in self.timer_list:
if timer.isStillRecording:
isStillRecording = True
break
elif abs(timer.begin - now) <= 10 and not abs(timer.end - now) <= 10:
isStillRecording = True
break
return isStillRecording
def getNextRecordingTimeOld(self, getNextStbPowerOn = False):
now = time()
if getNextStbPowerOn:
save_act = -1, 0
for timer in self.timer_list:
next_act = timer.getNextActivation(getNextStbPowerOn)
if timer.justplay or next_act + 3 < now:
continue
if debug: print "[recordtimer] next stb power up", strftime("%a, %Y/%m/%d %H:%M", localtime(next_act))
if save_act[0] == -1:
save_act = next_act, int(not timer.always_zap)
else:
if next_act < save_act[0]:
save_act = next_act, int(not timer.always_zap)
return save_act
else:
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act + 3 < now or timer.end == next_act:
continue
return next_act
return -1
def getNextRecordingTime(self, getNextStbPowerOn = False):
		# getNextStbPowerOn=True returns a tuple -> (timer.begin, set_standby)
nextrectime = self.getNextRecordingTimeOld(getNextStbPowerOn)
faketime = time()+300
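		# while timeshift is recording, report a pseudo recording start five
		# minutes ahead so power management will not shut the box down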
if getNextStbPowerOn:
if config.timeshift.isRecording.value:
if 0 < nextrectime[0] < faketime:
return nextrectime
else:
return faketime, 0
else:
return nextrectime
else:
if config.timeshift.isRecording.value:
if 0 < nextrectime < faketime:
return nextrectime
else:
return faketime
else:
return nextrectime
	def isNextRecordAfterEventActionAuto(self):
		# Navigation.py needs True for any pending timer, regardless of its
		# after-event type, so the per-timer afterEvent checks that used to
		# live here are deliberately short-circuited: any queued timer counts.
		for timer in self.timer_list:
			return True
		return False
def record(self, entry, ignoreTSC=False, dosave=True): # is called by loadTimer with argument dosave=False
entry.check_justplay()
timersanitycheck = TimerSanityCheck(self.timer_list,entry)
if not timersanitycheck.check():
if not ignoreTSC:
print "timer conflict detected!"
return timersanitycheck.getSimulTimerList()
else:
print "ignore timer conflict"
elif timersanitycheck.doubleCheck():
print "ignore double timer"
return None
entry.timeChanged()
print "[Timer] Record " + str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def isInTimer(self, eventid, begin, duration, service, getTimer = False):
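		# Compute how an EPG event (begin/duration on the given service ref)
		# overlaps the stored timers. Returns None when nothing matches, else
		# (matched_seconds, match_type, isAutoTimer[, timer]); match_type
		# encodes last/full/inner/first overlap (1..4), offset by +5 for
		# justplay (zap) timers and +10 for always_zap timers.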
returnValue = None
type = 0
time_match = 0
isAutoTimer = False
bt = None
check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
end = begin + duration
refstr = ':'.join(service.split(':')[:11])
for x in self.timer_list:
if x.isAutoTimer == 1:
isAutoTimer = True
else:
isAutoTimer = False
check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
if check:
timer_end = x.end
timer_begin = x.begin
type_offset = 0
if not x.repeated and check_offset_time:
if 0 < end - timer_end <= 59:
timer_end = end
elif 0 < timer_begin - begin <= 59:
timer_begin = begin
if x.justplay:
type_offset = 5
if (timer_end - x.begin) <= 1:
timer_end += 60
if x.always_zap:
type_offset = 10
if x.repeated != 0:
if bt is None:
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(x.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = x.begin < begin or begin <= x.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = x.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - x.begin) / 60)
if xend < xbegin:
xend += 1440
if x.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
else:
if begin < timer_begin <= end:
if timer_end < end:
# recording within event
time_match = timer_end - timer_begin
type = type_offset + 3
else:
# recording last part of event
time_match = end - timer_begin
type = type_offset + 1
elif timer_begin <= begin <= timer_end:
if timer_end < end:
# recording first part of event
time_match = timer_end - begin
type = type_offset + 4
if x.justplay:
type = type_offset + 2
else: # recording whole event
time_match = end - begin
type = type_offset + 2
if time_match:
if getTimer:
returnValue = (time_match, type, isAutoTimer, x)
else:
returnValue = (time_match, type, isAutoTimer)
if type in (2,7,12): # when full recording do not look further
break
return returnValue
def removeEntry(self, entry):
print "[Timer] Remove " + str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
# print "state: ", entry.state
# print "in processed: ", entry in self.processed_timers
# print "in running: ", entry in self.timer_list
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
| gpl-2.0 |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 487 | 4374 |
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
## Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
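# Illustrative sketch (not part of this module): inspecting a MaxRetryError.
# The host name below is hypothetical.
#
#   import urllib3
#   try:
#       urllib3.PoolManager().request('GET', 'http://unreachable.invalid/',
#                                     retries=0)
#   except urllib3.exceptions.MaxRetryError as err:
#       print(err.url, err.reason)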
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
| mit |
vshtanko/scikit-learn | sklearn/utils/validation.py | 66 | 23629 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
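# Example (illustrative, not part of the original module): integer input is
# upcast to a float array.
#
#   >>> as_float_array(np.array([1, 2, 3])).dtype
#   dtype('float64')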
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
        # Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
    Under Python 3, there is no longer a `long` type, so the `L` suffix is
    never introduced in the string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
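# Example (illustrative): mixed inputs of consistent length pass through,
# with plain sequences converted to arrays and sparse matrices to CSR.
#
#   X_csr, y_arr, groups = indexable(sp.eye(3), [0, 1, 0], None)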
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
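# Minimal usage sketch (illustrative, not part of the original module):
#
#   >>> check_array([1, 2, 3])            # 1d input is promoted to 2d
#   array([[1, 2, 3]])
#   >>> check_array([[1, 2], [3, 4]]).shape
#   (2, 2)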
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
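# Example (illustrative):
#
#   >>> rng = check_random_state(0)       # fresh, seeded RandomState
#   >>> check_random_state(rng) is rng    # instances pass through untouched
#   True
#   >>> check_random_state(None) is np.random.mtrand._rand
#   True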
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
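# Example (illustrative): a matrix that is asymmetric beyond the tolerance is
# averaged with its transpose (and a warning is emitted by default).
#
#   >>> a = np.array([[0., 2.], [1., 0.]])
#   >>> check_symmetric(a, raise_warning=False)[0, 1]
#   1.5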
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
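# Typical guard inside an estimator method (illustrative sketch; `coef_` is
# just an example attribute name):
#
#   def predict(self, X):
#       check_is_fitted(self, "coef_")   # raises NotFittedError before fit()
#       X = check_array(X, accept_sparse="csr")
#       ...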
| bsd-3-clause |
testmana2/test | Plugins/VcsPlugins/vcsMercurial/QueuesExtension/queues.py | 2 | 35768 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the queues extension interface.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
import os
from PyQt5.QtCore import QProcess
from PyQt5.QtWidgets import QDialog, QApplication, QInputDialog
from E5Gui import E5MessageBox
from ..HgExtension import HgExtension
from ..HgDialog import HgDialog
class Queues(HgExtension):
"""
Class implementing the queues extension interface.
"""
APPLIED_LIST = 0
UNAPPLIED_LIST = 1
SERIES_LIST = 2
POP = 0
PUSH = 1
GOTO = 2
QUEUE_DELETE = 0
QUEUE_PURGE = 1
QUEUE_ACTIVATE = 2
def __init__(self, vcs):
"""
Constructor
@param vcs reference to the Mercurial vcs object
"""
super(Queues, self).__init__(vcs)
self.qdiffDialog = None
self.qheaderDialog = None
self.queuesListDialog = None
self.queuesListGuardsDialog = None
self.queuesListAllGuardsDialog = None
self.queuesDefineGuardsDialog = None
self.queuesListQueuesDialog = None
self.queueStatusDialog = None
def shutdown(self):
"""
Public method used to shutdown the queues interface.
"""
if self.qdiffDialog is not None:
self.qdiffDialog.close()
if self.qheaderDialog is not None:
self.qheaderDialog.close()
if self.queuesListDialog is not None:
self.queuesListDialog.close()
if self.queuesListGuardsDialog is not None:
self.queuesListGuardsDialog.close()
if self.queuesListAllGuardsDialog is not None:
self.queuesListAllGuardsDialog.close()
if self.queuesDefineGuardsDialog is not None:
self.queuesDefineGuardsDialog.close()
if self.queuesListQueuesDialog is not None:
self.queuesListQueuesDialog.close()
if self.queueStatusDialog is not None:
self.queueStatusDialog.close()
def __getPatchesList(self, repodir, listType, withSummary=False):
"""
Private method to get a list of patches of a given type.
@param repodir directory name of the repository (string)
@param listType type of patches list to get
(Queues.APPLIED_LIST, Queues.UNAPPLIED_LIST, Queues.SERIES_LIST)
@param withSummary flag indicating to get a summary as well (boolean)
@return list of patches (list of string)
@exception ValueError raised to indicate an invalid patch list type
"""
patchesList = []
if listType == Queues.APPLIED_LIST:
args = self.vcs.initCommand("qapplied")
elif listType == Queues.UNAPPLIED_LIST:
args = self.vcs.initCommand("qunapplied")
elif listType == Queues.SERIES_LIST:
args = self.vcs.initCommand("qseries")
else:
raise ValueError("illegal value for listType")
if withSummary:
args.append("--summary")
client = self.vcs.getClient()
output = ""
if client:
output = client.runcommand(args)[0]
else:
process = QProcess()
process.setWorkingDirectory(repodir)
process.start('hg', args)
procStarted = process.waitForStarted(5000)
if procStarted:
finished = process.waitForFinished(30000)
if finished and process.exitCode() == 0:
output = str(process.readAllStandardOutput(),
self.vcs.getEncoding(), 'replace')
for line in output.splitlines():
if withSummary:
li = line.strip().split(": ")
if len(li) == 1:
patch, summary = li[0][:-1], ""
else:
patch, summary = li[0], li[1]
patchesList.append("{0}@@{1}".format(patch, summary))
else:
patchesList.append(line.strip())
return patchesList
def __getCurrentPatch(self, repodir):
"""
Private method to get the name of the current patch.
@param repodir directory name of the repository (string)
@return name of the current patch (string)
"""
currentPatch = ""
args = self.vcs.initCommand("qtop")
client = self.vcs.getClient()
if client:
currentPatch = client.runcommand(args)[0].strip()
else:
process = QProcess()
process.setWorkingDirectory(repodir)
process.start('hg', args)
procStarted = process.waitForStarted(5000)
if procStarted:
finished = process.waitForFinished(30000)
if finished and process.exitCode() == 0:
currentPatch = str(process.readAllStandardOutput(),
self.vcs.getEncoding(),
'replace').strip()
return currentPatch
def __getCommitMessage(self, repodir):
"""
Private method to get the commit message of the current patch.
@param repodir directory name of the repository (string)
@return name of the current patch (string)
"""
message = ""
args = self.vcs.initCommand("qheader")
client = self.vcs.getClient()
if client:
message = client.runcommand(args)[0]
else:
process = QProcess()
process.setWorkingDirectory(repodir)
process.start('hg', args)
procStarted = process.waitForStarted(5000)
if procStarted:
finished = process.waitForFinished(30000)
if finished and process.exitCode() == 0:
message = str(process.readAllStandardOutput(),
self.vcs.getEncoding(), 'replace')
return message
def getGuardsList(self, repodir, all=True):
"""
Public method to get a list of all guards defined.
@param repodir directory name of the repository (string)
@param all flag indicating to get all guards (boolean)
@return sorted list of guards (list of strings)
"""
guardsList = []
args = self.vcs.initCommand("qselect")
if all:
args.append("--series")
client = self.vcs.getClient()
output = ""
if client:
output = client.runcommand(args)[0]
else:
process = QProcess()
process.setWorkingDirectory(repodir)
process.start('hg', args)
procStarted = process.waitForStarted(5000)
if procStarted:
finished = process.waitForFinished(30000)
if finished and process.exitCode() == 0:
output = str(process.readAllStandardOutput(),
self.vcs.getEncoding(), 'replace')
for guard in output.splitlines():
guard = guard.strip()
if all:
guard = guard[1:]
if guard not in guardsList:
guardsList.append(guard)
return sorted(guardsList)
def hgQueueNewPatch(self, name):
"""
Public method to create a new named patch.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
from .HgQueuesNewPatchDialog import HgQueuesNewPatchDialog
dlg = HgQueuesNewPatchDialog(HgQueuesNewPatchDialog.NEW_MODE)
if dlg.exec_() == QDialog.Accepted:
name, message, (userData, currentUser, userName), \
(dateData, currentDate, dateStr) = dlg.getData()
args = self.vcs.initCommand("qnew")
if message != "":
args.append("--message")
args.append(message)
if userData:
if currentUser:
args.append("--currentuser")
else:
args.append("--user")
args.append(userName)
if dateData:
if currentDate:
args.append("--currentdate")
else:
args.append("--date")
args.append(dateStr)
args.append(name)
dia = HgDialog(self.tr('New Patch'), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
self.vcs.checkVCSStatus()
def hgQueueRefreshPatch(self, name, editMessage=False):
"""
Public method to refresh the current patch.
@param name file/directory name (string)
@param editMessage flag indicating to edit the current
commit message (boolean)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
args = self.vcs.initCommand("qrefresh")
if editMessage:
currentMessage = self.__getCommitMessage(repodir)
from .HgQueuesNewPatchDialog import HgQueuesNewPatchDialog
dlg = HgQueuesNewPatchDialog(HgQueuesNewPatchDialog.REFRESH_MODE,
currentMessage)
if dlg.exec_() == QDialog.Accepted:
name, message, (userData, currentUser, userName), \
(dateData, currentDate, dateStr) = dlg.getData()
if message != "" and message != currentMessage:
args.append("--message")
args.append(message)
if userData:
if currentUser:
args.append("--currentuser")
else:
args.append("--user")
args.append(userName)
if dateData:
if currentDate:
args.append("--currentdate")
else:
args.append("--date")
args.append(dateStr)
else:
return
dia = HgDialog(self.tr('Update Current Patch'), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
self.vcs.checkVCSStatus()
def hgQueueShowPatch(self, name):
"""
Public method to show the contents of the current patch.
@param name file/directory name (string)
"""
from ..HgDiffDialog import HgDiffDialog
self.qdiffDialog = HgDiffDialog(self.vcs)
self.qdiffDialog.show()
QApplication.processEvents()
self.qdiffDialog.start(name, qdiff=True)
def hgQueueShowHeader(self, name):
"""
Public method to show the commit message of the current patch.
@param name file/directory name (string)
"""
from .HgQueuesHeaderDialog import HgQueuesHeaderDialog
self.qheaderDialog = HgQueuesHeaderDialog(self.vcs)
self.qheaderDialog.show()
QApplication.processEvents()
self.qheaderDialog.start(name)
def hgQueuePushPopPatches(self, name, operation, all=False, named=False,
force=False):
"""
Public method to push patches onto the stack or pop patches off the
stack.
@param name file/directory name (string)
@param operation operation type to be performed (Queues.POP,
Queues.PUSH, Queues.GOTO)
@keyparam all flag indicating to push/pop all (boolean)
@keyparam named flag indicating to push/pop until a named patch
is at the top of the stack (boolean)
@keyparam force flag indicating a forceful pop (boolean)
@return flag indicating that the project should be reread (boolean)
@exception ValueError raised to indicate an invalid operation
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return False
if operation == Queues.POP:
args = self.vcs.initCommand("qpop")
title = self.tr("Pop Patches")
listType = Queues.APPLIED_LIST
elif operation == Queues.PUSH:
args = self.vcs.initCommand("qpush")
title = self.tr("Push Patches")
listType = Queues.UNAPPLIED_LIST
elif operation == Queues.GOTO:
args = self.vcs.initCommand("qgoto")
title = self.tr("Go to Patch")
listType = Queues.SERIES_LIST
else:
raise ValueError("illegal value for operation")
args.append("-v")
if force:
args.append("--force")
if all and operation in (Queues.POP, Queues.PUSH):
args.append("--all")
elif named or operation == Queues.GOTO:
patchnames = self.__getPatchesList(repodir, listType)
if patchnames:
patch, ok = QInputDialog.getItem(
None,
self.tr("Select Patch"),
self.tr("Select the target patch name:"),
patchnames,
0, False)
if ok and patch:
args.append(patch)
else:
return False
else:
E5MessageBox.information(
None,
self.tr("Select Patch"),
self.tr("""No patches to select from."""))
return False
dia = HgDialog(title, self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
res = dia.hasAddOrDelete()
self.vcs.checkVCSStatus()
return res
def hgQueueListPatches(self, name):
"""
Public method to show a list of all patches.
@param name file/directory name (string)
"""
from .HgQueuesListDialog import HgQueuesListDialog
self.queuesListDialog = HgQueuesListDialog(self.vcs)
self.queuesListDialog.show()
self.queuesListDialog.start(name)
def hgQueueFinishAppliedPatches(self, name):
"""
Public method to finish all applied patches.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
args = self.vcs.initCommand("qfinish")
args.append("--applied")
dia = HgDialog(self.tr('Finish Applied Patches'), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
self.vcs.checkVCSStatus()
def hgQueueRenamePatch(self, name):
"""
Public method to rename the current or a selected patch.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
args = self.vcs.initCommand("qrename")
patchnames = sorted(self.__getPatchesList(repodir, Queues.SERIES_LIST))
if patchnames:
currentPatch = self.__getCurrentPatch(repodir)
if currentPatch:
from .HgQueuesRenamePatchDialog import \
HgQueuesRenamePatchDialog
dlg = HgQueuesRenamePatchDialog(currentPatch, patchnames)
if dlg.exec_() == QDialog.Accepted:
newName, selectedPatch = dlg.getData()
if selectedPatch:
args.append(selectedPatch)
args.append(newName)
dia = HgDialog(self.tr("Rename Patch"), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
def hgQueueDeletePatch(self, name):
"""
Public method to delete a selected unapplied patch.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
args = self.vcs.initCommand("qdelete")
patchnames = sorted(self.__getPatchesList(repodir,
Queues.UNAPPLIED_LIST))
if patchnames:
patch, ok = QInputDialog.getItem(
None,
self.tr("Select Patch"),
self.tr("Select the patch to be deleted:"),
patchnames,
0, False)
if ok and patch:
args.append(patch)
dia = HgDialog(self.tr("Delete Patch"), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
else:
E5MessageBox.information(
None,
self.tr("Select Patch"),
self.tr("""No patches to select from."""))
def hgQueueFoldUnappliedPatches(self, name):
"""
Public method to fold patches into the current patch.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
args = self.vcs.initCommand("qfold")
patchnames = sorted(
self.__getPatchesList(repodir, Queues.UNAPPLIED_LIST,
withSummary=True))
if patchnames:
from .HgQueuesFoldDialog import HgQueuesFoldDialog
dlg = HgQueuesFoldDialog(patchnames)
if dlg.exec_() == QDialog.Accepted:
message, patchesList = dlg.getData()
if message:
args.append("--message")
args.append(message)
if patchesList:
args.extend(patchesList)
dia = HgDialog(self.tr("Fold Patches"), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
else:
E5MessageBox.information(
None,
self.tr("Fold Patches"),
self.tr("""No patches selected."""))
else:
E5MessageBox.information(
None,
self.tr("Fold Patches"),
self.tr("""No patches available to be folded."""))
def hgQueueGuardsList(self, name):
"""
Public method to list the guards for the current or a named patch.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
patchnames = sorted(
self.__getPatchesList(repodir, Queues.SERIES_LIST))
if patchnames:
from .HgQueuesListGuardsDialog import HgQueuesListGuardsDialog
self.queuesListGuardsDialog = \
HgQueuesListGuardsDialog(self.vcs, patchnames)
self.queuesListGuardsDialog.show()
self.queuesListGuardsDialog.start(name)
else:
E5MessageBox.information(
None,
self.tr("List Guards"),
self.tr("""No patches available to list guards for."""))
def hgQueueGuardsListAll(self, name):
"""
Public method to list all guards of all patches.
@param name file/directory name (string)
"""
from .HgQueuesListAllGuardsDialog import HgQueuesListAllGuardsDialog
self.queuesListAllGuardsDialog = HgQueuesListAllGuardsDialog(self.vcs)
self.queuesListAllGuardsDialog.show()
self.queuesListAllGuardsDialog.start(name)
def hgQueueGuardsDefine(self, name):
"""
Public method to define guards for the current or a named patch.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
patchnames = sorted(
self.__getPatchesList(repodir, Queues.SERIES_LIST))
if patchnames:
from .HgQueuesDefineGuardsDialog import HgQueuesDefineGuardsDialog
self.queuesDefineGuardsDialog = HgQueuesDefineGuardsDialog(
self.vcs, self, patchnames)
self.queuesDefineGuardsDialog.show()
self.queuesDefineGuardsDialog.start(name)
else:
E5MessageBox.information(
None,
self.tr("Define Guards"),
self.tr("""No patches available to define guards for."""))
def hgQueueGuardsDropAll(self, name):
"""
Public method to drop all guards of the current or a named patch.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
patchnames = sorted(
self.__getPatchesList(repodir, Queues.SERIES_LIST))
if patchnames:
patch, ok = QInputDialog.getItem(
None,
self.tr("Drop All Guards"),
self.tr("Select the patch to drop guards for"
" (leave empty for the current patch):"),
[""] + patchnames,
0, False)
if ok:
args = self.vcs.initCommand("qguard")
if patch:
args.append(patch)
args.append("--none")
client = self.vcs.getClient()
if client:
client.runcommand(args)
else:
process = QProcess()
process.setWorkingDirectory(repodir)
process.start('hg', args)
procStarted = process.waitForStarted(5000)
if procStarted:
process.waitForFinished(30000)
else:
E5MessageBox.information(
None,
self.tr("Drop All Guards"),
self.tr("""No patches available to define guards for."""))
def hgQueueGuardsSetActive(self, name):
"""
Public method to set the active guards.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
guardsList = self.getGuardsList(repodir)
if guardsList:
activeGuardsList = self.getGuardsList(repodir, all=False)
from .HgQueuesGuardsSelectionDialog import \
HgQueuesGuardsSelectionDialog
dlg = HgQueuesGuardsSelectionDialog(
guardsList, activeGuards=activeGuardsList, listOnly=False)
if dlg.exec_() == QDialog.Accepted:
guards = dlg.getData()
if guards:
args = self.vcs.initCommand("qselect")
args.extend(guards)
dia = HgDialog(self.tr('Set Active Guards'), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
else:
E5MessageBox.information(
None,
self.tr("Set Active Guards"),
self.tr("""No guards available to select from."""))
return
def hgQueueGuardsDeactivate(self, name):
"""
Public method to deactivate all active guards.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
args = self.vcs.initCommand("qselect")
args.append("--none")
dia = HgDialog(self.tr('Deactivate Guards'), self.vcs)
res = dia.startProcess(args, repodir)
if res:
dia.exec_()
def hgQueueGuardsIdentifyActive(self, name):
"""
Public method to list all active guards.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
guardsList = self.getGuardsList(repodir, all=False)
if guardsList:
from .HgQueuesGuardsSelectionDialog import \
HgQueuesGuardsSelectionDialog
dlg = HgQueuesGuardsSelectionDialog(guardsList, listOnly=True)
dlg.exec_()
def hgQueueCreateRenameQueue(self, name, isCreate):
"""
Public method to create a new queue or rename the active queue.
@param name file/directory name (string)
@param isCreate flag indicating to create a new queue (boolean)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
if isCreate:
title = self.tr("Create New Queue")
else:
title = self.tr("Rename Active Queue")
from .HgQueuesQueueManagementDialog import \
HgQueuesQueueManagementDialog
dlg = HgQueuesQueueManagementDialog(
HgQueuesQueueManagementDialog.NAME_INPUT,
title, False, repodir, self.vcs)
if dlg.exec_() == QDialog.Accepted:
queueName = dlg.getData()
if queueName:
args = self.vcs.initCommand("qqueue")
if isCreate:
args.append("--create")
else:
args.append("--rename")
args.append(queueName)
client = self.vcs.getClient()
error = ""
if client:
error = client.runcommand(args)[1]
else:
process = QProcess()
process.setWorkingDirectory(repodir)
process.start('hg', args)
procStarted = process.waitForStarted(5000)
if procStarted:
finished = process.waitForFinished(30000)
if finished:
if process.exitCode() != 0:
error = str(process.readAllStandardError(),
self.vcs.getEncoding(), 'replace')
if error:
if isCreate:
errMsg = self.tr(
"Error while creating a new queue.")
else:
errMsg = self.tr(
"Error while renaming the active queue.")
E5MessageBox.warning(
None,
title,
"""<p>{0}</p><p>{1}</p>""".format(errMsg, error))
else:
if self.queuesListQueuesDialog is not None and \
self.queuesListQueuesDialog.isVisible():
self.queuesListQueuesDialog.refresh()
def hgQueueDeletePurgeActivateQueue(self, name, operation):
"""
Public method to delete the reference to a queue and optionally
remove the patch directory or set the active queue.
@param name file/directory name (string)
@param operation operation to be performed (Queues.QUEUE_DELETE,
Queues.QUEUE_PURGE, Queues.QUEUE_ACTIVATE)
@exception ValueError raised to indicate an invalid operation
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
if operation == Queues.QUEUE_PURGE:
title = self.tr("Purge Queue")
elif operation == Queues.QUEUE_DELETE:
title = self.tr("Delete Queue")
elif operation == Queues.QUEUE_ACTIVATE:
title = self.tr("Activate Queue")
else:
raise ValueError("illegal value for operation")
from .HgQueuesQueueManagementDialog import \
HgQueuesQueueManagementDialog
dlg = HgQueuesQueueManagementDialog(
HgQueuesQueueManagementDialog.QUEUE_INPUT,
title, True, repodir, self.vcs)
if dlg.exec_() == QDialog.Accepted:
queueName = dlg.getData()
if queueName:
args = self.vcs.initCommand("qqueue")
if operation == Queues.QUEUE_PURGE:
args.append("--purge")
elif operation == Queues.QUEUE_DELETE:
args.append("--delete")
args.append(queueName)
client = self.vcs.getClient()
error = ""
if client:
error = client.runcommand(args)[1]
else:
process = QProcess()
process.setWorkingDirectory(repodir)
process.start('hg', args)
procStarted = process.waitForStarted(5000)
if procStarted:
finished = process.waitForFinished(30000)
if finished:
if process.exitCode() != 0:
error = str(process.readAllStandardError(),
self.vcs.getEncoding(), 'replace')
if error:
if operation == Queues.QUEUE_PURGE:
errMsg = self.tr("Error while purging the queue.")
elif operation == Queues.QUEUE_DELETE:
errMsg = self.tr("Error while deleting the queue.")
elif operation == Queues.QUEUE_ACTIVATE:
errMsg = self.tr(
"Error while setting the active queue.")
E5MessageBox.warning(
None,
title,
"""<p>{0}</p><p>{1}</p>""".format(errMsg, error))
else:
if self.queuesListQueuesDialog is not None and \
self.queuesListQueuesDialog.isVisible():
self.queuesListQueuesDialog.refresh()
def hgQueueListQueues(self, name):
"""
Public method to list available queues.
@param name file/directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
from .HgQueuesQueueManagementDialog import \
HgQueuesQueueManagementDialog
self.queuesListQueuesDialog = HgQueuesQueueManagementDialog(
HgQueuesQueueManagementDialog.NO_INPUT,
self.tr("Available Queues"),
False, repodir, self.vcs)
self.queuesListQueuesDialog.show()
def hgQueueInit(self, name):
"""
Public method to initialize a new queue repository.
@param name directory name (string)
"""
# find the root of the repo
repodir = self.vcs.splitPath(name)[0]
while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):
repodir = os.path.dirname(repodir)
if os.path.splitdrive(repodir)[1] == os.sep:
return
args = self.vcs.initCommand("init")
args.append('--mq')
args.append(repodir)
# init is not possible with the command server
dia = HgDialog(
self.tr('Initializing new queue repository'), self.vcs)
res = dia.startProcess(args)
if res:
dia.exec_()
def hgQueueStatus(self, name):
"""
Public method used to view the status of a queue repository.
@param name directory name (string)
"""
from ..HgStatusDialog import HgStatusDialog
self.queueStatusDialog = HgStatusDialog(self.vcs, mq=True)
self.queueStatusDialog.show()
self.queueStatusDialog.start(name)
| gpl-3.0 |
angelapper/edx-platform | lms/djangoapps/ccx/plugins.py | 23 | 1306 | """
Registers the CCX feature for the edX platform.
"""
from django.conf import settings
from django.utils.translation import ugettext_noop
from courseware.access import has_access
from student.roles import CourseCcxCoachRole
from xmodule.tabs import CourseTab
class CcxCourseTab(CourseTab):
"""
The representation of the CCX course tab
"""
type = "ccx_coach"
title = ugettext_noop("CCX Coach")
view_name = "ccx_coach_dashboard"
is_dynamic = True # The CCX view is dynamically added to the set of tabs when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if CCX has been enabled and the specified user is a coach
"""
if not settings.FEATURES.get('CUSTOM_COURSES_EDX', False) or not course.enable_ccx:
# If ccx is not enable do not show ccx coach tab.
return False
is_staff_or_instructor = has_access(user, 'staff', course) or has_access(user, 'instructor', course)
if hasattr(course.id, 'ccx') and is_staff_or_instructor:
# if user is staff or instructor then he can always see ccx coach tab.
return True
# check if user has coach access.
role = CourseCcxCoachRole(course.id)
return role.has_user(user)
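# Illustrative sketch (not part of the plugin; ``course`` and ``coach`` are
# hypothetical): with CUSTOM_COURSES_EDX enabled and course.enable_ccx True,
# tab visibility for a non-staff user reduces to the coach-role check above:
#
#   role = CourseCcxCoachRole(course.id)
#   shown = CcxCourseTab.is_enabled(course, user=coach)  # True iff role.has_user(coach)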
| agpl-3.0 |
ESS-LLP/erpnext | erpnext/accounts/doctype/tax_rule/test_tax_rule.py | 7 | 6035 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.accounts.doctype.tax_rule.tax_rule import IncorrectCustomerGroup, IncorrectSupplierType, ConflictingTaxRule, get_tax_template
test_records = frappe.get_test_records('Tax Rule')
from six import iteritems
class TestTaxRule(unittest.TestCase):
def setUp(self):
frappe.db.sql("delete from `tabTax Rule`")
def tearDown(self):
frappe.db.sql("delete from `tabTax Rule`")
def test_conflict(self):
tax_rule1 = make_tax_rule(customer= "_Test Customer",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority = 1)
tax_rule1.save()
tax_rule2 = make_tax_rule(customer= "_Test Customer",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority = 1)
self.assertRaises(ConflictingTaxRule, tax_rule2.save)
def test_conflict_with_non_overlapping_dates(self):
tax_rule1 = make_tax_rule(customer= "_Test Customer",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority = 1, from_date = "2015-01-01")
tax_rule1.save()
tax_rule2 = make_tax_rule(customer= "_Test Customer",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority = 1, to_date = "2013-01-01")
tax_rule2.save()
self.assertTrue(tax_rule2.name)
def test_for_parent_customer_group(self):
tax_rule1 = make_tax_rule(customer_group= "All Customer Groups",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority = 1, from_date = "2015-01-01")
tax_rule1.save()
self.assertEqual(get_tax_template("2015-01-01", {"customer_group" : "Commercial", "use_for_shopping_cart":0}),
"_Test Sales Taxes and Charges Template - _TC")
def test_conflict_with_overlapping_dates(self):
tax_rule1 = make_tax_rule(customer= "_Test Customer",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority = 1, from_date = "2015-01-01", to_date = "2015-01-05")
tax_rule1.save()
tax_rule2 = make_tax_rule(customer= "_Test Customer",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority = 1, from_date = "2015-01-03", to_date = "2015-01-09")
self.assertRaises(ConflictingTaxRule, tax_rule2.save)
def test_tax_template(self):
tax_rule = make_tax_rule()
self.assertEqual(tax_rule.purchase_tax_template, None)
def test_select_tax_rule_based_on_customer(self):
make_tax_rule(customer= "_Test Customer",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", save=1)
make_tax_rule(customer= "_Test Customer 1",
sales_tax_template = "_Test Sales Taxes and Charges Template 1 - _TC", save=1)
make_tax_rule(customer= "_Test Customer 2",
sales_tax_template = "_Test Sales Taxes and Charges Template 2 - _TC", save=1)
self.assertEqual(get_tax_template("2015-01-01", {"customer":"_Test Customer 2"}),
"_Test Sales Taxes and Charges Template 2 - _TC")
def test_select_tax_rule_based_on_better_match(self):
make_tax_rule(customer= "_Test Customer", billing_city = "Test City", billing_state = "Test State",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", save=1)
make_tax_rule(customer= "_Test Customer", billing_city = "Test City1", billing_state = "Test State",
sales_tax_template = "_Test Sales Taxes and Charges Template 1 - _TC", save=1)
self.assertEqual(get_tax_template("2015-01-01", {"customer":"_Test Customer", "billing_city": "Test City", "billing_state": "Test State"}),
"_Test Sales Taxes and Charges Template - _TC")
def test_select_tax_rule_based_on_state_match(self):
make_tax_rule(customer= "_Test Customer", shipping_state = "Test State",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", save=1)
make_tax_rule(customer= "_Test Customer", shipping_state = "Test State12",
sales_tax_template = "_Test Sales Taxes and Charges Template 1 - _TC", priority=2, save=1)
self.assertEqual(get_tax_template("2015-01-01", {"customer":"_Test Customer", "shipping_state": "Test State"}),
"_Test Sales Taxes and Charges Template - _TC")
def test_select_tax_rule_based_on_better_priority(self):
make_tax_rule(customer= "_Test Customer", billing_city = "Test City",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", priority=1, save=1)
make_tax_rule(customer= "_Test Customer", billing_city = "Test City",
sales_tax_template = "_Test Sales Taxes and Charges Template 1 - _TC", priority=2, save=1)
self.assertEqual(get_tax_template("2015-01-01", {"customer":"_Test Customer", "billing_city": "Test City"}),
"_Test Sales Taxes and Charges Template 1 - _TC")
def test_select_tax_rule_based_cross_matching_keys(self):
make_tax_rule(customer= "_Test Customer", billing_city = "Test City",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", save=1)
make_tax_rule(customer= "_Test Customer 1", billing_city = "Test City 1",
sales_tax_template = "_Test Sales Taxes and Charges Template 1 - _TC", save=1)
self.assertEqual(get_tax_template("2015-01-01", {"customer":"_Test Customer", "billing_city": "Test City 1"}),
None)
def test_select_tax_rule_based_cross_partially_keys(self):
make_tax_rule(customer= "_Test Customer", billing_city = "Test City",
sales_tax_template = "_Test Sales Taxes and Charges Template - _TC", save=1)
make_tax_rule(billing_city = "Test City 1",
sales_tax_template = "_Test Sales Taxes and Charges Template 1 - _TC", save=1)
self.assertEqual(get_tax_template("2015-01-01", {"customer":"_Test Customer", "billing_city": "Test City 1"}),
"_Test Sales Taxes and Charges Template 1 - _TC")
def make_tax_rule(**args):
args = frappe._dict(args)
tax_rule = frappe.new_doc("Tax Rule")
for key, val in iteritems(args):
if key != "save":
tax_rule.set(key, val)
tax_rule.company = args.company or "_Test Company"
if args.save:
tax_rule.insert()
return tax_rule
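# Illustrative usage (fixture names taken from the tests above): create a
# customer-specific rule, then resolve the template that applies on a date:
#
#   rule = make_tax_rule(customer="_Test Customer",
#       sales_tax_template="_Test Sales Taxes and Charges Template - _TC", save=1)
#   template = get_tax_template("2015-01-01", {"customer": "_Test Customer"})
#   # -> "_Test Sales Taxes and Charges Template - _TC"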
| gpl-3.0 |
wweiradio/django | tests/gis_tests/distapp/models.py | 259 | 1365 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
from ..utils import gisfield_may_be_null
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class SouthTexasCity(NamedModel):
"City model on projected coordinate system for South Texas."
point = models.PointField(srid=32140)
class SouthTexasCityFt(NamedModel):
"Same City model as above, but U.S. survey feet are the units."
point = models.PointField(srid=2278)
class AustraliaCity(NamedModel):
"City model for Australia, using WGS84."
point = models.PointField()
class CensusZipcode(NamedModel):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
poly = models.PolygonField(srid=4269)
class SouthTexasZipcode(NamedModel):
"Model for a few South Texas ZIP codes."
poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
class Interstate(NamedModel):
"Geodetic model for U.S. Interstates."
path = models.LineStringField()
class SouthTexasInterstate(NamedModel):
"Projected model for South Texas Interstates."
path = models.LineStringField(srid=32140)
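# Illustrative sketch (not part of the fixtures; the query is hypothetical):
# these models back distance lookups elsewhere in the suite, e.g. a radius
# filter against the geodetic AustraliaCity model:
#
#   from django.contrib.gis.geos import Point
#   from django.contrib.gis.measure import D
#   nearby = AustraliaCity.objects.filter(
#       point__distance_lte=(Point(151.2, -33.9, srid=4326), D(km=100)))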
| bsd-3-clause |
moraesnicol/scrapy | scrapy/commands/version.py | 60 | 1652 | from __future__ import print_function
import sys
import platform
import twisted
import OpenSSL
import scrapy
from scrapy.commands import ScrapyCommand
class Command(ScrapyCommand):
def syntax(self):
return "[-v]"
def short_desc(self):
return "Print Scrapy version"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
help="also display twisted/python/platform info (useful for bug reports)")
def run(self, args, opts):
if opts.verbose:
import lxml.etree
lxml_version = ".".join(map(str, lxml.etree.LXML_VERSION))
libxml2_version = ".".join(map(str, lxml.etree.LIBXML_VERSION))
print("Scrapy : %s" % scrapy.__version__)
print("lxml : %s" % lxml_version)
print("libxml2 : %s" % libxml2_version)
print("Twisted : %s" % twisted.version.short())
print("Python : %s" % sys.version.replace("\n", "- "))
print("pyOpenSSL : %s" % self._get_openssl_version())
print("Platform : %s" % platform.platform())
else:
print("Scrapy %s" % scrapy.__version__)
def _get_openssl_version(self):
try:
openssl = OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION)\
.decode('ascii', errors='replace')
# pyOpenSSL 0.12 does not expose openssl version
except AttributeError:
openssl = 'Unknown OpenSSL version'
return '{} ({})'.format(OpenSSL.version.__version__, openssl)
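# Example invocations of the command implemented above:
#   $ scrapy version       # prints "Scrapy <x.y.z>"
#   $ scrapy version -v    # adds lxml/libxml2/Twisted/Python/pyOpenSSL/platform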
| bsd-3-clause |
ai-se/Tree-Learner | methods1.py | 1 | 2448 | #! /Users/rkrsn/anaconda/bin/python
from os import environ, getcwd, walk
import sys
# Update PYTHONPATH
HOME = environ['HOME']
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, cwd])
from dtree import *
from table import *
from _imports.where2 import *
import makeAmodel
import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
import smote
def explore(dir):
datasets = []
for (dirpath, dirnames, filenames) in walk(dir):
datasets.append(dirpath)
training = []
testing = []
for k in datasets[1:]:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
training.append(
[train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store'])
testing.append(test)
return training, testing
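# Illustrative note (hypothetical layout Data/<project>/<version>.csv):
# explore() keeps all but the last CSV of each project as training data and
# holds the last one out for testing, e.g.
#   training, testing = explore('../Data/')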
def newTable(tbl, headerLabel, Rows):
tbl2 = clone(tbl)
newHead = Sym()
newHead.col = len(tbl.headers)
newHead.name = headerLabel
tbl2.headers = tbl.headers + [newHead]
return clone(tbl2, rows=Rows)
def createTbl(
data,
settings=None,
_smote=False,
isBin=False,
bugThres=1,
duplicate=False):
"""
kwargs:
_smote = True/False : SMOTE input data (or not)
_isBin = True/False : Reduce bugs to defects/no defects
_bugThres = int : Threshold for marking stuff as defective,
default = 1. Not defective => Bugs < 1
"""
makeaModel = makeAmodel.makeAModel()
_r = []
for t in data:
m = makeaModel.csv2py(t, _smote=_smote, duplicate=duplicate)
_r += m._rows
m._rows = _r
prepare(m, settings=None) # Initialize all parameters for where2 to run
tree = where2(m, m._rows) # Decision tree using where2
tbl = table(t)
headerLabel = '=klass'
Rows = []
for k, _ in leaves(tree): # for k, _ in leaves(tree):
for j in k.val:
tmp = j.cells
if isBin:
tmp[-1] = 0 if tmp[-1] < bugThres else 1
tmp.append('_' + str(id(k) % 1000))
j.__dict__.update({'cells': tmp})
Rows.append(j.cells)
return newTable(tbl, headerLabel, Rows)
def test_createTbl():
dir = '../Data/camel/camel-1.6.csv'
newTbl = createTbl([dir], _smote=False)
newTblSMOTE = createTbl([dir], _smote=True)
print(len(newTbl._rows), len(newTblSMOTE._rows))
def drop(test, tree):
loc = apex(test, tree)
return loc
if __name__ == '__main__':
test_createTbl()
| unlicense |
osrg/ryu | ryu/lib/packet/ospf.py | 4 | 34488 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RFC 2328 OSPF version 2
"""
from functools import reduce
import logging
import struct
import six
from ryu.lib import addrconv
from ryu.lib.packet import packet_base
from ryu.lib.packet import packet_utils
from ryu.lib.packet import stream_parser
from ryu.lib.stringify import StringifyMixin
from ryu.lib import type_desc
LOG = logging.getLogger(__name__)
_VERSION = 2
OSPF_MSG_UNKNOWN = 0
OSPF_MSG_HELLO = 1
OSPF_MSG_DB_DESC = 2
OSPF_MSG_LS_REQ = 3
OSPF_MSG_LS_UPD = 4
OSPF_MSG_LS_ACK = 5
OSPF_UNKNOWN_LSA = 0
OSPF_ROUTER_LSA = 1
OSPF_NETWORK_LSA = 2
OSPF_SUMMARY_LSA = 3
OSPF_ASBR_SUMMARY_LSA = 4
OSPF_AS_EXTERNAL_LSA = 5
OSPF_AS_NSSA_LSA = 7 # RFC 3101
OSPF_OPAQUE_LINK_LSA = 9 # RFC 5250
OSPF_OPAQUE_AREA_LSA = 10 # RFC 5250
OSPF_OPAQUE_AS_LSA = 11 # RFC 5250
OSPF_OPTION_T = 1 # Obsolete
OSPF_OPTION_E = 1 << 1 # RFC 2328
OSPF_OPTION_MC = 1 << 2 # RFC 1584
OSPF_OPTION_NP = 1 << 3 # RFC 3101
OSPF_OPTION_EA = 1 << 4 # Obsolete
OSPF_OPTION_DC = 1 << 5 # RFC 2370
OSPF_OPTION_DN = 1 << 7 # RFC 2567
LSA_LINK_TYPE_P2P = 1
LSA_LINK_TYPE_TRANSIT = 2
LSA_LINK_TYPE_STUB = 3
LSA_LINK_TYPE_VL = 4
ROUTER_LSA_BORDER = 0x01 # The router is an ABR
ROUTER_LSA_EXTERNAL = 0x02 # The router is an ASBR
ROUTER_LSA_VIRTUAL = 0x04 # The router has a VL in this area
ROUTER_LSA_NT = 0x10 # The router always translates Type-7
ROUTER_LSA_SHORTCUT = 0x20 # Shortcut-ABR specific flag
AS_EXTERNAL_METRIC = 0x80
OSPF_OPAQUE_TYPE_UNKNOWN = 0
OSPF_OPAQUE_TYPE_EXTENDED_PREFIX_LSA = 7
OSPF_OPAQUE_TYPE_EXTENDED_LINK_LSA = 8
OSPF_EXTENDED_PREFIX_TLV = 1
OSPF_EXTENDED_PREFIX_SID_SUBTLV = 2
class InvalidChecksum(Exception):
pass
class LSAHeader(StringifyMixin):
_HDR_PACK_STR = '!HBB4s4sIHH'
_HDR_LEN = struct.calcsize(_HDR_PACK_STR)
def __init__(self, ls_age=0, options=0, type_=OSPF_UNKNOWN_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=0, length=0, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN,
opaque_id=0):
self.ls_age = ls_age
self.options = options
self.type_ = type_
if self.type_ < OSPF_OPAQUE_LINK_LSA:
self.id_ = id_
else:
self.opaque_type = opaque_type
self.opaque_id = opaque_id
self.adv_router = adv_router
self.ls_seqnum = ls_seqnum
self.checksum = checksum
self.length = length
@classmethod
def parser(cls, buf):
if len(buf) < cls._HDR_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._HDR_LEN))
(ls_age, options, type_, id_, adv_router, ls_seqnum, checksum,
length,) = struct.unpack_from(cls._HDR_PACK_STR, six.binary_type(buf))
adv_router = addrconv.ipv4.bin_to_text(adv_router)
rest = buf[cls._HDR_LEN:]
lsacls = LSA._lookup_type(type_)
value = {
"ls_age": ls_age,
"options": options,
"type_": type_,
"adv_router": adv_router,
"ls_seqnum": ls_seqnum,
"checksum": checksum,
"length": length,
}
if issubclass(lsacls, OpaqueLSA):
(id_,) = struct.unpack_from('!I', id_)
value['opaque_type'] = (id_ & 0xff000000) >> 24
value['opaque_id'] = (id_ & 0xffffff)
else:
value['id_'] = addrconv.ipv4.bin_to_text(id_)
return value, rest
def serialize(self):
if self.type_ < OSPF_OPAQUE_LINK_LSA:
id_ = addrconv.ipv4.text_to_bin(self.id_)
else:
id_ = (self.opaque_type << 24) + self.opaque_id
(id_,) = struct.unpack_from('4s', struct.pack('!I', id_))
adv_router = addrconv.ipv4.text_to_bin(self.adv_router)
return bytearray(
struct.pack(self._HDR_PACK_STR, self.ls_age,
self.options, self.type_, id_, adv_router,
self.ls_seqnum, self.checksum, self.length))
class LSA(type_desc.TypeDisp, StringifyMixin):
def __init__(self, ls_age=0, options=0, type_=OSPF_UNKNOWN_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=0, length=0, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN,
opaque_id=0):
if type_ < OSPF_OPAQUE_LINK_LSA:
self.header = LSAHeader(
ls_age=ls_age,
options=options,
type_=type_,
id_=id_,
adv_router=adv_router,
ls_seqnum=ls_seqnum)
else:
self.header = LSAHeader(
ls_age=ls_age,
options=options,
type_=type_,
adv_router=adv_router,
ls_seqnum=ls_seqnum,
opaque_type=opaque_type,
opaque_id=opaque_id)
if not (checksum or length):
tail = self.serialize_tail()
length = self.header._HDR_LEN + len(tail)
if not checksum:
head = self.header.serialize()
checksum = packet_utils.fletcher_checksum(head[2:], 14)
self.header.length = length
self.header.checksum = checksum
@classmethod
def parser(cls, buf):
hdr, rest = LSAHeader.parser(buf)
if len(buf) < hdr['length']:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), hdr['length']))
# exclude ls_age for checksum calculation
csum = packet_utils.fletcher_checksum(buf[2:hdr['length']], 14)
if csum != hdr['checksum']:
raise InvalidChecksum("header has %d, but calculated value is %d"
% (hdr['checksum'], csum))
subcls = cls._lookup_type(hdr['type_'])
body = rest[:hdr['length'] - LSAHeader._HDR_LEN]
rest = rest[hdr['length'] - LSAHeader._HDR_LEN:]
if issubclass(subcls, OpaqueLSA):
kwargs = subcls.parser(body, hdr['opaque_type'])
else:
kwargs = subcls.parser(body)
kwargs.update(hdr)
return subcls(**kwargs), subcls, rest
def serialize(self):
tail = self.serialize_tail()
self.header.length = self.header._HDR_LEN + len(tail)
head = self.header.serialize()
# exclude ls_age for checksum calculation
csum = packet_utils.fletcher_checksum(head[2:] + tail, 14)
self.header.checksum = csum
struct.pack_into("!H", head, 16, csum)
return head + tail
def serialize_tail(self):
# should be implemented in subclass
return b''
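# Illustrative sketch (``lsa_bytes`` is assumed raw wire data): the subclasses
# below register themselves per type code through type_desc.TypeDisp, so
# callers parse via the base class and receive the concrete subclass:
#
#   lsa, subcls, rest = LSA.parser(lsa_bytes)   # e.g. subcls is RouterLSA
#   wire = lsa.serialize()                      # re-encodes, refreshing checksum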
@LSA.register_type(OSPF_ROUTER_LSA)
class RouterLSA(LSA):
_PACK_STR = '!BBH'
_PACK_LEN = struct.calcsize(_PACK_STR) # 4bytes
class Link(StringifyMixin):
_PACK_STR = '!4s4sBBH'
_PACK_LEN = struct.calcsize(_PACK_STR) # 12bytes
def __init__(self, id_='0.0.0.0', data='0.0.0.0',
type_=LSA_LINK_TYPE_STUB, tos=0, metric=10):
self.id_ = id_
self.data = data
self.type_ = type_
self.tos = tos
self.metric = metric
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
link = buf[:cls._PACK_LEN]
rest = buf[cls._PACK_LEN:]
(id_, data, type_, tos, metric) = \
struct.unpack_from(cls._PACK_STR, six.binary_type(link))
id_ = addrconv.ipv4.bin_to_text(id_)
data = addrconv.ipv4.bin_to_text(data)
return cls(id_, data, type_, tos, metric), rest
def serialize(self):
id_ = addrconv.ipv4.text_to_bin(self.id_)
data = addrconv.ipv4.text_to_bin(self.data)
return bytearray(
struct.pack(self._PACK_STR, id_, data, self.type_, self.tos,
self.metric))
def __init__(self, ls_age=0, options=0, type_=OSPF_ROUTER_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, flags=0, links=None):
links = links if links else []
self.flags = flags
self.links = links
super(RouterLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
links = []
hdr = buf[:cls._PACK_LEN]
buf = buf[cls._PACK_LEN:]
(flags, _, num) = struct.unpack_from(cls._PACK_STR,
six.binary_type(hdr))
while buf:
link, buf = cls.Link.parser(buf)
links.append(link)
assert len(links) == num
return {
"flags": flags,
"links": links,
}
def serialize_tail(self):
head = bytearray(
struct.pack(self._PACK_STR, self.flags, 0, len(self.links)))
try:
return head + reduce(lambda a, b: a + b,
(link.serialize() for link in self.links))
except TypeError:
return head
@LSA.register_type(OSPF_NETWORK_LSA)
class NetworkLSA(LSA):
_PACK_STR = '!4s'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, ls_age=0, options=0, type_=OSPF_NETWORK_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, mask='0.0.0.0', routers=None):
routers = routers if routers else []
self.mask = mask
self.routers = routers
super(NetworkLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
binmask = buf[:cls._PACK_LEN]
(mask,) = struct.unpack_from(cls._PACK_STR, six.binary_type(binmask))
mask = addrconv.ipv4.bin_to_text(mask)
buf = buf[cls._PACK_LEN:]
routers = []
while buf:
binrouter = buf[:cls._PACK_LEN]
(router,) = struct.unpack_from(cls._PACK_STR,
six.binary_type(binrouter))
router = addrconv.ipv4.bin_to_text(router)
routers.append(router)
buf = buf[cls._PACK_LEN:]
return {
"mask": mask,
"routers": routers,
}
def serialize_tail(self):
mask = addrconv.ipv4.text_to_bin(self.mask)
routers = [addrconv.ipv4.text_to_bin(router)
for router in self.routers]
return bytearray(
struct.pack("!" + "4s" * (1 + len(routers)), mask, *routers))
@LSA.register_type(OSPF_SUMMARY_LSA)
class SummaryLSA(LSA):
_PACK_STR = '!4sB3s'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, ls_age=0, options=0, type_=OSPF_SUMMARY_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, mask='0.0.0.0', tos=0, metric=0):
self.mask = mask
self.tos = tos
self.metric = metric
super(SummaryLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
buf = buf[:cls._PACK_LEN]
(mask, tos, metric) = struct.unpack_from(
cls._PACK_STR, six.binary_type(buf))
mask = addrconv.ipv4.bin_to_text(mask)
metric = type_desc.Int3.to_user(metric)
return {
"mask": mask,
"tos": tos,
"metric": metric,
}
def serialize_tail(self):
mask = addrconv.ipv4.text_to_bin(self.mask)
metric = type_desc.Int3.from_user(self.metric)
return bytearray(struct.pack(self._PACK_STR, mask, self.tos, metric))
@LSA.register_type(OSPF_ASBR_SUMMARY_LSA)
class ASBRSummaryLSA(LSA):
pass
@LSA.register_type(OSPF_AS_EXTERNAL_LSA)
class ASExternalLSA(LSA):
class ExternalNetwork(StringifyMixin):
_PACK_STR = '!4sB3s4sI'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, mask='0.0.0.0', flags=0, metric=0,
fwd_addr='0.0.0.0', tag=0):
self.mask = mask
self.flags = flags
self.metric = metric
self.fwd_addr = fwd_addr
self.tag = tag
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
ext_nw = buf[:cls._PACK_LEN]
rest = buf[cls._PACK_LEN:]
(mask, flags, metric, fwd_addr,
tag) = struct.unpack_from(cls._PACK_STR, six.binary_type(ext_nw))
mask = addrconv.ipv4.bin_to_text(mask)
metric = type_desc.Int3.to_user(metric)
fwd_addr = addrconv.ipv4.bin_to_text(fwd_addr)
return cls(mask, flags, metric, fwd_addr, tag), rest
def serialize(self):
mask = addrconv.ipv4.text_to_bin(self.mask)
metric = type_desc.Int3.from_user(self.metric)
fwd_addr = addrconv.ipv4.text_to_bin(self.fwd_addr)
return bytearray(
struct.pack(self._PACK_STR, mask, self.flags, metric,
fwd_addr, self.tag))
def __init__(self, ls_age=0, options=0, type_=OSPF_AS_EXTERNAL_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, extnws=None):
extnws = extnws if extnws else []
self.extnws = extnws
super(ASExternalLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
extnws = []
while buf:
extnw, buf = cls.ExternalNetwork.parser(buf)
extnws.append(extnw)
return {
"extnws": extnws,
}
def serialize_tail(self):
return reduce(lambda a, b: a + b,
(extnw.serialize() for extnw in self.extnws))
@LSA.register_type(OSPF_AS_NSSA_LSA)
class NSSAExternalLSA(LSA):
pass
class ExtendedPrefixTLV(StringifyMixin, type_desc.TypeDisp):
pass
@ExtendedPrefixTLV.register_type(OSPF_EXTENDED_PREFIX_TLV)
class ExtendedPrefixTLV(ExtendedPrefixTLV):
_VALUE_PACK_STR = '!HHBBBB4s'
_VALUE_PACK_LEN = struct.calcsize(_VALUE_PACK_STR)
    _VALUE_FIELDS = ['route_type', 'prefix_length', 'address_family', '_pad',
                     'prefix']
def __init__(self, type_=OSPF_EXTENDED_PREFIX_TLV, length=0, route_type=0,
address_family=0, prefix='0.0.0.0/0'):
self.type_ = type_
self.length = length
self.route_type = route_type
self.address_family = address_family
self.prefix = prefix
@classmethod
def parser(cls, buf):
rest = buf[cls._VALUE_PACK_LEN:]
buf = buf[:cls._VALUE_PACK_LEN]
(type_, length, route_type, prefix_length, address_family, _pad,
prefix) = struct.unpack_from(cls._VALUE_PACK_STR, buf)
prefix = addrconv.ipv4.bin_to_text(prefix)
prefix = "%s/%d" % (prefix, prefix_length)
return cls(type_, length, route_type, address_family, prefix), rest
def serialize(self):
prefix, prefix_length = self.prefix.split('/')
prefix = addrconv.ipv4.text_to_bin(prefix)
prefix_length = int(prefix_length)
return struct.pack(self._VALUE_PACK_STR, OSPF_EXTENDED_PREFIX_TLV,
self._VALUE_PACK_LEN - 4, self.route_type,
prefix_length, self.address_family, 0, prefix)
@ExtendedPrefixTLV.register_type(OSPF_EXTENDED_PREFIX_SID_SUBTLV)
class PrefixSIDSubTLV(ExtendedPrefixTLV):
_VALUE_PACK_STR = '!HHBBBBHHI'
_VALUE_PACK_LEN = struct.calcsize(_VALUE_PACK_STR)
_VALUE_FIELDS = ['flags', 'mt_id', 'algorithm', '_pad', 'range_size',
'_pad', 'index']
def __init__(self, type_=OSPF_EXTENDED_PREFIX_SID_SUBTLV, length=0,
flags=0, mt_id=0, algorithm=0, range_size=0, index=0):
super(PrefixSIDSubTLV, self).__init__()
self.type_ = type_
self.length = length
self.flags = flags
self.mt_id = mt_id
self.algorithm = algorithm
self.range_size = range_size
self.index = index
@classmethod
def parser(cls, buf):
rest = buf[cls._VALUE_PACK_LEN:]
buf = buf[:cls._VALUE_PACK_LEN]
(type_, length, flags, mt_id, algorithm, _pad, range_size, _pad,
index) = struct.unpack_from(cls._VALUE_PACK_STR, buf)
return cls(type_, length, flags, mt_id, algorithm, range_size,
index), rest
def serialize(self):
return struct.pack(self._VALUE_PACK_STR,
OSPF_EXTENDED_PREFIX_SID_SUBTLV,
self._VALUE_PACK_LEN - 4, self.flags, self.mt_id,
self.algorithm, 0, self.range_size, 0, self.index)
class ExtendedLinkTLV(StringifyMixin, type_desc.TypeDisp):
pass
class OpaqueBody(StringifyMixin, type_desc.TypeDisp):
def __init__(self, tlvs=None):
tlvs = tlvs if tlvs else []
self.tlvs = tlvs
def serialize(self):
return reduce(lambda a, b: a + b,
(tlv.serialize() for tlv in self.tlvs))
@OpaqueBody.register_type(OSPF_OPAQUE_TYPE_EXTENDED_PREFIX_LSA)
class ExtendedPrefixOpaqueBody(OpaqueBody):
@classmethod
def parser(cls, buf):
buf = six.binary_type(buf)
tlvs = []
while buf:
(type_, length) = struct.unpack_from('!HH', buf)
if len(buf[struct.calcsize('!HH'):]) < length:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), length))
tlvcls = ExtendedPrefixTLV._lookup_type(type_)
if tlvcls:
tlv, buf = tlvcls.parser(buf)
tlvs.append(tlv)
return cls(tlvs)
@OpaqueBody.register_type(OSPF_OPAQUE_TYPE_EXTENDED_LINK_LSA)
class ExtendedLinkOpaqueBody(OpaqueBody):
@classmethod
def parser(cls, buf):
buf = six.binary_type(buf)
tlvs = []
while buf:
(type_, length) = struct.unpack_from('!HH', buf)
if len(buf[struct.calcsize('!HH'):]) < length:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), length))
tlvcls = ExtendedLinkTLV._lookup_type(type_)
if tlvcls:
tlv, buf = tlvcls.parser(buf)
tlvs.append(tlv)
return cls(tlvs)
class OpaqueLSA(LSA):
def __init__(self, data, *args, **kwargs):
super(OpaqueLSA, self).__init__(*args, **kwargs)
self.data = data
@classmethod
def parser(cls, buf, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN):
opaquecls = OpaqueBody._lookup_type(opaque_type)
if opaquecls:
data = opaquecls.parser(buf)
else:
data = buf
return {'data': data}
def serialize_tail(self):
if isinstance(self.data, OpaqueBody):
return self.data.serialize()
else:
return self.data
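# Illustrative sketch (hypothetical values): opaque LSAs carry their sub-type
# in the high byte of the link-state ID, so an RFC 7684 extended-prefix body
# can be wrapped with the AreaOpaqueLSA subclass defined below:
#
#   body = ExtendedPrefixOpaqueBody(
#       tlvs=[ExtendedPrefixTLV(route_type=1, prefix='10.0.0.0/24')])
#   lsa = AreaOpaqueLSA(adv_router='1.1.1.1',
#                       opaque_type=OSPF_OPAQUE_TYPE_EXTENDED_PREFIX_LSA,
#                       opaque_id=1, data=body)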
@LSA.register_type(OSPF_OPAQUE_LINK_LSA)
class LocalOpaqueLSA(OpaqueLSA):
def __init__(self, ls_age=0, options=0, type_=OSPF_OPAQUE_LINK_LSA,
adv_router='0.0.0.0', ls_seqnum=0, checksum=0, length=0,
opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN, opaque_id=0, data=None):
        # seed self.data before LSA.__init__ runs serialize_tail(), then pass
        # data as the first positional argument OpaqueLSA.__init__ expects;
        # the remaining fields go by keyword so they reach LSA.__init__ intact
        self.data = data
        super(LocalOpaqueLSA, self).__init__(
            data, ls_age=ls_age, options=options, type_=type_,
            adv_router=adv_router, ls_seqnum=ls_seqnum, checksum=checksum,
            length=length, opaque_type=opaque_type, opaque_id=opaque_id)
@LSA.register_type(OSPF_OPAQUE_AREA_LSA)
class AreaOpaqueLSA(OpaqueLSA):
def __init__(self, ls_age=0, options=0, type_=OSPF_OPAQUE_AREA_LSA,
adv_router='0.0.0.0', ls_seqnum=0, checksum=0, length=0,
opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN, opaque_id=0, data=None):
        # as in LocalOpaqueLSA: seed self.data, then forward data positionally
        # and everything else by keyword
        self.data = data
        super(AreaOpaqueLSA, self).__init__(
            data, ls_age=ls_age, options=options, type_=type_,
            adv_router=adv_router, ls_seqnum=ls_seqnum, checksum=checksum,
            length=length, opaque_type=opaque_type, opaque_id=opaque_id)
@LSA.register_type(OSPF_OPAQUE_AS_LSA)
class ASOpaqueLSA(OpaqueLSA):
def __init__(self, ls_age=0, options=0, type_=OSPF_OPAQUE_AS_LSA,
adv_router='0.0.0.0', ls_seqnum=0, checksum=0, length=0,
opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN, opaque_id=0, data=None):
        # as in LocalOpaqueLSA: seed self.data, then forward data positionally
        # and everything else by keyword
        self.data = data
        super(ASOpaqueLSA, self).__init__(
            data, ls_age=ls_age, options=options, type_=type_,
            adv_router=adv_router, ls_seqnum=ls_seqnum, checksum=checksum,
            length=length, opaque_type=opaque_type, opaque_id=opaque_id)
class OSPFMessage(packet_base.PacketBase, type_desc.TypeDisp):
"""Base class for OSPF version 2 messages.
"""
_HDR_PACK_STR = '!BBH4s4sHHQ'
_HDR_LEN = struct.calcsize(_HDR_PACK_STR)
def __init__(self, type_, length=None, router_id='0.0.0.0',
area_id='0.0.0.0', au_type=1, authentication=0, checksum=None,
version=_VERSION):
super(OSPFMessage, self).__init__()
self.version = version
self.type_ = type_
self.length = length
self.router_id = router_id
self.area_id = area_id
self.checksum = checksum
self.au_type = au_type
self.authentication = authentication
@classmethod
def _parser(cls, buf):
if len(buf) < cls._HDR_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._HDR_LEN))
(version, type_, length, router_id, area_id, checksum, au_type,
authentication) = struct.unpack_from(cls._HDR_PACK_STR,
six.binary_type(buf))
# Exclude checksum and authentication field for checksum validation.
if packet_utils.checksum(buf[:12] + buf[14:16] + buf[cls._HDR_LEN:]) \
!= checksum:
raise InvalidChecksum
if len(buf) < length:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), length))
router_id = addrconv.ipv4.bin_to_text(router_id)
area_id = addrconv.ipv4.bin_to_text(area_id)
binmsg = buf[cls._HDR_LEN:length]
rest = buf[length:]
subcls = cls._lookup_type(type_)
kwargs = subcls.parser(binmsg)
return subcls(length, router_id, area_id, au_type, int(authentication),
checksum, version, **kwargs), None, rest
@classmethod
def parser(cls, buf):
try:
return cls._parser(buf)
        except Exception:
            # a malformed or truncated packet yields (None, None, buf) so the
            # caller can skip it
            return None, None, buf
def serialize(self, payload=None, prev=None):
tail = self.serialize_tail()
self.length = self._HDR_LEN + len(tail)
head = bytearray(
struct.pack(self._HDR_PACK_STR, self.version,
self.type_, self.length,
addrconv.ipv4.text_to_bin(self.router_id),
addrconv.ipv4.text_to_bin(self.area_id), 0,
self.au_type, self.authentication))
buf = head + tail
csum = packet_utils.checksum(buf[:12] + buf[14:16] +
buf[self._HDR_LEN:])
self.checksum = csum
struct.pack_into("!H", buf, 12, csum)
return buf
# alias
ospf = OSPFMessage
@OSPFMessage.register_type(OSPF_MSG_HELLO)
class OSPFHello(OSPFMessage):
_PACK_STR = '!4sHBBI4s4s' # + neighbors
_PACK_LEN = struct.calcsize(_PACK_STR)
_MIN_LEN = OSPFMessage._HDR_LEN + _PACK_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
mask='0.0.0.0', hello_interval=10, options=0, priority=1,
dead_interval=40, designated_router='0.0.0.0',
backup_router='0.0.0.0', neighbors=None):
neighbors = neighbors if neighbors else []
super(OSPFHello, self).__init__(OSPF_MSG_HELLO, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.mask = mask
self.hello_interval = hello_interval
self.options = options
self.priority = priority
self.dead_interval = dead_interval
self.designated_router = designated_router
self.backup_router = backup_router
self.neighbors = neighbors
@classmethod
def parser(cls, buf):
(mask, hello_interval, options, priority, dead_interval,
designated_router, backup_router) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf))
mask = addrconv.ipv4.bin_to_text(mask)
designated_router = addrconv.ipv4.bin_to_text(designated_router)
backup_router = addrconv.ipv4.bin_to_text(backup_router)
neighbors = []
binneighbors = buf[cls._PACK_LEN:len(buf)]
while binneighbors:
n = binneighbors[:4]
n = addrconv.ipv4.bin_to_text(six.binary_type(n))
binneighbors = binneighbors[4:]
neighbors.append(n)
return {
"mask": mask,
"hello_interval": hello_interval,
"options": options,
"priority": priority,
"dead_interval": dead_interval,
"designated_router": designated_router,
"backup_router": backup_router,
"neighbors": neighbors,
}
def serialize_tail(self):
head = bytearray(
struct.pack(self._PACK_STR,
addrconv.ipv4.text_to_bin(self.mask),
self.hello_interval, self.options, self.priority,
self.dead_interval,
addrconv.ipv4.text_to_bin(self.designated_router),
addrconv.ipv4.text_to_bin(self.backup_router)))
try:
return head + reduce(lambda a, b: a + b,
(addrconv.ipv4.text_to_bin(n)
for n in self.neighbors))
except TypeError:
return head
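# Illustrative round trip (hypothetical addresses): a Hello built with keyword
# arguments serializes with a fresh checksum and re-parses through the
# base-class dispatcher:
#
#   msg = OSPFHello(router_id='1.1.1.1', neighbors=['2.2.2.2'])
#   buf = msg.serialize()
#   parsed, _, rest = OSPFMessage.parser(buf)   # parsed is an OSPFHello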
@OSPFMessage.register_type(OSPF_MSG_DB_DESC)
class OSPFDBDesc(OSPFMessage):
_PACK_STR = '!HBBI' # + LSA_HEADERS
_PACK_LEN = struct.calcsize(_PACK_STR)
_MIN_LEN = OSPFMessage._HDR_LEN + _PACK_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
mtu=1500, options=0, i_flag=0, m_flag=0, ms_flag=0,
sequence_number=0, lsa_headers=None):
lsa_headers = lsa_headers if lsa_headers else []
super(OSPFDBDesc, self).__init__(OSPF_MSG_DB_DESC, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.mtu = mtu
self.options = options
self.i_flag = i_flag
self.m_flag = m_flag
self.ms_flag = ms_flag
self.sequence_number = sequence_number
self.lsa_headers = lsa_headers
@classmethod
def parser(cls, buf):
(mtu, options, flags,
sequence_number) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf))
i_flag = (flags >> 2) & 0x1
m_flag = (flags >> 1) & 0x1
ms_flag = flags & 0x1
lsahdrs = []
buf = buf[cls._PACK_LEN:]
while buf:
kwargs, buf = LSAHeader.parser(buf)
lsahdrs.append(LSAHeader(**kwargs))
return {
"mtu": mtu,
"options": options,
"i_flag": i_flag,
"m_flag": m_flag,
"ms_flag": ms_flag,
"sequence_number": sequence_number,
"lsa_headers": lsahdrs,
}
def serialize_tail(self):
flags = ((self.i_flag & 0x1) << 2) ^ \
((self.m_flag & 0x1) << 1) ^ \
(self.ms_flag & 0x1)
head = bytearray(
struct.pack(self._PACK_STR, self.mtu, self.options, flags,
self.sequence_number))
try:
return head + reduce(lambda a, b: a + b,
(hdr.serialize() for hdr in self.lsa_headers))
except TypeError:
return head
@OSPFMessage.register_type(OSPF_MSG_LS_REQ)
class OSPFLSReq(OSPFMessage):
_MIN_LEN = OSPFMessage._HDR_LEN
class Request(StringifyMixin):
_PACK_STR = '!I4s4s'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, type_=OSPF_UNKNOWN_LSA, id_='0.0.0.0',
adv_router='0.0.0.0'):
self.type_ = type_
self.id = id_
self.adv_router = adv_router
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
link = buf[:cls._PACK_LEN]
rest = buf[cls._PACK_LEN:]
(type_, id_, adv_router) = struct.unpack_from(cls._PACK_STR,
six.binary_type(link))
id_ = addrconv.ipv4.bin_to_text(id_)
adv_router = addrconv.ipv4.bin_to_text(adv_router)
return cls(type_, id_, adv_router), rest
def serialize(self):
id_ = addrconv.ipv4.text_to_bin(self.id)
adv_router = addrconv.ipv4.text_to_bin(self.adv_router)
return struct.pack(self._PACK_STR, self.type_, id_, adv_router)
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
lsa_requests=None):
lsa_requests = lsa_requests if lsa_requests else []
super(OSPFLSReq, self).__init__(OSPF_MSG_LS_REQ, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.lsa_requests = lsa_requests
@classmethod
def parser(cls, buf):
reqs = []
while buf:
req, buf = cls.Request.parser(buf)
reqs.append(req)
return {
"lsa_requests": reqs,
}
def serialize_tail(self):
return reduce(lambda a, b: a + b,
(req.serialize() for req in self.lsa_requests))
@OSPFMessage.register_type(OSPF_MSG_LS_UPD)
class OSPFLSUpd(OSPFMessage):
_PACK_STR = '!I'
_PACK_LEN = struct.calcsize(_PACK_STR)
_MIN_LEN = OSPFMessage._HDR_LEN + _PACK_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
lsas=None):
lsas = lsas if lsas else []
super(OSPFLSUpd, self).__init__(OSPF_MSG_LS_UPD, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.lsas = lsas
@classmethod
def parser(cls, buf):
binnum = buf[:cls._PACK_LEN]
(num,) = struct.unpack_from(cls._PACK_STR, six.binary_type(binnum))
buf = buf[cls._PACK_LEN:]
lsas = []
while buf:
lsa, _cls, buf = LSA.parser(buf)
lsas.append(lsa)
assert len(lsas) == num
return {
"lsas": lsas,
}
def serialize_tail(self):
head = bytearray(struct.pack(self._PACK_STR, len(self.lsas)))
try:
return head + reduce(lambda a, b: a + b,
(lsa.serialize() for lsa in self.lsas))
except TypeError:
return head
@OSPFMessage.register_type(OSPF_MSG_LS_ACK)
class OSPFLSAck(OSPFMessage):
_MIN_LEN = OSPFMessage._HDR_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
lsa_headers=None):
lsa_headers = lsa_headers if lsa_headers else []
super(OSPFLSAck, self).__init__(OSPF_MSG_LS_ACK, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.lsa_headers = lsa_headers
@classmethod
def parser(cls, buf):
lsahdrs = []
while buf:
kwargs, buf = LSAHeader.parser(buf)
lsahdrs.append(LSAHeader(**kwargs))
return {
"lsa_headers": lsahdrs,
}
def serialize_tail(self):
return reduce(lambda a, b: a + b,
(hdr.serialize() for hdr in self.lsa_headers))
| apache-2.0 |