repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
mgaitan/scipy | scipy/_lib/_version.py | 65 | 4792 | """Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
import re
from scipy._lib.six import string_types
__all__ = ['NumpyVersion']
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
can be > 9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy._lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d+[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'\.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/special/tests/test_sph_harm.py | 43 | 1182 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
def test_first_harmonics():
# Test against explicit representations of the first four
# spherical harmonics which use `theta` as the azimuthal angle,
# `phi` as the polar angle, and include the Condon-Shortley
# phase.
# Notation is Ymn
def Y00(theta, phi):
return 0.5*np.sqrt(1/np.pi)
def Yn11(theta, phi):
return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi)
def Y01(theta, phi):
return 0.5*np.sqrt(3/np.pi)*np.cos(phi)
def Y11(theta, phi):
return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi)
harms = [Y00, Yn11, Y01, Y11]
m = [0, -1, 0, 1]
n = [0, 1, 1, 1]
theta = np.linspace(0, 2*np.pi)
phi = np.linspace(0, np.pi)
theta, phi = np.meshgrid(theta, phi)
for harm, m, n in zip(harms, m, n):
assert_allclose(sc.sph_harm(m, n, theta, phi),
harm(theta, phi),
rtol=1e-15, atol=1e-15,
err_msg="Y^{}_{} incorrect".format(m, n))
| gpl-3.0 |
petercable/mi-dataset | mi/dataset/parser/dosta_abcdjm_mmp_cds.py | 8 | 1886 | #!/usr/bin/env python
"""
@package mi.dataset.parser.dosta_abcdjm_mmp_cds
@file marine-integrations/mi/dataset/parser/dosta_abcdjm_mmp_cds.py
@author Mark Worden
@brief Particle for the DostaAbcdjmMmpCds dataset driver
Release notes:
initial release
"""
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.dataset.parser.mmp_cds_base import MmpCdsParserDataParticle
log = get_logger()
class DataParticleType(BaseEnum):
INSTRUMENT = 'dosta_abcdjm_mmp_cds_instrument'
class DostaAbcdjmMmpCdsParserDataParticleKey(BaseEnum):
CALIBRATED_PHASE = 'calibrated_phase'
OPTODE_TEMPERATURE = 'optode_temperature'
__author__ = 'Mark Worden'
__license__ = 'Apache 2.0'
class DostaAbcdjmMmpCdsParserDataParticle(MmpCdsParserDataParticle):
"""
Class for parsing data from the DostaAbcdjmMmpCds data set
"""
_data_particle_type = DataParticleType.INSTRUMENT
def _get_mmp_cds_subclass_particle_params(self, dict_data):
"""
This method is required to be implemented by classes that extend the MmpCdsParserDataParticle class.
This implementation returns the particle parameters specific for DostaAbcdjmMmpCds. As noted in the
base, it is okay to allow the following exceptions to propagate: ValueError, TypeError, IndexError, KeyError.
@returns a list of particle params specific to DostaAbcdjmMmpCds
"""
calibrated_phase = self._encode_value(DostaAbcdjmMmpCdsParserDataParticleKey.CALIBRATED_PHASE,
dict_data['doconcs'], float)
optode_temperature = self._encode_value(DostaAbcdjmMmpCdsParserDataParticleKey.OPTODE_TEMPERATURE,
dict_data['t'], float)
subclass_particle_params = [calibrated_phase, optode_temperature]
return subclass_particle_params
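# A minimal input sketch (hypothetical values): given parsed data such as
# {'doconcs': 31.2, 't': 10.5}, this returns the two encoded parameters
# [calibrated_phase, optode_temperature] for the instrument particle.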
| bsd-2-clause |
sid88in/incubator-airflow | airflow/ti_deps/deps/dagrun_exists_dep.py | 20 | 2287 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class DagrunRunningDep(BaseTIDep):
NAME = "Dagrun Running"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
dag = ti.task.dag
dagrun = ti.get_dagrun(session)
if not dagrun:
# The import is needed here to avoid a circular dependency
from airflow.models import DagRun
running_dagruns = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
if len(running_dagruns) >= dag.max_active_runs:
reason = ("The maximum number of active dag runs ({0}) for this task "
"instance's DAG '{1}' has been reached.".format(
dag.max_active_runs,
ti.dag_id))
else:
reason = "Unknown reason"
yield self._failing_status(
reason="Task instance's dagrun did not exist: {0}.".format(reason))
else:
if dagrun.state != State.RUNNING:
yield self._failing_status(
reason="Task instance's dagrun was not in the 'running' state but in "
"the state '{}'.".format(dagrun.state))
| apache-2.0 |
yohanko88/gem5-DC | util/pbs/pbs.py | 90 | 6097 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os, popen2, re, sys
class MyPOpen(object):
def __init__(self, cmd, input = None, output = None, bufsize = -1):
self.status = -1
if input is None:
p2c_read, p2c_write = os.pipe()
self.tochild = os.fdopen(p2c_write, 'w', bufsize)
else:
p2c_write = None
if isinstance(input, file):
p2c_read = input.fileno()
elif isinstance(input, str):
input = file(input, 'r')
p2c_read = input.fileno()
elif isinstance(input, int):
p2c_read = input
else:
raise AttributeError
if output is None:
c2p_read, c2p_write = os.pipe()
self.fromchild = os.fdopen(c2p_read, 'r', bufsize)
else:
c2p_read = None
if isinstance(output, file):
c2p_write = output.fileno()
elif isinstance(output, str):
output = file(output, 'w')
c2p_write = output.fileno()
elif isinstance(output, int):
c2p_write = output
else:
raise AttributeError
self.pid = os.fork()
if self.pid == 0:
os.dup2(p2c_read, sys.stdin.fileno())
os.dup2(c2p_write, sys.stdout.fileno())
os.dup2(c2p_write, sys.stderr.fileno())
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
os.close(p2c_read)
os.close(c2p_write)
def poll(self):
if self.status < 0:
pid, status = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self.status = status
return self.status
def wait(self):
if self.status < 0:
pid, status = os.waitpid(self.pid, 0)
if pid == self.pid:
self.status = status
return self.status
class qsub:
def __init__(self):
self.afterok = None
self.hold = False
self.join = False
self.keep_stdout = False
self.keep_stderr = False
self.node_type = None
self.mail_abort = False
self.mail_begin = False
self.mail_end = False
self.name = None
self.stdout = None
self.priority = None
self.queue = None
self.pbshost = None
self.qsub = 'qsub'
self.env = {}
def build(self, script, args = []):
self.cmd = [ self.qsub ]
if self.env:
arg = '-v'
arg += ','.join([ '%s=%s' % i for i in self.env.iteritems() ])
self.cmd.append(arg)
if self.hold:
self.cmd.append('-h')
if self.stdout:
self.cmd.append('-olocalhost:' + self.stdout)
if self.keep_stdout and self.keep_stderr:
self.cmd.append('-koe')
elif self.keep_stdout:
self.cmd.append('-ko')
elif self.keep_stderr:
self.cmd.append('-ke')
else:
self.cmd.append('-kn')
if self.join:
self.cmd.append('-joe')
if self.node_type:
self.cmd.append('-lnodes=' + self.node_type)
if self.mail_abort or self.mail_begin or self.mail_end:
flags = ''
if self.mail_abort:
flags.append('a')
if self.mail_begin:
flags.append('b')
if self.mail_end:
flags.append('e')
if len(flags):
self.cmd.append('-m ' + flags)
else:
self.cmd.append('-mn')
if self.name:
self.cmd.append("-N%s" % self.name)
if self.priority:
self.cmd.append('-p' + self.priority)
if self.queue:
self.cmd.append('-q' + self.queue)
if self.afterok:
self.cmd.append('-Wdepend=afterok:%s' % self.afterok)
self.cmd.extend(args)
self.script = script
self.command = ' '.join(self.cmd + [ self.script ])
def do(self):
pbs = MyPOpen(self.cmd + [ self.script ])
self.result = pbs.fromchild.read()
ec = pbs.wait()
if ec != 0 and self.pbshost:
cmd = ' '.join(self.cmd + [ '-' ])
cmd = [ 'ssh', '-x', self.pbshost, cmd ]
self.command = ' '.join(cmd)
ssh = MyPOpen(cmd, input = self.script)
self.result = ssh.fromchild.read()
ec = ssh.wait()
return ec
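# A minimal usage sketch (queue name and script path are hypothetical):
#
# job = qsub()
# job.name = 'sim-run'
# job.queue = 'batch'
# job.build('/path/to/run.sh')
# ec = job.do() # exit code of qsub (or of the ssh fallback)
# print job.result # job id printed by qsub on success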
| bsd-3-clause |
frreiss/tensorflow-fred | tensorflow/python/kernel_tests/dynamic_partition_op_test.py | 5 | 15596 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DynamicPartition op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicPartitionTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimpleOneDimensional(self):
with self.session(use_gpu=True) as sess:
data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([0, 13], partition_vals[0])
self.assertAllEqual([17], partition_vals[1])
self.assertAllEqual([2, 4], partition_vals[2])
self.assertAllEqual([39], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` vectors of unknown length.
self.assertEqual([None], partitions[0].get_shape().as_list())
self.assertEqual([None], partitions[1].get_shape().as_list())
self.assertEqual([None], partitions[2].get_shape().as_list())
self.assertEqual([None], partitions[3].get_shape().as_list())
@test_util.run_deprecated_v1
def testSimpleTwoDimensional(self):
with self.session(use_gpu=True) as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14], [15, 16, 17]],
dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
self.assertAllEqual([[15, 16, 17]], partition_vals[1])
self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
self.assertAllEqual([[9, 10, 11]], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` matrices with an unknown number of rows, and 3 columns.
self.assertEqual([None, 3], partitions[0].get_shape().as_list())
self.assertEqual([None, 3], partitions[1].get_shape().as_list())
self.assertEqual([None, 3], partitions[2].get_shape().as_list())
self.assertEqual([None, 3], partitions[3].get_shape().as_list())
def testLargeOneDimensional(self):
num = 100000
data_list = [x for x in range(num)]
indices_list = [x % 2 for x in range(num)]
part1 = [x for x in range(num) if x % 2 == 0]
part2 = [x for x in range(num) if x % 2 == 1]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual(part1, partition_vals[0])
self.assertAllEqual(part2, partition_vals[1])
def testLargeTwoDimensional(self):
rows = 100000
cols = 100
data_list = [None] * rows
for i in range(rows):
data_list[i] = [i for _ in range(cols)]
num_partitions = 97
indices_list = [(i ** 2) % num_partitions for i in range(rows)]
parts = [[] for _ in range(num_partitions)]
for i in range(rows):
parts[(i ** 2) % num_partitions].append(data_list[i])
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=num_partitions)
partition_vals = self.evaluate(partitions)
self.assertEqual(num_partitions, len(partition_vals))
for i in range(num_partitions):
# reshape because of empty parts
parts_np = np.array(parts[i], dtype=np.float).reshape(-1, cols)
self.assertAllEqual(parts_np, partition_vals[i])
def testSimpleComplex(self):
data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
indices_list = [1, 0, 1, 0]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.complex64)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([3 + 4j, 7 + 8j], partition_vals[0])
self.assertAllEqual([1 + 2j, 5 + 6j], partition_vals[1])
def testScalarPartitions(self):
data_list = [10, 13, 12, 11]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float64)
indices = 3
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[0])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[1])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[2])
self.assertAllEqual(np.array([10, 13, 12, 11],
dtype=np.float64).reshape(-1, 4),
partition_vals[3])
@test_util.run_deprecated_v1
def testHigherRank(self):
np.random.seed(7)
with self.session(use_gpu=True) as sess:
for n in 2, 3:
for shape in (4,), (4, 5), (4, 5, 2):
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
for extra_shape in (), (6,), (6, 7):
data = np.random.randn(*(shape + extra_shape))
partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
data_t = constant_op.constant(data)
outputs = data_flow_ops.dynamic_partition(
data_t, partitions_t, num_partitions=n)
self.assertEqual(n, len(outputs))
outputs_val = self.evaluate(outputs)
for i, output in enumerate(outputs_val):
self.assertAllEqual(output, data[partitions == i])
# Test gradients
outputs_grad = [7 * output for output in outputs_val]
grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
outputs_grad)
self.assertEqual(grads[1], None) # Partitions has no gradients
self.assertAllEqual(7 * data, sess.run(grads[0]))
def testEmptyParts(self):
data_list = [1, 2, 3, 4]
indices_list = [1, 3, 1, 3]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = self.evaluate(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([], partition_vals[0])
self.assertAllEqual([1, 3], partition_vals[1])
self.assertAllEqual([], partition_vals[2])
self.assertAllEqual([2, 4], partition_vals[3])
def testEmptyDataTwoDimensional(self):
data_list = [[], []]
indices_list = [0, 1]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=3)
partition_vals = self.evaluate(partitions)
self.assertEqual(3, len(partition_vals))
self.assertAllEqual([[]], partition_vals[0])
self.assertAllEqual([[]], partition_vals[1])
self.assertAllEqual(np.array([], dtype=np.float).reshape(0, 0),
partition_vals[2])
def testEmptyPartitions(self):
data_list = []
indices_list = []
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([], partition_vals[0])
self.assertAllEqual([], partition_vals[1])
@unittest.skip("Fails on windows.")
def testGPUTooManyParts(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all but the first
# num_partitions indices.
if not test.is_gpu_available():
return
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [6, 5, 4, 3, 1, 0]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = self.evaluate(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([6], partition_vals[0])
self.assertAllEqual([5], partition_vals[1])
@unittest.skip("Fails on windows.")
def testGPUPartsTooLarge(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all the values
# larger than num_partitions.
if not test.is_gpu_available():
return
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [10, 11, 2, 12, 0, 1000]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=5)
partition_vals = self.evaluate(partitions)
self.assertEqual(5, len(partition_vals))
self.assertAllEqual([5], partition_vals[0])
self.assertAllEqual([], partition_vals[1])
self.assertAllEqual([3], partition_vals[2])
self.assertAllEqual([], partition_vals[3])
self.assertAllEqual([], partition_vals[4])
@unittest.skip("Fails on windows.")
def testGPUAllIndicesBig(self):
# This test only makes sense on the GPU. There we do not check
# for errors. In this case, we should discard all the values
# and have an empty output.
if not test.is_gpu_available():
return
data_list = [1.1, 2.1, 3.1, 4.1, 5.1, 6.1]
indices_list = [90, 70, 60, 100, 110, 40]
with self.session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=40)
partition_vals = self.evaluate(partitions)
self.assertEqual(40, len(partition_vals))
for i in range(40):
self.assertAllEqual([], partition_vals[i])
@test_util.run_deprecated_v1
def testErrorIndexOutOfRange(self):
with self.cached_session() as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
indices = constant_op.constant([0, 2, 99, 2, 2])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
self.evaluate(partitions)
@test_util.run_deprecated_v1
def testScalarIndexOutOfRange(self):
with self.cached_session() as sess:
bad = 17
data = np.zeros(5)
partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
self.evaluate(partitions)
@test_util.run_deprecated_v1
def testHigherRankIndexOutOfRange(self):
with self.cached_session() as sess:
shape = (2, 3)
indices = array_ops.placeholder(shape=shape, dtype=np.int32)
data = np.zeros(shape + (5,))
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=7)
for i in xrange(2):
for j in xrange(3):
bad = np.zeros(shape, dtype=np.int32)
bad[i, j] = 17
with self.assertRaisesOpError(
r"partitions\[%d,%d\] = 17 is not in \[0, 7\)" % (i, j)):
sess.run(partitions, feed_dict={indices: bad})
@test_util.run_deprecated_v1
def testErrorWrongDimsIndices(self):
data = constant_op.constant([[0], [1], [2]])
indices = constant_op.constant([[0], [0]])
with self.assertRaises(ValueError):
data_flow_ops.dynamic_partition(data, indices, num_partitions=4)
# see https://github.com/tensorflow/tensorflow/issues/17106
def testCUBBug(self):
x = constant_op.constant(np.random.randn(3072))
inds = [0]*189 + [1]*184 + [2]*184 + [3]*191 + [4]*192 + [5]*195 + [6]*195
inds += [7]*195 + [8]*188 + [9]*195 + [10]*188 + [11]*202 + [12]*194
inds += [13]*194 + [14]*194 + [15]*192
self.assertEqual(len(inds), x.shape[0])
partitioned = data_flow_ops.dynamic_partition(x, inds, 16)
with self.cached_session() as sess:
res = self.evaluate(partitioned)
self.assertEqual(res[-1].shape[0], 192)
# see https://github.com/tensorflow/tensorflow/issues/42500
def testMultiGPU(self):
device_list = config.list_logical_devices("GPU")
results = []
for device in device_list:
with ops.device(device.name):
data = constant_op.constant(np.zeros((1000,)))
partitions = constant_op.constant(np.arange(1000, dtype=np.int32) % 10)
result = data_flow_ops.dynamic_partition(data, partitions, 10)
results.append(self.evaluate(result))
if device_list:
self.assertAllEqual(results, np.zeros((len(device_list), 10, 100)))
if __name__ == "__main__":
test.main()
| apache-2.0 |
rackerlabs/quark | quark/db/migration/alembic/versions/552b213c2b8c_default_policy_without_policy.py | 6 | 2989 | """Ensure default IP policy exists for subnets without IP policies
Revision ID: 552b213c2b8c
Revises: 45a07fac3d38
Create Date: 2014-07-25 15:07:07.418971
"""
# revision identifiers, used by Alembic.
revision = '552b213c2b8c'
down_revision = '45a07fac3d38'
import logging
from quark.plugin_modules import ip_policies
from alembic import op
from sqlalchemy.sql import column, select, table
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
LOG = logging.getLogger("alembic.migration")
def upgrade():
ip_policy = table('quark_ip_policy',
column('id', sa.String(length=36)),
column('tenant_id', sa.String(length=255)),
column('created_at', sa.DateTime()))
ip_policy_cidrs = table('quark_ip_policy_cidrs',
column('id', sa.String(length=36)),
column('created_at', sa.DateTime()),
column('ip_policy_id', sa.String(length=36)),
column('cidr', sa.String(length=64)))
subnets = table('quark_subnets',
column('id', sa.String(length=36)),
column('_cidr', sa.String(length=64)),
column('tenant_id', sa.String(length=255)),
column('ip_policy_id', sa.String(length=36)))
connection = op.get_bind()
# 1. Find all subnets without ip_policy.
data = connection.execute(select([
subnets.c.id, subnets.c._cidr, subnets.c.tenant_id]).where(
subnets.c.ip_policy_id == None)).fetchall() # noqa
if not data:
return
LOG.info("Subnet IDs without IP policies: %s", [d[0] for d in data])
# 2. Insert ip_policy rows with id.
vals = [dict(id=uuidutils.generate_uuid(),
created_at=timeutils.utcnow(),
tenant_id=tenant_id)
for id, cidr, tenant_id in data]
LOG.info("IP Policy IDs to insert: %s", [v["id"] for v in vals])
connection.execute(ip_policy.insert(), *vals)
# 3. Insert default ip_policy_cidrs for those ip_policy's.
vals2 = []
for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
cidrs = []
ip_policies.ensure_default_policy(cidrs, [dict(cidr=cidr)])
for cidr in cidrs:
vals2.append(dict(id=uuidutils.generate_uuid(),
created_at=timeutils.utcnow(),
ip_policy_id=ip_policy["id"],
cidr=str(cidr)))
LOG.info("IP Policy CIDR IDs to insert: %s", [v["id"] for v in vals2])
connection.execute(ip_policy_cidrs.insert(), *vals2)
# 4. Set ip_policy_id rows in quark_subnets.
for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
connection.execute(subnets.update().values(
ip_policy_id=ip_policy["id"]).where(
subnets.c.id == id))
def downgrade():
raise NotImplementedError()
| apache-2.0 |
shukiz/VAR-SOM-AM33-Kernel-3-15 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
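# A dispatch sketch (hypothetical buffer contents): a 144-byte raw buffer
# yields a PebsEvent, a 176-byte one a PebsNHM, and any other size the
# generic PerfEvent.
#
# ev = create_event("cycles", "bash", "/bin/bash", "main", "\x00" * 144)
# assert isinstance(ev, PebsEvent)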
| gpl-2.0 |
jnewland/home-assistant | homeassistant/components/pilight/switch.py | 7 | 6543 | """Support for switching devices via Pilight to on and off."""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components import pilight
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_ID, CONF_SWITCHES, CONF_STATE,
CONF_PROTOCOL, STATE_ON)
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)
CONF_OFF_CODE = 'off_code'
CONF_OFF_CODE_RECEIVE = 'off_code_receive'
CONF_ON_CODE = 'on_code'
CONF_ON_CODE_RECEIVE = 'on_code_receive'
CONF_SYSTEMCODE = 'systemcode'
CONF_UNIT = 'unit'
CONF_UNITCODE = 'unitcode'
CONF_ECHO = 'echo'
COMMAND_SCHEMA = vol.Schema({
vol.Optional(CONF_PROTOCOL): cv.string,
vol.Optional('on'): cv.positive_int,
vol.Optional('off'): cv.positive_int,
vol.Optional(CONF_UNIT): cv.positive_int,
vol.Optional(CONF_UNITCODE): cv.positive_int,
vol.Optional(CONF_ID): vol.Any(cv.positive_int, cv.string),
vol.Optional(CONF_STATE): cv.string,
vol.Optional(CONF_SYSTEMCODE): cv.positive_int,
}, extra=vol.ALLOW_EXTRA)
RECEIVE_SCHEMA = COMMAND_SCHEMA.extend({
vol.Optional(CONF_ECHO): cv.boolean
})
SWITCHES_SCHEMA = vol.Schema({
vol.Required(CONF_ON_CODE): COMMAND_SCHEMA,
vol.Required(CONF_OFF_CODE): COMMAND_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_OFF_CODE_RECEIVE, default=[]): vol.All(cv.ensure_list,
[COMMAND_SCHEMA]),
vol.Optional(CONF_ON_CODE_RECEIVE, default=[]): vol.All(cv.ensure_list,
[COMMAND_SCHEMA])
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SWITCHES):
vol.Schema({cv.string: SWITCHES_SCHEMA}),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pilight platform."""
switches = config.get(CONF_SWITCHES)
devices = []
for dev_name, properties in switches.items():
devices.append(
PilightSwitch(
hass,
properties.get(CONF_NAME, dev_name),
properties.get(CONF_ON_CODE),
properties.get(CONF_OFF_CODE),
properties.get(CONF_ON_CODE_RECEIVE),
properties.get(CONF_OFF_CODE_RECEIVE)
)
)
add_entities(devices)
class _ReceiveHandle:
def __init__(self, config, echo):
"""Initialize the handle."""
self.config_items = config.items()
self.echo = echo
def match(self, code):
"""Test if the received code matches the configured values.
The received values have to be a subset of the configured options.
"""
return self.config_items <= code.items()
def run(self, switch, turn_on):
"""Change the state of the switch."""
switch.set_state(turn_on=turn_on, send_code=self.echo)
class PilightSwitch(SwitchDevice, RestoreEntity):
"""Representation of a Pilight switch."""
def __init__(self, hass, name, code_on, code_off, code_on_receive,
code_off_receive):
"""Initialize the switch."""
self._hass = hass
self._name = name
self._state = False
self._code_on = code_on
self._code_off = code_off
self._code_on_receive = []
self._code_off_receive = []
for code_list, conf in ((self._code_on_receive, code_on_receive),
(self._code_off_receive, code_off_receive)):
for code in conf:
echo = code.pop(CONF_ECHO, True)
code_list.append(_ReceiveHandle(code, echo))
if any(self._code_on_receive) or any(self._code_off_receive):
hass.bus.listen(pilight.EVENT, self._handle_code)
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._state = state.state == STATE_ON
@property
def name(self):
"""Get the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed, state set when correct code is received."""
return False
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return True
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def _handle_code(self, call):
"""Check if received code by the pilight-daemon.
If the code matches the receive on/off codes of this switch the switch
state is changed accordingly.
"""
# - True if off_code/on_code is contained in received code dict, not
# all items have to match.
# - Call turn on/off only once, even if more than one code is received
if any(self._code_on_receive):
for on_code in self._code_on_receive:
if on_code.match(call.data):
on_code.run(switch=self, turn_on=True)
break
if any(self._code_off_receive):
for off_code in self._code_off_receive:
if off_code.match(call.data):
off_code.run(switch=self, turn_on=False)
break
def set_state(self, turn_on, send_code=True):
"""Set the state of the switch.
This sets the state of the switch. If send_code is set to True, then
it will call the pilight.send service to actually send the codes
to the pilight daemon.
"""
if send_code:
if turn_on:
self._hass.services.call(pilight.DOMAIN, pilight.SERVICE_NAME,
self._code_on, blocking=True)
else:
self._hass.services.call(pilight.DOMAIN, pilight.SERVICE_NAME,
self._code_off, blocking=True)
self._state = turn_on
self.schedule_update_ha_state()
def turn_on(self, **kwargs):
"""Turn the switch on by calling pilight.send service with on code."""
self.set_state(turn_on=True)
def turn_off(self, **kwargs):
"""Turn the switch on by calling pilight.send service with off code."""
self.set_state(turn_on=False)
| apache-2.0 |
yannickcr/Sick-Beard | sickbeard/providers/t411.py | 14 | 9106 | # -*- coding: latin-1 -*-
# Author: Guillaume Serre <guillaume.serre@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from bs4 import BeautifulSoup
from sickbeard import classes, show_name_helpers, logger
from sickbeard.common import Quality
import generic
import cookielib
import sickbeard
import urllib
import urllib2
class T411Provider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "T411")
self.supportsBacklog = True
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.url = "http://www.t411.me"
self.login_done = False
def isEnabled(self):
return sickbeard.T411
def getSearchParams(self, searchString, audio_lang, subcat, french=None):
if audio_lang == "en" and french==None:
return urllib.urlencode( {'search': searchString, 'cat' : 210, 'submit' : 'Recherche', 'subcat': subcat } ) + "&term%5B17%5D%5B%5D=540&term%5B17%5D%5B%5D=721"
elif audio_lang == "fr" or french:
return urllib.urlencode( {'search': searchString, 'cat' : 210, 'submit' : 'Recherche', 'subcat': subcat } ) + "&term%5B17%5D%5B%5D=541&term%5B17%5D%5B%5D=542"
else:
return urllib.urlencode( {'search': searchString, 'cat' : 210, 'submit' : 'Recherche', 'subcat': subcat } )
def seasonValue(self, season):
values = [968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 994, 992, 993, 995, 996, 997]
return values[int(season) -1]
def episodeValue(self, episode):
values = [937, 938, 939, 940, 941, 942, 943, 944, 946, 947, 948, 949, 950, 951, 952, 954, 953, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117]
return values[int(episode) - 1]
def _get_season_search_strings(self, show, season):
showNam = show_name_helpers.allPossibleShowNames(show)
showNames = list(set(showNam))
results = []
for showName in showNames:
if (int(season) < 31):
results.append( self.getSearchParams(showName, show.audio_lang, 433 ) + "&" + urllib.urlencode({'term[46][]': 936, 'term[45][]': self.seasonValue(season)}))
results.append( self.getSearchParams(showName, show.audio_lang, 637 ) + "&" + urllib.urlencode({'term[46][]': 936, 'term[45][]': self.seasonValue(season)}))
#results.append( self.getSearchParams(showName + " S%02d" % season, show.audio_lang, 433 )) TOO MANY ERRORS
#results.append( self.getSearchParams(showName + " S%02d" % season, show.audio_lang, 637 ))
#results.append( self.getSearchParams(showName + " S%02d" % season, show.audio_lang, 634 ))
#results.append( self.getSearchParams(showName + " saison %02d" % season, show.audio_lang, 433 ))
#results.append( self.getSearchParams(showName + " saison %02d" % season, show.audio_lang, 637 ))
results.append( self.getSearchParams(showName + " saison %02d" % season, show.audio_lang, 634 ))
return results
def _get_episode_search_strings(self, ep_obj, french=None):
showNam = show_name_helpers.allPossibleShowNames(ep_obj.show)
showNames = list(set(showNam))
results = []
for showName in showNames:
results.append( self.getSearchParams( "%s S%02dE%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode), ep_obj.show.audio_lang, 433, french ))
if (int(ep_obj.scene_season) < 31 and int(ep_obj.scene_episode) < 61):
results.append( self.getSearchParams( showName, ep_obj.show.audio_lang, 433, french)+ "&" + urllib.urlencode({'term[46][]': self.episodeValue(ep_obj.scene_episode), 'term[45][]': self.seasonValue(ep_obj.scene_season)}))
#results.append( self.getSearchParams( "%s %dx%d" % ( showName, ep_obj.season, ep_obj.episode ), ep_obj.show.audio_lang , 433 )) MAY RETURN 1x12 WHEN SEARCHING 1x1
results.append( self.getSearchParams( "%s %dx%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode ), ep_obj.show.audio_lang, 433, french ))
results.append( self.getSearchParams( "%s S%02dE%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode), ep_obj.show.audio_lang, 637, french ))
if (int(ep_obj.scene_season) < 31 and int(ep_obj.scene_episode) < 61):
results.append( self.getSearchParams( showName, ep_obj.show.audio_lang, 637, french)+ "&" + urllib.urlencode({'term[46][]': self.episodeValue(ep_obj.scene_episode), 'term[45][]': self.seasonValue(ep_obj.scene_season)}))
#results.append( self.getSearchParams( "%s %dx%d" % ( showName, ep_obj.season, ep_obj.episode ), ep_obj.show.audio_lang, 637 ))
results.append( self.getSearchParams( "%s %dx%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode ), ep_obj.show.audio_lang, 637, french ))
results.append( self.getSearchParams( "%s S%02dE%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode), ep_obj.show.audio_lang, 634, french))
#results.append( self.getSearchParams( "%s %dx%d" % ( showName, ep_obj.season, ep_obj.episode ), ep_obj.show.audio_lang, 634 ))
results.append( self.getSearchParams( "%s %dx%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode ), ep_obj.show.audio_lang, 634, french ))
return results
def _get_title_and_url(self, item):
return (item.title, item.url)
def getQuality(self, item):
return item.getQuality()
def _doLogin(self, login, password):
data = urllib.urlencode({'login': login, 'password' : password, 'submit' : 'Connexion', 'remember': 1, 'url' : '/'})
self.opener.open(self.url + '/users/login', data)
self.login_done = True
def _doSearch(self, searchString, show=None, season=None, french=None):
if not self.login_done:
self._doLogin( sickbeard.T411_USERNAME, sickbeard.T411_PASSWORD )
results = []
searchUrl = self.url + '/torrents/search/?' + searchString.replace('!','')
logger.log(u"Search string: " + searchUrl, logger.DEBUG)
r = self.opener.open( searchUrl )
soup = BeautifulSoup( r, "html.parser" )
resultsTable = soup.find("table", { "class" : "results" })
if resultsTable:
rows = resultsTable.find("tbody").findAll("tr")
for row in rows:
link = row.find("a", title=True)
title = link['title']
id = row.find_all('td')[2].find_all('a')[0]['href'][1:].replace('torrents/nfo/?id=','')
downloadURL = ('http://www.t411.me/torrents/download/?id=%s' % id)
quality = Quality.nameQuality( title )
if quality==Quality.UNKNOWN and title:
if '720p' not in title.lower() and '1080p' not in title.lower():
quality=Quality.SDTV
if show and french==None:
results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality, str(show.audio_lang) ) )
elif show and french:
results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality, 'fr' ) )
else:
results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality ) )
return results
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
result = classes.TorrentDataSearchResult(episodes)
result.provider = self
return result
class T411SearchResult:
def __init__(self, opener, title, url, quality, audio_langs=None):
self.opener = opener
self.title = title
self.url = url
self.quality = quality
self.audio_langs=audio_langs
def getNZB(self):
return self.opener.open( self.url , 'wb').read()
def getQuality(self):
return self.quality
provider = T411Provider()
| gpl-3.0 |
johnmgregoire/NanoCalorimetry | plot_pprvsTsubtract20110818.py | 1 | 3566 | import numpy, h5py, os
from PnSC_main import *
from PnSC_h5io import *
from PnSC_math import *
p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110816_Zr-Hf-B.h5'
h5f=h5py.File(p, mode='r')
ehl=[\
('quadlinheating2_0817', 'cell9_25mAlinquad2_first_1_of_1', 'Zr-B, 1st'),\
('quadlinheating2_0817', 'cell9_25mAlinquad2_second_1_of_1', 'Zr-B, 2nd'),\
('quadlinheating2_0817', 'cell9_25mAlinquad2_third_1_of_1', 'Zr-B, 3rd'), \
#('quadlinheating2', 'pre_25mApluslinquad2_cell16_1_of_1', 'Hf-B, nth'), \
#('quadlinheating2', 'cell11_25malinquad2_1_of_1', 'empty'), \
]
tarrs=[]
pprarrs=[]
for i, (e, h, l) in enumerate(ehl):
hpsdl=CreateHeatProgSegDictList(p, e, h)
T=hpsdl[2]['sampletemperature'][0, :]
ppr=hpsdl[2]['samplepowerperrate'][0, :]
if 0:
pylab.plot(T, ppr*1.e6, label=l)
pylab.xlabel('Temperature (C)')
pylab.ylabel('power per rate ($\mu$J/K)')
pylab.legend(loc=0)
tarrs+=[T]
pprarrs+=[ppr]
def extremesmooth(x, binn=70, SGpts=170, SGorder=3):
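# Bin the trace (mean over blocks of `binn` points), Savitzky-Golay smooth
# the binned values, then linearly interpolate back onto the original
# sampling, extrapolating both edges so the output length matches the input.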
xb=numpy.array([x[i*binn:(i+1)*binn].mean() for i in range(len(x)//binn)])
xbf=savgolsmooth(xb, nptsoneside=SGpts, order =SGorder)
ia=numpy.arange(binn, dtype='float32')/binn
xr=numpy.concatenate([ia*(b-a)+b for a, b in zip(xbf[:-1], xbf[1:])])
xr=numpy.concatenate([(xbf[1]-xbf[0])*ia[:binn//2]+xbf[0], xr, (xbf[-1]-xbf[-2])*ia[:binn//2]+xbf[-1]])
xr=numpy.concatenate([xr, (xbf[-1]-xbf[-2])*ia[:len(x)-len(xr)]+xbf[-1]])
return xr
if 1:
x=extremesmooth(pprarrs[0])
y=extremesmooth(pprarrs[1])
z=extremesmooth(pprarrs[2])
xt=tarrs[0]
yt=tarrs[1]
zt=tarrs[2]
tmin=max([t.min() for t in [xt, yt, zt]])
tmax=min([t.max() for t in [xt, yt, zt]])
tinterp=numpy.linspace(tmin, tmax, 2000)
xinterp=numpy.interp(tinterp, xt, x)
yinterp=numpy.interp(tinterp, yt, y)
zinterp=numpy.interp(tinterp, zt, z)
pylab.figure()
for i, (t, a, ai) in enumerate([(xt, x, xinterp), (yt, y, yinterp), (zt, z, zinterp)]):
pylab.subplot(3, 1, i+1)
pylab.plot(tinterp, ai)
pylab.plot(t, a)
pylab.figure()
xsub=xinterp-(zinterp+yinterp)/2.
for i, (a, l) in enumerate([(xinterp, '1st'), ((zinterp+yinterp)/2., 'subsequent')]):
pylab.plot(tinterp, a*1.e6, label=l, lw=2)
#pylab.legend(loc=2)
pylab.xlabel('Temperature (C)', fontsize=14)
pylab.ylabel('Calorimetric Signal ($\mu$J/K)', fontsize=14)
# pylab.text(700, 14, '1st',color='b', ha='left', fontsize=14)
# pylab.text(450, 14, 'subsequent',color='g', ha='right', fontsize=14)
pylab.annotate('1st',(540, 14),xytext=(630, 14),fontsize=14,color='b',arrowprops={'arrowstyle':'->','color':'b'})
pylab.annotate('subsequent',(490, 14),xytext=(380, 14),fontsize=14,color='g',arrowprops={'arrowstyle':'->','color':'g'}, ha='right')
pylab.xlim(0, 1200)
pylab.figure()
pylab.plot([0, 1200], [0, 0], 'k', lw=1)
pylab.plot(tinterp, xsub*1.e6, 'r-', lw=2)
# pylab.annotate(' ',(510, -2),xytext=(510, 0),color='k',arrowprops={'arrowstyle':'simple','color':'k'})
# pylab.annotate(' ',(1010, -14),xytext=(1010, 0),color='k',arrowprops={'arrowstyle':'simple','color':'k'})
#pylab.legend()
pylab.xlabel('Temperature (C)', fontsize=14)
pylab.ylabel('Differential signal ($\mu$J/K)', fontsize=14)
pylab.xlim(0, 1200)
pylab.subplots_adjust(right=.55, top=.5)
print xsub[(tinterp>260)*(tinterp<670)].sum()*(tinterp[1]-tinterp[0])*1.e6
print xsub[tinterp>670].sum()*(tinterp[1]-tinterp[0])*1.e6
pylab.show()
| bsd-3-clause |
mitya57/django | django/utils/translation/__init__.py | 7 | 7498 | """
Internationalization support.
"""
import re
import warnings
from contextlib import ContextDecorator
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans:
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
# An alias since Django 2.0
ugettext = gettext
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
# An alias since Django 2.0
ungettext = ngettext
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = ugettext_lazy = lazy(gettext, str)
pgettext_lazy = lazy(pgettext, str)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, int):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
original_kwargs = kwargs.copy()
class NumberAwareString(resultclass):
def __bool__(self):
return bool(kwargs['singular'])
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
try:
number_value = rhs[number]
except KeyError:
raise KeyError(
"Your dictionary lacks key '%s\'. Please provide "
"it, because it is required to determine whether "
"string is singular or plural." % number
)
else:
number_value = rhs
kwargs['number'] = number_value
translated = func(**kwargs)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs))
return proxy
def _lazy_number_unpickle(func, resultclass, number, kwargs):
return lazy_number(func, resultclass, number=number, **kwargs)
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
# An alias since Django 2.0
ungettext_lazy = ngettext_lazy
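# A usage sketch (assuming translations are configured): when `number` names a
# key, the plural form is picked at interpolation time, e.g.
#
# msg = ngettext_lazy('%(count)d apple', '%(count)d apples', 'count')
# str(msg % {'count': 2}) # -> '2 apples'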
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, str, context=context, singular=singular, plural=plural, number=number)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
warnings.warn(
'django.utils.translate.string_concat() is deprecated in '
'favor of django.utils.text.format_lazy().',
RemovedInDjango21Warning, stacklevel=2)
return ''.join(str(s) for s in strings)
string_concat = lazy(_string_concat, str)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
info = get_language_info(lang_info['fallback'][0])
else:
info = lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
if info:
info['name_translated'] = gettext_lazy(info['name'])
return info
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
| bsd-3-clause |
llvm-mirror/lldb | utils/lui/lldbutil.py | 13 | 33522 | ##===-- lldbutil.py ------------------------------------------*- Python -*-===##
##
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
##
##===----------------------------------------------------------------------===##
"""
This LLDB module contains miscellaneous utilities.
Some of the test suite takes advantage of the utility functions defined here.
They can also be useful for general purpose lldb scripting.
"""
from __future__ import print_function
import lldb
import os
import sys
import io
# ===================================================
# Utilities for locating/checking executable programs
# ===================================================
def is_exe(fpath):
"""Returns True if fpath is an executable."""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
"""Returns the full path to a program; None otherwise."""
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
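# Hedged usage sketch (not part of the original module): which() mirrors the
# shell utility of the same name, e.g.:
#
#     which('ls')         # -> '/bin/ls' on a typical Linux system (path varies)
#     which('/bin/ls')    # -> '/bin/ls' if the file exists and is executable
#     which('no-such')    # -> None when nothing on PATH matches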
# ===================================================
# Disassembly for an SBFunction or an SBSymbol object
# ===================================================
def disassemble(target, function_or_symbol):
"""Disassemble the function or symbol given a target.
It returns the disassembly content in a string object.
"""
buf = io.StringIO()
insts = function_or_symbol.GetInstructions(target)
for i in insts:
print(i, file=buf)
return buf.getvalue()
# ==========================================================
# Integer (byte size 1, 2, 4, and 8) to bytearray conversion
# ==========================================================
def int_to_bytearray(val, bytesize):
"""Utility function to convert an integer into a bytearray.
    It returns the bytearray in the little endian format. It is easy to get the
    big endian format; just do ba.reverse() on the returned object.
"""
import struct
if bytesize == 1:
return bytearray([val])
# Little endian followed by a format character.
template = "<%c"
if bytesize == 2:
fmt = template % 'h'
elif bytesize == 4:
fmt = template % 'i'
    elif bytesize == 8:
fmt = template % 'q'
else:
return None
packed = struct.pack(fmt, val)
return bytearray(ord(x) for x in packed)
def bytearray_to_int(bytes, bytesize):
"""Utility function to convert a bytearray into an integer.
It interprets the bytearray in the little endian format. For a big endian
bytearray, just do ba.reverse() on the object before passing it in.
"""
import struct
if bytesize == 1:
return bytes[0]
# Little endian followed by a format character.
template = "<%c"
if bytesize == 2:
fmt = template % 'h'
elif bytesize == 4:
fmt = template % 'i'
    elif bytesize == 8:
fmt = template % 'q'
else:
return None
unpacked = struct.unpack(fmt, str(bytes))
return unpacked[0]
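# Hedged usage sketch (not part of the original module): the two helpers above
# round-trip little-endian values for byte sizes 1, 2, 4 and 8, e.g.:
#
#     ba = int_to_bytearray(0x1234, 2)   # -> bytearray(b'\x34\x12')
#     bytearray_to_int(ba, 2)            # -> 0x1234
#     int_to_bytearray(1, 3)             # -> None (unsupported byte size)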
# ==============================================================
# Get the description of an lldb object or None if not available
# ==============================================================
def get_description(obj, option=None):
"""Calls lldb_obj.GetDescription() and returns a string, or None.
For SBTarget, SBBreakpointLocation, and SBWatchpoint lldb objects, an extra
option can be passed in to describe the detailed level of description
desired:
o lldb.eDescriptionLevelBrief
o lldb.eDescriptionLevelFull
o lldb.eDescriptionLevelVerbose
"""
    method = getattr(obj, 'GetDescription', None)
if not method:
return None
tuple = (lldb.SBTarget, lldb.SBBreakpointLocation, lldb.SBWatchpoint)
if isinstance(obj, tuple):
if option is None:
option = lldb.eDescriptionLevelBrief
stream = lldb.SBStream()
if option is None:
success = method(stream)
else:
success = method(stream, option)
if not success:
return None
return stream.GetData()
# =================================================
# Convert some enum value to its string counterpart
# =================================================
def state_type_to_str(enum):
"""Returns the stateType string given an enum."""
if enum == lldb.eStateInvalid:
return "invalid"
elif enum == lldb.eStateUnloaded:
return "unloaded"
elif enum == lldb.eStateConnected:
return "connected"
elif enum == lldb.eStateAttaching:
return "attaching"
elif enum == lldb.eStateLaunching:
return "launching"
elif enum == lldb.eStateStopped:
return "stopped"
elif enum == lldb.eStateRunning:
return "running"
elif enum == lldb.eStateStepping:
return "stepping"
elif enum == lldb.eStateCrashed:
return "crashed"
elif enum == lldb.eStateDetached:
return "detached"
elif enum == lldb.eStateExited:
return "exited"
elif enum == lldb.eStateSuspended:
return "suspended"
else:
raise Exception("Unknown StateType enum")
def stop_reason_to_str(enum):
"""Returns the stopReason string given an enum."""
if enum == lldb.eStopReasonInvalid:
return "invalid"
elif enum == lldb.eStopReasonNone:
return "none"
elif enum == lldb.eStopReasonTrace:
return "trace"
elif enum == lldb.eStopReasonBreakpoint:
return "breakpoint"
elif enum == lldb.eStopReasonWatchpoint:
return "watchpoint"
elif enum == lldb.eStopReasonSignal:
return "signal"
elif enum == lldb.eStopReasonException:
return "exception"
elif enum == lldb.eStopReasonPlanComplete:
return "plancomplete"
elif enum == lldb.eStopReasonThreadExiting:
return "threadexiting"
else:
raise Exception("Unknown StopReason enum")
def symbol_type_to_str(enum):
"""Returns the symbolType string given an enum."""
if enum == lldb.eSymbolTypeInvalid:
return "invalid"
elif enum == lldb.eSymbolTypeAbsolute:
return "absolute"
elif enum == lldb.eSymbolTypeCode:
return "code"
elif enum == lldb.eSymbolTypeData:
return "data"
elif enum == lldb.eSymbolTypeTrampoline:
return "trampoline"
elif enum == lldb.eSymbolTypeRuntime:
return "runtime"
elif enum == lldb.eSymbolTypeException:
return "exception"
elif enum == lldb.eSymbolTypeSourceFile:
return "sourcefile"
elif enum == lldb.eSymbolTypeHeaderFile:
return "headerfile"
elif enum == lldb.eSymbolTypeObjectFile:
return "objectfile"
elif enum == lldb.eSymbolTypeCommonBlock:
return "commonblock"
elif enum == lldb.eSymbolTypeBlock:
return "block"
elif enum == lldb.eSymbolTypeLocal:
return "local"
elif enum == lldb.eSymbolTypeParam:
return "param"
elif enum == lldb.eSymbolTypeVariable:
return "variable"
elif enum == lldb.eSymbolTypeVariableType:
return "variabletype"
elif enum == lldb.eSymbolTypeLineEntry:
return "lineentry"
elif enum == lldb.eSymbolTypeLineHeader:
return "lineheader"
elif enum == lldb.eSymbolTypeScopeBegin:
return "scopebegin"
elif enum == lldb.eSymbolTypeScopeEnd:
return "scopeend"
elif enum == lldb.eSymbolTypeAdditional:
return "additional"
elif enum == lldb.eSymbolTypeCompiler:
return "compiler"
elif enum == lldb.eSymbolTypeInstrumentation:
return "instrumentation"
    elif enum == lldb.eSymbolTypeUndefined:
        return "undefined"
    else:
        raise Exception("Unknown SymbolType enum")
def value_type_to_str(enum):
"""Returns the valueType string given an enum."""
if enum == lldb.eValueTypeInvalid:
return "invalid"
elif enum == lldb.eValueTypeVariableGlobal:
return "global_variable"
elif enum == lldb.eValueTypeVariableStatic:
return "static_variable"
elif enum == lldb.eValueTypeVariableArgument:
return "argument_variable"
elif enum == lldb.eValueTypeVariableLocal:
return "local_variable"
elif enum == lldb.eValueTypeRegister:
return "register"
elif enum == lldb.eValueTypeRegisterSet:
return "register_set"
elif enum == lldb.eValueTypeConstResult:
return "constant_result"
else:
raise Exception("Unknown ValueType enum")
# ==================================================
# Get stopped threads due to each stop reason.
# ==================================================
def sort_stopped_threads(process,
breakpoint_threads=None,
crashed_threads=None,
watchpoint_threads=None,
signal_threads=None,
exiting_threads=None,
other_threads=None):
""" Fills array *_threads with threads stopped for the corresponding stop
reason.
"""
    for lst in [breakpoint_threads,
                crashed_threads,
                watchpoint_threads,
                signal_threads,
                exiting_threads,
                other_threads]:
if lst is not None:
lst[:] = []
for thread in process:
dispatched = False
for (reason, list) in [(lldb.eStopReasonBreakpoint, breakpoint_threads),
(lldb.eStopReasonException, crashed_threads),
(lldb.eStopReasonWatchpoint, watchpoint_threads),
(lldb.eStopReasonSignal, signal_threads),
(lldb.eStopReasonThreadExiting, exiting_threads),
(None, other_threads)]:
if not dispatched and list is not None:
if thread.GetStopReason() == reason or reason is None:
list.append(thread)
dispatched = True
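# Hedged usage sketch (not part of the original module): pass in only the lists
# you care about; each is cleared and refilled in place, e.g.:
#
#     bp_threads, other = [], []
#     sort_stopped_threads(process, breakpoint_threads=bp_threads,
#                          other_threads=other)
#     # bp_threads now holds threads stopped at breakpoints, 'other' the rest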
# ==================================================
# Utility functions for setting breakpoints
# ==================================================
def run_break_set_by_file_and_line(
test,
file_name,
line_number,
extra_options=None,
num_expected_locations=1,
loc_exact=False,
module_name=None):
"""Set a breakpoint by file and line, returning the breakpoint number.
If extra_options is not None, then we append it to the breakpoint set command.
If num_expected_locations is -1 we check that we got AT LEAST one location, otherwise we check that num_expected_locations equals the number of locations.
If loc_exact is true, we check that there is one location, and that location must be at the input file and line number."""
if file_name is None:
command = 'breakpoint set -l %d' % (line_number)
else:
command = 'breakpoint set -f "%s" -l %d' % (file_name, line_number)
if module_name:
command += " --shlib '%s'" % (module_name)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
if num_expected_locations == 1 and loc_exact:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations,
file_name=file_name,
line_number=line_number,
module_name=module_name)
else:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_by_symbol(
test,
symbol,
extra_options=None,
num_expected_locations=-1,
sym_exact=False,
module_name=None):
"""Set a breakpoint by symbol name. Common options are the same as run_break_set_by_file_and_line.
If sym_exact is true, then the output symbol must match the input exactly, otherwise we do a substring match."""
command = 'breakpoint set -n "%s"' % (symbol)
if module_name:
command += " --shlib '%s'" % (module_name)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
if num_expected_locations == 1 and sym_exact:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations,
symbol_name=symbol,
module_name=module_name)
else:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_by_selector(
test,
selector,
extra_options=None,
num_expected_locations=-1,
module_name=None):
"""Set a breakpoint by selector. Common options are the same as run_break_set_by_file_and_line."""
command = 'breakpoint set -S "%s"' % (selector)
if module_name:
command += ' --shlib "%s"' % (module_name)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
if num_expected_locations == 1:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations,
symbol_name=selector,
symbol_match_exact=False,
module_name=module_name)
else:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_by_regexp(
test,
regexp,
extra_options=None,
num_expected_locations=-1):
"""Set a breakpoint by regular expression match on symbol name. Common options are the same as run_break_set_by_file_and_line."""
command = 'breakpoint set -r "%s"' % (regexp)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_by_source_regexp(
test,
regexp,
extra_options=None,
num_expected_locations=-1):
"""Set a breakpoint by source regular expression. Common options are the same as run_break_set_by_file_and_line."""
command = 'breakpoint set -p "%s"' % (regexp)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_command(test, command):
"""Run the command passed in - it must be some break set variant - and analyze the result.
Returns a dictionary of information gleaned from the command-line results.
Will assert if the breakpoint setting fails altogether.
Dictionary will contain:
    bpno - breakpoint number of the newly created breakpoint, -1 on error.
num_locations - number of locations set for the breakpoint.
If there is only one location, the dictionary MAY contain:
file - source file name
line_no - source line number
symbol - symbol name
inline_symbol - inlined symbol name
offset - offset from the original symbol
module - module
address - address at which the breakpoint was set."""
patterns = [
r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>[0-9]+) locations\.$",
r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>no) locations \(pending\)\.",
r"^Breakpoint (?P<bpno>[0-9]+): where = (?P<module>.*)`(?P<symbol>[+\-]{0,1}[^+]+)( \+ (?P<offset>[0-9]+)){0,1}( \[inlined\] (?P<inline_symbol>.*)){0,1} at (?P<file>[^:]+):(?P<line_no>[0-9]+), address = (?P<address>0x[0-9a-fA-F]+)$",
r"^Breakpoint (?P<bpno>[0-9]+): where = (?P<module>.*)`(?P<symbol>.*)( \+ (?P<offset>[0-9]+)){0,1}, address = (?P<address>0x[0-9a-fA-F]+)$"]
match_object = test.match(command, patterns)
break_results = match_object.groupdict()
# We always insert the breakpoint number, setting it to -1 if we couldn't find it
# Also, make sure it gets stored as an integer.
if not 'bpno' in break_results:
break_results['bpno'] = -1
else:
break_results['bpno'] = int(break_results['bpno'])
# We always insert the number of locations
# If ONE location is set for the breakpoint, then the output doesn't mention locations, but it has to be 1...
# We also make sure it is an integer.
if not 'num_locations' in break_results:
num_locations = 1
else:
num_locations = break_results['num_locations']
if num_locations == 'no':
num_locations = 0
else:
num_locations = int(break_results['num_locations'])
break_results['num_locations'] = num_locations
if 'line_no' in break_results:
break_results['line_no'] = int(break_results['line_no'])
return break_results
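# Hedged illustration (not part of the original module): the first pattern
# above matches the multi-location summary line that lldb prints, e.g.:
#
#     import re
#     m = re.match(r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>[0-9]+) locations\.$",
#                  "Breakpoint 1: 2 locations.")
#     m.groupdict()   # -> {'bpno': '1', 'num_locations': '2'}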
def get_bpno_from_match(break_results):
return int(break_results['bpno'])
def check_breakpoint_result(
test,
break_results,
file_name=None,
line_number=-1,
symbol_name=None,
symbol_match_exact=True,
module_name=None,
offset=-1,
num_locations=-1):
out_num_locations = break_results['num_locations']
if num_locations == -1:
test.assertTrue(out_num_locations > 0,
"Expecting one or more locations, got none.")
else:
test.assertTrue(
num_locations == out_num_locations,
"Expecting %d locations, got %d." %
(num_locations,
out_num_locations))
if file_name:
out_file_name = ""
if 'file' in break_results:
out_file_name = break_results['file']
test.assertTrue(
file_name == out_file_name,
"Breakpoint file name '%s' doesn't match resultant name '%s'." %
(file_name,
out_file_name))
if line_number != -1:
            out_line_number = -1
if 'line_no' in break_results:
out_line_number = break_results['line_no']
test.assertTrue(
line_number == out_line_number,
"Breakpoint line number %s doesn't match resultant line %s." %
(line_number,
out_line_number))
if symbol_name:
out_symbol_name = ""
# Look first for the inlined symbol name, otherwise use the symbol
# name:
if 'inline_symbol' in break_results and break_results['inline_symbol']:
out_symbol_name = break_results['inline_symbol']
elif 'symbol' in break_results:
out_symbol_name = break_results['symbol']
if symbol_match_exact:
test.assertTrue(
symbol_name == out_symbol_name,
"Symbol name '%s' doesn't match resultant symbol '%s'." %
(symbol_name,
out_symbol_name))
else:
test.assertTrue(
out_symbol_name.find(symbol_name) != -
1,
"Symbol name '%s' isn't in resultant symbol '%s'." %
(symbol_name,
out_symbol_name))
if module_name:
        out_module_name = None
if 'module' in break_results:
out_module_name = break_results['module']
test.assertTrue(
module_name.find(out_module_name) != -
1,
"Symbol module name '%s' isn't in expected module name '%s'." %
(out_module_name,
module_name))
# ==================================================
# Utility functions related to Threads and Processes
# ==================================================
def get_stopped_threads(process, reason):
"""Returns the thread(s) with the specified stop reason in a list.
The list can be empty if no such thread exists.
"""
threads = []
for t in process:
if t.GetStopReason() == reason:
threads.append(t)
return threads
def get_stopped_thread(process, reason):
"""A convenience function which returns the first thread with the given stop
reason or None.
Example usages:
1. Get the stopped thread due to a breakpoint condition
...
from lldbutil import get_stopped_thread
thread = get_stopped_thread(process, lldb.eStopReasonPlanComplete)
self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint condition")
...
2. Get the thread stopped due to a breakpoint
...
from lldbutil import get_stopped_thread
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint")
...
"""
threads = get_stopped_threads(process, reason)
if len(threads) == 0:
return None
return threads[0]
def get_threads_stopped_at_breakpoint(process, bkpt):
""" For a stopped process returns the thread stopped at the breakpoint passed in bkpt"""
stopped_threads = []
threads = []
stopped_threads = get_stopped_threads(process, lldb.eStopReasonBreakpoint)
if len(stopped_threads) == 0:
return threads
for thread in stopped_threads:
# Make sure we've hit our breakpoint...
break_id = thread.GetStopReasonDataAtIndex(0)
if break_id == bkpt.GetID():
threads.append(thread)
return threads
def continue_to_breakpoint(process, bkpt):
""" Continues the process, if it stops, returns the threads stopped at bkpt; otherwise, returns None"""
process.Continue()
if process.GetState() != lldb.eStateStopped:
return None
else:
return get_threads_stopped_at_breakpoint(process, bkpt)
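# Hedged usage sketch (not part of the original module): a typical
# run-to-breakpoint step built from the helpers above, e.g.:
#
#     threads = continue_to_breakpoint(process, bkpt)
#     if threads:
#         frame = threads[0].GetFrameAtIndex(0)   # inspect the stop site
#     # a None result means the process stopped for some other reason (or exited)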
def get_caller_symbol(thread):
"""
Returns the symbol name for the call site of the leaf function.
"""
depth = thread.GetNumFrames()
if depth <= 1:
return None
caller = thread.GetFrameAtIndex(1).GetSymbol()
if caller:
return caller.GetName()
else:
return None
def get_function_names(thread):
"""
Returns a sequence of function names from the stack frames of this thread.
"""
def GetFuncName(i):
return thread.GetFrameAtIndex(i).GetFunctionName()
return [GetFuncName(i) for i in range(thread.GetNumFrames())]
def get_symbol_names(thread):
"""
Returns a sequence of symbols for this thread.
"""
def GetSymbol(i):
return thread.GetFrameAtIndex(i).GetSymbol().GetName()
return [GetSymbol(i) for i in range(thread.GetNumFrames())]
def get_pc_addresses(thread):
"""
Returns a sequence of pc addresses for this thread.
"""
def GetPCAddress(i):
return thread.GetFrameAtIndex(i).GetPCAddress()
return [GetPCAddress(i) for i in range(thread.GetNumFrames())]
def get_filenames(thread):
"""
Returns a sequence of file names from the stack frames of this thread.
"""
def GetFilename(i):
return thread.GetFrameAtIndex(
i).GetLineEntry().GetFileSpec().GetFilename()
return [GetFilename(i) for i in range(thread.GetNumFrames())]
def get_line_numbers(thread):
"""
Returns a sequence of line numbers from the stack frames of this thread.
"""
def GetLineNumber(i):
return thread.GetFrameAtIndex(i).GetLineEntry().GetLine()
return [GetLineNumber(i) for i in range(thread.GetNumFrames())]
def get_module_names(thread):
"""
Returns a sequence of module names from the stack frames of this thread.
"""
def GetModuleName(i):
return thread.GetFrameAtIndex(
i).GetModule().GetFileSpec().GetFilename()
return [GetModuleName(i) for i in range(thread.GetNumFrames())]
def get_stack_frames(thread):
"""
Returns a sequence of stack frames for this thread.
"""
def GetStackFrame(i):
return thread.GetFrameAtIndex(i)
return [GetStackFrame(i) for i in range(thread.GetNumFrames())]
def print_stacktrace(thread, string_buffer=False):
"""Prints a simple stack trace of this thread."""
output = io.StringIO() if string_buffer else sys.stdout
target = thread.GetProcess().GetTarget()
depth = thread.GetNumFrames()
mods = get_module_names(thread)
funcs = get_function_names(thread)
symbols = get_symbol_names(thread)
files = get_filenames(thread)
lines = get_line_numbers(thread)
addrs = get_pc_addresses(thread)
if thread.GetStopReason() != lldb.eStopReasonInvalid:
desc = "stop reason=" + stop_reason_to_str(thread.GetStopReason())
else:
desc = ""
print("Stack trace for thread id={0:#x} name={1} queue={2} ".format(
thread.GetThreadID(), thread.GetName(), thread.GetQueueName()) + desc, file=output)
for i in range(depth):
frame = thread.GetFrameAtIndex(i)
function = frame.GetFunction()
load_addr = addrs[i].GetLoadAddress(target)
if not function:
file_addr = addrs[i].GetFileAddress()
start_addr = frame.GetSymbol().GetStartAddress().GetFileAddress()
symbol_offset = file_addr - start_addr
print(" frame #{num}: {addr:#016x} {mod}`{symbol} + {offset}".format(
num=i, addr=load_addr, mod=mods[i], symbol=symbols[i], offset=symbol_offset), file=output)
else:
print(" frame #{num}: {addr:#016x} {mod}`{func} at {file}:{line} {args}".format(
num=i, addr=load_addr, mod=mods[i], func='%s [inlined]' %
funcs[i] if frame.IsInlined() else funcs[i], file=files[i], line=lines[i], args=get_args_as_string(
frame, showFuncName=False) if not frame.IsInlined() else '()'), file=output)
if string_buffer:
return output.getvalue()
def print_stacktraces(process, string_buffer=False):
"""Prints the stack traces of all the threads."""
output = io.StringIO() if string_buffer else sys.stdout
print("Stack traces for " + str(process), file=output)
for thread in process:
print(print_stacktrace(thread, string_buffer=True), file=output)
if string_buffer:
return output.getvalue()
# ===================================
# Utility functions related to Frames
# ===================================
def get_parent_frame(frame):
"""
Returns the parent frame of the input frame object; None if not available.
"""
thread = frame.GetThread()
parent_found = False
for f in thread:
if parent_found:
return f
if f.GetFrameID() == frame.GetFrameID():
parent_found = True
# If we reach here, no parent has been found, return None.
return None
def get_args_as_string(frame, showFuncName=True):
"""
Returns the args of the input frame object as a string.
"""
# arguments => True
# locals => False
# statics => False
# in_scope_only => True
vars = frame.GetVariables(True, False, False, True) # type of SBValueList
args = [] # list of strings
for var in vars:
args.append("(%s)%s=%s" % (var.GetTypeName(),
var.GetName(),
var.GetValue()))
if frame.GetFunction():
name = frame.GetFunction().GetName()
elif frame.GetSymbol():
name = frame.GetSymbol().GetName()
else:
name = ""
if showFuncName:
return "%s(%s)" % (name, ", ".join(args))
else:
return "(%s)" % (", ".join(args))
def print_registers(frame, string_buffer=False):
"""Prints all the register sets of the frame."""
output = io.StringIO() if string_buffer else sys.stdout
print("Register sets for " + str(frame), file=output)
registerSet = frame.GetRegisters() # Return type of SBValueList.
print("Frame registers (size of register set = %d):" % registerSet.GetSize(
), file=output)
for value in registerSet:
#print >> output, value
print("%s (number of children = %d):" % (
value.GetName(), value.GetNumChildren()), file=output)
for child in value:
print("Name: %s, Value: %s" % (
child.GetName(), child.GetValue()), file=output)
if string_buffer:
return output.getvalue()
def get_registers(frame, kind):
"""Returns the registers given the frame and the kind of registers desired.
Returns None if there's no such kind.
"""
registerSet = frame.GetRegisters() # Return type of SBValueList.
for value in registerSet:
if kind.lower() in value.GetName().lower():
return value
return None
def get_GPRs(frame):
"""Returns the general purpose registers of the frame as an SBValue.
The returned SBValue object is iterable. An example:
...
from lldbutil import get_GPRs
regs = get_GPRs(frame)
for reg in regs:
print "%s => %s" % (reg.GetName(), reg.GetValue())
...
"""
return get_registers(frame, "general purpose")
def get_FPRs(frame):
"""Returns the floating point registers of the frame as an SBValue.
The returned SBValue object is iterable. An example:
...
from lldbutil import get_FPRs
regs = get_FPRs(frame)
for reg in regs:
print "%s => %s" % (reg.GetName(), reg.GetValue())
...
"""
return get_registers(frame, "floating point")
def get_ESRs(frame):
"""Returns the exception state registers of the frame as an SBValue.
The returned SBValue object is iterable. An example:
...
from lldbutil import get_ESRs
regs = get_ESRs(frame)
for reg in regs:
print "%s => %s" % (reg.GetName(), reg.GetValue())
...
"""
return get_registers(frame, "exception state")
# ======================================
# Utility classes/functions for SBValues
# ======================================
class BasicFormatter(object):
"""The basic formatter inspects the value object and prints the value."""
def format(self, value, buffer=None, indent=0):
if not buffer:
output = io.StringIO()
else:
output = buffer
# If there is a summary, it suffices.
val = value.GetSummary()
# Otherwise, get the value.
if val is None:
val = value.GetValue()
if val is None and value.GetNumChildren() > 0:
val = "%s (location)" % value.GetLocation()
print("{indentation}({type}) {name} = {value}".format(
indentation=' ' * indent,
type=value.GetTypeName(),
name=value.GetName(),
value=val), file=output)
return output.getvalue()
class ChildVisitingFormatter(BasicFormatter):
"""The child visiting formatter prints the value and its immediate children.
The constructor takes a keyword arg: indent_child, which defaults to 2.
"""
def __init__(self, indent_child=2):
"""Default indentation of 2 SPC's for the children."""
self.cindent = indent_child
def format(self, value, buffer=None):
if not buffer:
output = io.StringIO()
else:
output = buffer
BasicFormatter.format(self, value, buffer=output)
for child in value:
BasicFormatter.format(
self, child, buffer=output, indent=self.cindent)
return output.getvalue()
class RecursiveDecentFormatter(BasicFormatter):
"""The recursive decent formatter prints the value and the decendents.
The constructor takes two keyword args: indent_level, which defaults to 0,
and indent_child, which defaults to 2. The current indentation level is
determined by indent_level, while the immediate children has an additional
indentation by inden_child.
"""
def __init__(self, indent_level=0, indent_child=2):
self.lindent = indent_level
self.cindent = indent_child
def format(self, value, buffer=None):
if not buffer:
output = io.StringIO()
else:
output = buffer
BasicFormatter.format(self, value, buffer=output, indent=self.lindent)
new_indent = self.lindent + self.cindent
for child in value:
if child.GetSummary() is not None:
BasicFormatter.format(
self, child, buffer=output, indent=new_indent)
else:
if child.GetNumChildren() > 0:
rdf = RecursiveDecentFormatter(indent_level=new_indent)
rdf.format(child, buffer=output)
else:
BasicFormatter.format(
self, child, buffer=output, indent=new_indent)
return output.getvalue()
| apache-2.0 |
felipemontefuscolo/bitme | tactic/bitmex_dummy_tactic.py | 1 | 1028 | from common.quote import Quote
from tactic import TacticInterface, ExchangeInterface, Symbol, OrderCommon, Fill
import pandas as pd
class BitmexDummyTactic(TacticInterface):
"""
This class is associated to orders issued by Bitmex
"""
def finalize(self) -> None:
pass
def handle_quote(self, quote: Quote) -> None:
pass
def handle_order_completed(self, order: OrderCommon) -> None:
pass
def handle_liquidation(self, pnl: float):
pass
def id(self):
return 'DUMMY'
def initialize(self, exchange: ExchangeInterface, preferences: dict) -> None:
pass
def get_symbol(self) -> Symbol:
pass
def handle_1m_candles(self, candles1m: pd.DataFrame) -> None:
pass
def handle_submission_error(self, failed_order: OrderCommon) -> None:
pass
def handle_fill(self, fill: Fill) -> None:
pass
def handle_cancel(self, order: OrderCommon) -> None:
pass
def handle_trade(self):
pass
| mpl-2.0 |
RayMick/SFrame | oss_src/unity/python/sframe/connect/main.py | 8 | 7392 | """
This module contains the main logic to start, query, and stop the graphlab server-client connection.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from ..cython.cy_unity import UnityGlobalProxy
from ..cython.cy_ipc import PyCommClient as Client
from ..cython.cy_ipc import get_public_secret_key_pair
from ..connect.server import LocalServer, RemoteServer
from ..connect import __SERVER__, __CLIENT__, _get_metric_tracker
import decorator
import logging
import os
""" The module level logger object """
__LOGGER__ = logging.getLogger(__name__)
LOCAL_SERVER_TYPE = 'local'
REMOTE_SERVER_TYPE = 'remote'
ENGINE_START_ERROR_MESSAGE = 'Cannot connect to GraphLab Create engine. ' + \
'Contact support@dato.com for help.'
# Decorator which catches exceptions and logs them as errors.
@decorator.decorator
def __catch_and_log__(func, *args, **kargs):
try:
return func(*args, **kargs)
except Exception, error:
logging.getLogger(__name__).error(error)
@__catch_and_log__
def launch(server_addr=None, server_bin=None, server_log=None, auth_token=None,
server_public_key=''):
"""
Launch a connection to the graphlab server. The connection can be stopped by
the `stop` function.
    Automatically spawns a local server if no arguments are provided or "server_bin"
    is specified.
Notes
-----
    Only a single connection can exist at any time.
    Prints a warning if trying to launch while there is an active connection.
Parameters
----------
server_addr : string
The address of the server.
server_bin : string
The path to the server binary (local server only).
server_log : string
The path to the server log (local server only).
server_public_key : string
The server's libsodium public key, used for encryption. Default is no encryption.
"""
if is_connected():
__LOGGER__.warning(
"Attempt to connect to a new server while still connected to a server."
" Please stop the connection first by running 'graphlab.stop()' and try again.")
return
try:
server_type = _get_server_type(server_addr)
__LOGGER__.debug("Server type: %s" % server_type)
except ValueError as e:
__LOGGER__.error(e)
_get_metric_tracker().track('server_launch.server_type_error', send_sys_info=True)
return
    # construct a server instance based on the server_type
if (server_type == LOCAL_SERVER_TYPE):
server = LocalServer(server_addr, server_bin, server_log)
elif (server_type == REMOTE_SERVER_TYPE):
server = RemoteServer(server_addr, auth_token, public_key=server_public_key)
else:
raise ValueError('Invalid server type: %s' % server_type)
# start the server
try:
server.start()
except Exception as e:
__LOGGER__.error('Cannot start server: %s' % e)
server.try_stop()
return
# start the client
(public_key, secret_key) = ('', '')
if server_public_key != '':
(public_key, secret_key) = get_public_secret_key_pair()
try:
num_tolerable_ping_failures = 4294967295
client = Client([], server.get_server_addr(), num_tolerable_ping_failures,
public_key=public_key, secret_key=secret_key,
server_public_key=server_public_key)
if hasattr(server, 'proc') and hasattr(server.proc, 'pid'):
client.set_server_alive_watch_pid(server.proc.pid)
if(auth_token is not None):
client.add_auth_method_token(auth_token)
client.start()
except Exception as e:
__LOGGER__.error("Cannot start client: %s" % e)
if (client):
client.stop()
return
_assign_server_and_client(server, client)
assert is_connected()
def _get_server_type(server_addr):
"""
    Returns the server type, one of {LOCAL_SERVER_TYPE, REMOTE_SERVER_TYPE},
    identified from the server_addr.
Parameters
----------
server_addr : string
The server address url string.
Returns
--------
out : server_type
{'local', 'remote'}
Raises
-------
ValueError
on invalid server address.
"""
# construct the server object
    # Depending on which parameters are provided, decide to either
# start a remote server or a local server
if server_addr is None:
server_type = LOCAL_SERVER_TYPE
elif server_addr.startswith('tcp'):
server_type = REMOTE_SERVER_TYPE
elif server_addr.startswith('ipc'):
if (os.path.exists(server_addr[6:])):
server_type = REMOTE_SERVER_TYPE
else:
server_type = LOCAL_SERVER_TYPE
else:
        raise ValueError('Invalid server address: %s. Server address must start with ipc:// or tcp://.' % server_addr)
return server_type
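# Hedged illustration (not part of the original module): how addresses map to
# server types under the rules above, e.g.:
#
#     _get_server_type(None)                   # -> 'local'
#     _get_server_type('tcp://10.0.0.1:9000')  # -> 'remote'
#     _get_server_type('ipc:///tmp/graphlab')  # 'remote' if /tmp/graphlab exists,
#                                              # otherwise 'local'
#     _get_server_type('http://x')             # raises ValueError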
@__catch_and_log__
def stop():
"""
Stops the current connection to the graphlab server.
    All objects created on the server will become inaccessible.
    Resets the global server and client objects to None.
"""
global __CLIENT__, __SERVER__
if not is_connected():
return
__LOGGER__.info("Stopping the server connection.")
if (__CLIENT__):
__CLIENT__.stop()
__CLIENT__ = None
if (__SERVER__):
__SERVER__.try_stop()
__SERVER__ = None
def is_connected():
"""
Returns true if connected to the server.
"""
if (__CLIENT__ is not None and __SERVER__ is not None):
# both client and server are live
return True
elif (__CLIENT__ is None and __SERVER__ is None):
# both client and server are dead
return False
else:
        # unlikely state: one of them is live and the other dead
raise RuntimeError('GraphLab connection error.')
def get_client():
"""
    Returns the global ipc client object, launching a connection first if one is not present.
"""
if not is_connected():
launch()
assert is_connected(), ENGINE_START_ERROR_MESSAGE
return __CLIENT__
def get_server():
"""
    Returns the global graphlab server object, launching a connection first if one is not present.
"""
if not is_connected():
launch()
assert is_connected(), ENGINE_START_ERROR_MESSAGE
return __SERVER__
def get_unity():
"""
Returns the unity global object of the current connection.
If no connection is present, automatically launch a localserver connection.
"""
if not is_connected():
launch()
assert is_connected(), ENGINE_START_ERROR_MESSAGE
return __UNITY_GLOBAL_PROXY__
def _assign_server_and_client(server, client):
"""
Helper function to assign the global __SERVER__ and __CLIENT__ pair.
"""
global __SERVER__, __CLIENT__, __UNITY_GLOBAL_PROXY__
__SERVER__ = server
__CLIENT__ = client
__UNITY_GLOBAL_PROXY__ = UnityGlobalProxy(__CLIENT__)
server.get_logger().info('GraphLab Server Version: %s' %
UnityGlobalProxy(client).get_version())
from ..extensions import _publish
_publish()
# Register an exit callback handler to stop the server on python exit.
import atexit
atexit.register(stop)
| bsd-3-clause |
ckirby/django | django/contrib/messages/storage/cookie.py | 471 | 6545 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
"""
Compactly serializes instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
# Using 0/1 here instead of False/True to produce more compact json
is_safedata = 1 if isinstance(obj.message, SafeData) else 0
message = [self.message_key, is_safedata, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super(MessageEncoder, self).default(obj)
class MessageDecoder(json.JSONDecoder):
"""
Decodes JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
if len(obj) == 3:
# Compatibility with previously-encoded messages
return Message(*obj[1:])
if obj[1]:
obj[3] = mark_safe(obj[3])
return Message(*obj[2:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return {key: self.process_messages(value)
for key, value in six.iteritems(obj)}
return obj
def decode(self, s, **kwargs):
decoded = super(MessageDecoder, self).decode(s, **kwargs)
return self.process_messages(decoded)
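# Hedged illustration (not part of the original module): the compact wire
# format produced by MessageEncoder and consumed by MessageDecoder, e.g.:
#
#     MessageEncoder(separators=(',', ':')).encode([Message(20, 'hello')])
#     # -> '[["__json_message",0,20,"hello"]]'
#     # (0 = not SafeData, 20 = message level; extra_tags would be appended)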
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
else:
response.delete_cookie(self.cookie_name,
domain=settings.SESSION_COOKIE_DOMAIN)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
        returned), and adds the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
def _decode(self, data):
"""
Safely decodes an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
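    # Hedged illustration (not part of the original module): an encoded cookie
    # value is the salted HMAC and the JSON payload joined by '$', e.g.:
    #
    #     '6d2...e1f$[["__json_message",0,20,"hello"]]'
    #
    # _decode() recomputes the hash over the value part and only trusts the
    # payload when constant_time_compare() succeeds.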
| bsd-3-clause |
rmed/wat-bridge | wat_bridge/signals.py | 1 | 3542 | # -*- coding: utf-8 -*-
#
# wat-bridge
# https://github.com/rmed/wat-bridge
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Rafael Medina García <rafamedgar@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Signal handlers."""
import sys
from wat_bridge.static import SETTINGS, get_logger
from wat_bridge.helper import get_contact, get_phone, db_get_group
from wat_bridge.tg import tgbot
from wat_bridge.wa import wabot
from telebot import util as tgutil
logger = get_logger('signals')
def sigint_handler(signal, frame):
"""Function used as handler for SIGINT to terminate program."""
sys.exit(0)
def to_tg_handler(sender, **kwargs):
"""Handle signals sent to Telegram.
This will involve sending messages through the Telegram bot.
Args:
phone (str): Phone number that sent the message.
message (str): The message received
"""
phone = kwargs.get('phone')
message = kwargs.get('message', '')
# Check if known contact
contact = get_contact(phone)
chat_id = SETTINGS['owner']
if not contact:
# Unknown sender
output = 'Message from #unknown\n'
output += 'Phone number: %s\n' % phone
output += '---------\n'
output += message
logger.info('received message from unknown number: %s' % phone)
else:
group = db_get_group(contact)
if not group:
# Known sender
output = 'Message from #%s\n' % contact
output += '---------\n'
output += message
else:
# Contact is bound to group
chat_id = group
output = message
logger.info('received message from %s' % contact)
# Deliver message through Telegram
for chunk in tgutil.split_string(output, 3000):
tgbot.send_message(chat_id, chunk)
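# Hedged note (not part of the original module): telebot's split_string keeps
# each chunk under the given size so long messages survive Telegram's message
# length limit, e.g.:
#
#     tgutil.split_string('a' * 6500, 3000)   # -> ['a'*3000, 'a'*3000, 'a'*500]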
def to_wa_handler(sender, **kwargs):
"""Handle signals sent to Whatsapp.
This will involve sending messages through the Whatsapp bot.
Args:
contact (str): Name of the contact to send the message to.
message (str): The message to send
"""
contact = kwargs.get('contact')
message = kwargs.get('message')
# Check if known contact
phone = get_phone(contact)
if not phone:
# Abort
tgbot.send_message(
SETTINGS['owner'],
'Unknown contact: "%s"' % contact
)
return
logger.info('sending message to %s (%s)' % (contact, phone))
wabot.send_msg(phone=phone, message=message)
| mit |
matmutant/sl4a | python/src/Lib/symtable.py | 53 | 7911 | """Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import (USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM,
DEF_IMPORT, DEF_BOUND, OPT_IMPORT_STAR, OPT_EXEC, OPT_BARE_EXEC,
SCOPE_OFF, SCOPE_MASK, FREE, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
import warnings
import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
raw = _symtable.symtable(code, filename, compile_type)
for top in raw.itervalues():
if top.name == 'top':
break
return _newSymbolTable(top, filename)
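# Hedged usage sketch (not part of the original module): inspecting the symbol
# table of a small function, e.g.:
#
#     mod = symtable("def spam(x):\n    y = x + 1\n    return y", "<string>", "exec")
#     func = mod.get_children()[0]        # the table for spam()
#     func.get_parameters()               # -> ('x',)
#     func.get_locals()                   # -> ('x', 'y') (order may vary)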
class SymbolTableFactory:
def __init__(self):
self.__memo = weakref.WeakValueDictionary()
def new(self, table, filename):
if table.type == _symtable.TYPE_FUNCTION:
return Function(table, filename)
if table.type == _symtable.TYPE_CLASS:
return Class(table, filename)
return SymbolTable(table, filename)
def __call__(self, table, filename):
key = table, filename
obj = self.__memo.get(key, None)
if obj is None:
obj = self.__memo[key] = self.new(table, filename)
return obj
_newSymbolTable = SymbolTableFactory()
class SymbolTable(object):
def __init__(self, raw_table, filename):
self._table = raw_table
self._filename = filename
self._symbols = {}
def __repr__(self):
if self.__class__ == SymbolTable:
kind = ""
else:
kind = "%s " % self.__class__.__name__
if self._table.name == "global":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
self._table.name,
self._filename)
def get_type(self):
if self._table.type == _symtable.TYPE_MODULE:
return "module"
if self._table.type == _symtable.TYPE_FUNCTION:
return "function"
if self._table.type == _symtable.TYPE_CLASS:
return "class"
assert self._table.type in (1, 2, 3), \
"unexpected type: {0}".format(self._table.type)
def get_id(self):
return self._table.id
def get_name(self):
return self._table.name
def get_lineno(self):
return self._table.lineno
def is_optimized(self):
return bool(self._table.type == _symtable.TYPE_FUNCTION
and not self._table.optimized)
def is_nested(self):
return bool(self._table.nested)
def has_children(self):
return bool(self._table.children)
def has_exec(self):
"""Return true if the scope uses exec"""
return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC))
def has_import_star(self):
"""Return true if the scope uses import *"""
return bool(self._table.optimized & OPT_IMPORT_STAR)
def get_identifiers(self):
return self._table.symbols.keys()
def lookup(self, name):
sym = self._symbols.get(name)
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
sym = self._symbols[name] = Symbol(name, flags, namespaces)
return sym
def get_symbols(self):
return [self.lookup(ident) for ident in self.get_identifiers()]
def __check_children(self, name):
return [_newSymbolTable(st, self._filename)
for st in self._table.children
if st.name == name]
def get_children(self):
return [_newSymbolTable(st, self._filename)
for st in self._table.children]
class Function(SymbolTable):
# Default values for instance variables
__params = None
__locals = None
__frees = None
__globals = None
def __idents_matching(self, test_func):
return tuple([ident for ident in self.get_identifiers()
if test_func(self._table.symbols[ident])])
def get_parameters(self):
if self.__params is None:
self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
return self.__params
def get_locals(self):
if self.__locals is None:
self.__locals = self.__idents_matching(lambda x:x & DEF_BOUND)
return self.__locals
def get_globals(self):
if self.__globals is None:
glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
self.__globals = self.__idents_matching(test)
return self.__globals
def get_frees(self):
if self.__frees is None:
is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
self.__frees = self.__idents_matching(is_free)
return self.__frees
class Class(SymbolTable):
__methods = None
def get_methods(self):
if self.__methods is None:
d = {}
for st in self._table.children:
d[st.name] = 1
self.__methods = tuple(d)
return self.__methods
class Symbol(object):
def __init__(self, name, flags, namespaces=None):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
def get_name(self):
return self.__name
def is_referenced(self):
return bool(self.__flags & _symtable.USE)
def is_parameter(self):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
def is_vararg(self):
warnings.warn("is_vararg() is obsolete and will be removed",
DeprecationWarning, 2)
return False
def is_keywordarg(self):
warnings.warn("is_keywordarg() is obsolete and will be removed",
DeprecationWarning, 2)
return False
def is_declared_global(self):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
return bool(self.__flags & DEF_BOUND)
def is_free(self):
return bool(self.__scope == FREE)
def is_imported(self):
return bool(self.__flags & DEF_IMPORT)
def is_assigned(self):
return bool(self.__flags & DEF_LOCAL)
def is_in_tuple(self):
warnings.warn("is_in_tuple() is obsolete and will be removed",
DeprecationWarning, 2)
def is_namespace(self):
"""Returns true if name binding introduces new namespace.
If the name is used as the target of a function or class
statement, this will be true.
Note that a single name can be bound to multiple objects. If
is_namespace() is true, the name may also be bound to other
objects, like an int or list, that does not introduce a new
namespace.
"""
return bool(self.__namespaces)
def get_namespaces(self):
"""Return a list of namespaces bound to this name"""
return self.__namespaces
def get_namespace(self):
"""Returns the single namespace bound to this name.
Raises ValueError if the name is bound to multiple namespaces.
"""
if len(self.__namespaces) != 1:
raise ValueError, "name is bound to multiple namespaces"
return self.__namespaces[0]
if __name__ == "__main__":
import os, sys
src = open(sys.argv[0]).read()
mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
for ident in mod.get_identifiers():
info = mod.lookup(ident)
print info, info.is_local(), info.is_namespace()
| apache-2.0 |
romain-dartigues/ansible | test/units/modules/cloud/google/test_gcp_url_map.py | 158 | 6086 | import unittest
from ansible.modules.cloud.google.gcp_url_map import _build_path_matchers, _build_url_map_dict
class TestGCPUrlMap(unittest.TestCase):
"""Unit tests for gcp_url_map module."""
params_dict = {
'url_map_name': 'foo_url_map_name',
'description': 'foo_url_map description',
'host_rules': [
{
'description': 'host rules description',
'hosts': [
'www.example.com',
'www2.example.com'
],
'path_matcher': 'host_rules_path_matcher'
}
],
'path_matchers': [
{
'name': 'path_matcher_one',
'description': 'path matcher one',
'defaultService': 'bes-pathmatcher-one-default',
'pathRules': [
{
'service': 'my-one-bes',
'paths': [
'/',
'/aboutus'
]
}
]
},
{
'name': 'path_matcher_two',
'description': 'path matcher two',
'defaultService': 'bes-pathmatcher-two-default',
'pathRules': [
{
'service': 'my-two-bes',
'paths': [
'/webapp',
'/graphs'
]
}
]
}
]
}
def test__build_path_matchers(self):
input_list = [
{
'defaultService': 'bes-pathmatcher-one-default',
'description': 'path matcher one',
'name': 'path_matcher_one',
'pathRules': [
{
'paths': [
'/',
'/aboutus'
],
'service': 'my-one-bes'
}
]
},
{
'defaultService': 'bes-pathmatcher-two-default',
'description': 'path matcher two',
'name': 'path_matcher_two',
'pathRules': [
{
'paths': [
'/webapp',
'/graphs'
],
'service': 'my-two-bes'
}
]
}
]
expected = [
{
'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-one-default',
'description': 'path matcher one',
'name': 'path_matcher_one',
'pathRules': [
{
'paths': [
'/',
'/aboutus'
],
'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-one-bes'
}
]
},
{
'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-two-default',
'description': 'path matcher two',
'name': 'path_matcher_two',
'pathRules': [
{
'paths': [
'/webapp',
'/graphs'
],
'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-two-bes'
}
]
}
]
actual = _build_path_matchers(input_list, 'my-project')
self.assertEqual(expected, actual)
def test__build_url_map_dict(self):
expected = {
'description': 'foo_url_map description',
'hostRules': [
{
'description': 'host rules description',
'hosts': [
'www.example.com',
'www2.example.com'
],
'pathMatcher': 'host_rules_path_matcher'
}
],
'name': 'foo_url_map_name',
'pathMatchers': [
{
'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-one-default',
'description': 'path matcher one',
'name': 'path_matcher_one',
'pathRules': [
{
'paths': [
'/',
'/aboutus'
],
'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-one-bes'
}
]
},
{
'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-two-default',
'description': 'path matcher two',
'name': 'path_matcher_two',
'pathRules': [
{
'paths': [
'/webapp',
'/graphs'
],
'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-two-bes'
}
]
}
]
}
actual = _build_url_map_dict(self.params_dict, 'my-project')
self.assertEqual(expected, actual)
| gpl-3.0 |
allen-fdes/python_demo | venv/Lib/encodings/unicode_internal.py | 827 | 1196 | """ Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_internal_encode
decode = codecs.unicode_internal_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_internal_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_internal_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-internal',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| mit |
chillipeper/will | will/mixins/naturaltime.py | 3 | 1255 | import datetime
import re
import time
from natural.date import day
import parsedatetime.parsedatetime as pdt
class NaturalTimeMixin(object):
def strip_leading_zeros(self, date_str):
date_str = date_str.replace(":0", "__&&")
date_str = re.sub(r"0*(\d+)", r"\g<1>", date_str)
date_str = date_str.replace("__&&", ":0")
return date_str
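    # Hedged illustration (not part of the original module): the "__&&" marker
    # shields ":0" minute boundaries while leading zeros are stripped, e.g.:
    #
    #     self.strip_leading_zeros("04:05pm")       # -> "4:05pm"
    #     self.strip_leading_zeros("Monday 09:00")  # -> "Monday 9:00"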
def parse_natural_time(self, time_str):
cal = pdt.Calendar()
time_tuple = cal.parse(time_str)[0][:-2]
return datetime.datetime(*time_tuple)
def to_natural_day(self, dt):
day_str = day(dt)
return self.strip_leading_zeros(day_str)
def to_natural_day_and_time(self, dt, with_timezone=False):
if dt.minute == 0:
if with_timezone:
time_str = "%s %s" % (dt.strftime("%I%p").lower(), time.tzname[0])
else:
time_str = dt.strftime("%I%p").lower()
else:
if with_timezone:
time_str = "%s %s" % (dt.strftime("%I:%M%p").lower(), time.tzname[0])
else:
time_str = dt.strftime("%I:%M%p").lower()
full_str = "%s at %s" % (self.to_natural_day(dt), time_str)
return self.strip_leading_zeros(full_str)
| mit |
mewtaylor/django | tests/template_tests/filter_tests/test_time.py | 326 | 1812 | from datetime import time
from django.template.defaultfilters import time as time_filter
from django.test import SimpleTestCase
from django.utils import timezone
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeTests(TimezoneTestCase):
"""
#20693: Timezone support for the time template filter
"""
@setup({'time01': '{{ dt|time:"e:O:T:Z" }}'})
def test_time01(self):
output = self.engine.render_to_string('time01', {'dt': self.now_tz_i})
self.assertEqual(output, '+0315:+0315:+0315:11700')
@setup({'time02': '{{ dt|time:"e:T" }}'})
def test_time02(self):
output = self.engine.render_to_string('time02', {'dt': self.now})
self.assertEqual(output, ':' + self.now_tz.tzinfo.tzname(self.now_tz))
@setup({'time03': '{{ t|time:"P:e:O:T:Z" }}'})
def test_time03(self):
output = self.engine.render_to_string('time03', {'t': time(4, 0, tzinfo=timezone.get_fixed_timezone(30))})
self.assertEqual(output, '4 a.m.::::')
@setup({'time04': '{{ t|time:"P:e:O:T:Z" }}'})
def test_time04(self):
output = self.engine.render_to_string('time04', {'t': time(4, 0)})
self.assertEqual(output, '4 a.m.::::')
@setup({'time05': '{{ d|time:"P:e:O:T:Z" }}'})
def test_time05(self):
output = self.engine.render_to_string('time05', {'d': self.today})
self.assertEqual(output, '')
@setup({'time06': '{{ obj|time:"P:e:O:T:Z" }}'})
def test_time06(self):
output = self.engine.render_to_string('time06', {'obj': 'non-datetime-value'})
self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
def test_inputs(self):
self.assertEqual(time_filter(time(13), 'h'), '01')
self.assertEqual(time_filter(time(0), 'h'), '12')
| bsd-3-clause |
gladgod/zhiliao | zhiliao/galleries/tests.py | 1 | 2128 | from __future__ import unicode_literals
from future.builtins import str
from future.utils import native
import os
from shutil import rmtree
from uuid import uuid4
from zhiliao.conf import settings
from zhiliao.core.templatetags.mezzanine_tags import thumbnail
from zhiliao.galleries.models import Gallery, GALLERIES_UPLOAD_DIR
from zhiliao.utils.tests import TestCase, copy_test_to_media
class GalleriesTests(TestCase):
def test_gallery_import(self):
"""
Test that a gallery creates images when given a zip file to
import, and that descriptions are created.
"""
zip_name = "gallery.zip"
copy_test_to_media("zhiliao.core", zip_name)
title = native(str(uuid4())) # i.e. Py3 str / Py2 unicode
gallery = Gallery.objects.create(title=title, zip_import=zip_name)
images = list(gallery.images.all())
self.assertTrue(images)
self.assertTrue(all([image.description for image in images]))
# Clean up.
rmtree(os.path.join(settings.MEDIA_ROOT,
GALLERIES_UPLOAD_DIR, title))
def test_thumbnail_generation(self):
"""
Test that a thumbnail is created and resized.
"""
try:
from PIL import Image
except ImportError:
return
image_name = "image.jpg"
size = (24, 24)
copy_test_to_media("zhiliao.core", image_name)
thumb_name = os.path.join(settings.THUMBNAILS_DIR_NAME, image_name,
image_name.replace(".", "-%sx%s." % size))
thumb_path = os.path.join(settings.MEDIA_ROOT, thumb_name)
thumb_image = thumbnail(image_name, *size)
self.assertEqual(os.path.normpath(thumb_image.lstrip("/")), thumb_name)
self.assertNotEqual(os.path.getsize(thumb_path), 0)
thumb = Image.open(thumb_path)
self.assertEqual(thumb.size, size)
# Clean up.
del thumb
os.remove(os.path.join(settings.MEDIA_ROOT, image_name))
os.remove(thumb_path)
rmtree(os.path.dirname(thumb_path))
| bsd-3-clause |
mjkivi/flex | themes/d8_bootstrap/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | 896 | 91092 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
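# Worked example (illustrative): for $1 = out/My App/foo.o, replace_spaces
# yields out/My?App/foo.o, $(dir ...) gives out/My?App/, and
# unreplace_spaces restores out/My App/.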
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
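# Illustrative check: when the logged command and the current one are both
# "gcc -c foo", each substitution empties the other and $(or ...) yields
# the empty string (unchanged); any textual difference leaves a residue,
# which forces the rule to re-run.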
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flags by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
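# For example (added note): Compilable('foo/bar.cc') is True and
# Target('foo/bar.cc') returns 'foo/bar.o'; headers such as 'foo.h' fail
# both Compilable and Linkable and so never reach OBJS or the link line.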
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
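# Worked example (illustrative): EscapeCppDefine('VER=$(git)#x') first
# shell-quotes to 'VER=$(git)#x' (wrapped in single quotes), then doubles
# the $ for make, then backslash-escapes the #, yielding 'VER=$$(git)\#x'.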
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
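# Quick reference (added): with srcdir_prefix set to, say, '$(srcdir)/',
# Sourceify('foo/bar.c') gives '$(srcdir)/foo/bar.c', while absolute paths
# and paths already containing '$(' pass through untouched;
# QuoteSpaces('a b') returns 'a\ b'.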
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
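# Hypothetical trigger: a static_library listing both 'a/util.cc' and
# 'b/util.cc' collides on the basename 'util' and raises the GypError
# above, since libtool on OS X warns about duplicate member basenames.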
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently all versions have the same effect, but in the future the
# behavior could differ.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
# plists can contain envvars, so substitute them into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
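# Illustrative (added): if mybinary depends on liba (a shared_library),
# liba's output appears in both deps and link_deps; a 'none'-type
# dependency contributes only to deps, so it is built first but never
# linked.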
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
      # Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will usually be older
      # than its dependencies. To prevent this rule from executing on
      # every build (expensive, especially with postbuilds), explicitly
      # update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
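  # e.g. (illustrative): WriteList(['a', 'b'], 'foo', prefix='blah') writes
  #   foo := \
  #     blaha \
  #     blahb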
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
    as well as supporting the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
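  # e.g. (illustrative): WriteDoCmd(['out'], ['in'], 'copy', True) emits
  # roughly:
  #   out: TOOLSET := $(TOOLSET)
  #   out: in FORCE_DO_CMD
  #     $(call do_cmd,copy)
  # and records 'out' in all_deps.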
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
      # - The multi-output rule will have a do-nothing recipe.
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
      self.WriteLn('\t@:')
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
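  # e.g. (illustrative): WriteMakeRule(['o1', 'o2'], ['in'], actions=['gen'],
  # command='mycmd', force=True) emits roughly:
  #   o1: TOOLSET := $(TOOLSET)
  #   o1 o2: mycmd.intermediate
  #     @:
  #   .INTERMEDIATE: mycmd.intermediate
  #   mycmd.intermediate: in FORCE_DO_CMD
  #     $(call do_cmd,touch)
  #     gen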
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
    # Filter deps down to those that match the prefix and suffix, and produce
    # the resulting module names with prefix and suffix stripped.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
    if '$(obj)' not in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
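  # e.g. (illustrative): with toolset 'target',
  #   Objectify('foo.o')        -> '$(obj).target/$(TARGET)/foo.o'
  #   Objectify('$(obj)/foo.o') -> '$(obj).target/$(TARGET)/foo.o'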
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
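  # e.g. (illustrative): with self.path == 'foo/bar',
  #   Absolutify('baz/qux.c') -> 'foo/bar/baz/qux.c'
  #   Absolutify('$(obj)/x/') -> '$(obj)/x'  (variable paths only lose the
  #                                           trailing slash)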
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
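  # e.g. (illustrative):
  #   ExpandInputRoot('%(INPUT_ROOT)s.pb.cc', 'msg', 'protos') -> 'msg.pb.cc'
  # A template containing neither placeholder is returned unchanged.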
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
  flock_command = 'flock'
copy_archive_arguments = '-af'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
'copy_archive_args': copy_archive_arguments,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'openbsd':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
})
elif flavor == 'aix':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
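  # e.g. (illustrative): for make_global_settings [['CC', '/usr/bin/clang']]
  # (and no $CC in the environment) the loop above emits:
  #   ifneq (,$(filter $(origin CC), undefined default))
  #     CC = $(abspath /usr/bin/clang)
  #   endif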
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
  # Currently all versions have the same effect, but in the future the
  # behavior could differ.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, use an absolute path so
      # that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # skip a file by setting NO_LOAD. The make code below loads the .mk file
    # only if its filename doesn't start with a token in NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
| gpl-2.0 |
alex/sqlalchemy | test/engine/test_ddlevents.py | 3 | 22128 |
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.schema import DDL, CheckConstraint, AddConstraint, \
DropConstraint
from sqlalchemy import create_engine
from sqlalchemy import MetaData, Integer, String, event, exc, text
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing import AssertsCompiledSQL, eq_
from nose import SkipTest
from sqlalchemy.testing import fixtures
class DDLEventTest(fixtures.TestBase):
class Canary(object):
def __init__(self, schema_item, bind):
self.state = None
self.schema_item = schema_item
self.bind = bind
def before_create(self, schema_item, bind, **kw):
assert self.state is None
assert schema_item is self.schema_item
assert bind is self.bind
self.state = 'before-create'
def after_create(self, schema_item, bind, **kw):
assert self.state in ('before-create', 'skipped')
assert schema_item is self.schema_item
assert bind is self.bind
self.state = 'after-create'
def before_drop(self, schema_item, bind, **kw):
assert self.state is None
assert schema_item is self.schema_item
assert bind is self.bind
self.state = 'before-drop'
def after_drop(self, schema_item, bind, **kw):
assert self.state in ('before-drop', 'skipped')
assert schema_item is self.schema_item
assert bind is self.bind
self.state = 'after-drop'
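    # (Canary records the most recent DDL event it saw in ``state`` so each
    # test can assert on the final state; 'skipped' marks a deliberately
    # bypassed before-hook.)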
def setup(self):
self.bind = engines.mock_engine()
self.metadata = MetaData()
self.table = Table('t', self.metadata, Column('id', Integer))
def test_table_create_before(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
event.listen(table, 'before_create', canary.before_create)
table.create(bind)
assert canary.state == 'before-create'
table.drop(bind)
assert canary.state == 'before-create'
def test_table_create_after(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
event.listen(table, 'after_create', canary.after_create)
canary.state = 'skipped'
table.create(bind)
assert canary.state == 'after-create'
table.drop(bind)
assert canary.state == 'after-create'
def test_table_create_both(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
event.listen(table, 'before_create', canary.before_create)
event.listen(table, 'after_create', canary.after_create)
table.create(bind)
assert canary.state == 'after-create'
table.drop(bind)
assert canary.state == 'after-create'
def test_table_drop_before(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
event.listen(table, 'before_drop', canary.before_drop)
table.create(bind)
assert canary.state is None
table.drop(bind)
assert canary.state == 'before-drop'
def test_table_drop_after(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
event.listen(table, 'after_drop', canary.after_drop)
table.create(bind)
assert canary.state is None
canary.state = 'skipped'
table.drop(bind)
assert canary.state == 'after-drop'
def test_table_drop_both(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
event.listen(table, 'before_drop', canary.before_drop)
event.listen(table, 'after_drop', canary.after_drop)
table.create(bind)
assert canary.state is None
table.drop(bind)
assert canary.state == 'after-drop'
def test_table_all(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
event.listen(table, 'before_create', canary.before_create)
event.listen(table, 'after_create', canary.after_create)
event.listen(table, 'before_drop', canary.before_drop)
event.listen(table, 'after_drop', canary.after_drop)
assert canary.state is None
table.create(bind)
assert canary.state == 'after-create'
canary.state = None
table.drop(bind)
assert canary.state == 'after-drop'
    def test_metadata_create_before(self):
metadata, bind = self.metadata, self.bind
canary = self.Canary(metadata, bind)
event.listen(metadata, 'before_create', canary.before_create)
metadata.create_all(bind)
assert canary.state == 'before-create'
metadata.drop_all(bind)
assert canary.state == 'before-create'
def test_metadata_create_after(self):
metadata, bind = self.metadata, self.bind
canary = self.Canary(metadata, bind)
event.listen(metadata, 'after_create', canary.after_create)
canary.state = 'skipped'
metadata.create_all(bind)
assert canary.state == 'after-create'
metadata.drop_all(bind)
assert canary.state == 'after-create'
def test_metadata_create_both(self):
metadata, bind = self.metadata, self.bind
canary = self.Canary(metadata, bind)
event.listen(metadata, 'before_create', canary.before_create)
event.listen(metadata, 'after_create', canary.after_create)
metadata.create_all(bind)
assert canary.state == 'after-create'
metadata.drop_all(bind)
assert canary.state == 'after-create'
def test_metadata_table_isolation(self):
metadata, table, bind = self.metadata, self.table, self.bind
table_canary = self.Canary(table, bind)
event.listen(table, 'before_create', table_canary.before_create)
metadata_canary = self.Canary(metadata, bind)
event.listen(metadata, 'before_create', metadata_canary.before_create)
self.table.create(self.bind)
        assert metadata_canary.state is None
def test_append_listener(self):
metadata, table, bind = self.metadata, self.table, self.bind
fn = lambda *a: None
table.append_ddl_listener('before-create', fn)
assert_raises(exc.InvalidRequestError, table.append_ddl_listener,
'blah', fn)
metadata.append_ddl_listener('before-create', fn)
assert_raises(exc.InvalidRequestError, metadata.append_ddl_listener,
'blah', fn)
class DDLExecutionTest(fixtures.TestBase):
def setup(self):
self.engine = engines.mock_engine()
self.metadata = MetaData(self.engine)
self.users = Table('users', self.metadata,
Column('user_id', Integer, primary_key=True),
Column('user_name', String(40)),
)
def test_table_standalone(self):
users, engine = self.users, self.engine
event.listen(users, 'before_create', DDL('mxyzptlk'))
event.listen(users, 'after_create', DDL('klptzyxm'))
event.listen(users, 'before_drop', DDL('xyzzy'))
event.listen(users, 'after_drop', DDL('fnord'))
users.create()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' in strings
assert 'klptzyxm' in strings
assert 'xyzzy' not in strings
assert 'fnord' not in strings
del engine.mock[:]
users.drop()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' not in strings
assert 'klptzyxm' not in strings
assert 'xyzzy' in strings
assert 'fnord' in strings
def test_table_by_metadata(self):
metadata, users, engine = self.metadata, self.users, self.engine
event.listen(users, 'before_create', DDL('mxyzptlk'))
event.listen(users, 'after_create', DDL('klptzyxm'))
event.listen(users, 'before_drop', DDL('xyzzy'))
event.listen(users, 'after_drop', DDL('fnord'))
metadata.create_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' in strings
assert 'klptzyxm' in strings
assert 'xyzzy' not in strings
assert 'fnord' not in strings
del engine.mock[:]
metadata.drop_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' not in strings
assert 'klptzyxm' not in strings
assert 'xyzzy' in strings
assert 'fnord' in strings
@testing.uses_deprecated(r'See DDLEvents')
def test_table_by_metadata_deprecated(self):
metadata, users, engine = self.metadata, self.users, self.engine
DDL('mxyzptlk').execute_at('before-create', users)
DDL('klptzyxm').execute_at('after-create', users)
DDL('xyzzy').execute_at('before-drop', users)
DDL('fnord').execute_at('after-drop', users)
metadata.create_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' in strings
assert 'klptzyxm' in strings
assert 'xyzzy' not in strings
assert 'fnord' not in strings
del engine.mock[:]
metadata.drop_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' not in strings
assert 'klptzyxm' not in strings
assert 'xyzzy' in strings
assert 'fnord' in strings
def test_deprecated_append_ddl_listener_table(self):
metadata, users, engine = self.metadata, self.users, self.engine
canary = []
users.append_ddl_listener('before-create',
lambda e, t, b:canary.append('mxyzptlk')
)
users.append_ddl_listener('after-create',
lambda e, t, b:canary.append('klptzyxm')
)
users.append_ddl_listener('before-drop',
lambda e, t, b:canary.append('xyzzy')
)
users.append_ddl_listener('after-drop',
lambda e, t, b:canary.append('fnord')
)
metadata.create_all()
assert 'mxyzptlk' in canary
assert 'klptzyxm' in canary
assert 'xyzzy' not in canary
assert 'fnord' not in canary
del engine.mock[:]
canary[:] = []
metadata.drop_all()
assert 'mxyzptlk' not in canary
assert 'klptzyxm' not in canary
assert 'xyzzy' in canary
assert 'fnord' in canary
def test_deprecated_append_ddl_listener_metadata(self):
metadata, users, engine = self.metadata, self.users, self.engine
canary = []
metadata.append_ddl_listener('before-create',
lambda e, t, b, tables=None:canary.append('mxyzptlk')
)
metadata.append_ddl_listener('after-create',
lambda e, t, b, tables=None:canary.append('klptzyxm')
)
metadata.append_ddl_listener('before-drop',
lambda e, t, b, tables=None:canary.append('xyzzy')
)
metadata.append_ddl_listener('after-drop',
lambda e, t, b, tables=None:canary.append('fnord')
)
metadata.create_all()
assert 'mxyzptlk' in canary
assert 'klptzyxm' in canary
assert 'xyzzy' not in canary
assert 'fnord' not in canary
del engine.mock[:]
canary[:] = []
metadata.drop_all()
assert 'mxyzptlk' not in canary
assert 'klptzyxm' not in canary
assert 'xyzzy' in canary
assert 'fnord' in canary
def test_metadata(self):
metadata, engine = self.metadata, self.engine
event.listen(metadata, 'before_create', DDL('mxyzptlk'))
event.listen(metadata, 'after_create', DDL('klptzyxm'))
event.listen(metadata, 'before_drop', DDL('xyzzy'))
event.listen(metadata, 'after_drop', DDL('fnord'))
metadata.create_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' in strings
assert 'klptzyxm' in strings
assert 'xyzzy' not in strings
assert 'fnord' not in strings
del engine.mock[:]
metadata.drop_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' not in strings
assert 'klptzyxm' not in strings
assert 'xyzzy' in strings
assert 'fnord' in strings
@testing.uses_deprecated(r'See DDLEvents')
def test_metadata_deprecated(self):
metadata, engine = self.metadata, self.engine
DDL('mxyzptlk').execute_at('before-create', metadata)
DDL('klptzyxm').execute_at('after-create', metadata)
DDL('xyzzy').execute_at('before-drop', metadata)
DDL('fnord').execute_at('after-drop', metadata)
metadata.create_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' in strings
assert 'klptzyxm' in strings
assert 'xyzzy' not in strings
assert 'fnord' not in strings
del engine.mock[:]
metadata.drop_all()
strings = [str(x) for x in engine.mock]
assert 'mxyzptlk' not in strings
assert 'klptzyxm' not in strings
assert 'xyzzy' in strings
assert 'fnord' in strings
def test_conditional_constraint(self):
metadata, users, engine = self.metadata, self.users, self.engine
nonpg_mock = engines.mock_engine(dialect_name='sqlite')
pg_mock = engines.mock_engine(dialect_name='postgresql')
constraint = CheckConstraint('a < b', name='my_test_constraint'
, table=users)
# by placing the constraint in an Add/Drop construct, the
# 'inline_ddl' flag is set to False
event.listen(
users,
'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql'),
)
event.listen(
users,
'before_drop',
DropConstraint(constraint).execute_if(dialect='postgresql'),
)
metadata.create_all(bind=nonpg_mock)
strings = ' '.join(str(x) for x in nonpg_mock.mock)
assert 'my_test_constraint' not in strings
metadata.drop_all(bind=nonpg_mock)
strings = ' '.join(str(x) for x in nonpg_mock.mock)
assert 'my_test_constraint' not in strings
metadata.create_all(bind=pg_mock)
strings = ' '.join(str(x) for x in pg_mock.mock)
assert 'my_test_constraint' in strings
metadata.drop_all(bind=pg_mock)
strings = ' '.join(str(x) for x in pg_mock.mock)
assert 'my_test_constraint' in strings
@testing.uses_deprecated(r'See DDLEvents')
def test_conditional_constraint_deprecated(self):
metadata, users, engine = self.metadata, self.users, self.engine
nonpg_mock = engines.mock_engine(dialect_name='sqlite')
pg_mock = engines.mock_engine(dialect_name='postgresql')
constraint = CheckConstraint('a < b', name='my_test_constraint'
, table=users)
# by placing the constraint in an Add/Drop construct, the
# 'inline_ddl' flag is set to False
AddConstraint(constraint, on='postgresql'
).execute_at('after-create', users)
DropConstraint(constraint, on='postgresql'
).execute_at('before-drop', users)
metadata.create_all(bind=nonpg_mock)
strings = ' '.join(str(x) for x in nonpg_mock.mock)
assert 'my_test_constraint' not in strings
metadata.drop_all(bind=nonpg_mock)
strings = ' '.join(str(x) for x in nonpg_mock.mock)
assert 'my_test_constraint' not in strings
metadata.create_all(bind=pg_mock)
strings = ' '.join(str(x) for x in pg_mock.mock)
assert 'my_test_constraint' in strings
metadata.drop_all(bind=pg_mock)
strings = ' '.join(str(x) for x in pg_mock.mock)
assert 'my_test_constraint' in strings
def test_ddl_execute(self):
try:
engine = create_engine('sqlite:///')
except ImportError:
raise SkipTest('Requires sqlite')
cx = engine.connect()
table = self.users
ddl = DDL('SELECT 1')
for py in ('engine.execute(ddl)',
'engine.execute(ddl, table)',
'cx.execute(ddl)',
'cx.execute(ddl, table)',
'ddl.execute(engine)',
'ddl.execute(engine, table)',
'ddl.execute(cx)',
'ddl.execute(cx, table)'):
r = eval(py)
assert list(r) == [(1,)], py
for py in ('ddl.execute()',
'ddl.execute(target=table)'):
try:
r = eval(py)
assert False
except tsa.exc.UnboundExecutionError:
pass
for bind in engine, cx:
ddl.bind = bind
for py in ('ddl.execute()',
'ddl.execute(target=table)'):
r = eval(py)
assert list(r) == [(1,)], py
@testing.fails_on('postgresql+pg8000', 'pg8000 requires explicit types')
def test_platform_escape(self):
"""test the escaping of % characters in the DDL construct."""
default_from = testing.db.dialect.statement_compiler(
testing.db.dialect, None).default_from()
# We're abusing the DDL()
# construct here by pushing a SELECT through it
# so that we can verify the round trip.
        # The DDL() will trigger autocommit, which prohibits
# some DBAPIs from returning results (pyodbc), so we
# run in an explicit transaction.
with testing.db.begin() as conn:
eq_(
conn.execute(
text("select 'foo%something'" + default_from)
).scalar(),
'foo%something'
)
eq_(
conn.execute(
DDL("select 'foo%%something'" + default_from)
).scalar(),
'foo%something'
)
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
def mock_engine(self):
executor = lambda *a, **kw: None
engine = create_engine(testing.db.name + '://',
strategy='mock', executor=executor)
engine.dialect.identifier_preparer = \
tsa.sql.compiler.IdentifierPreparer(engine.dialect)
return engine
def test_tokens(self):
m = MetaData()
sane_alone = Table('t', m, Column('id', Integer))
sane_schema = Table('t', m, Column('id', Integer), schema='s')
insane_alone = Table('t t', m, Column('id', Integer))
insane_schema = Table('t t', m, Column('id', Integer),
schema='s s')
ddl = DDL('%(schema)s-%(table)s-%(fullname)s')
dialect = self.mock_engine().dialect
self.assert_compile(ddl.against(sane_alone), '-t-t',
dialect=dialect)
self.assert_compile(ddl.against(sane_schema), 's-t-s.t',
dialect=dialect)
self.assert_compile(ddl.against(insane_alone), '-"t t"-"t t"',
dialect=dialect)
self.assert_compile(ddl.against(insane_schema),
'"s s"-"t t"-"s s"."t t"', dialect=dialect)
        # Overrides are used piecemeal and verbatim.
ddl = DDL('%(schema)s-%(table)s-%(fullname)s-%(bonus)s',
context={'schema': 'S S', 'table': 'T T', 'bonus': 'b'
})
self.assert_compile(ddl.against(sane_alone), 'S S-T T-t-b',
dialect=dialect)
self.assert_compile(ddl.against(sane_schema), 'S S-T T-s.t-b',
dialect=dialect)
self.assert_compile(ddl.against(insane_alone), 'S S-T T-"t t"-b'
, dialect=dialect)
self.assert_compile(ddl.against(insane_schema),
'S S-T T-"s s"."t t"-b', dialect=dialect)
def test_filter(self):
cx = self.mock_engine()
tbl = Table('t', MetaData(), Column('id', Integer))
target = cx.name
assert DDL('')._should_execute(tbl, cx)
assert DDL('').execute_if(dialect=target)._should_execute(tbl, cx)
assert not DDL('').execute_if(dialect='bogus').\
_should_execute(tbl, cx)
assert DDL('').execute_if(callable_=lambda d, y,z, **kw: True).\
_should_execute(tbl, cx)
assert(DDL('').execute_if(
callable_=lambda d, y,z, **kw: z.engine.name
!= 'bogus').
_should_execute(tbl, cx))
@testing.uses_deprecated(r'See DDLEvents')
def test_filter_deprecated(self):
cx = self.mock_engine()
tbl = Table('t', MetaData(), Column('id', Integer))
target = cx.name
assert DDL('')._should_execute_deprecated('x', tbl, cx)
assert DDL('', on=target)._should_execute_deprecated('x', tbl, cx)
assert not DDL('', on='bogus').\
_should_execute_deprecated('x', tbl, cx)
assert DDL('', on=lambda d, x,y,z: True).\
_should_execute_deprecated('x', tbl, cx)
assert(DDL('', on=lambda d, x,y,z: z.engine.name != 'bogus').
_should_execute_deprecated('x', tbl, cx))
def test_repr(self):
assert repr(DDL('s'))
assert repr(DDL('s', on='engine'))
assert repr(DDL('s', on=lambda x: 1))
assert repr(DDL('s', context={'a':1}))
assert repr(DDL('s', on='engine', context={'a':1}))
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/raw/GLES1/APPLE/framebuffer_multisample.py | 8 | 1296 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES1 import _types as _cs
# End users want this...
from OpenGL.raw.GLES1._types import *
from OpenGL.raw.GLES1 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES1_APPLE_framebuffer_multisample'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES1,'GLES1_APPLE_framebuffer_multisample',error_checker=_errors._error_checker)
GL_DRAW_FRAMEBUFFER_APPLE=_C('GL_DRAW_FRAMEBUFFER_APPLE',0x8CA9)
GL_DRAW_FRAMEBUFFER_BINDING_APPLE=_C('GL_DRAW_FRAMEBUFFER_BINDING_APPLE',0x8CA6)
GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE=_C('GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE',0x8D56)
GL_MAX_SAMPLES_APPLE=_C('GL_MAX_SAMPLES_APPLE',0x8D57)
GL_READ_FRAMEBUFFER_APPLE=_C('GL_READ_FRAMEBUFFER_APPLE',0x8CA8)
GL_READ_FRAMEBUFFER_BINDING_APPLE=_C('GL_READ_FRAMEBUFFER_BINDING_APPLE',0x8CAA)
GL_RENDERBUFFER_SAMPLES_APPLE=_C('GL_RENDERBUFFER_SAMPLES_APPLE',0x8CAB)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei)
def glRenderbufferStorageMultisampleAPPLE(target,samples,internalformat,width,height):pass
@_f
@_p.types(None,)
def glResolveMultisampleFramebufferAPPLE():pass
| gpl-3.0 |
mboeru/maraschino | lib/sqlalchemy/interfaces.py | 22 | 10834 | # sqlalchemy/interfaces.py
# Copyright (C) 2007-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2007 Jason Kirtland jek@discorporate.us
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Interfaces and abstract types.
This module is **deprecated** and is superseded by the
event system.
"""
from sqlalchemy import event, util
class PoolListener(object):
"""Hooks into the lifecycle of connections in a :class:`.Pool`.
.. note:: :class:`.PoolListener` is deprecated. Please
refer to :class:`.PoolEvents`.
Usage::
class MyListener(PoolListener):
def connect(self, dbapi_con, con_record):
'''perform connect operations'''
# etc.
# create a new pool with a listener
p = QueuePool(..., listeners=[MyListener()])
# add a listener after the fact
p.add_listener(MyListener())
# usage with create_engine()
e = create_engine("url://", listeners=[MyListener()])
All of the standard connection :class:`~sqlalchemy.pool.Pool` types can
accept event listeners for key connection lifecycle events:
creation, pool check-out and check-in. There are no events fired
when a connection closes.
For any given DB-API connection, there will be one ``connect``
event, `n` number of ``checkout`` events, and either `n` or `n - 1`
``checkin`` events. (If a ``Connection`` is detached from its
pool via the ``detach()`` method, it won't be checked back in.)
These are low-level events for low-level objects: raw Python
DB-API connections, without the conveniences of the SQLAlchemy
``Connection`` wrapper, ``Dialect`` services or ``ClauseElement``
execution. If you execute SQL through the connection, explicitly
closing all cursors and other resources is recommended.
Events also receive a ``_ConnectionRecord``, a long-lived internal
``Pool`` object that basically represents a "slot" in the
connection pool. ``_ConnectionRecord`` objects have one public
attribute of note: ``info``, a dictionary whose contents are
scoped to the lifetime of the DB-API connection managed by the
record. You can use this shared storage area however you like.
There is no need to subclass ``PoolListener`` to handle events.
Any class that implements one or more of these methods can be used
as a pool listener. The ``Pool`` will inspect the methods
provided by a listener object and add the listener to one or more
internal event queues based on its capabilities. In terms of
efficiency and function call overhead, you're much better off only
providing implementations for the hooks you'll be using.
"""
@classmethod
def _adapt_listener(cls, self, listener):
"""Adapt a :class:`.PoolListener` to individual
:class:`event.Dispatch` events.
"""
listener = util.as_interface(listener, methods=('connect',
'first_connect', 'checkout', 'checkin'))
if hasattr(listener, 'connect'):
event.listen(self, 'connect', listener.connect)
if hasattr(listener, 'first_connect'):
event.listen(self, 'first_connect', listener.first_connect)
if hasattr(listener, 'checkout'):
event.listen(self, 'checkout', listener.checkout)
if hasattr(listener, 'checkin'):
event.listen(self, 'checkin', listener.checkin)
def connect(self, dbapi_con, con_record):
"""Called once for each new DB-API connection or Pool's ``creator()``.
dbapi_con
A newly connected raw DB-API connection (not a SQLAlchemy
``Connection`` wrapper).
con_record
The ``_ConnectionRecord`` that persistently manages the connection
"""
def first_connect(self, dbapi_con, con_record):
"""Called exactly once for the first DB-API connection.
dbapi_con
A newly connected raw DB-API connection (not a SQLAlchemy
``Connection`` wrapper).
con_record
The ``_ConnectionRecord`` that persistently manages the connection
"""
def checkout(self, dbapi_con, con_record, con_proxy):
"""Called when a connection is retrieved from the Pool.
dbapi_con
A raw DB-API connection
con_record
The ``_ConnectionRecord`` that persistently manages the connection
con_proxy
The ``_ConnectionFairy`` which manages the connection for the span of
the current checkout.
If you raise an ``exc.DisconnectionError``, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
"""
def checkin(self, dbapi_con, con_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
dbapi_con
A raw DB-API connection
con_record
The ``_ConnectionRecord`` that persistently manages the connection
"""
class ConnectionProxy(object):
"""Allows interception of statement execution by Connections.
.. note:: :class:`.ConnectionProxy` is deprecated. Please
refer to :class:`.ConnectionEvents`.
Either or both of the ``execute()`` and ``cursor_execute()``
may be implemented to intercept compiled statement and
cursor level executions, e.g.::
class MyProxy(ConnectionProxy):
def execute(self, conn, execute, clauseelement, *multiparams, **params):
print "compiled statement:", clauseelement
return execute(clauseelement, *multiparams, **params)
def cursor_execute(self, execute, cursor, statement, parameters, context, executemany):
print "raw statement:", statement
return execute(cursor, statement, parameters, context)
The ``execute`` argument is a function that will fulfill the default
execution behavior for the operation. The signature illustrated
in the example should be used.
The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via
the ``proxy`` argument::
e = create_engine('someurl://', proxy=MyProxy())
"""
@classmethod
def _adapt_listener(cls, self, listener):
def adapt_execute(conn, clauseelement, multiparams, params):
def execute_wrapper(clauseelement, *multiparams, **params):
return clauseelement, multiparams, params
return listener.execute(conn, execute_wrapper,
clauseelement, *multiparams,
**params)
event.listen(self, 'before_execute', adapt_execute)
def adapt_cursor_execute(conn, cursor, statement,
parameters,context, executemany, ):
def execute_wrapper(
cursor,
statement,
parameters,
context,
):
return statement, parameters
return listener.cursor_execute(
execute_wrapper,
cursor,
statement,
parameters,
context,
executemany,
)
event.listen(self, 'before_cursor_execute', adapt_cursor_execute)
def do_nothing_callback(*arg, **kw):
pass
def adapt_listener(fn):
def go(conn, *arg, **kw):
fn(conn, do_nothing_callback, *arg, **kw)
return util.update_wrapper(go, fn)
event.listen(self, 'begin', adapt_listener(listener.begin))
event.listen(self, 'rollback',
adapt_listener(listener.rollback))
event.listen(self, 'commit', adapt_listener(listener.commit))
event.listen(self, 'savepoint',
adapt_listener(listener.savepoint))
event.listen(self, 'rollback_savepoint',
adapt_listener(listener.rollback_savepoint))
event.listen(self, 'release_savepoint',
adapt_listener(listener.release_savepoint))
event.listen(self, 'begin_twophase',
adapt_listener(listener.begin_twophase))
event.listen(self, 'prepare_twophase',
adapt_listener(listener.prepare_twophase))
event.listen(self, 'rollback_twophase',
adapt_listener(listener.rollback_twophase))
event.listen(self, 'commit_twophase',
adapt_listener(listener.commit_twophase))
def execute(self, conn, execute, clauseelement, *multiparams, **params):
"""Intercept high level execute() events."""
return execute(clauseelement, *multiparams, **params)
def cursor_execute(self, execute, cursor, statement, parameters, context, executemany):
"""Intercept low-level cursor execute() events."""
return execute(cursor, statement, parameters, context)
def begin(self, conn, begin):
"""Intercept begin() events."""
return begin()
def rollback(self, conn, rollback):
"""Intercept rollback() events."""
return rollback()
def commit(self, conn, commit):
"""Intercept commit() events."""
return commit()
def savepoint(self, conn, savepoint, name=None):
"""Intercept savepoint() events."""
return savepoint(name=name)
def rollback_savepoint(self, conn, rollback_savepoint, name, context):
"""Intercept rollback_savepoint() events."""
return rollback_savepoint(name, context)
def release_savepoint(self, conn, release_savepoint, name, context):
"""Intercept release_savepoint() events."""
return release_savepoint(name, context)
def begin_twophase(self, conn, begin_twophase, xid):
"""Intercept begin_twophase() events."""
return begin_twophase(xid)
def prepare_twophase(self, conn, prepare_twophase, xid):
"""Intercept prepare_twophase() events."""
return prepare_twophase(xid)
def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared):
"""Intercept rollback_twophase() events."""
return rollback_twophase(xid, is_prepared)
def commit_twophase(self, conn, commit_twophase, xid, is_prepared):
"""Intercept commit_twophase() events."""
return commit_twophase(xid, is_prepared)
| mit |
cevaris/pants | src/python/pants/util/objects.py | 10 | 1821 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import OrderedDict, namedtuple
def datatype(*args, **kwargs):
"""A wrapper for `namedtuple` that accounts for the type of the object in equality.
"""
class DataType(namedtuple(*args, **kwargs)):
__slots__ = ()
def __eq__(self, other):
if self is other:
return True
# Compare types and fields.
if type(self) != type(other):
return False
# Explicitly return super.__eq__'s value in case super returns NotImplemented
return super(DataType, self).__eq__(other)
def __ne__(self, other):
return not (self == other)
# NB: As datatype is not iterable, we need to override both __iter__ and all of the
# namedtuple methods that expect self to be iterable.
def __iter__(self):
raise TypeError("'{}' object is not iterable".format(type(self).__name__))
def _asdict(self):
'''Return a new OrderedDict which maps field names to their values'''
return OrderedDict(zip(self._fields, super(DataType, self).__iter__()))
def _replace(_self, **kwds):
'''Return a new datatype object replacing specified fields with new values'''
result = _self._make(map(kwds.pop, _self._fields, super(DataType, _self).__iter__()))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'''Return self as a plain tuple. Used by copy and pickle.'''
return tuple(super(DataType, self).__iter__())
return DataType
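# Illustrative usage (a sketch, not part of the module): unlike plain
# namedtuples, two datatypes with identical fields and values compare
# unequal, because the type itself participates in equality.
if __name__ == '__main__':
  Point = datatype('Point', ['x', 'y'])
  Vector = datatype('Vector', ['x', 'y'])
  assert Point(1, 2) == Point(1, 2)
  assert Point(1, 2) != Vector(1, 2)  # same fields and values, different types
  # Plain namedtuples with the same shape would compare equal:
  assert namedtuple('P', ['x', 'y'])(1, 2) == namedtuple('V', ['x', 'y'])(1, 2)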
| apache-2.0 |
40223134/w16b_test | static/Brython3.1.3-20150514-095342/Lib/_testcapi.py | 742 | 4231 |
CHAR_MAX = 127
CHAR_MIN = -128
DBL_MAX = 1.7976931348623157e+308
DBL_MIN = 2.2250738585072014e-308
FLT_MAX = 3.4028234663852886e+38
FLT_MIN = 1.1754943508222875e-38
INT_MAX = 2147483647
INT_MIN = -2147483648
LLONG_MAX = 9223372036854775807
LLONG_MIN = -9223372036854775808
LONG_MAX = 2147483647
LONG_MIN = -2147483648
PY_SSIZE_T_MAX = 2147483647
PY_SSIZE_T_MIN = -2147483648
SHRT_MAX = 32767
SHRT_MIN = -32768
SIZEOF_PYGC_HEAD = 16
UCHAR_MAX = 255
UINT_MAX = 4294967295
ULLONG_MAX = 18446744073709551615
ULONG_MAX = 4294967295
USHRT_MAX = 65535
__loader__ = "<_frozen_importlib.ExtensionFileLoader object at 0x00C98DD0>"
def _pending_threadfunc(*args,**kw):
pass
class _test_structmembersType(object):
pass
def _test_thread_state(*args,**kw):
pass
def argparsing(*args,**kw):
pass
def code_newempty(*args,**kw):
pass
def codec_incrementaldecoder(*args,**kw):
pass
def codec_incrementalencoder(*args,**kw):
pass
def crash_no_current_thread(*args,**kw):
pass
class error(Exception):
pass
def exception_print(*args,**kw):
pass
def getargs_B(*args,**kw):
pass
def getargs_H(*args,**kw):
pass
def getargs_I(*args,**kw):
pass
def getargs_K(*args,**kw):
pass
def getargs_L(*args,**kw):
pass
def getargs_Z(*args,**kw):
pass
def getargs_Z_hash(*args,**kw):
pass
def getargs_b(*args,**kw):
pass
def getargs_c(*args,**kw):
pass
def getargs_h(*args,**kw):
pass
def getargs_i(*args,**kw):
pass
def getargs_k(*args,**kw):
pass
def getargs_keyword_only(*args,**kw):
pass
def getargs_keywords(*args,**kw):
pass
def getargs_l(*args,**kw):
pass
def getargs_n(*args,**kw):
pass
def getargs_p(*args,**kw):
pass
def getargs_s(*args,**kw):
pass
def getargs_s_hash(*args,**kw):
pass
def getargs_s_star(*args,**kw):
pass
def getargs_tuple(*args,**kw):
pass
def getargs_u(*args,**kw):
pass
def getargs_u_hash(*args,**kw):
pass
def getargs_w_star(*args,**kw):
pass
def getargs_y(*args,**kw):
pass
def getargs_y_hash(*args,**kw):
pass
def getargs_y_star(*args,**kw):
pass
def getargs_z(*args,**kw):
pass
def getargs_z_hash(*args,**kw):
pass
def getargs_z_star(*args,**kw):
pass
class instancemethod(object):
pass
def make_exception_with_doc(*args,**kw):
pass
def make_memoryview_from_NULL_pointer(*args,**kw):
pass
def parse_tuple_and_keywords(*args,**kw):
pass
def pytime_object_to_time_t(*args,**kw):
pass
def pytime_object_to_timespec(*args,**kw):
pass
def pytime_object_to_timeval(*args,**kw):
pass
def raise_exception(*args,**kw):
pass
def raise_memoryerror(*args,**kw):
pass
def run_in_subinterp(*args,**kw):
pass
def set_exc_info(*args,**kw):
pass
def test_L_code(*args,**kw):
pass
def test_Z_code(*args,**kw):
pass
def test_capsule(*args,**kw):
pass
def test_config(*args,**kw):
pass
def test_datetime_capi(*args,**kw):
pass
def test_dict_iteration(*args,**kw):
pass
def test_empty_argparse(*args,**kw):
pass
def test_k_code(*args,**kw):
pass
def test_lazy_hash_inheritance(*args,**kw):
pass
def test_list_api(*args,**kw):
pass
def test_long_and_overflow(*args,**kw):
pass
def test_long_api(*args,**kw):
pass
def test_long_as_double(*args,**kw):
pass
def test_long_as_size_t(*args,**kw):
pass
def test_long_long_and_overflow(*args,**kw):
pass
def test_long_numbits(*args,**kw):
pass
def test_longlong_api(*args,**kw):
pass
def test_null_strings(*args,**kw):
pass
def test_s_code(*args,**kw):
pass
def test_string_from_format(*args,**kw):
pass
def test_string_to_double(*args,**kw):
pass
def test_u_code(*args,**kw):
pass
def test_unicode_compare_with_ascii(*args,**kw):
pass
def test_widechar(*args,**kw):
pass
def test_with_docstring(*args,**kw):
"""This is a pretty normal docstring."""
pass
def traceback_print(*args,**kw):
pass
def unicode_aswidechar(*args,**kw):
pass
def unicode_aswidecharstring(*args,**kw):
pass
def unicode_encodedecimal(*args,**kw):
pass
def unicode_transformdecimaltoascii(*args,**kw):
pass
| agpl-3.0 |
ammaradil/fibonacci | Lib/site-packages/pip/_vendor/ipaddress.py | 206 | 72089 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
__version__ = '1.0.7'
import struct
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
_compat_bytes_to_byte_vals = lambda byt: byt
else:
_compat_bytes_to_byte_vals = (lambda byt:
[struct.unpack(b'!B', b)[0] for b in byt])
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
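# Worked example for _compat_to_bytes (illustrative, following the packing
# above): 3221225985 == 0xC0000201, so
#   _compat_to_bytes(3221225985, 4, 'big') == b'\xc0\x00\x02\x01'
# which is the packed form of the IPv4 address 192.0.2.1.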
if hasattr(int, 'bit_length'):
# Not int.bit_length , since that won't work in 2.7 where long exists
_compat_bit_length = lambda i: i.bit_length()
else:
_compat_bit_length = lambda i: len(bin(abs(i))) - 2
def _compat_range(start, end):
i = start
while i < end:
yield i
i += 1
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
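# Example usage of the ip_address() factory above (illustrative; IPv4 is
# tried first, then IPv6, per the dispatch logic):
#
#   >>> ip_address('192.0.2.1')
#   IPv4Address('192.0.2.1')
#   >>> ip_address(1)            # integers < 2**32 are treated as IPv4
#   IPv4Address('0.0.0.1')
#   >>> ip_address('2001:db8::1')
#   IPv6Address('2001:db8::1')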
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
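# Example usage of ip_interface() (illustrative): an interface keeps both
# the host address and its enclosing network.
#
#   >>> iface = ip_interface('192.0.2.5/24')
#   >>> iface.ip
#   IPv4Address('192.0.2.5')
#   >>> iface.network
#   IPv4Network('192.0.2.0/24')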
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
    except (struct.error, OverflowError):
        raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
    except (struct.error, OverflowError):
        raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) & 1:
return i
# All bits of interest were zero, even if there are more in the number
return bits
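# Worked example: 0b1011000 has three trailing zero bits, so
#   _count_righthand_zero_bits(0b1011000, 8) == 3
# while an input of 0 exhausts the whole bit budget:
#   _count_righthand_zero_bits(0, 8) == 8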
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" %
(first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip('%s/%d' % (first, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
first = first.__class__(first_int)
def _collapse_addresses_recursive(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_recursive([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
while True:
last_addr = None
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
last_addr = cur_addr
ret_array.append(cur_addr)
elif (cur_addr.network_address >= last_addr.network_address and
cur_addr.broadcast_address <= last_addr.broadcast_address):
optimized = True
elif cur_addr == list(last_addr.supernet().subnets())[1]:
ret_array[-1] = last_addr = last_addr.supernet()
optimized = True
else:
last_addr = cur_addr
ret_array.append(cur_addr)
addresses = ret_array
if not optimized:
return addresses
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
i = 0
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
nets = sorted(set(nets))
while i < len(ips):
(first, last) = _find_address_range(ips[i:])
i = ips.index(last) + 1
addrs.extend(summarize_address_range(first, last))
return iter(_collapse_addresses_recursive(sorted(
addrs + nets, key=_BaseNetwork._get_networks_key)))
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
    doesn't make any sense. There are times, however, when you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
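# Example (illustrative): sorting a mixed list with the key above orders by
# version first, then by address value:
#
#   >>> sorted([IPv4Address('192.0.2.1'), IPv4Network('192.0.2.0/24')],
#   ...        key=get_mixed_type_key)
#   [IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')]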
class _TotalOrderingMixin(object):
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
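# Minimal sketch of how the mixin is used (hypothetical class, for
# illustration only): a subclass supplies __eq__ and __lt__, and the mixin
# derives __ne__/__le__/__gt__/__ge__ while propagating NotImplemented.
#
#   class _Point(_TotalOrderingMixin):
#       def __init__(self, x):
#           self.x = x
#       def __eq__(self, other):
#           return self.x == other.x
#       def __lt__(self, other):
#           return self.x < other.x
#
#   # _Point(1) <= _Point(2) and _Point(2) >= _Point(1) both hold via the
#   # derived comparisons.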
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = ("%r (len %d != %d) is not permitted as an IPv%d address "
"(did you pass in a bytes instead of a unicode object?)")
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
def _ip_int_from_prefix(self, prefixlen=None):
"""Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
if prefixlen is None:
prefixlen = self._prefixlen
return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
def _prefix_from_ip_int(self, ip_int, mask=32):
"""Return prefix length from the decimal netmask.
Args:
ip_int: An integer, the IP address.
mask: The netmask. Defaults to 32.
Returns:
An integer, the prefix length.
"""
return mask - _count_righthand_zero_bits(ip_int, mask)
def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
        # Test for None explicitly: a prefix length of 0 is valid and must
        # not fall back to self._prefixlen.
        if prefixlen is None:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
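    # Worked example for the prefix helpers above (IPv4, mask=32):
    #   _ip_int_from_prefix(24) == 0xFFFFFF00   # i.e. 255.255.255.0
    #   _prefix_from_ip_int(0xFFFFFF00) == 32 - 8 == 24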
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
def __init__(self, address):
if (not isinstance(address, bytes) and '/' in _compat_str(address)):
raise AddressValueError("Unexpected '/' in %r" % address)
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip
and self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError
return self._address_class(broadcast + n)
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
addr1.address_exclude(addr2) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
addr1.address_exclude(addr2) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
      TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address):
raise ValueError('%s not contained in %s' % (other, self))
        if other == self:
            # Nothing to exclude; a bare `raise StopIteration` inside a
            # generator is a RuntimeError under PEP 479 (Python 3.7+).
            return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if (other.network_address >= s1.network_address and
other.broadcast_address <= s1.broadcast_address):
yield s2
s1, s2 = s1.subnets()
elif (other.network_address >= s2.network_address and
other.broadcast_address <= s2.broadcast_address):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), yield just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if not self._is_valid_netmask(str(new_prefixlen)):
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
first = self.__class__('%s/%s' %
(self.network_address,
self._prefixlen + prefixlen_diff))
yield first
current = first
while True:
broadcast = current.broadcast_address
if broadcast == self.broadcast_address:
return
new_addr = self._address_class(int(broadcast) + 1)
current = self.__class__('%s/%s' % (new_addr,
new_prefixlen))
yield current
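    # Example (illustrative): splitting a /24 into four /26 blocks with the
    # generator above:
    #
    #   >>> list(IPv4Network('192.0.2.0/24').subnets(new_prefix=26))
    #   [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
    #    IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]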
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
if self.prefixlen - prefixlen_diff < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
# TODO (pmoody): optimize this.
t = self.__class__('%s/%d' % (self.network_address,
self.prefixlen - prefixlen_diff),
strict=False)
return t.__class__('%s/%d' % (t.network_address, t.prefixlen))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset((255, 254, 252, 248, 240, 224, 192, 128, 0))
def __init__(self, address):
self._version = 4
self._max_prefixlen = IPV4LENGTH
def _explode_shorthand_ip_string(self):
return _compat_str(self)
def _ip_int_from_string(self, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
bvs = map(self._parse_octet, octets)
return _compat_int_from_byte_vals(bvs, 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
def _parse_octet(self, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
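    # Worked examples for the octet validation above:
    #   _parse_octet('255')  ->  255
    #   _parse_octet('08')   ->  ValueError (ambiguous octal/decimal form)
    #   _parse_octet('256')  ->  ValueError (octet > 255)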
def _string_from_ip_int(self, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_valid_netmask(self, netmask):
"""Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
"""
mask = netmask.split('.')
if len(mask) == 4:
try:
for x in mask:
if int(x) not in self._valid_mask_octets:
return False
except ValueError:
# Found something that isn't an integer or isn't valid
return False
for idx, y in enumerate(mask):
                # Compare octets numerically; string comparison only happens
                # to work for the specific values in _valid_mask_octets.
                if idx > 0 and int(y) > int(mask[idx - 1]):
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= self._max_prefixlen
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
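    # Worked examples: a hostmask ascends, a netmask descends.
    #   _is_hostmask('0.0.0.255')      ->  True
    #   _is_hostmask('255.255.255.0')  ->  False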
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
_BaseAddress.__init__(self, address)
_BaseV4.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
reserved_network = IPv4Network('240.0.0.0/4')
return self in reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 1918.
"""
private_10 = IPv4Network('10.0.0.0/8')
private_172 = IPv4Network('172.16.0.0/12')
private_192 = IPv4Network('192.168.0.0/16')
return (self in private_10 or
self in private_172 or
self in private_192)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
multicast_network = IPv4Network('224.0.0.0/4')
return self in multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
unspecified_address = IPv4Address('0.0.0.0')
return self == unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
loopback_network = IPv4Network('127.0.0.0/8')
return self in loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
linklocal_network = IPv4Network('169.254.0.0/16')
return self in linklocal_network
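# Example checks for the IPv4Address properties above (illustrative; the
# results follow from the ranges defined in the code):
#
#   >>> IPv4Address('10.1.2.3').is_private
#   True
#   >>> IPv4Address('127.0.0.1').is_loopback
#   True
#   >>> IPv4Address('224.0.0.251').is_multicast
#   True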
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes,) + _compat_int_types):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
    .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
          are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseV4.__init__(self, address)
_BaseNetwork.__init__(self, address)
# Constructing from a packed address
if isinstance(address, bytes):
self.network_address = IPv4Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ALL_ONES)
#fixme: address/network test here
return
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self.network_address = IPv4Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ALL_ONES)
#fixme: address/network test here.
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
mask = addr[1].split('.')
if len(mask) == 4:
# We have dotted decimal netmask.
if self._is_valid_netmask(addr[1]):
self.netmask = IPv4Address(self._ip_int_from_string(
addr[1]))
elif self._is_hostmask(addr[1]):
self.netmask = IPv4Address(
self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
else:
raise NetmaskValueError('%r is not a valid netmask'
% addr[1])
self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
else:
# We have a netmask in prefix length form.
if not self._is_valid_netmask(addr[1]):
raise NetmaskValueError('%r is not a valid netmask'
% addr[1])
self._prefixlen = int(addr[1])
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
else:
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
def __init__(self, address):
self._version = 6
self._max_prefixlen = IPV6LENGTH
def _ip_int_from_string(self, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = self._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = ("At most %d colons permitted in %r" %
(_max_parts - 1, ip_str))
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (self._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != self._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (self._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
def _parse_hextet(self, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
def _compress_hextets(self, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
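    # Worked example for the compression above:
    #   ['2001', 'db8', '0', '0', '0', '0', '0', '1']
    # has its longest run of '0' hextets replaced by '', giving
    #   ['2001', 'db8', '', '1']
    # so ':'.join(...) produces '2001:db8::1'.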
def _string_from_ip_int(self, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(self._ip)
if ip_int > self._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = self._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
_BaseAddress.__init__(self, address)
_BaseV6.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
multicast_network = IPv6Network('ff00::/8')
return self in multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
reserved_nets = [IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9')]
return any(self in x for x in reserved_nets)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
linklocal_network = IPv6Network('fe80::/10')
return self in linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
sitelocal_network = IPv6Network('fec0::/10')
return self in sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
private_network = IPv6Network('fc00::/7')
return self in private_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
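# Examples for the embedded-address properties above (illustrative; values
# follow from the bit layout in the code):
#
#   >>> IPv6Address('2002:c000:204::').sixtofour
#   IPv4Address('192.0.2.4')
#   >>> IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
#   (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))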
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
        strict: A boolean. If true, ensure that we have been passed
            a true network address, eg, 2001:db8::1000/124, and not an
            IP address on a network, eg, 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseV6.__init__(self, address)
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self.network_address = IPv6Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ALL_ONES)
return
# Constructing from a packed address
if isinstance(address, bytes):
self.network_address = IPv6Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
if self._is_valid_netmask(addr[1]):
self._prefixlen = int(addr[1])
else:
raise NetmaskValueError('%r is not a valid netmask'
% addr[1])
else:
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def _is_valid_netmask(self, prefixlen):
"""Verify that the netmask/prefixlen is valid.
Args:
prefixlen: A string, the netmask in prefix length format.
Returns:
A boolean, True if the prefix represents a valid IPv6
netmask.
"""
try:
prefixlen = int(prefixlen)
except ValueError:
return False
return 0 <= prefixlen <= self._max_prefixlen
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
| mit |
Caylo/easybuild-framework | easybuild/toolchains/linalg/libsci.py | 1 | 3408 | ##
# Copyright 2014-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for Cray's LibSci library, which provides BLAS/LAPACK support.
cf. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
:author: Petar Forai (IMP/IMBA, Austria)
:author: Kenneth Hoste (Ghent University)
"""
import os
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.toolchain.linalg import LinAlg
CRAY_LIBSCI_MODULE_NAME = 'cray-libsci'
class LibSci(LinAlg):
"""Support for Cray's LibSci library, which provides BLAS/LAPACK support."""
# BLAS/LAPACK support
# via cray-libsci module, which gets loaded via the PrgEnv module
# see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
# no need to specify libraries, compiler driver takes care of linking the right libraries
# FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.
BLAS_LIB = []
BLAS_LIB_MT = []
LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
LAPACK_IS_BLAS = True
BLACS_MODULE_NAME = []
SCALAPACK_MODULE_NAME = []
def _get_software_root(self, name):
"""Get install prefix for specified software name; special treatment for Cray modules."""
if name == 'cray-libsci':
# Cray-provided LibSci module
env_var = 'CRAY_LIBSCI_PREFIX_DIR'
root = os.getenv(env_var, None)
if root is None:
raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var)
else:
self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
else:
root = super(LibSci, self)._get_software_root(name)
return root
def _set_blacs_variables(self):
"""Skip setting BLACS related variables"""
pass
def _set_scalapack_variables(self):
"""Skip setting ScaLAPACK related variables"""
pass
def definition(self):
"""
Filter BLAS module from toolchain definition.
The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module,
and thus is not a direct toolchain component.
"""
tc_def = super(LibSci, self).definition()
tc_def['BLAS'] = []
tc_def['LAPACK'] = []
return tc_def
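# Standalone sketch of the Cray prefix lookup performed by
# _get_software_root() above (the helper itself is hypothetical; the env var
# name and error style are taken from the method):
def _cray_libsci_prefix_sketch():
    """Return $CRAY_LIBSCI_PREFIX_DIR, failing loudly when it is unset."""
    env_var = 'CRAY_LIBSCI_PREFIX_DIR'
    root = os.getenv(env_var, None)
    if root is None:
        raise EasyBuildError("Failed to determine install prefix for %s via $%s",
                             CRAY_LIBSCI_MODULE_NAME, env_var)
    return root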
| gpl-2.0 |
RedstonerServer/redstoner-utils | imout.py | 2 | 1138 | from helpers import *
from adminchat import *
imout_toggle_list = []
@hook.command("imout")
def on_imout_command(sender, command, label, args):
if not is_player(sender):
msg(sender, "&cThis command can't be run from the console")
return True
if sender.hasPermission("utils.imout"):
name = sender.getName()
symbol = "&a&l+"
if name in imout_toggle_list:
msg(sender, "&eWelcome back! You are no longer hidden")
msg(sender, "&6We disabled /act for you!")
runas(sender, "vanish off")
if name in imout_toggle_list:
imout_toggle_list.remove(name)
if name in ac_toggle_list:
ac_toggle_list.remove(name)
else:
symbol = "&c&l-"
msg(sender, "&eYou just left... Or didn't you?")
imout_toggle_list.append(name)
runas(sender, "vanish on")
if name not in ac_toggle_list:
msg(sender, "&6We enabled /act for you!")
ac_toggle_list.append(name)
broadcast(None, "%s &7%s" % (symbol, name))
return True
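# Toggle-pattern sketch (hypothetical helper; it mirrors the membership
# checks above without any Bukkit/Spigot objects):
def _toggle(name, toggle_list):
    """Remove name if present, add it if absent; return True when now enabled."""
    if name in toggle_list:
        toggle_list.remove(name)
        return False
    toggle_list.append(name)
    return True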
| mit |
abg1979/git | contrib/hooks/multimail/git_multimail.py | 186 | 110172 | #! /usr/bin/env python2
# Copyright (c) 2015 Matthieu Moy and others
# Copyright (c) 2012-2014 Michael Haggerty and others
# Derived from contrib/hooks/post-receive-email, which is
# Copyright (c) 2007 Andy Parkins
# and also includes contributions by other authors.
#
# This file is part of git-multimail.
#
# git-multimail is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
"""Generate notification emails for pushes to a git repository.
This hook sends emails describing changes introduced by pushes to a
git repository. For each reference that was changed, it emits one
ReferenceChange email summarizing how the reference was changed,
followed by one Revision email for each new commit that was introduced
by the reference change.
Each commit is announced in exactly one Revision email. If the same
commit is merged into another branch in the same or a later push, then
the ReferenceChange email will list the commit's SHA1 and its one-line
summary, but no new Revision email will be generated.
This script is designed to be used as a "post-receive" hook in a git
repository (see githooks(5)). It can also be used as an "update"
script, but this usage is not completely reliable and is deprecated.
To help with debugging, this script accepts a --stdout option, which
causes the emails to be written to standard output rather than sent
using sendmail.
See the accompanying README file for the complete documentation.
"""
import sys
import os
import re
import bisect
import socket
import subprocess
import shlex
import optparse
import smtplib
import time
try:
from email.utils import make_msgid
from email.utils import getaddresses
from email.utils import formataddr
from email.utils import formatdate
from email.header import Header
except ImportError:
# Prior to Python 2.5, the email module used different names:
from email.Utils import make_msgid
from email.Utils import getaddresses
from email.Utils import formataddr
from email.Utils import formatdate
from email.Header import Header
DEBUG = False
ZEROS = '0' * 40
LOGBEGIN = '- Log -----------------------------------------------------------------\n'
LOGEND = '-----------------------------------------------------------------------\n'
ADDR_HEADERS = set(['from', 'to', 'cc', 'bcc', 'reply-to', 'sender'])
# It is assumed in many places that the encoding is uniformly UTF-8,
# so changing these constants is unsupported. But define them here
# anyway, to make it easier to find (at least most of) the places
# where the encoding is important.
(ENCODING, CHARSET) = ('UTF-8', 'utf-8')
REF_CREATED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s created'
' (now %(newrev_short)s)'
)
REF_UPDATED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s updated'
' (%(oldrev_short)s -> %(newrev_short)s)'
)
REF_DELETED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s deleted'
' (was %(oldrev_short)s)'
)
COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s updated: %(oneline)s'
)
REFCHANGE_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
Auto-Submitted: auto-generated
"""
REFCHANGE_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a change to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
FOOTER_TEMPLATE = """\
-- \n\
To stop receiving notification emails like this one, please contact
%(administrator)s.
"""
REWIND_ONLY_TEMPLATE = """\
This update removed existing revisions from the reference, leaving the
reference pointing at a previous point in the repository history.
* -- * -- N %(refname)s (%(newrev_short)s)
\\
O -- O -- O (%(oldrev_short)s)
Any revisions marked "omits" are not gone; other references still
refer to them. Any revisions marked "discards" are gone forever.
"""
NON_FF_TEMPLATE = """\
This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
%(refname_type)s are not in the new version. This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:
* -- * -- B -- O -- O -- O (%(oldrev_short)s)
\\
N -- N -- N %(refname)s (%(newrev_short)s)
You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.
Any revisions marked "omits" are not gone; other references still
refer to them. Any revisions marked "discards" are gone forever.
"""
NO_NEW_REVISIONS_TEMPLATE = """\
No new revisions were added by this update.
"""
DISCARDED_REVISIONS_TEMPLATE = """\
This change permanently discards the following revisions:
"""
NO_DISCARDED_REVISIONS_TEMPLATE = """\
The revisions that were on this %(refname_type)s are still contained in
other references; therefore, this change does not discard any commits
from the repository.
"""
NEW_REVISIONS_TEMPLATE = """\
The %(tot)s revisions listed above as "new" are entirely new to this
repository and will be described in separate emails. The revisions
listed as "adds" were already present in the repository and have only
been added to this reference.
"""
TAG_CREATED_TEMPLATE = """\
at %(newrev_short)-9s (%(newrev_type)s)
"""
TAG_UPDATED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was modified! ***
from %(oldrev_short)-9s (%(oldrev_type)s)
to %(newrev_short)-9s (%(newrev_type)s)
"""
TAG_DELETED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was deleted! ***
"""
# The template used in summary tables. It looks best if this uses the
# same alignment as TAG_CREATED_TEMPLATE and TAG_UPDATED_TEMPLATE.
BRIEF_SUMMARY_TEMPLATE = """\
%(action)10s %(rev_short)-9s %(text)s
"""
NON_COMMIT_UPDATE_TEMPLATE = """\
This is an unusual reference change because the reference did not
refer to a commit either before or after the change. We do not know
how to provide full information about this reference change.
"""
REVISION_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Cc: %(cc_recipients)s
Subject: %(emailprefix)s%(num)02d/%(tot)02d: %(oneline)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
From: %(fromaddr)s
Reply-To: %(reply_to)s
In-Reply-To: %(reply_to_msgid)s
References: %(reply_to_msgid)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Rev: %(rev)s
Auto-Submitted: auto-generated
"""
REVISION_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
REVISION_FOOTER_TEMPLATE = FOOTER_TEMPLATE
# Combined, meaning refchange+revision email (for single-commit additions)
COMBINED_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
X-Git-Rev: %(rev)s
Auto-Submitted: auto-generated
"""
COMBINED_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
COMBINED_FOOTER_TEMPLATE = FOOTER_TEMPLATE
class CommandError(Exception):
def __init__(self, cmd, retcode):
self.cmd = cmd
self.retcode = retcode
Exception.__init__(
self,
'Command "%s" failed with retcode %s' % (' '.join(cmd), retcode,)
)
class ConfigurationException(Exception):
pass
# The "git" program (this could be changed to include a full path):
GIT_EXECUTABLE = 'git'
# How "git" should be invoked (including global arguments), as a list
# of words. This variable is usually initialized automatically by
# read_git_output() via choose_git_command(), but if a value is set
# here then it will be used unconditionally.
GIT_CMD = None
def choose_git_command():
"""Decide how to invoke git, and record the choice in GIT_CMD."""
global GIT_CMD
if GIT_CMD is None:
try:
# Check to see whether the "-c" option is accepted (it was
# only added in Git 1.7.2). We don't actually use the
# output of "git --version", though if we needed more
# specific version information this would be the place to
# do it.
cmd = [GIT_EXECUTABLE, '-c', 'foo.bar=baz', '--version']
read_output(cmd)
GIT_CMD = [GIT_EXECUTABLE, '-c', 'i18n.logoutputencoding=%s' % (ENCODING,)]
except CommandError:
GIT_CMD = [GIT_EXECUTABLE]
def read_git_output(args, input=None, keepends=False, **kw):
"""Read the output of a Git command."""
if GIT_CMD is None:
choose_git_command()
return read_output(GIT_CMD + args, input=input, keepends=keepends, **kw)
def read_output(cmd, input=None, keepends=False, **kw):
if input:
stdin = subprocess.PIPE
else:
stdin = None
p = subprocess.Popen(
cmd, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw
)
(out, err) = p.communicate(input)
retcode = p.wait()
if retcode:
raise CommandError(cmd, retcode)
if not keepends:
out = out.rstrip('\n\r')
return out
def read_git_lines(args, keepends=False, **kw):
"""Return the lines output by Git command.
Return as single lines, with newlines stripped off."""
return read_git_output(args, keepends=True, **kw).splitlines(keepends)
def git_rev_list_ish(cmd, spec, args=None, **kw):
"""Common functionality for invoking a 'git rev-list'-like command.
Parameters:
* cmd is the Git command to run, e.g., 'rev-list' or 'log'.
* spec is a list of revision arguments to pass to the named
command. If None, this function returns an empty list.
* args is a list of extra arguments passed to the named command.
* All other keyword arguments (if any) are passed to the
underlying read_git_lines() function.
Return the output of the Git command in the form of a list, one
entry per output line.
"""
if spec is None:
return []
if args is None:
args = []
args = [cmd, '--stdin'] + args
spec_stdin = ''.join(s + '\n' for s in spec)
return read_git_lines(args, input=spec_stdin, **kw)
def git_rev_list(spec, **kw):
"""Run 'git rev-list' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish('rev-list', spec, **kw)
def git_log(spec, **kw):
"""Run 'git log' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish('log', spec, **kw)
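# Usage sketch (must run inside a git repository; the revision arguments are
# illustrative, not part of this script):
def _rev_list_demo():
    # SHA1s reachable from HEAD but not from origin/master, one per line.
    return git_rev_list(['HEAD', '^origin/master'])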
def header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field."""
try:
if isinstance(text, str):
text = text.decode(ENCODING, 'replace')
return Header(text, header_name=header_name).encode()
except UnicodeEncodeError:
return Header(text, header_name=header_name, charset=CHARSET,
errors='replace').encode()
def addr_header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field containing
email addresses."""
return Header(
', '.join(
formataddr((header_encode(name), emailaddr))
for name, emailaddr in getaddresses([text])
),
header_name=header_name
).encode()
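# Encoding sketch: non-ASCII header values come out RFC 2047-encoded. The
# byte string is 'Jose' with an accented e in UTF-8, escaped to keep this
# file ASCII-only (the sample value is hypothetical):
def _header_encode_demo():
    return header_encode('Jos\xc3\xa9 User', header_name='From')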
class Config(object):
def __init__(self, section, git_config=None):
"""Represent a section of the git configuration.
If git_config is specified, it is passed to "git config" in
the GIT_CONFIG environment variable, meaning that "git config"
will read the specified path rather than the Git default
config paths."""
self.section = section
if git_config:
self.env = os.environ.copy()
self.env['GIT_CONFIG'] = git_config
else:
self.env = None
@staticmethod
def _split(s):
"""Split NUL-terminated values."""
words = s.split('\0')
assert words[-1] == ''
return words[:-1]
def get(self, name, default=None):
try:
values = self._split(read_git_output(
['config', '--get', '--null', '%s.%s' % (self.section, name)],
env=self.env, keepends=True,
))
assert len(values) == 1
return values[0]
except CommandError:
return default
def get_bool(self, name, default=None):
try:
value = read_git_output(
['config', '--get', '--bool', '%s.%s' % (self.section, name)],
env=self.env,
)
except CommandError:
return default
return value == 'true'
def get_all(self, name, default=None):
"""Read a (possibly multivalued) setting from the configuration.
Return the result as a list of values, or default if the name
is unset."""
try:
return self._split(read_git_output(
['config', '--get-all', '--null', '%s.%s' % (self.section, name)],
env=self.env, keepends=True,
))
except CommandError, e:
if e.retcode == 1:
# "the section or key is invalid"; i.e., there is no
# value for the specified key.
return default
else:
raise
def get_recipients(self, name, default=None):
"""Read a recipients list from the configuration.
Return the result as a comma-separated list of email
addresses, or default if the option is unset. If the setting
has multiple values, concatenate them with comma separators."""
lines = self.get_all(name, default=None)
if lines is None:
return default
return ', '.join(line.strip() for line in lines)
def set(self, name, value):
read_git_output(
['config', '%s.%s' % (self.section, name), value],
env=self.env,
)
def add(self, name, value):
read_git_output(
['config', '--add', '%s.%s' % (self.section, name), value],
env=self.env,
)
def __contains__(self, name):
return self.get_all(name, default=None) is not None
# We don't use this method anymore internally, but keep it here in
# case somebody is calling it from their own code:
def has_key(self, name):
return name in self
def unset_all(self, name):
try:
read_git_output(
['config', '--unset-all', '%s.%s' % (self.section, name)],
env=self.env,
)
except CommandError, e:
if e.retcode == 5:
# The name doesn't exist, which is what we wanted anyway...
pass
else:
raise
def set_recipients(self, name, value):
self.unset_all(name)
for pair in getaddresses([value]):
self.add(name, formataddr(pair))
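# Usage sketch: the hook reads its own settings from the 'multimailhook'
# configuration section, e.g. (the default value here is illustrative):
def _config_demo():
    config = Config('multimailhook')
    return config.get_recipients('mailinglist', default='')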
def generate_summaries(*log_args):
"""Generate a brief summary for each revision requested.
log_args are strings that will be passed directly to "git log" as
revision selectors. Iterate over (sha1_short, subject) for each
commit specified by log_args (subject is the first line of the
commit message as a string without EOLs)."""
cmd = [
'log', '--abbrev', '--format=%h %s',
] + list(log_args) + ['--']
for line in read_git_lines(cmd):
yield tuple(line.split(' ', 1))
def limit_lines(lines, max_lines):
for (index, line) in enumerate(lines):
if index < max_lines:
yield line
if index >= max_lines:
yield '... %d lines suppressed ...\n' % (index + 1 - max_lines,)
def limit_linelength(lines, max_linelength):
for line in lines:
# Don't forget that lines always include a trailing newline.
if len(line) > max_linelength + 1:
line = line[:max_linelength - 7] + ' [...]\n'
yield line
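# Truncation sketch: email bodies are capped by composing both filters; the
# limits below are illustrative, not the configured defaults:
def _truncate_body_demo(body_lines):
    return list(limit_linelength(limit_lines(body_lines, 1000), 500))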
class CommitSet(object):
"""A (constant) set of object names.
The set should be initialized with full SHA1 object names. The
__contains__() method returns True iff its argument is an
abbreviation of any of the names in the set."""
def __init__(self, names):
self._names = sorted(names)
def __len__(self):
return len(self._names)
def __contains__(self, sha1_abbrev):
"""Return True iff this set contains sha1_abbrev (which might be abbreviated)."""
i = bisect.bisect_left(self._names, sha1_abbrev)
return i < len(self) and self._names[i].startswith(sha1_abbrev)
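# Membership sketch: lookups match by unique SHA1 prefix (the hashes below
# are well-known git constants, used here only as sample data):
def _commit_set_demo():
    cs = CommitSet([
        '4b825dc642cb6eb9a060e54bf8d69288fbee4904',
        'da39a3ee5e6b4b0d3255bfef95601890afd80709',
    ])
    return ('4b825d' in cs) and ('ffff' not in cs)  # True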
class GitObject(object):
def __init__(self, sha1, type=None):
if sha1 == ZEROS:
self.sha1 = self.type = self.commit_sha1 = None
else:
self.sha1 = sha1
self.type = type or read_git_output(['cat-file', '-t', self.sha1])
if self.type == 'commit':
self.commit_sha1 = self.sha1
elif self.type == 'tag':
try:
self.commit_sha1 = read_git_output(
['rev-parse', '--verify', '%s^0' % (self.sha1,)]
)
except CommandError:
# Cannot deref tag to determine commit_sha1
self.commit_sha1 = None
else:
self.commit_sha1 = None
self.short = read_git_output(['rev-parse', '--short', sha1])
def get_summary(self):
"""Return (sha1_short, subject) for this commit."""
if not self.sha1:
raise ValueError('Empty commit has no summary')
return iter(generate_summaries('--no-walk', self.sha1)).next()
def __eq__(self, other):
return isinstance(other, GitObject) and self.sha1 == other.sha1
def __hash__(self):
return hash(self.sha1)
def __nonzero__(self):
return bool(self.sha1)
def __str__(self):
return self.sha1 or ZEROS
class Change(object):
"""A Change that has been made to the Git repository.
Abstract class from which both Revisions and ReferenceChanges are
derived. A Change knows how to generate a notification email
describing itself."""
def __init__(self, environment):
self.environment = environment
self._values = None
def _compute_values(self):
"""Return a dictionary {keyword: expansion} for this Change.
Derived classes overload this method to add more entries to
the return value. This method is used internally by
get_values(). The return value should always be a new
dictionary."""
return self.environment.get_values()
def get_values(self, **extra_values):
"""Return a dictionary {keyword: expansion} for this Change.
Return a dictionary mapping keywords to the values that they
should be expanded to for this Change (used when interpolating
template strings). If any keyword arguments are supplied, add
those to the return value as well. The return value is always
a new dictionary."""
if self._values is None:
self._values = self._compute_values()
values = self._values.copy()
if extra_values:
values.update(extra_values)
return values
def expand(self, template, **extra_values):
"""Expand template.
Expand the template (which should be a string) using string
interpolation of the values for this Change. If any keyword
arguments are provided, also include those in the keywords
available for interpolation."""
return template % self.get_values(**extra_values)
def expand_lines(self, template, **extra_values):
"""Break template into lines and expand each line."""
values = self.get_values(**extra_values)
for line in template.splitlines(True):
yield line % values
def expand_header_lines(self, template, **extra_values):
"""Break template into lines and expand each line as an RFC 2822 header.
Encode values and split up lines that are too long. Silently
skip lines that contain references to unknown variables."""
values = self.get_values(**extra_values)
for line in template.splitlines():
(name, value) = line.split(':', 1)
try:
value = value % values
except KeyError, e:
if DEBUG:
self.environment.log_warning(
'Warning: unknown variable %r in the following line; line skipped:\n'
' %s\n'
% (e.args[0], line,)
)
else:
if name.lower() in ADDR_HEADERS:
value = addr_header_encode(value, name)
else:
value = header_encode(value, name)
for splitline in ('%s: %s\n' % (name, value)).splitlines(True):
yield splitline
def generate_email_header(self):
"""Generate the RFC 2822 email headers for this Change, a line at a time.
The output should not include the trailing blank line."""
raise NotImplementedError()
def generate_email_intro(self):
"""Generate the email intro for this Change, a line at a time.
The output will be used as the standard boilerplate at the top
of the email body."""
raise NotImplementedError()
def generate_email_body(self):
"""Generate the main part of the email body, a line at a time.
The text in the body might be truncated after a specified
number of lines (see multimailhook.emailmaxlines)."""
raise NotImplementedError()
def generate_email_footer(self):
"""Generate the footer of the email, a line at a time.
The footer is always included, irrespective of
multimailhook.emailmaxlines."""
raise NotImplementedError()
def generate_email(self, push, body_filter=None, extra_header_values={}):
"""Generate an email describing this change.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
future (e.g., passing extra values to generate_email_intro())."""
for line in self.generate_email_header(**extra_header_values):
yield line
yield '\n'
for line in self.generate_email_intro():
yield line
body = self.generate_email_body(push)
if body_filter is not None:
body = body_filter(body)
for line in body:
yield line
for line in self.generate_email_footer():
yield line
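# Expansion sketch: the templates above are plain %-format strings filled
# from a change's keyword dictionary (the values here are hypothetical):
def _expand_sketch():
    values = {'refname_type': 'branch', 'short_refname': 'master'}
    return 'in %(refname_type)s %(short_refname)s' % values  # 'in branch master'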
class Revision(Change):
"""A Change consisting of a single git commit."""
CC_RE = re.compile(r'^\s*C[Cc]:\s*(?P<to>[^#]+@[^\s#]*)\s*(#.*)?$')
def __init__(self, reference_change, rev, num, tot):
Change.__init__(self, reference_change.environment)
self.reference_change = reference_change
self.rev = rev
self.change_type = self.reference_change.change_type
self.refname = self.reference_change.refname
self.num = num
self.tot = tot
self.author = read_git_output(['log', '--no-walk', '--format=%aN <%aE>', self.rev.sha1])
self.recipients = self.environment.get_revision_recipients(self)
self.cc_recipients = ''
if self.environment.get_scancommitforcc():
self.cc_recipients = ', '.join(to.strip() for to in self._cc_recipients())
if self.cc_recipients:
self.environment.log_msg(
'Add %s to CC for %s\n' % (self.cc_recipients, self.rev.sha1))
def _cc_recipients(self):
cc_recipients = []
message = read_git_output(['log', '--no-walk', '--format=%b', self.rev.sha1])
lines = message.strip().split('\n')
for line in lines:
m = re.match(self.CC_RE, line)
if m:
cc_recipients.append(m.group('to'))
return cc_recipients
def _compute_values(self):
values = Change._compute_values(self)
oneline = read_git_output(
['log', '--format=%s', '--no-walk', self.rev.sha1]
)
values['rev'] = self.rev.sha1
values['rev_short'] = self.rev.short
values['change_type'] = self.change_type
values['refname'] = self.refname
values['short_refname'] = self.reference_change.short_refname
values['refname_type'] = self.reference_change.refname_type
values['reply_to_msgid'] = self.reference_change.msgid
values['num'] = self.num
values['tot'] = self.tot
values['recipients'] = self.recipients
if self.cc_recipients:
values['cc_recipients'] = self.cc_recipients
values['oneline'] = oneline
values['author'] = self.author
reply_to = self.environment.get_reply_to_commit(self)
if reply_to:
values['reply_to'] = reply_to
return values
def generate_email_header(self, **extra_values):
for line in self.expand_header_lines(
REVISION_HEADER_TEMPLATE, **extra_values
):
yield line
def generate_email_intro(self):
for line in self.expand_lines(REVISION_INTRO_TEMPLATE):
yield line
def generate_email_body(self, push):
"""Show this revision."""
return read_git_lines(
['log'] + self.environment.commitlogopts + ['-1', self.rev.sha1],
keepends=True,
)
def generate_email_footer(self):
return self.expand_lines(REVISION_FOOTER_TEMPLATE)
class ReferenceChange(Change):
"""A Change to a Git reference.
An abstract class representing a create, update, or delete of a
Git reference. Derived classes handle specific types of reference
(e.g., tags vs. branches). These classes generate the main
reference change email summarizing the reference change and
whether it caused any commits to be added or removed.
ReferenceChange objects are usually created using the static
create() method, which has the logic to decide which derived class
to instantiate."""
REF_RE = re.compile(r'^refs\/(?P<area>[^\/]+)\/(?P<shortname>.*)$')
@staticmethod
def create(environment, oldrev, newrev, refname):
"""Return a ReferenceChange object representing the change.
Return an object that represents the type of change that is being
made. oldrev and newrev should be SHA1s or ZEROS."""
old = GitObject(oldrev)
new = GitObject(newrev)
rev = new or old
# The revision type tells us what type the commit is, combined with
# the location of the ref we can decide between
# - working branch
# - tracking branch
# - unannotated tag
# - annotated tag
m = ReferenceChange.REF_RE.match(refname)
if m:
area = m.group('area')
short_refname = m.group('shortname')
else:
area = ''
short_refname = refname
if rev.type == 'tag':
# Annotated tag:
klass = AnnotatedTagChange
elif rev.type == 'commit':
if area == 'tags':
# Non-annotated tag:
klass = NonAnnotatedTagChange
elif area == 'heads':
# Branch:
klass = BranchChange
elif area == 'remotes':
# Tracking branch:
environment.log_warning(
'*** Push-update of tracking branch %r\n'
'*** - incomplete email generated.\n'
% (refname,)
)
klass = OtherReferenceChange
else:
# Some other reference namespace:
environment.log_warning(
'*** Push-update of strange reference %r\n'
'*** - incomplete email generated.\n'
% (refname,)
)
klass = OtherReferenceChange
else:
# Anything else (is there anything else?)
environment.log_warning(
'*** Unknown type of update to %r (%s)\n'
'*** - incomplete email generated.\n'
% (refname, rev.type,)
)
klass = OtherReferenceChange
return klass(
environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
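# Classification summary (mirrors the branches above):
#   rev.type == 'tag'                        -> AnnotatedTagChange
#   rev.type == 'commit', area == 'tags'     -> NonAnnotatedTagChange
#   rev.type == 'commit', area == 'heads'    -> BranchChange
#   rev.type == 'commit', area == 'remotes'  -> OtherReferenceChange (+ warning)
#   anything else                            -> OtherReferenceChange (+ warning)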
def __init__(self, environment, refname, short_refname, old, new, rev):
Change.__init__(self, environment)
self.change_type = {
(False, True): 'create',
(True, True): 'update',
(True, False): 'delete',
}[bool(old), bool(new)]
self.refname = refname
self.short_refname = short_refname
self.old = old
self.new = new
self.rev = rev
self.msgid = make_msgid()
self.diffopts = environment.diffopts
self.graphopts = environment.graphopts
self.logopts = environment.logopts
self.commitlogopts = environment.commitlogopts
self.showgraph = environment.refchange_showgraph
self.showlog = environment.refchange_showlog
self.header_template = REFCHANGE_HEADER_TEMPLATE
self.intro_template = REFCHANGE_INTRO_TEMPLATE
self.footer_template = FOOTER_TEMPLATE
def _compute_values(self):
values = Change._compute_values(self)
values['change_type'] = self.change_type
values['refname_type'] = self.refname_type
values['refname'] = self.refname
values['short_refname'] = self.short_refname
values['msgid'] = self.msgid
values['recipients'] = self.recipients
values['oldrev'] = str(self.old)
values['oldrev_short'] = self.old.short
values['newrev'] = str(self.new)
values['newrev_short'] = self.new.short
if self.old:
values['oldrev_type'] = self.old.type
if self.new:
values['newrev_type'] = self.new.type
reply_to = self.environment.get_reply_to_refchange(self)
if reply_to:
values['reply_to'] = reply_to
return values
def send_single_combined_email(self, known_added_sha1s):
"""Determine if a combined refchange/revision email should be sent
If there is only a single new (non-merge) commit added by a
change, it is useful to combine the ReferenceChange and
Revision emails into one. In such a case, return the single
revision; otherwise, return None.
This method is overridden in BranchChange."""
return None
def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
"""Generate an email describing this change AND specified revision.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
future (e.g., passing extra values to generate_email_intro()).
This method is overridden in BranchChange."""
raise NotImplementedError
def get_subject(self):
template = {
'create': REF_CREATED_SUBJECT_TEMPLATE,
'update': REF_UPDATED_SUBJECT_TEMPLATE,
'delete': REF_DELETED_SUBJECT_TEMPLATE,
}[self.change_type]
return self.expand(template)
def generate_email_header(self, **extra_values):
if 'subject' not in extra_values:
extra_values['subject'] = self.get_subject()
for line in self.expand_header_lines(
self.header_template, **extra_values
):
yield line
def generate_email_intro(self):
for line in self.expand_lines(self.intro_template):
yield line
def generate_email_body(self, push):
"""Call the appropriate body-generation routine.
Call one of generate_create_summary() /
generate_update_summary() / generate_delete_summary()."""
change_summary = {
'create': self.generate_create_summary,
'delete': self.generate_delete_summary,
'update': self.generate_update_summary,
}[self.change_type](push)
for line in change_summary:
yield line
for line in self.generate_revision_change_summary(push):
yield line
def generate_email_footer(self):
return self.expand_lines(self.footer_template)
def generate_revision_change_graph(self, push):
if self.showgraph:
args = ['--graph'] + self.graphopts
for newold in ('new', 'old'):
has_newold = False
spec = push.get_commits_spec(newold, self)
for line in git_log(spec, args=args, keepends=True):
if not has_newold:
has_newold = True
yield '\n'
yield 'Graph of %s commits:\n\n' % (
{'new': 'new', 'old': 'discarded'}[newold],)
yield ' ' + line
if has_newold:
yield '\n'
def generate_revision_change_log(self, new_commits_list):
if self.showlog:
yield '\n'
yield 'Detailed log of new commits:\n\n'
for line in read_git_lines(
['log', '--no-walk']
+ self.logopts
+ new_commits_list
+ ['--'],
keepends=True,
):
yield line
def generate_new_revision_summary(self, tot, new_commits_list, push):
for line in self.expand_lines(NEW_REVISIONS_TEMPLATE, tot=tot):
yield line
for line in self.generate_revision_change_graph(push):
yield line
for line in self.generate_revision_change_log(new_commits_list):
yield line
def generate_revision_change_summary(self, push):
"""Generate a summary of the revisions added/removed by this change."""
if self.new.commit_sha1 and not self.old.commit_sha1:
# A new reference was created. List the new revisions
# brought by the new reference (i.e., those revisions that
# were not in the repository before this reference
# change).
sha1s = list(push.get_new_commits(self))
sha1s.reverse()
tot = len(sha1s)
new_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if new_revisions:
yield self.expand('This %(refname_type)s includes the following new commits:\n')
yield '\n'
for r in new_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action='new', text=subject,
)
yield '\n'
for line in self.generate_new_revision_summary(
tot, [r.rev.sha1 for r in new_revisions], push):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
elif self.new.commit_sha1 and self.old.commit_sha1:
# A reference was changed to point at a different commit.
# List the revisions that were removed and/or added *from
# that reference* by this reference change, along with a
# diff between the trees for its old and new values.
# List of the revisions that were added to the branch by
# this update. Note this list can include revisions that
# have already had notification emails; we want such
# revisions in the summary even though we will not send
# new notification emails for them.
adds = list(generate_summaries(
'--topo-order', '--reverse', '%s..%s'
% (self.old.commit_sha1, self.new.commit_sha1,)
))
# List of the revisions that were removed from the branch
# by this update. This will be empty except for
# non-fast-forward updates.
discards = list(generate_summaries(
'%s..%s' % (self.new.commit_sha1, self.old.commit_sha1,)
))
if adds:
new_commits_list = push.get_new_commits(self)
else:
new_commits_list = []
new_commits = CommitSet(new_commits_list)
if discards:
discarded_commits = CommitSet(push.get_discarded_commits(self))
else:
discarded_commits = CommitSet([])
if discards and adds:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = 'discards'
else:
action = 'omits'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = 'new'
else:
action = 'adds'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
for line in self.expand_lines(NON_FF_TEMPLATE):
yield line
elif discards:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = 'discards'
else:
action = 'omits'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
for line in self.expand_lines(REWIND_ONLY_TEMPLATE):
yield line
elif adds:
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='from',
rev_short=sha1, text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = 'new'
else:
action = 'adds'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
if new_commits:
for line in self.generate_new_revision_summary(
len(new_commits), new_commits_list, push):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
for line in self.generate_revision_change_graph(push):
yield line
# The diffstat is shown from the old revision to the new
# revision. This is to show the truth of what happened in
# this change. There's no point showing the stat from the
# base to the new revision because the base is effectively a
# random revision at this point - the user will be interested
# in what this revision changed - including the undoing of
# previous revisions in the case of non-fast-forward updates.
yield '\n'
yield 'Summary of changes:\n'
for line in read_git_lines(
['diff-tree']
+ self.diffopts
+ ['%s..%s' % (self.old.commit_sha1, self.new.commit_sha1,)],
keepends=True,
):
yield line
elif self.old.commit_sha1 and not self.new.commit_sha1:
# A reference was deleted. List the revisions that were
# removed from the repository by this reference change.
sha1s = list(push.get_discarded_commits(self))
tot = len(sha1s)
discarded_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if discarded_revisions:
for line in self.expand_lines(DISCARDED_REVISIONS_TEMPLATE):
yield line
yield '\n'
for r in discarded_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action='discards', text=subject,
)
for line in self.generate_revision_change_graph(push):
yield line
else:
for line in self.expand_lines(NO_DISCARDED_REVISIONS_TEMPLATE):
yield line
elif not self.old.commit_sha1 and not self.new.commit_sha1:
for line in self.expand_lines(NON_COMMIT_UPDATE_TEMPLATE):
yield line
def generate_create_summary(self, push):
"""Called for the creation of a reference."""
# This is a new reference and so oldrev is not valid
(sha1, subject) = self.new.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='at',
rev_short=sha1, text=subject,
)
yield '\n'
def generate_update_summary(self, push):
"""Called for the change of a pre-existing branch."""
return iter([])
def generate_delete_summary(self, push):
"""Called for the deletion of any type of reference."""
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='was',
rev_short=sha1, text=subject,
)
yield '\n'
class BranchChange(ReferenceChange):
refname_type = 'branch'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
self._single_revision = None
def send_single_combined_email(self, known_added_sha1s):
if not self.environment.combine_when_single_commit:
return None
# In the sadly-all-too-frequent use case of people pushing only
# one of their commits at a time to a repository, users feel
# the reference change summary emails are noise rather than
# important signal. This is because, in this particular
# use case, there is a reference change summary email for each
# new commit, and all these summaries do is point out that
# there is one new commit (which can readily be inferred by
# the existence of the individual revision email that is also
# sent). In such cases, our users prefer there to be a combined
# reference change summary/new revision email.
#
# So, if the change is an update and it doesn't discard any
# commits, and it adds exactly one non-merge commit (gerrit
# forces a workflow where every commit is individually merged
# and the git-multimail hook fired off for just this one
# change), then we send a combined refchange/revision email.
try:
# If this change is a reference update that doesn't discard
# any commits...
if self.change_type != 'update':
return None
if read_git_lines(
['merge-base', self.old.sha1, self.new.sha1]
) != [self.old.sha1]:
return None
# Check if this update introduced exactly one non-merge
# commit:
def split_line(line):
"""Split line into (sha1, [parent,...])."""
words = line.split()
return (words[0], words[1:])
# Get the new commits introduced by the push as a list of
# (sha1, [parent,...])
new_commits = [
split_line(line)
for line in read_git_lines(
[
'log', '-3', '--format=%H %P',
'%s..%s' % (self.old.sha1, self.new.sha1),
]
)
]
if not new_commits:
return None
# If the newest commit is a merge, save it for a later check
# but otherwise ignore it
merge = None
tot = len(new_commits)
if len(new_commits[0][1]) > 1:
merge = new_commits[0][0]
del new_commits[0]
# Our primary check: we can't combine if more than one commit
# is introduced. We also currently only combine if the new
# commit is a non-merge commit, though it may make sense to
# combine if it is a merge as well.
if not (
len(new_commits) == 1
and len(new_commits[0][1]) == 1
and new_commits[0][0] in known_added_sha1s
):
return None
# We do not want to combine revision and refchange emails if
# those go to separate locations.
rev = Revision(self, GitObject(new_commits[0][0]), 1, tot)
if rev.recipients != self.recipients:
return None
# We ignored the newest commit if it was just a merge of the one
# commit being introduced. But we don't want to ignore that
# merge commit if it involved conflict resolutions. Check that.
if merge and merge != read_git_output(['diff-tree', '--cc', merge]):
return None
# We can combine the refchange and one new revision emails
# into one. Return the Revision that a combined email should
# be sent about.
return rev
except CommandError:
# Cannot determine number of commits in old..new or new..old;
# don't combine reference/revision emails:
return None
def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
values = revision.get_values()
if extra_header_values:
values.update(extra_header_values)
if 'subject' not in extra_header_values:
values['subject'] = self.expand(COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE, **values)
self._single_revision = revision
self.header_template = COMBINED_HEADER_TEMPLATE
self.intro_template = COMBINED_INTRO_TEMPLATE
self.footer_template = COMBINED_FOOTER_TEMPLATE
for line in self.generate_email(push, body_filter, values):
yield line
def generate_email_body(self, push):
'''Call the appropriate body generation routine.
If this is a combined refchange/revision email, the special logic
for handling this combined email comes from this function. For
other cases, we just use the normal handling.'''
# If self._single_revision isn't set; don't override
if not self._single_revision:
for line in super(BranchChange, self).generate_email_body(push):
yield line
return
# This is a combined refchange/revision email; we first provide
# some info from the refchange portion, and then call the revision
# generate_email_body function to handle the revision portion.
adds = list(generate_summaries(
'--topo-order', '--reverse', '%s..%s'
% (self.old.commit_sha1, self.new.commit_sha1,)
))
yield self.expand("The following commit(s) were added to %(refname)s by this push:\n")
for (sha1, subject) in adds:
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='new',
rev_short=sha1, text=subject,
)
yield self._single_revision.rev.short + " is described below\n"
yield '\n'
for line in self._single_revision.generate_email_body(push):
yield line
class AnnotatedTagChange(ReferenceChange):
refname_type = 'annotated tag'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_announce_recipients(self)
self.show_shortlog = environment.announce_show_shortlog
ANNOTATED_TAG_FORMAT = (
'%(*objectname)\n'
'%(*objecttype)\n'
'%(taggername)\n'
'%(taggerdate)'
)
def describe_tag(self, push):
"""Describe the new value of an annotated tag."""
# Use git for-each-ref to pull out the individual fields from
# the tag
[tagobject, tagtype, tagger, tagged] = read_git_lines(
['for-each-ref', '--format=%s' % (self.ANNOTATED_TAG_FORMAT,), self.refname],
)
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='tagging',
rev_short=tagobject, text='(%s)' % (tagtype,),
)
if tagtype == 'commit':
# If the tagged object is a commit, then we assume this is a
# release, and so we calculate which tag this tag is
# replacing
try:
prevtag = read_git_output(['describe', '--abbrev=0', '%s^' % (self.new,)])
except CommandError:
prevtag = None
if prevtag:
yield ' replaces %s\n' % (prevtag,)
else:
prevtag = None
yield ' length %s bytes\n' % (read_git_output(['cat-file', '-s', tagobject]),)
yield ' tagged by %s\n' % (tagger,)
yield ' on %s\n' % (tagged,)
yield '\n'
# Show the content of the tag message; this might contain a
# change log or release notes so is worth displaying.
yield LOGBEGIN
contents = list(read_git_lines(['cat-file', 'tag', self.new.sha1], keepends=True))
contents = contents[contents.index('\n') + 1:]
if contents and contents[-1][-1:] != '\n':
contents.append('\n')
for line in contents:
yield line
if self.show_shortlog and tagtype == 'commit':
# Only commit tags make sense to have rev-list operations
# performed on them
yield '\n'
if prevtag:
# Show changes since the previous release
revlist = read_git_output(
['rev-list', '--pretty=short', '%s..%s' % (prevtag, self.new,)],
keepends=True,
)
else:
# No previous tag, show all the changes since time
# began
revlist = read_git_output(
['rev-list', '--pretty=short', '%s' % (self.new,)],
keepends=True,
)
for line in read_git_lines(['shortlog'], input=revlist, keepends=True):
yield line
yield LOGEND
yield '\n'
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_update_summary(self, push):
"""Called for the update of an annotated tag.
This is probably a rare event and may not even be allowed."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
yield self.expand(' tag was %(oldrev_short)s\n')
yield '\n'
class NonAnnotatedTagChange(ReferenceChange):
refname_type = 'tag'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
def generate_update_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
for line in ReferenceChange.generate_delete_summary(self, push):
yield line
class OtherReferenceChange(ReferenceChange):
refname_type = 'reference'
def __init__(self, environment, refname, short_refname, old, new, rev):
# We use the full refname as short_refname, because otherwise
# the full name of the reference would not be obvious from the
# text of the email.
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
class Mailer(object):
"""An object that can send emails."""
def send(self, lines, to_addrs):
"""Send an email consisting of lines.
lines must be an iterable over the lines constituting the
header and body of the email. to_addrs is a list of recipient
addresses (can be needed even if lines already contains a
"To:" field). It can be either a string (comma-separated list
of email addresses) or a Python list of individual email
addresses.
"""
raise NotImplementedError()
class SendMailer(Mailer):
"""Send emails using 'sendmail -oi -t'."""
SENDMAIL_CANDIDATES = [
'/usr/sbin/sendmail',
'/usr/lib/sendmail',
]
@staticmethod
def find_sendmail():
for path in SendMailer.SENDMAIL_CANDIDATES:
if os.access(path, os.X_OK):
return path
else:
raise ConfigurationException(
'No sendmail executable found. '
'Try setting multimailhook.sendmailCommand.'
)
def __init__(self, command=None, envelopesender=None):
"""Construct a SendMailer instance.
command should be the command and arguments used to invoke
sendmail, as a list of strings. If an envelopesender is
provided, it will also be passed to the command, via '-f
envelopesender'."""
if command:
self.command = command[:]
else:
self.command = [self.find_sendmail(), '-oi', '-t']
if envelopesender:
self.command.extend(['-f', envelopesender])
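# For example, SendMailer(envelopesender='git@example.com').command typically
# becomes ['/usr/sbin/sendmail', '-oi', '-t', '-f', 'git@example.com'] (the
# sendmail path depends on which candidate exists; the address is illustrative).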
def send(self, lines, to_addrs):
try:
p = subprocess.Popen(self.command, stdin=subprocess.PIPE)
except OSError, e:
sys.stderr.write(
'*** Cannot execute command: %s\n' % ' '.join(self.command)
+ '*** %s\n' % str(e)
+ '*** Try setting multimailhook.mailer to "smtp"\n'
'*** to send emails without using the sendmail command.\n'
)
sys.exit(1)
try:
p.stdin.writelines(lines)
except Exception, e:
sys.stderr.write(
'*** Error while generating commit email\n'
'*** - mail sending aborted.\n'
)
try:
# subprocess.terminate() is not available in Python 2.4
p.terminate()
except AttributeError:
pass
raise e
else:
p.stdin.close()
retcode = p.wait()
if retcode:
raise CommandError(self.command, retcode)
class SMTPMailer(Mailer):
"""Send emails using Python's smtplib."""
def __init__(self, envelopesender, smtpserver,
smtpservertimeout=10.0, smtpserverdebuglevel=0,
smtpencryption='none',
smtpuser='', smtppass='',
):
if not envelopesender:
sys.stderr.write(
'fatal: git_multimail: cannot use SMTPMailer without a sender address.\n'
'please set either multimailhook.envelopeSender or user.email\n'
)
sys.exit(1)
if smtpencryption == 'ssl' and not (smtpuser and smtppass):
raise ConfigurationException(
'Cannot use SMTPMailer with security option ssl '
'without options username and password.'
)
self.envelopesender = envelopesender
self.smtpserver = smtpserver
self.smtpservertimeout = smtpservertimeout
self.smtpserverdebuglevel = smtpserverdebuglevel
self.security = smtpencryption
self.username = smtpuser
self.password = smtppass
try:
def call(klass, server, timeout):
try:
return klass(server, timeout=timeout)
except TypeError:
# Old Python versions do not have timeout= argument.
return klass(server)
if self.security == 'none':
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'ssl':
self.smtp = call(smtplib.SMTP_SSL, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'tls':
if ':' not in self.smtpserver:
self.smtpserver += ':587' # default port for TLS
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
self.smtp.ehlo()
self.smtp.starttls()
self.smtp.ehlo()
else:
sys.stdout.write('*** Error: Control reached an invalid option. ***')
sys.exit(1)
if self.smtpserverdebuglevel > 0:
sys.stdout.write(
"*** Setting debug on for SMTP server connection (%s) ***\n"
% self.smtpserverdebuglevel)
self.smtp.set_debuglevel(self.smtpserverdebuglevel)
except Exception, e:
sys.stderr.write(
'*** Error establishing SMTP connection to %s ***\n'
% self.smtpserver)
sys.stderr.write('*** %s\n' % str(e))
sys.exit(1)
def __del__(self):
if hasattr(self, 'smtp'):
self.smtp.quit()
def send(self, lines, to_addrs):
try:
if self.username or self.password:
sys.stderr.write("*** Authenticating as %s ***\n" % self.username)
self.smtp.login(self.username, self.password)
msg = ''.join(lines)
# turn comma-separated list into Python list if needed.
if isinstance(to_addrs, basestring):
to_addrs = [email for (name, email) in getaddresses([to_addrs])]
self.smtp.sendmail(self.envelopesender, to_addrs, msg)
except Exception, e:
sys.stderr.write('*** Error sending email ***\n')
sys.stderr.write('*** %s\n' % str(e))
self.smtp.quit()
sys.exit(1)
class OutputMailer(Mailer):
"""Write emails to an output stream, bracketed by lines of '=' characters.
This is intended for debugging purposes."""
SEPARATOR = '=' * 75 + '\n'
def __init__(self, f):
self.f = f
def send(self, lines, to_addrs):
self.f.write(self.SEPARATOR)
self.f.writelines(lines)
self.f.write(self.SEPARATOR)
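# Debug sketch: with --stdout the script wires this mailer to sys.stdout, so
# each message is printed between '=' separator rules (helper is hypothetical):
def _output_mailer_demo(lines):
    OutputMailer(sys.stdout).send(lines, to_addrs=None)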
def get_git_dir():
"""Determine GIT_DIR.
Determine GIT_DIR either from the GIT_DIR environment variable or
from the working directory, using Git's usual rules."""
try:
return read_git_output(['rev-parse', '--git-dir'])
except CommandError:
sys.stderr.write('fatal: git_multimail: not in a git directory\n')
sys.exit(1)
class Environment(object):
"""Describes the environment in which the push is occurring.
An Environment object encapsulates information about the local
environment. For example, it knows how to determine:
* the name of the repository to which the push occurred
* what user did the push
* what users want to be informed about various types of changes.
An Environment object is expected to have the following methods:
get_repo_shortname()
Return a short name for the repository, for display
purposes.
get_repo_path()
Return the absolute path to the Git repository.
get_emailprefix()
Return a string that will be prefixed to every email's
subject.
get_pusher()
Return the username of the person who pushed the changes.
This value is used in the email body to indicate who
pushed the change.
get_pusher_email() (may return None)
Return the email address of the person who pushed the
changes. The value should be a single RFC 2822 email
address as a string; e.g., "Joe User <user@example.com>"
if available, otherwise "user@example.com". If set, the
value is used as the Reply-To address for refchange
emails. If it is impossible to determine the pusher's
email, this attribute should be set to None (in which case
no Reply-To header will be output).
get_sender()
Return the address to be used as the 'From' email address
in the email envelope.
get_fromaddr()
Return the 'From' email address used in the email 'From:'
headers. (May be a full RFC 2822 email address like 'Joe
User <user@example.com>'.)
get_administrator()
Return the name and/or email of the repository
administrator. This value is used in the footer as the
person to whom requests to be removed from the
notification list should be sent. Ideally, it should
include a valid email address.
get_reply_to_refchange()
get_reply_to_commit()
Return the address to use in the email "Reply-To" header,
as a string. These can be an RFC 2822 email address, or
None to omit the "Reply-To" header.
get_reply_to_refchange() is used for refchange emails;
get_reply_to_commit() is used for individual commit
emails.
They should also define the following attributes:
announce_show_shortlog (bool)
True iff announce emails should include a shortlog.
refchange_showgraph (bool)
True iff refchanges emails should include a detailed graph.
refchange_showlog (bool)
True iff refchanges emails should include a detailed log.
diffopts (list of strings)
The options that should be passed to 'git diff' for the
summary email. The value should be a list of strings
representing words to be passed to the command.
graphopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log --graph' when generating the detailed graph for
a set of commits (see refchange_showgraph)
logopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log' when generating the detailed log for a set of
commits (see refchange_showlog)
commitlogopts (list of strings)
The options that should be passed to 'git log' for each
commit mail. The value should be a list of strings
representing words to be passed to the command.
quiet (bool)
On success do not write to stderr
stdout (bool)
Write email to stdout rather than emailing. Useful for debugging
combine_when_single_commit (bool)
True if a combined email should be produced when a single
new commit is pushed to a branch, False otherwise.
"""
REPO_NAME_RE = re.compile(r'^(?P<name>.+?)(?:\.git)$')
def __init__(self, osenv=None):
self.osenv = osenv or os.environ
self.announce_show_shortlog = False
self.maxcommitemails = 500
self.diffopts = ['--stat', '--summary', '--find-copies-harder']
self.graphopts = ['--oneline', '--decorate']
self.logopts = []
self.refchange_showgraph = False
self.refchange_showlog = False
self.commitlogopts = ['-C', '--stat', '-p', '--cc']
self.quiet = False
self.stdout = False
self.combine_when_single_commit = True
self.COMPUTED_KEYS = [
'administrator',
'charset',
'emailprefix',
'fromaddr',
'pusher',
'pusher_email',
'repo_path',
'repo_shortname',
'sender',
]
self._values = None
def get_repo_shortname(self):
"""Use the last part of the repo path, with ".git" stripped off if present."""
basename = os.path.basename(os.path.abspath(self.get_repo_path()))
m = self.REPO_NAME_RE.match(basename)
if m:
return m.group('name')
else:
return basename
def get_pusher(self):
raise NotImplementedError()
def get_pusher_email(self):
return None
def get_fromaddr(self):
config = Config('user')
fromname = config.get('name', default='')
fromemail = config.get('email', default='')
if fromemail:
return formataddr([fromname, fromemail])
return self.get_sender()
def get_administrator(self):
return 'the administrator of this repository'
def get_emailprefix(self):
return ''
def get_repo_path(self):
if read_git_output(['rev-parse', '--is-bare-repository']) == 'true':
path = get_git_dir()
else:
path = read_git_output(['rev-parse', '--show-toplevel'])
return os.path.abspath(path)
def get_charset(self):
return CHARSET
def get_values(self):
"""Return a dictionary {keyword: expansion} for this Environment.
This method is called by Change._compute_values(). The keys
in the returned dictionary are available to be used in any of
the templates. The dictionary is created by calling
self.get_NAME() for each of the attributes named in
COMPUTED_KEYS and recording those that do not return None.
The return value is always a new dictionary."""
if self._values is None:
values = {}
for key in self.COMPUTED_KEYS:
value = getattr(self, 'get_%s' % (key,))()
if value is not None:
values[key] = value
self._values = values
return self._values.copy()
def get_refchange_recipients(self, refchange):
"""Return the recipients for notifications about refchange.
Return the list of email addresses to which notifications
about the specified ReferenceChange should be sent."""
raise NotImplementedError()
def get_announce_recipients(self, annotated_tag_change):
"""Return the recipients for notifications about annotated_tag_change.
Return the list of email addresses to which notifications
about the specified AnnotatedTagChange should be sent."""
raise NotImplementedError()
def get_reply_to_refchange(self, refchange):
return self.get_pusher_email()
def get_revision_recipients(self, revision):
"""Return the recipients for messages about revision.
Return the list of email addresses to which notifications
about the specified Revision should be sent. This method
could be overridden, for example, to take into account the
contents of the revision when deciding whom to notify about
it. For example, there could be a scheme for users to express
interest in particular files or subdirectories, and only
receive notification emails for revisions that affecting those
files."""
raise NotImplementedError()
def get_reply_to_commit(self, revision):
return revision.author
def filter_body(self, lines):
"""Filter the lines intended for an email body.
lines is an iterable over the lines that would go into the
email body. Filter it (e.g., limit the number of lines, the
line length, character set, etc.), returning another iterable.
See FilterLinesEnvironmentMixin and MaxlinesEnvironmentMixin
for classes implementing this functionality."""
return lines
def log_msg(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
def log_warning(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
def log_error(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
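# Hypothetical minimal subclass, shown only to illustrate the contract
# described in the Environment docstring: get_pusher() and the three
# recipient methods are the abstract pieces a concrete Environment (or its
# mixins) must provide; everything else has a usable default.
class _ExampleEnvironment(Environment):
    def get_pusher(self):
        return 'example-user'
    def get_refchange_recipients(self, refchange):
        return 'commits@example.com'
    def get_announce_recipients(self, annotated_tag_change):
        return 'commits@example.com'
    def get_revision_recipients(self, revision):
        return 'commits@example.com'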
class ConfigEnvironmentMixin(Environment):
"""A mixin that sets self.config to its constructor's config argument.
This class's constructor consumes the "config" argument.
Mixins that need to inspect the config should inherit from this
class (1) to make sure that "config" is still in the constructor
    arguments when its own constructor runs and/or (2) to be sure that
self.config is set after construction."""
def __init__(self, config, **kw):
super(ConfigEnvironmentMixin, self).__init__(**kw)
self.config = config
class ConfigOptionsEnvironmentMixin(ConfigEnvironmentMixin):
"""An Environment that reads most of its information from "git config"."""
def __init__(self, config, **kw):
super(ConfigOptionsEnvironmentMixin, self).__init__(
config=config, **kw
)
for var, cfg in (
('announce_show_shortlog', 'announceshortlog'),
('refchange_showgraph', 'refchangeShowGraph'),
('refchange_showlog', 'refchangeshowlog'),
('quiet', 'quiet'),
('stdout', 'stdout'),
):
val = config.get_bool(cfg)
if val is not None:
setattr(self, var, val)
maxcommitemails = config.get('maxcommitemails')
if maxcommitemails is not None:
try:
self.maxcommitemails = int(maxcommitemails)
except ValueError:
self.log_warning(
'*** Malformed value for multimailhook.maxCommitEmails: %s\n' % maxcommitemails
+ '*** Expected a number. Ignoring.\n'
)
diffopts = config.get('diffopts')
if diffopts is not None:
self.diffopts = shlex.split(diffopts)
graphopts = config.get('graphOpts')
if graphopts is not None:
self.graphopts = shlex.split(graphopts)
logopts = config.get('logopts')
if logopts is not None:
self.logopts = shlex.split(logopts)
commitlogopts = config.get('commitlogopts')
if commitlogopts is not None:
self.commitlogopts = shlex.split(commitlogopts)
reply_to = config.get('replyTo')
self.__reply_to_refchange = config.get('replyToRefchange', default=reply_to)
if (
self.__reply_to_refchange is not None
and self.__reply_to_refchange.lower() == 'author'
):
raise ConfigurationException(
'"author" is not an allowed setting for replyToRefchange'
)
self.__reply_to_commit = config.get('replyToCommit', default=reply_to)
combine = config.get_bool('combineWhenSingleCommit')
if combine is not None:
self.combine_when_single_commit = combine
def get_administrator(self):
return (
self.config.get('administrator')
or self.get_sender()
or super(ConfigOptionsEnvironmentMixin, self).get_administrator()
)
def get_repo_shortname(self):
return (
self.config.get('reponame')
or super(ConfigOptionsEnvironmentMixin, self).get_repo_shortname()
)
def get_emailprefix(self):
emailprefix = self.config.get('emailprefix')
if emailprefix is not None:
emailprefix = emailprefix.strip()
if emailprefix:
return emailprefix + ' '
else:
return ''
else:
return '[%s] ' % (self.get_repo_shortname(),)
def get_sender(self):
return self.config.get('envelopesender')
def get_fromaddr(self):
fromaddr = self.config.get('from')
if fromaddr:
return fromaddr
return super(ConfigOptionsEnvironmentMixin, self).get_fromaddr()
def get_reply_to_refchange(self, refchange):
if self.__reply_to_refchange is None:
return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_refchange(refchange)
elif self.__reply_to_refchange.lower() == 'pusher':
return self.get_pusher_email()
elif self.__reply_to_refchange.lower() == 'none':
return None
else:
return self.__reply_to_refchange
def get_reply_to_commit(self, revision):
if self.__reply_to_commit is None:
return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_commit(revision)
elif self.__reply_to_commit.lower() == 'author':
return revision.author
elif self.__reply_to_commit.lower() == 'pusher':
return self.get_pusher_email()
elif self.__reply_to_commit.lower() == 'none':
return None
else:
return self.__reply_to_commit
def get_scancommitforcc(self):
return self.config.get('scancommitforcc')
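# Example (illustrative) of the git config keys the mixin above reads, all
# in the multimailhook section:
#
#   [multimailhook]
#       announceShortlog = true
#       refchangeShowGraph = true
#       maxCommitEmails = 100
#       replyTo = pusher
#       combineWhenSingleCommit = false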
class FilterLinesEnvironmentMixin(Environment):
"""Handle encoding and maximum line length of body lines.
emailmaxlinelength (int or None)
The maximum length of any single line in the email body.
Longer lines are truncated at that length with ' [...]'
appended.
strict_utf8 (bool)
If this field is set to True, then the email body text is
expected to be UTF-8. Any invalid characters are
converted to U+FFFD, the Unicode replacement character
(encoded as UTF-8, of course).
"""
def __init__(self, strict_utf8=True, emailmaxlinelength=500, **kw):
super(FilterLinesEnvironmentMixin, self).__init__(**kw)
self.__strict_utf8 = strict_utf8
self.__emailmaxlinelength = emailmaxlinelength
def filter_body(self, lines):
lines = super(FilterLinesEnvironmentMixin, self).filter_body(lines)
if self.__strict_utf8:
lines = (line.decode(ENCODING, 'replace') for line in lines)
# Limit the line length in Unicode-space to avoid
# splitting characters:
if self.__emailmaxlinelength:
lines = limit_linelength(lines, self.__emailmaxlinelength)
lines = (line.encode(ENCODING, 'replace') for line in lines)
elif self.__emailmaxlinelength:
lines = limit_linelength(lines, self.__emailmaxlinelength)
return lines
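# Hedged sketch of what a limit_linelength helper could look like (the real
# helper is defined elsewhere in this script; the ' [...]' suffix is the
# truncation marker mentioned in the class docstring above):
def _example_limit_linelength(lines, max_length):
    for line in lines:
        if len(line) > max_length:
            line = line[:max_length] + ' [...]'
        yield line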
class ConfigFilterLinesEnvironmentMixin(
ConfigEnvironmentMixin,
FilterLinesEnvironmentMixin,
):
"""Handle encoding and maximum line length based on config."""
def __init__(self, config, **kw):
strict_utf8 = config.get_bool('emailstrictutf8', default=None)
if strict_utf8 is not None:
kw['strict_utf8'] = strict_utf8
emailmaxlinelength = config.get('emailmaxlinelength')
if emailmaxlinelength is not None:
kw['emailmaxlinelength'] = int(emailmaxlinelength)
super(ConfigFilterLinesEnvironmentMixin, self).__init__(
config=config, **kw
)
class MaxlinesEnvironmentMixin(Environment):
"""Limit the email body to a specified number of lines."""
def __init__(self, emailmaxlines, **kw):
super(MaxlinesEnvironmentMixin, self).__init__(**kw)
self.__emailmaxlines = emailmaxlines
def filter_body(self, lines):
lines = super(MaxlinesEnvironmentMixin, self).filter_body(lines)
if self.__emailmaxlines:
lines = limit_lines(lines, self.__emailmaxlines)
return lines
class ConfigMaxlinesEnvironmentMixin(
ConfigEnvironmentMixin,
MaxlinesEnvironmentMixin,
):
"""Limit the email body to the number of lines specified in config."""
def __init__(self, config, **kw):
emailmaxlines = int(config.get('emailmaxlines', default='0'))
super(ConfigMaxlinesEnvironmentMixin, self).__init__(
config=config,
emailmaxlines=emailmaxlines,
**kw
)
class FQDNEnvironmentMixin(Environment):
"""A mixin that sets the host's FQDN to its constructor argument."""
def __init__(self, fqdn, **kw):
super(FQDNEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ['fqdn']
self.__fqdn = fqdn
def get_fqdn(self):
"""Return the fully-qualified domain name for this host.
Return None if it is unavailable or unwanted."""
return self.__fqdn
class ConfigFQDNEnvironmentMixin(
ConfigEnvironmentMixin,
FQDNEnvironmentMixin,
):
"""Read the FQDN from the config."""
def __init__(self, config, **kw):
fqdn = config.get('fqdn')
super(ConfigFQDNEnvironmentMixin, self).__init__(
config=config,
fqdn=fqdn,
**kw
)
class ComputeFQDNEnvironmentMixin(FQDNEnvironmentMixin):
"""Get the FQDN by calling socket.getfqdn()."""
def __init__(self, **kw):
super(ComputeFQDNEnvironmentMixin, self).__init__(
fqdn=socket.getfqdn(),
**kw
)
class PusherDomainEnvironmentMixin(ConfigEnvironmentMixin):
"""Deduce pusher_email from pusher by appending an emaildomain."""
def __init__(self, **kw):
super(PusherDomainEnvironmentMixin, self).__init__(**kw)
self.__emaildomain = self.config.get('emaildomain')
def get_pusher_email(self):
if self.__emaildomain:
# Derive the pusher's full email address in the default way:
return '%s@%s' % (self.get_pusher(), self.__emaildomain)
else:
return super(PusherDomainEnvironmentMixin, self).get_pusher_email()
class StaticRecipientsEnvironmentMixin(Environment):
"""Set recipients statically based on constructor parameters."""
def __init__(
self,
refchange_recipients, announce_recipients, revision_recipients, scancommitforcc,
**kw
):
super(StaticRecipientsEnvironmentMixin, self).__init__(**kw)
# The recipients for various types of notification emails, as
# RFC 2822 email addresses separated by commas (or the empty
# string if no recipients are configured). Although there is
        # a mechanism to choose the recipient lists based on the
# actual *contents* of the change being reported, we only
# choose based on the *type* of the change. Therefore we can
# compute them once and for all:
if not (refchange_recipients
or announce_recipients
or revision_recipients
or scancommitforcc):
raise ConfigurationException('No email recipients configured!')
self.__refchange_recipients = refchange_recipients
self.__announce_recipients = announce_recipients
self.__revision_recipients = revision_recipients
def get_refchange_recipients(self, refchange):
return self.__refchange_recipients
def get_announce_recipients(self, annotated_tag_change):
return self.__announce_recipients
def get_revision_recipients(self, revision):
return self.__revision_recipients
class ConfigRecipientsEnvironmentMixin(
ConfigEnvironmentMixin,
StaticRecipientsEnvironmentMixin
):
"""Determine recipients statically based on config."""
def __init__(self, config, **kw):
super(ConfigRecipientsEnvironmentMixin, self).__init__(
config=config,
refchange_recipients=self._get_recipients(
config, 'refchangelist', 'mailinglist',
),
announce_recipients=self._get_recipients(
config, 'announcelist', 'refchangelist', 'mailinglist',
),
revision_recipients=self._get_recipients(
config, 'commitlist', 'mailinglist',
),
scancommitforcc=config.get('scancommitforcc'),
**kw
)
def _get_recipients(self, config, *names):
"""Return the recipients for a particular type of message.
Return the list of email addresses to which a particular type
of notification email should be sent, by looking at the config
value for "multimailhook.$name" for each of names. Use the
value from the first name that is configured. The return
value is a (possibly empty) string containing RFC 2822 email
        addresses separated by commas. If no configuration could be
        found, the empty string is returned."""
for name in names:
retval = config.get_recipients(name)
if retval is not None:
return retval
else:
return ''
class ProjectdescEnvironmentMixin(Environment):
"""Make a "projectdesc" value available for templates.
By default, it is set to the first line of $GIT_DIR/description
(if that file is present and appears to be set meaningfully)."""
def __init__(self, **kw):
super(ProjectdescEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ['projectdesc']
def get_projectdesc(self):
"""Return a one-line descripition of the project."""
git_dir = get_git_dir()
try:
projectdesc = open(os.path.join(git_dir, 'description')).readline().strip()
if projectdesc and not projectdesc.startswith('Unnamed repository'):
return projectdesc
except IOError:
pass
return 'UNNAMED PROJECT'
class GenericEnvironmentMixin(Environment):
def get_pusher(self):
return self.osenv.get('USER', self.osenv.get('USERNAME', 'unknown user'))
class GenericEnvironment(
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
ConfigRecipientsEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
GenericEnvironmentMixin,
Environment,
):
pass
class GitoliteEnvironmentMixin(Environment):
def get_repo_shortname(self):
# The gitolite environment variable $GL_REPO is a pretty good
# repo_shortname (though it's probably not as good as a value
# the user might have explicitly put in his config).
return (
self.osenv.get('GL_REPO', None)
or super(GitoliteEnvironmentMixin, self).get_repo_shortname()
)
def get_pusher(self):
return self.osenv.get('GL_USER', 'unknown user')
def get_fromaddr(self):
GL_USER = self.osenv.get('GL_USER')
if GL_USER is not None:
# Find the path to gitolite.conf. Note that gitolite v3
# did away with the GL_ADMINDIR and GL_CONF environment
# variables (they are now hard-coded).
GL_ADMINDIR = self.osenv.get(
'GL_ADMINDIR',
os.path.expanduser(os.path.join('~', '.gitolite')))
GL_CONF = self.osenv.get(
'GL_CONF',
os.path.join(GL_ADMINDIR, 'conf', 'gitolite.conf'))
if os.path.isfile(GL_CONF):
f = open(GL_CONF, 'rU')
try:
in_user_emails_section = False
re_template = r'^\s*#\s*{}\s*$'
re_begin, re_user, re_end = (
re.compile(re_template.format(x))
for x in (
r'BEGIN\s+USER\s+EMAILS',
re.escape(GL_USER) + r'\s+(.*)',
r'END\s+USER\s+EMAILS',
))
for l in f:
l = l.rstrip('\n')
if not in_user_emails_section:
if re_begin.match(l):
in_user_emails_section = True
continue
if re_end.match(l):
break
m = re_user.match(l)
if m:
return m.group(1)
finally:
f.close()
return super(GitoliteEnvironmentMixin, self).get_fromaddr()
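# The parser above expects a comment block in gitolite.conf of roughly this
# (illustrative) shape:
#
#   # BEGIN USER EMAILS
#   # alice Alice Example <alice@example.com>
#   # bob bob@example.com
#   # END USER EMAILS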
class IncrementalDateTime(object):
"""Simple wrapper to give incremental date/times.
Each call will result in a date/time a second later than the
previous call. This can be used to falsify email headers, to
increase the likelihood that email clients sort the emails
correctly."""
def __init__(self):
self.time = time.time()
def next(self):
formatted = formatdate(self.time, True)
self.time += 1
return formatted
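# Usage sketch: successive calls yield RFC 2822 dates one second apart, so
# threaded emails sort deterministically in most clients.
#   timestamps = IncrementalDateTime()
#   first = timestamps.next()   # e.g. 'Mon, 01 Jan 2018 00:00:00 -0000'
#   second = timestamps.next()  # same instant plus one second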
class GitoliteEnvironment(
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
ConfigRecipientsEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
GitoliteEnvironmentMixin,
Environment,
):
pass
class Push(object):
"""Represent an entire push (i.e., a group of ReferenceChanges).
It is easy to figure out what commits were added to a *branch* by
a Reference change:
git rev-list change.old..change.new
or removed from a *branch*:
git rev-list change.new..change.old
But it is not quite so trivial to determine which entirely new
commits were added to the *repository* by a push and which old
commits were discarded by a push. A big part of the job of this
class is to figure out these things, and to make sure that new
commits are only detailed once even if they were added to multiple
references.
The first step is to determine the "other" references--those
unaffected by the current push. They are computed by listing all
references then removing any affected by this push. The results
are stored in Push._other_ref_sha1s.
The commits contained in the repository before this push were
git rev-list other1 other2 other3 ... change1.old change2.old ...
Where "changeN.old" is the old value of one of the references
affected by this push.
The commits contained in the repository after this push are
git rev-list other1 other2 other3 ... change1.new change2.new ...
The commits added by this push are the difference between these
two sets, which can be written
git rev-list \
^other1 ^other2 ... \
^change1.old ^change2.old ... \
change1.new change2.new ...
The commits removed by this push can be computed by
git rev-list \
^other1 ^other2 ... \
^change1.new ^change2.new ... \
change1.old change2.old ...
The last point is that it is possible that other pushes are
occurring simultaneously to this one, so reference values can
change at any time. It is impossible to eliminate all race
conditions, but we reduce the window of time during which problems
can occur by translating reference names to SHA1s as soon as
possible and working with SHA1s thereafter (because SHA1s are
immutable)."""
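    # Concrete (illustrative) example of the set arithmetic above: if a push
    # updates only 'master' from OLD to NEW while 'develop' is untouched, the
    # commits added by the push are exactly
    #     git rev-list ^develop ^OLD NEW
    # and the commits discarded by it are
    #     git rev-list ^develop ^NEW OLD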
# A map {(changeclass, changetype): integer} specifying the order
# that reference changes will be processed if multiple reference
# changes are included in a single push. The order is significant
# mostly because new commit notifications are threaded together
# with the first reference change that includes the commit. The
# following order thus causes commits to be grouped with branch
# changes (as opposed to tag changes) if possible.
SORT_ORDER = dict(
(value, i) for (i, value) in enumerate([
(BranchChange, 'update'),
(BranchChange, 'create'),
(AnnotatedTagChange, 'update'),
(AnnotatedTagChange, 'create'),
(NonAnnotatedTagChange, 'update'),
(NonAnnotatedTagChange, 'create'),
(BranchChange, 'delete'),
(AnnotatedTagChange, 'delete'),
(NonAnnotatedTagChange, 'delete'),
(OtherReferenceChange, 'update'),
(OtherReferenceChange, 'create'),
(OtherReferenceChange, 'delete'),
])
)
def __init__(self, changes, ignore_other_refs=False):
self.changes = sorted(changes, key=self._sort_key)
self.__other_ref_sha1s = None
self.__cached_commits_spec = {}
if ignore_other_refs:
self.__other_ref_sha1s = set()
@classmethod
def _sort_key(klass, change):
return (klass.SORT_ORDER[change.__class__, change.change_type], change.refname,)
@property
def _other_ref_sha1s(self):
"""The GitObjects referred to by references unaffected by this push.
"""
if self.__other_ref_sha1s is None:
# The refnames being changed by this push:
updated_refs = set(
change.refname
for change in self.changes
)
# The SHA-1s of commits referred to by all references in this
# repository *except* updated_refs:
sha1s = set()
fmt = (
'%(objectname) %(objecttype) %(refname)\n'
'%(*objectname) %(*objecttype) %(refname)'
)
for line in read_git_lines(
['for-each-ref', '--format=%s' % (fmt,)]):
(sha1, type, name) = line.split(' ', 2)
if sha1 and type == 'commit' and name not in updated_refs:
sha1s.add(sha1)
self.__other_ref_sha1s = sha1s
return self.__other_ref_sha1s
def _get_commits_spec_incl(self, new_or_old, reference_change=None):
"""Get new or old SHA-1 from one or each of the changed refs.
Return a list of SHA-1 commit identifier strings suitable as
arguments to 'git rev-list' (or 'git log' or ...). The
returned identifiers are either the old or new values from one
or all of the changed references, depending on the values of
new_or_old and reference_change.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned SHA-1 identifiers are the new values from
each changed reference. If 'old', the SHA-1 identifiers are
the old values from each changed reference.
If reference_change is specified and not None, only the new or
old reference from the specified reference is included in the
return value.
This function returns None if there are no matching revisions
(e.g., because a branch was deleted and new_or_old is 'new').
"""
if not reference_change:
incl_spec = sorted(
getattr(change, new_or_old).sha1
for change in self.changes
if getattr(change, new_or_old)
)
if not incl_spec:
incl_spec = None
elif not getattr(reference_change, new_or_old).commit_sha1:
incl_spec = None
else:
incl_spec = [getattr(reference_change, new_or_old).commit_sha1]
return incl_spec
def _get_commits_spec_excl(self, new_or_old):
"""Get exclusion revisions for determining new or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that will exclude all
commits that, depending on the value of new_or_old, were
either previously in the repository (useful for determining
which commits are new to the repository) or currently in the
repository (useful for determining which commits were
discarded from the repository).
new_or_old is either the string 'new' or the string 'old'. If
'new', the commits to be excluded are those that were in the
repository before the push. If 'old', the commits to be
        excluded are those that are currently in the repository."""
old_or_new = {'old': 'new', 'new': 'old'}[new_or_old]
excl_revs = self._other_ref_sha1s.union(
getattr(change, old_or_new).sha1
for change in self.changes
if getattr(change, old_or_new).type in ['commit', 'tag']
)
return ['^' + sha1 for sha1 in sorted(excl_revs)]
def get_commits_spec(self, new_or_old, reference_change=None):
"""Get rev-list arguments for added or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that select those commits
that, depending on the value of new_or_old, are either new to
the repository or were discarded from the repository.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned list is used to select commits that are
new to the repository. If 'old', the returned value is used
to select the commits that have been discarded from the
repository.
If reference_change is specified and not None, the new or
discarded commits are limited to those that are reachable from
the new or old value of the specified reference.
This function returns None if there are no added (or discarded)
revisions.
"""
key = (new_or_old, reference_change)
if key not in self.__cached_commits_spec:
ret = self._get_commits_spec_incl(new_or_old, reference_change)
if ret is not None:
ret.extend(self._get_commits_spec_excl(new_or_old))
self.__cached_commits_spec[key] = ret
return self.__cached_commits_spec[key]
def get_new_commits(self, reference_change=None):
"""Return a list of commits added by this push.
Return a list of the object names of commits that were added
by the part of this push represented by reference_change. If
reference_change is None, then return a list of *all* commits
added by this push."""
spec = self.get_commits_spec('new', reference_change)
return git_rev_list(spec)
def get_discarded_commits(self, reference_change):
"""Return a list of commits discarded by this push.
Return a list of the object names of commits that were
entirely discarded from the repository by the part of this
push represented by reference_change."""
spec = self.get_commits_spec('old', reference_change)
return git_rev_list(spec)
def send_emails(self, mailer, body_filter=None):
"""Use send all of the notification emails needed for this push.
Use send all of the notification emails (including reference
change emails and commit emails) needed for this push. Send
the emails using mailer. If body_filter is not None, then use
it to filter the lines that are intended for the email
body."""
# The sha1s of commits that were introduced by this push.
# They will be removed from this set as they are processed, to
# guarantee that one (and only one) email is generated for
# each new commit.
unhandled_sha1s = set(self.get_new_commits())
send_date = IncrementalDateTime()
for change in self.changes:
sha1s = []
for sha1 in reversed(list(self.get_new_commits(change))):
if sha1 in unhandled_sha1s:
sha1s.append(sha1)
unhandled_sha1s.remove(sha1)
# Check if we've got anyone to send to
if not change.recipients:
change.environment.log_warning(
'*** no recipients configured so no email will be sent\n'
'*** for %r update %s->%s\n'
% (change.refname, change.old.sha1, change.new.sha1,)
)
else:
if not change.environment.quiet:
change.environment.log_msg(
'Sending notification emails to: %s\n' % (change.recipients,))
extra_values = {'send_date': send_date.next()}
rev = change.send_single_combined_email(sha1s)
if rev:
mailer.send(
change.generate_combined_email(self, rev, body_filter, extra_values),
rev.recipients,
)
# This change is now fully handled; no need to handle
# individual revisions any further.
continue
else:
mailer.send(
change.generate_email(self, body_filter, extra_values),
change.recipients,
)
max_emails = change.environment.maxcommitemails
if max_emails and len(sha1s) > max_emails:
change.environment.log_warning(
'*** Too many new commits (%d), not sending commit emails.\n' % len(sha1s)
+ '*** Try setting multimailhook.maxCommitEmails to a greater value\n'
+ '*** Currently, multimailhook.maxCommitEmails=%d\n' % max_emails
)
return
for (num, sha1) in enumerate(sha1s):
rev = Revision(change, GitObject(sha1), num=num + 1, tot=len(sha1s))
if not rev.recipients and rev.cc_recipients:
change.environment.log_msg('*** Replacing Cc: with To:\n')
rev.recipients = rev.cc_recipients
rev.cc_recipients = None
if rev.recipients:
extra_values = {'send_date': send_date.next()}
mailer.send(
rev.generate_email(self, body_filter, extra_values),
rev.recipients,
)
# Consistency check:
if unhandled_sha1s:
change.environment.log_error(
'ERROR: No emails were sent for the following new commits:\n'
' %s\n'
% ('\n '.join(sorted(unhandled_sha1s)),)
)
def run_as_post_receive_hook(environment, mailer):
changes = []
for line in sys.stdin:
(oldrev, newrev, refname) = line.strip().split(' ', 2)
changes.append(
ReferenceChange.create(environment, oldrev, newrev, refname)
)
push = Push(changes)
push.send_emails(mailer, body_filter=environment.filter_body)
def run_as_update_hook(environment, mailer, refname, oldrev, newrev, force_send=False):
changes = [
ReferenceChange.create(
environment,
read_git_output(['rev-parse', '--verify', oldrev]),
read_git_output(['rev-parse', '--verify', newrev]),
refname,
),
]
push = Push(changes, force_send)
push.send_emails(mailer, body_filter=environment.filter_body)
def choose_mailer(config, environment):
mailer = config.get('mailer', default='sendmail')
if mailer == 'smtp':
smtpserver = config.get('smtpserver', default='localhost')
smtpservertimeout = float(config.get('smtpservertimeout', default=10.0))
smtpserverdebuglevel = int(config.get('smtpserverdebuglevel', default=0))
smtpencryption = config.get('smtpencryption', default='none')
smtpuser = config.get('smtpuser', default='')
smtppass = config.get('smtppass', default='')
mailer = SMTPMailer(
envelopesender=(environment.get_sender() or environment.get_fromaddr()),
smtpserver=smtpserver, smtpservertimeout=smtpservertimeout,
smtpserverdebuglevel=smtpserverdebuglevel,
smtpencryption=smtpencryption,
smtpuser=smtpuser,
smtppass=smtppass,
)
elif mailer == 'sendmail':
command = config.get('sendmailcommand')
if command:
command = shlex.split(command)
mailer = SendMailer(command=command, envelopesender=environment.get_sender())
else:
environment.log_error(
'fatal: multimailhook.mailer is set to an incorrect value: "%s"\n' % mailer
+ 'please use one of "smtp" or "sendmail".\n'
)
sys.exit(1)
return mailer
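# Example (illustrative) SMTP configuration consumed by choose_mailer():
#
#   [multimailhook]
#       mailer = smtp
#       smtpServer = mail.example.com
#       smtpServerTimeout = 30
#       smtpUser = gitmail
#       smtpPass = secret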
KNOWN_ENVIRONMENTS = {
'generic': GenericEnvironmentMixin,
'gitolite': GitoliteEnvironmentMixin,
}
def choose_environment(config, osenv=None, env=None, recipients=None):
if not osenv:
osenv = os.environ
environment_mixins = [
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
]
environment_kw = {
'osenv': osenv,
'config': config,
}
if not env:
env = config.get('environment')
if not env:
if 'GL_USER' in osenv and 'GL_REPO' in osenv:
env = 'gitolite'
else:
env = 'generic'
environment_mixins.append(KNOWN_ENVIRONMENTS[env])
if recipients:
environment_mixins.insert(0, StaticRecipientsEnvironmentMixin)
environment_kw['refchange_recipients'] = recipients
environment_kw['announce_recipients'] = recipients
environment_kw['revision_recipients'] = recipients
environment_kw['scancommitforcc'] = config.get('scancommitforcc')
else:
environment_mixins.insert(0, ConfigRecipientsEnvironmentMixin)
environment_klass = type(
'EffectiveEnvironment',
tuple(environment_mixins) + (Environment,),
{},
)
return environment_klass(**environment_kw)
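# Minimal self-contained sketch of the dynamic composition used above: type()
# builds a class whose MRO threads through each mixin once, so every mixin's
# __init__ and overrides cooperate via super().
def _example_mixin_composition():
    class Base(object):
        def name(self):
            return 'base'
    class Shout(object):
        def name(self):
            return super(Shout, self).name().upper()
    combined_class = type('Combined', (Shout, Base), {})
    return combined_class().name()  # returns 'BASE'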
def main(args):
parser = optparse.OptionParser(
description=__doc__,
usage='%prog [OPTIONS]\n or: %prog [OPTIONS] REFNAME OLDREV NEWREV',
)
parser.add_option(
'--environment', '--env', action='store', type='choice',
choices=['generic', 'gitolite'], default=None,
help=(
            'Choose the type of environment that is in use. Default is taken from '
'multimailhook.environment if set; otherwise "generic".'
),
)
parser.add_option(
'--stdout', action='store_true', default=False,
help='Output emails to stdout rather than sending them.',
)
parser.add_option(
'--recipients', action='store', default=None,
help='Set list of email recipients for all types of emails.',
)
parser.add_option(
'--show-env', action='store_true', default=False,
help=(
'Write to stderr the values determined for the environment '
'(intended for debugging purposes).'
),
)
parser.add_option(
'--force-send', action='store_true', default=False,
help=(
'Force sending refchange email when using as an update hook. '
'This is useful to work around the unreliable new commits '
'detection in this mode.'
),
)
(options, args) = parser.parse_args(args)
config = Config('multimailhook')
try:
environment = choose_environment(
config, osenv=os.environ,
env=options.environment,
recipients=options.recipients,
)
if options.show_env:
sys.stderr.write('Environment values:\n')
for (k, v) in sorted(environment.get_values().items()):
sys.stderr.write(' %s : %r\n' % (k, v))
sys.stderr.write('\n')
if options.stdout or environment.stdout:
mailer = OutputMailer(sys.stdout)
else:
mailer = choose_mailer(config, environment)
# Dual mode: if arguments were specified on the command line, run
# like an update hook; otherwise, run as a post-receive hook.
if args:
if len(args) != 3:
parser.error('Need zero or three non-option arguments')
(refname, oldrev, newrev) = args
run_as_update_hook(environment, mailer, refname, oldrev, newrev, options.force_send)
else:
run_as_post_receive_hook(environment, mailer)
except ConfigurationException, e:
sys.exit(str(e))
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-2.0 |
victordion/YouCompleteMe | python/ycm/client/omni_completion_request.py | 48 | 1204 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm.client.completion_request import CompletionRequest
class OmniCompletionRequest( CompletionRequest ):
def __init__( self, omni_completer, request_data ):
super( OmniCompletionRequest, self ).__init__( request_data )
self._omni_completer = omni_completer
def Start( self ):
self._results = self._omni_completer.ComputeCandidates( self.request_data )
def Done( self ):
return True
def Response( self ):
return self._results
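# Hedged usage sketch (the omni completer and request_data come from ycmd;
# the flow below is illustrative):
#   request = OmniCompletionRequest( omni_completer, request_data )
#   request.Start()
#   if request.Done():
#     candidates = request.Response()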
| gpl-3.0 |
nolanliou/tensorflow | tensorflow/contrib/framework/python/ops/script_ops.py | 22 | 5526 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators. See the @{$python/script_ops} guide.
@@py_func
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.script_ops import py_func as _py_func
from tensorflow.python.util import nest
__all__ = ['py_func']
def py_func(func,
args=(),
kwargs=None,
output_types=None,
output_shapes=None,
stateful=True,
name=None):
"""Wraps a python function and uses it as a TensorFlow op.
  This function is a wrapper around `tf.py_func` and improves it with kwargs
  and output_shapes. Further, it changes some argument names.
Given a python function `func`, which takes numpy arrays as its
inputs and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
  TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
inp = tf.placeholder(tf.float32)
y = tf.py_func(my_func, [inp], tf.float32)
```
**N.B.** The `tf.py_func()` operation has the following known limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.py_func()`. If you are using distributed TensorFlow, you
must run a `tf.train.Server` in the same process as the program that calls
`tf.py_func()` and you must pin the created operation to a device in that
server (e.g. using `with tf.device():`).
Args:
func: A Python function, which accepts a list of NumPy `ndarray` objects
having element types that match the corresponding `tf.Tensor` objects
in `inp`, and returns a list of `ndarray` objects (or a single `ndarray`)
having element types that match the corresponding values in `Tout`.
args: A list of `Tensor` objects.
kwargs: A dict with `Tensor` objects as values.
output_types: A nested structure of tensorflow data types or a single
tensorflow data type if there is only one, indicating what `func` returns.
    output_shapes: Same as output_types, except that the types are replaced
      with shapes (optional).
stateful: (Boolean.) If True, the function should be considered stateful.
If a function is stateless, when given the same input it will return the
same output and have no observable side effects. Optimizations such as
common subexpression elimination are only performed on stateless
operations.
name: A name for the operation (optional).
Returns:
Tensorflow op that wraps the input python function.
"""
if kwargs is None:
kwargs = {}
if not isinstance(args, (list, tuple)):
    raise TypeError('args must be a list or tuple, not {}. args: {}'.format(
        type(args), args))
if not isinstance(kwargs, dict):
    raise TypeError('kwargs must be a dict, not {}. kwargs: {}'.format(
        type(kwargs), kwargs))
# For dynamic type inference use callable output_types and output_shapes
if callable(output_types):
# If callable assume same signature and call with tensors and get the types
output_types = output_types(*args, **kwargs)
if callable(output_shapes):
# If callable assume same signature and call with tensors and get the shapes
output_shapes = output_shapes(*args, **kwargs)
flat_output_types = nest.flatten(output_types)
args = (args, kwargs)
flat_args = nest.flatten(args)
def python_function_wrapper(*py_args):
py_args, py_kwargs = nest.pack_sequence_as(args, py_args)
ret = func(*py_args, **py_kwargs)
    # TODO(alextp): Catch exceptions and improve the message, because
    # TensorFlow is not able to preserve the traceback, i.e. the exception
    # does not contain any information about where it was raised.
nest.assert_shallow_structure(output_types, ret)
return nest.flatten(ret)
flat_values = _py_func(
python_function_wrapper,
flat_args,
flat_output_types,
stateful=stateful,
name=name)
if output_shapes is not None:
    # I am not sure if this is necessary
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
flattened_shapes = nest.flatten(output_shapes)
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
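# Hedged usage sketch (assumes a TF1-style graph; `my_func` and the shapes
# below are illustrative, not part of the module):
def _example_py_func_usage():
  import numpy as np
  import tensorflow as tf
  def my_func(x, scale=1.0):
    # x and scale arrive as numpy values inside the wrapped function.
    return np.sinh(x) * scale
  inp = tf.placeholder(tf.float32, shape=[3])
  # kwargs values must be tensors; output_shapes pins the static shape.
  return py_func(my_func, args=(inp,), kwargs={'scale': tf.constant(2.0)},
                 output_types=tf.float32, output_shapes=[3])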
| apache-2.0 |
Sweetgrassbuffalo/ReactionSweeGrass-v2 | .meteor/local/dev_bundle/python/Lib/test/test_binop.py | 10 | 10741 | """Tests for binary operators on subtypes of built-in types."""
import unittest
from test import test_support
def gcd(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
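# Note: with Python's floor-mod semantics the sign of the result follows the
# first argument (for nonzero a), e.g. gcd(-10, 2) == -2 but gcd(10, -2) == 2;
# the tests below rely on this.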
def isint(x):
"""Test whether an object is an instance of int or long."""
return isinstance(x, int) or isinstance(x, long)
def isnum(x):
"""Test whether an object is an instance of a built-in numeric type."""
for T in int, long, float, complex:
if isinstance(x, T):
return 1
return 0
def isRat(x):
"""Test wheter an object is an instance of the Rat class."""
return isinstance(x, Rat)
class Rat(object):
"""Rational number implemented as a normalized pair of longs."""
__slots__ = ['_Rat__num', '_Rat__den']
def __init__(self, num=0L, den=1L):
"""Constructor: Rat([num[, den]]).
The arguments must be ints or longs, and default to (0, 1)."""
if not isint(num):
raise TypeError, "Rat numerator must be int or long (%r)" % num
if not isint(den):
raise TypeError, "Rat denominator must be int or long (%r)" % den
# But the zero is always on
if den == 0:
raise ZeroDivisionError, "zero denominator"
g = gcd(den, num)
self.__num = long(num//g)
self.__den = long(den//g)
def _get_num(self):
"""Accessor function for read-only 'num' attribute of Rat."""
return self.__num
num = property(_get_num, None)
def _get_den(self):
"""Accessor function for read-only 'den' attribute of Rat."""
return self.__den
den = property(_get_den, None)
def __repr__(self):
"""Convert a Rat to a string resembling a Rat constructor call."""
return "Rat(%d, %d)" % (self.__num, self.__den)
def __str__(self):
"""Convert a Rat to a string resembling a decimal numeric value."""
return str(float(self))
def __float__(self):
"""Convert a Rat to a float."""
return self.__num*1.0/self.__den
def __int__(self):
"""Convert a Rat to an int; self.den must be 1."""
if self.__den == 1:
try:
return int(self.__num)
except OverflowError:
raise OverflowError, ("%s too large to convert to int" %
repr(self))
raise ValueError, "can't convert %s to int" % repr(self)
def __long__(self):
"""Convert a Rat to a long; self.den must be 1."""
if self.__den == 1:
return long(self.__num)
raise ValueError, "can't convert %s to long" % repr(self)
def __add__(self, other):
"""Add two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den + other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) + other
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den - other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) - other
return NotImplemented
def __rsub__(self, other):
"""Subtract two Rats, or a Rat and a number (reversed args)."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(other.__num*self.__den - self.__num*other.__den,
self.__den*other.__den)
if isnum(other):
return other - float(self)
return NotImplemented
def __mul__(self, other):
"""Multiply two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__num, self.__den*other.__den)
if isint(other):
return Rat(self.__num*other, self.__den)
if isnum(other):
return float(self)*other
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__den, self.__den*other.__num)
if isint(other):
return Rat(self.__num, self.__den*other)
if isnum(other):
return float(self) / other
return NotImplemented
__div__ = __truediv__
def __rtruediv__(self, other):
"""Divide two Rats, or a Rat and a number (reversed args)."""
if isRat(other):
return Rat(other.__num*self.__den, other.__den*self.__num)
if isint(other):
return Rat(other*self.__den, self.__num)
if isnum(other):
return other / float(self)
return NotImplemented
__rdiv__ = __rtruediv__
def __floordiv__(self, other):
"""Divide two Rats, returning the floored result."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self/other
return x.__num // x.__den
def __rfloordiv__(self, other):
"""Divide two Rats, returning the floored result (reversed args)."""
x = other/self
return x.__num // x.__den
def __divmod__(self, other):
"""Divide two Rats, returning quotient and remainder."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self//other
return (x, self - other * x)
def __rdivmod__(self, other):
"""Divide two Rats, returning quotient and remainder (reversed args)."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
return divmod(other, self)
def __mod__(self, other):
"""Take one Rat modulo another."""
return divmod(self, other)[1]
def __rmod__(self, other):
"""Take one Rat modulo another (reversed args)."""
return divmod(other, self)[1]
def __eq__(self, other):
"""Compare two Rats for equality."""
if isint(other):
return self.__den == 1 and self.__num == other
if isRat(other):
return self.__num == other.__num and self.__den == other.__den
if isnum(other):
return float(self) == other
return NotImplemented
def __ne__(self, other):
"""Compare two Rats for inequality."""
return not self == other
# Silence Py3k warning
__hash__ = None
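# Illustrative companion class (not part of the original test): returning
# NotImplemented from a binary method makes Python try the reflected
# operation on the other operand, which is what the Rat methods above rely
# on for unknown types.
class _Stringy(object):
    def __radd__(self, other):
        return '%s+stringy' % (other,)
# e.g. Rat(1, 2) + _Stringy() == '0.5+stringy', because Rat.__add__ returns
# NotImplemented for non-numeric operands and Python falls back to
# _Stringy.__radd__.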
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assertTrue(gcd(i, j) > 0)
self.assertTrue(gcd(-i, j) < 0)
self.assertTrue(gcd(i, -j) > 0)
self.assertTrue(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10L, 15L)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
self.assertEqual(Rat(10) // Rat(4), 2)
self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
self.assertEqual(Rat(10) // 4, 2)
self.assertEqual(10 // Rat(4), 2)
def test_eq(self):
self.assertEqual(Rat(10), Rat(20, 2))
self.assertEqual(Rat(10), 10)
self.assertEqual(10, Rat(10))
self.assertEqual(Rat(10), 10.0)
self.assertEqual(10.0, Rat(10))
def test_future_div(self):
exec future_test
# XXX Ran out of steam; TO DO: divmod, div, future division
future_test = """
from __future__ import division
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
"""
def test_main():
test_support.run_unittest(RatTestCase)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
peastman/msmbuilder | msmbuilder/tests/test_kernel_approximation.py | 9 | 1158 | from __future__ import absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.kernel_approximation import Nystroem as NystroemR
from msmbuilder.decomposition.kernel_approximation import Nystroem, LandmarkNystroem
def test_nystroem_vs_sklearn():
np.random.seed(42)
X = np.random.randn(100, 5)
kernel = Nystroem(kernel='linear', random_state=42)
kernelR = NystroemR(kernel='linear', random_state=42)
y1 = kernel.fit_transform([X])[0]
y2 = kernelR.fit_transform(X)
assert_array_almost_equal(y1, y2)
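# The Nystroem method approximates a kernel matrix K from m sampled landmark
# points, via the feature map K_nm . K_mm^{-1/2}; with a linear kernel and
# identical random_state the msmbuilder wrapper and sklearn should agree
# exactly, which is what the assertion above checks.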
def test_lndmrk_nystroem_approximation():
np.random.seed(42)
X = np.random.randn(100, 5)
u = np.arange(X.shape[0])[5::1]
v = np.arange(X.shape[0])[::1][:u.shape[0]]
lndmrks = X[np.unique((u, v))]
kernel = LandmarkNystroem(kernel='rbf', random_state=42)
kernelR = NystroemR(kernel='rbf', random_state=42)
y1_1 = kernel.fit_transform([X])[0]
kernel.landmarks = lndmrks
y1_2 = kernel.fit_transform([X])[0]
y2 = kernelR.fit_transform(X)
assert_array_almost_equal(y2, y1_1)
assert not all((np.abs(y2 - y1_2) > 1E-6).flatten())
| lgpl-2.1 |
jbeder/yaml-cpp.old-api | test/gmock-1.7.0/test/gmock_leak_test.py | 779 | 4384 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
def testCatchesLeakedMockByDefault(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL,
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL,
env=environ).exit_code)
def testDoesNotCatchLeakedMockWhenDisabled(self):
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabled(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=1'],
env=environ).exit_code)
def testCatchesMultipleLeakedMocks(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
if __name__ == '__main__':
gmock_test_utils.Main()
| mit |
dsparrow27/vortex | src/ds/vortex/nodes/comparison/equalTo.py | 1 | 1290 | from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs
class EqualToNode(baseNode.BaseNode):
def __init__(self, name):
"""
:param name: str, the name of the node
"""
baseNode.BaseNode.__init__(self, name)
def initialize(self):
baseNode.BaseNode.initialize(self)
self.outputPlug_ = plugs.OutputPlug("output", self)
self.addPlug(self.outputPlug_, clean=True)
self.value1Plug_ = plugs.InputPlug("value1", self, value=0)
self.value2Plug_ = plugs.InputPlug("value2", self, value=0)
self.addPlug(self.value1Plug_, clean=True)
self.addPlug(self.value2Plug_, clean=True)
self.plugAffects(self.value1Plug_, self.outputPlug_)
self.plugAffects(self.value2Plug_, self.outputPlug_)
def compute(self, requestPlug):
baseNode.BaseNode.compute(self, requestPlug=requestPlug)
if requestPlug != self.outputPlug_:
return None
        result = self.value1Plug_.value == self.value2Plug_.value
requestPlug.value = result
requestPlug.dirty = False
return result
def getNode():
"""General function that returns our node, used to get create our node via Ui etc
:return: Node instance
"""
return EqualToNode
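# Hedged usage sketch (attribute names follow the class above; setting plug
# values directly is an assumption about the vortex plug API):
#   node = EqualToNode("isEqual")
#   node.initialize()
#   node.value1Plug_.value = 3
#   node.value2Plug_.value = 3
#   node.compute(node.outputPlug_)  # -> True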
| mit |
PhilLidar-DAD/geonode | geonode/maps/admin.py | 30 | 1784 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import autocomplete_light
from geonode.maps.models import Map, MapLayer, MapSnapshot
from geonode.base.admin import MediaTranslationAdmin, ResourceBaseAdminForm
from django.contrib import admin
class MapLayerInline(admin.TabularInline):
model = MapLayer
class MapAdminForm(ResourceBaseAdminForm):
class Meta:
model = Map
class MapAdmin(MediaTranslationAdmin):
inlines = [MapLayerInline, ]
list_display_links = ('title',)
list_display = ('id', 'title', 'owner',)
list_filter = ('owner', 'category',)
search_fields = ('title', 'abstract', 'purpose',)
form = MapAdminForm
class MapLayerAdmin(admin.ModelAdmin):
list_display = ('id', 'map', 'name')
list_filter = ('map',)
search_fields = ('map__title', 'name',)
form = autocomplete_light.modelform_factory(MapLayer)
admin.site.register(Map, MapAdmin)
admin.site.register(MapLayer, MapLayerAdmin)
admin.site.register(MapSnapshot)
| gpl-3.0 |
mclaughlin6464/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/fit_final_model.py | 44 | 3913 | from __future__ import print_function
import numpy as np
from optparse import OptionParser
from pylearn2.models.independent_multiclass_logistic import IndependentMulticlassLogistic
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
from theano.compat.six.moves import xrange
import gc
gc.collect()
def train(fold_train_X, fold_train_y, C):
model = IndependentMulticlassLogistic(C).fit(fold_train_X, fold_train_y)
gc.collect()
return model
def get_labels_and_fold_indices(cifar10, cifar100, stl10):
assert stl10 or cifar10 or cifar100
assert stl10+cifar10+cifar100 == 1
if stl10:
print('loading entire stl-10 train set just to get the labels and folds')
stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/train.pkl")
train_y = stl10.y
fold_indices = stl10.fold_indices
elif cifar10 or cifar100:
if cifar10:
print('loading entire cifar10 train set just to get the labels')
cifar = CIFAR10(which_set = 'train')
else:
assert cifar100
print('loading entire cifar100 train set just to get the labels')
cifar = CIFAR100(which_set = 'train')
cifar.y = cifar.y_fine
train_y = cifar.y
assert train_y is not None
fold_indices = np.zeros((5,40000),dtype='uint16')
idx_list = np.cast['uint16'](np.arange(1,50001)) #mimic matlab format of stl10
for i in xrange(5):
mask = idx_list < i * 10000 + 1
mask += idx_list >= (i+1) * 10000 + 1
fold_indices[i,:] = idx_list[mask]
assert fold_indices.min() == 1
assert fold_indices.max() == 50000
return train_y, fold_indices
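# The mask arithmetic above is easiest to sanity-check on a toy range; a
# minimal sketch of the same logic with 10 samples split into 5 folds of 2:
#
#   import numpy as np
#   idx = np.arange(1, 11)
#   i = 0
#   mask = (idx < i * 2 + 1) + (idx >= (i + 1) * 2 + 1)
#   idx[mask]  # -> array([ 3, 4, ..., 10]): everything outside fold 0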
def main(train_path,
out_path,
dataset,
standardize,
C,
**kwargs):
stl10 = dataset == 'stl10'
cifar10 = dataset == 'cifar10'
cifar100 = dataset == 'cifar100'
assert stl10 + cifar10 + cifar100 == 1
print('getting labels and folds')
train_y, fold_indices = get_labels_and_fold_indices(cifar10, cifar100, stl10)
gc.collect()
assert train_y is not None
print('loading training features')
train_X = get_features(train_path, split = False, standardize = standardize)
assert str(train_X.dtype) == 'float32'
if stl10:
assert train_X.shape[0] == 5000
if cifar10 or cifar100:
assert train_X.shape[0] == 50000
assert train_y.shape == (50000,)
print('training model')
model = train(train_X, train_y, C)
print('saving model')
serial.save(out_path, model)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-d", "--train",
action="store", type="string", dest="train")
parser.add_option("-o", "--out",
action="store", type="string", dest="out")
parser.add_option('-C', type='float', dest='C', action='store', default = None)
parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
parser.add_option('--standardize',action="store_true", dest="standardize", default=False)
parser.add_option('--fold', action='store', type='int', dest='fold', default = None)
#(options, args) = parser.parse_args()
#assert options.dataset is not None
#assert options.C is not None
#assert options.out is not None
#assert options.fold is not None
#log = open(options.out+'.log.txt','w')
#log.write('log file started succesfully\n')
#log.flush()
print('parsed the args')
main(train_path='features.npy',
out_path = 'final_model.pkl',
C = .01,
dataset = 'cifar100',
standardize = False,
#fold = options.fold,
#log = log
)
#log.close()
| bsd-3-clause |
kapari/django-oscar | tests/functional/dashboard/user_tests.py | 35 | 3451 | from django.core.urlresolvers import reverse
from django.core import mail
from django.utils.translation import ugettext_lazy as _
from webtest import AppError
from oscar.core.compat import get_user_model
from oscar.test.factories import UserFactory
from oscar.test.testcases import WebTestCase
User = get_user_model()
class IndexViewTests(WebTestCase):
is_staff = True
active_users_ids = []
inactive_users_ids = []
csrf_checks = False
def setUp(self):
super(IndexViewTests, self).setUp()
for i in range(1, 25):
UserFactory(is_active=True)
for i in range(1, 25):
UserFactory(is_active=False)
user_queryset = User.objects.all()
self.active_users_ids = user_queryset.filter(is_active=True).values_list('id', flat=True)
self.inactive_users_ids = user_queryset.filter(is_active=False).values_list('id', flat=True)
def test_user_list_view(self):
response = self.get(reverse('dashboard:users-index'))
self.assertInContext(response, 'users')
def test_make_active(self):
params = {'action': 'make_active',
'selected_user': self.inactive_users_ids}
response = self.post(reverse('dashboard:users-index'), params=params)
ex_inactive = User.objects.get(id=self.inactive_users_ids[10])
self.assertIsRedirect(response)
self.assertTrue(ex_inactive.is_active)
def test_make_inactive(self):
params = {'action': 'make_inactive',
'selected_user': self.active_users_ids}
response = self.post(reverse('dashboard:users-index'), params=params)
ex_active = User.objects.get(id=self.active_users_ids[10])
self.assertIsRedirect(response)
self.assertFalse(ex_active.is_active)
class DetailViewTests(WebTestCase):
is_staff = True
def test_user_detail_view(self):
response = self.get(reverse('dashboard:user-detail', kwargs={'pk': 1}))
self.assertInContext(response, 'user')
self.assertIsOk(response)
class TestDetailViewForStaffUser(WebTestCase):
is_staff = True
def setUp(self):
self.customer = UserFactory(
username='jane', email='jane@example.org', password='password')
super(TestDetailViewForStaffUser, self).setUp()
def test_password_reset_url_only_available_via_post(self):
try:
reset_url = reverse(
'dashboard:user-password-reset',
kwargs={'pk': self.customer.id}
)
self.get(reset_url)
except AppError as e:
self.assertIn('405', e.args[0])
def test_admin_can_reset_user_passwords(self):
customer_page_url = reverse(
'dashboard:user-detail',
kwargs={'pk': self.customer.id}
)
customer_page = self.get(customer_page_url)
reset_form = customer_page.forms['password_reset_form']
response = reset_form.submit()
# Check that the staff user is redirected back to the customer page
self.assertRedirects(response, customer_page_url)
# Check that the reset email has been sent
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Resetting your password", mail.outbox[0].subject)
# Check that success message shows up
self.assertContains(
response.follow(),
_("A password reset email has been sent")
)
| bsd-3-clause |
teltek/edx-platform | common/test/acceptance/tests/studio/test_studio_settings_details.py | 5 | 8450 | """
Acceptance tests for Studio's Settings Details pages
"""
from datetime import datetime, timedelta
from common.test.acceptance.fixtures.config import ConfigModelFixture
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.tests.helpers import (
element_has_text,
generate_course_key,
is_option_value_selected,
select_option_by_value
)
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
class StudioSettingsDetailsTest(StudioCourseTest):
"""Base class for settings and details page tests."""
shard = 4
def setUp(self, is_staff=True):
super(StudioSettingsDetailsTest, self).setUp(is_staff=is_staff)
self.settings_detail = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Before every test, make sure to visit the page first
self.settings_detail.visit()
class SettingsMilestonesTest(StudioSettingsDetailsTest):
"""
Tests for milestones feature in Studio's settings tab
"""
shard = 4
def test_page_has_prerequisite_field(self):
"""
Test to make sure page has pre-requisite course field if milestones app is enabled.
"""
self.assertTrue(self.settings_detail.pre_requisite_course_options)
def test_prerequisite_course_save_successfully(self):
"""
Scenario: Selecting course from Pre-Requisite course drop down save the selected course as pre-requisite
course.
Given that I am on the Schedule & Details page on studio
When I select an item in pre-requisite course drop down and click Save Changes button
Then My selected item should be saved as pre-requisite course
And My selected item should be selected after refreshing the page.'
"""
course_number = self.unique_id
CourseFixture(
org='test_org',
number=course_number,
run='test_run',
display_name='Test Course' + course_number
).install()
pre_requisite_course_key = generate_course_key(
org='test_org',
number=course_number,
run='test_run'
)
pre_requisite_course_id = unicode(pre_requisite_course_key)
# Refresh the page to load the new course fixture and populate the prerequisite course dropdown
# Then select the prerequisite course and save the changes
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again and confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
))
# Set the prerequisite course back to None and save the changes
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the None selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
))
# Re-pick the prerequisite course and confirm no errors are thrown (covers a discovered bug)
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
dropdown_status = is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.assertTrue(dropdown_status)
def test_page_has_enable_entrance_exam_field(self):
"""
Test to make sure page has 'enable entrance exam' field.
"""
self.assertTrue(self.settings_detail.entrance_exam_field)
def test_entrance_exam_has_unit_button(self):
"""
Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
And user has option to add units only instead of any Subsection.
"""
self.settings_detail.require_entrance_exam(required=True)
self.settings_detail.save_changes()
# getting the course outline page.
course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
course_outline_page.visit()
course_outline_page.wait_for_ajax()
# button with text 'New Unit' should be present.
self.assertTrue(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Unit'
))
# button with text 'New Subsection' should not be present.
self.assertFalse(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Subsection'
))
class CoursePacingTest(StudioSettingsDetailsTest):
"""Tests for setting a course to self-paced."""
shard = 4
def populate_course_fixture(self, __):
ConfigModelFixture('/config/self_paced', {'enabled': True}).install()
# Set the course start date to tomorrow in order to allow setting pacing
self.course_fixture.add_course_details({'start_date': datetime.now() + timedelta(days=1)})
def test_default_instructor_paced(self):
"""
Test that the 'instructor paced' button is checked by default.
"""
self.assertEqual(self.settings_detail.course_pacing, 'Instructor-Paced')
def test_self_paced(self):
"""
Test that the 'self-paced' button is checked for a self-paced
course.
"""
self.course_fixture.add_course_details({
'self_paced': True
})
self.course_fixture.configure_course()
self.settings_detail.refresh_page()
self.assertEqual(self.settings_detail.course_pacing, 'Self-Paced')
def test_set_self_paced(self):
"""
Test that the self-paced option is persisted correctly.
"""
self.settings_detail.course_pacing = 'Self-Paced'
self.settings_detail.save_changes()
self.settings_detail.refresh_page()
self.assertEqual(self.settings_detail.course_pacing, 'Self-Paced')
def test_toggle_pacing_after_course_start(self):
"""
Test that course authors cannot toggle the pacing of their course
while the course is running.
"""
self.course_fixture.add_course_details({'start_date': datetime.now()})
self.course_fixture.configure_course()
self.settings_detail.refresh_page()
self.assertTrue(self.settings_detail.course_pacing_disabled())
self.assertIn('Course pacing cannot be changed', self.settings_detail.course_pacing_disabled_text)
| agpl-3.0 |
Noltari/lede | scripts/dl_cleanup.py | 223 | 6094 | #!/usr/bin/env python3
"""
# OpenWrt download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (C) 2010-2015 Michael Buesch <m@bues.ch>
# Copyright (C) 2013-2015 OpenWrt.org
"""
from __future__ import print_function
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False
def parseVer_1234(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
(int(match.group(5)) << 16)
return (progname, progversion)
def parseVer_123(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(5)
except IndexError as e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
patchlevel
return (progname, progversion)
def parseVer_12(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(4)
except IndexError as e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
patchlevel
return (progname, progversion)
def parseVer_r(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64)
return (progname, progversion)
def parseVer_ymd(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32)
return (progname, progversion)
def parseVer_GIT(match, filepath):
progname = match.group(1)
st = os.stat(filepath)
progversion = int(st.st_mtime) << 64
return (progname, progversion)
extensions = (
".tar.gz",
".tar.bz2",
".tar.xz",
".orig.tar.gz",
".orig.tar.bz2",
".orig.tar.xz",
".zip",
".tgz",
".tbz",
".txz",
)
versionRegex = (
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234), # xxx-1.2.3.4
(re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd), # xxx-YYYY-MM-DD
(re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT), # xxx-GIT_SHASUM
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123), # xxx-1.2.3a
(re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123), # xxx-1_2_3
(re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12), # xxx-1.2a
(re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r), # xxx-r1111
)
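# Each parser packs the version components into a single wide integer so that
# ordinary integer comparison orders releases; a quick sketch with parseVer_123
# (the filepath argument is unused by this particular parser):
#
#   m_a = versionRegex[3][0].match("xxx-1.2.3a")
#   m_b = versionRegex[3][0].match("xxx-1.2.3b")
#   (_, ver_a) = parseVer_123(m_a, "dl/xxx-1.2.3a.tar.gz")
#   (_, ver_b) = parseVer_123(m_b, "dl/xxx-1.2.3b.tar.gz")
#   assert ver_b > ver_a  # ord('b') > ord('a') in the patchlevel bits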
blacklist = [
("linux", re.compile(r"linux-\d.*")),
("gcc", re.compile(r"gcc-.*")),
("wl_apsta", re.compile(r"wl_apsta.*")),
(".fw", re.compile(r".*\.fw")),
(".arm", re.compile(r".*\.arm")),
(".bin", re.compile(r".*\.bin")),
("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
class EntryParseError(Exception): pass
class Entry:
def __init__(self, directory, filename):
self.directory = directory
self.filename = filename
self.progname = ""
self.fileext = ""
for ext in extensions:
if filename.endswith(ext):
filename = filename[0:0-len(ext)]
self.fileext = ext
break
else:
print(self.filename, "has an unknown file-extension")
raise EntryParseError("ext")
for (regex, parseVersion) in versionRegex:
match = regex.match(filename)
if match:
(self.progname, self.version) = parseVersion(
match, directory + "/" + filename + self.fileext)
break
else:
print(self.filename, "has an unknown version pattern")
raise EntryParseError("ver")
def getPath(self):
return (self.directory + "/" + self.filename).replace("//", "/")
def deleteFile(self):
path = self.getPath()
print("Deleting", path)
if not opt_dryrun:
os.unlink(path)
def __ge__(self, y):
return self.version >= y.version
def usage():
print("OpenWrt download directory cleanup utility")
print("Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>")
print("")
print(" -d|--dry-run Do a dry-run. Don't delete any files")
print(" -B|--show-blacklist Show the blacklist and exit")
print(" -w|--whitelist ITEM Remove ITEM from blacklist")
def main(argv):
global opt_dryrun
try:
(opts, args) = getopt.getopt(argv[1:],
"hdBw:",
[ "help", "dry-run", "show-blacklist", "whitelist=", ])
if len(args) != 1:
usage()
return 1
except getopt.GetoptError as e:
usage()
return 1
directory = args[0]
for (o, v) in opts:
if o in ("-h", "--help"):
usage()
return 0
if o in ("-d", "--dry-run"):
opt_dryrun = True
if o in ("-w", "--whitelist"):
for i in range(0, len(blacklist)):
(name, regex) = blacklist[i]
if name == v:
del blacklist[i]
break
else:
print("Whitelist error: Item", v,\
"is not in blacklist")
return 1
if o in ("-B", "--show-blacklist"):
for (name, regex) in blacklist:
sep = "\t\t"
if len(name) >= 8:
sep = "\t"
print("%s%s(%s)" % (name, sep, regex.pattern))
return 0
# Create a directory listing and parse the file names.
entries = []
for filename in os.listdir(directory):
if filename == "." or filename == "..":
continue
for (name, regex) in blacklist:
if regex.match(filename):
if opt_dryrun:
print(filename, "is blacklisted")
break
else:
try:
entries.append(Entry(directory, filename))
except EntryParseError as e:
pass
# Create a map of programs
progmap = {}
for entry in entries:
if entry.progname in progmap.keys():
progmap[entry.progname].append(entry)
else:
progmap[entry.progname] = [entry,]
# Traverse the program map and delete everything but the last version
for prog in progmap:
lastVersion = None
versions = progmap[prog]
for version in versions:
if lastVersion is None or version >= lastVersion:
lastVersion = version
if lastVersion:
for version in versions:
if version is not lastVersion:
version.deleteFile()
if opt_dryrun:
print("Keeping", lastVersion.getPath())
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
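# Example invocations, matching usage() above (illustrative):
#   ./dl_cleanup.py --dry-run dl/    # only report what would be deleted
#   ./dl_cleanup.py -w linux dl/     # un-blacklist 'linux' so its tarballs are cleaned too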
| gpl-2.0 |
pschmitt/home-assistant | homeassistant/components/coronavirus/config_flow.py | 7 | 1406 | """Config flow for Coronavirus integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from . import get_coordinator
from .const import DOMAIN, OPTION_WORLDWIDE # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Coronavirus."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
_options = None
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if self._options is None:
self._options = {OPTION_WORLDWIDE: "Worldwide"}
coordinator = await get_coordinator(self.hass)
for case in sorted(
coordinator.data.values(), key=lambda case: case.country
):
self._options[case.country] = case.country
if user_input is not None:
await self.async_set_unique_id(user_input["country"])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=self._options[user_input["country"]], data=user_input
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Required("country"): vol.In(self._options)}),
errors=errors,
)
| apache-2.0 |
ojengwa/oh-mainline | vendor/packages/twisted/twisted/test/testutils.py | 110 | 1525 | from cStringIO import StringIO
from twisted.internet.protocol import FileWrapper
class IOPump:
"""Utility to pump data between clients and servers for protocol testing.
Perhaps this is a utility worthy of being in protocol.py?
"""
def __init__(self, client, server, clientIO, serverIO):
self.client = client
self.server = server
self.clientIO = clientIO
self.serverIO = serverIO
def flush(self):
"Pump until there is no more input or output."
while self.pump():
pass
def pump(self):
"""Move data back and forth.
Returns whether any data was moved.
"""
self.clientIO.seek(0)
self.serverIO.seek(0)
cData = self.clientIO.read()
sData = self.serverIO.read()
self.clientIO.seek(0)
self.serverIO.seek(0)
self.clientIO.truncate()
self.serverIO.truncate()
for byte in cData:
self.server.dataReceived(byte)
for byte in sData:
self.client.dataReceived(byte)
if cData or sData:
return 1
else:
return 0
def returnConnected(server, client):
"""Take two Protocol instances and connect them.
"""
cio = StringIO()
sio = StringIO()
client.makeConnection(FileWrapper(cio))
server.makeConnection(FileWrapper(sio))
pump = IOPump(client, server, cio, sio)
# Challenge-response authentication:
pump.flush()
# Uh...
pump.flush()
return pump
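# Illustrative sketch (not part of the original helpers): connecting two
# in-memory protocols and pumping bytes between them.
#
#   from twisted.internet.protocol import Protocol
#
#   class Echo(Protocol):
#       def dataReceived(self, data):
#           self.transport.write(data)
#
#   client, server = Echo(), Echo()
#   pump = returnConnected(server, client)
#   client.transport.write("ping")
#   pump.pump()  # delivers "ping" to the server, which echoes it back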
| agpl-3.0 |
wbrefvem/heroku-buildpack-python | vendor/pip-pop/pip/_vendor/html5lib/serializer/htmlserializer.py | 436 | 12855 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in encode_entity_map or k.islower():
# prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
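# With the handler registered, encoding falls back to a named entity where one
# exists, else a numeric character reference; indicative behaviour:
#
#   u"caf\xe9 \u2603".encode("ascii", unicode_encode_errors)
#   # -> "caf&eacute; &#x2603;" (U+00E9 has a named entity, the snowman does not)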
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiency of the latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
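# Indicative round trip via html5lib's public API (a sketch, external to this
# module):
#
#   import html5lib
#   dom = html5lib.parse("<p class=foo>hi", treebuilder="etree")
#   walker = html5lib.getTreeWalker("etree")
#   HTMLSerializer(omit_optional_tags=False).render(walker(dom))
#   # -> u'<html><head></head><body><p class=foo>hi</p></body></html>'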
| mit |
jarshwah/django | tests/migrate_signals/tests.py | 22 | 5400 | from django.apps import apps
from django.core import management
from django.db import migrations
from django.db.models import signals
from django.test import TransactionTestCase, override_settings
from django.utils import six
APP_CONFIG = apps.get_app_config('migrate_signals')
SIGNAL_ARGS = ['app_config', 'verbosity', 'interactive', 'using', 'plan', 'apps']
MIGRATE_DATABASE = 'default'
MIGRATE_VERBOSITY = 1
MIGRATE_INTERACTIVE = False
class Receiver(object):
def __init__(self, signal):
self.call_counter = 0
self.call_args = None
signal.connect(self, sender=APP_CONFIG)
def __call__(self, signal, sender, **kwargs):
self.call_counter += 1
self.call_args = kwargs
class OneTimeReceiver(object):
"""
Special receiver to handle the fact that the test runner calls migrate for
several databases, and several times for some of them.
"""
def __init__(self, signal):
self.signal = signal
self.call_counter = 0
self.call_args = None
self.signal.connect(self, sender=APP_CONFIG)
def __call__(self, signal, sender, **kwargs):
# Although test runner calls migrate for several databases,
# testing for only one of them is quite sufficient.
if kwargs['using'] == MIGRATE_DATABASE:
self.call_counter += 1
self.call_args = kwargs
# we need to test only one call of migrate
self.signal.disconnect(self, sender=APP_CONFIG)
# We connect receiver here and not in unit test code because we need to
# connect receiver before test runner creates database. That is, sequence of
# actions would be:
#
# 1. Test runner imports this module.
# 2. We connect receiver.
# 3. Test runner calls migrate to create the default database.
# 4. Test runner executes our unit test code.
pre_migrate_receiver = OneTimeReceiver(signals.pre_migrate)
post_migrate_receiver = OneTimeReceiver(signals.post_migrate)
class MigrateSignalTests(TransactionTestCase):
available_apps = ['migrate_signals']
def test_call_time(self):
self.assertEqual(pre_migrate_receiver.call_counter, 1)
self.assertEqual(post_migrate_receiver.call_counter, 1)
def test_args(self):
pre_migrate_receiver = Receiver(signals.pre_migrate)
post_migrate_receiver = Receiver(signals.post_migrate)
management.call_command(
'migrate', database=MIGRATE_DATABASE, verbosity=MIGRATE_VERBOSITY,
interactive=MIGRATE_INTERACTIVE, stdout=six.StringIO(),
)
for receiver in [pre_migrate_receiver, post_migrate_receiver]:
args = receiver.call_args
self.assertEqual(receiver.call_counter, 1)
self.assertEqual(set(args), set(SIGNAL_ARGS))
self.assertEqual(args['app_config'], APP_CONFIG)
self.assertEqual(args['verbosity'], MIGRATE_VERBOSITY)
self.assertEqual(args['interactive'], MIGRATE_INTERACTIVE)
self.assertEqual(args['using'], 'default')
self.assertEqual(args['plan'], [])
self.assertIsInstance(args['apps'], migrations.state.StateApps)
@override_settings(MIGRATION_MODULES={'migrate_signals': 'migrate_signals.custom_migrations'})
def test_migrations_only(self):
"""
If all apps have migrations, migration signals should be sent.
"""
pre_migrate_receiver = Receiver(signals.pre_migrate)
post_migrate_receiver = Receiver(signals.post_migrate)
stdout = six.StringIO()
management.call_command(
'migrate', database=MIGRATE_DATABASE, verbosity=MIGRATE_VERBOSITY,
interactive=MIGRATE_INTERACTIVE, stdout=stdout,
)
for receiver in [pre_migrate_receiver, post_migrate_receiver]:
args = receiver.call_args
self.assertEqual(receiver.call_counter, 1)
self.assertEqual(set(args), set(SIGNAL_ARGS))
self.assertEqual(args['app_config'], APP_CONFIG)
self.assertEqual(args['verbosity'], MIGRATE_VERBOSITY)
self.assertEqual(args['interactive'], MIGRATE_INTERACTIVE)
self.assertEqual(args['using'], 'default')
self.assertIsInstance(args['plan'][0][0], migrations.Migration)
# The migration isn't applied backward.
self.assertFalse(args['plan'][0][1])
self.assertIsInstance(args['apps'], migrations.state.StateApps)
self.assertEqual(pre_migrate_receiver.call_args['apps'].get_models(), [])
self.assertEqual(
[model._meta.label for model in post_migrate_receiver.call_args['apps'].get_models()],
['migrate_signals.Signal']
)
# Migrating with an empty plan.
pre_migrate_receiver = Receiver(signals.pre_migrate)
post_migrate_receiver = Receiver(signals.post_migrate)
management.call_command(
'migrate', database=MIGRATE_DATABASE, verbosity=MIGRATE_VERBOSITY,
interactive=MIGRATE_INTERACTIVE, stdout=stdout,
)
self.assertEqual(
[model._meta.label for model in pre_migrate_receiver.call_args['apps'].get_models()],
['migrate_signals.Signal']
)
self.assertEqual(
[model._meta.label for model in post_migrate_receiver.call_args['apps'].get_models()],
['migrate_signals.Signal']
)
| bsd-3-clause |
ge0rgi/cinder | cinder/tests/unit/volume/drivers/violin/test_v7000_fcp.py | 7 | 20755 | # Copyright 2015 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 7000 Series All-Flash Array Fibrechannel Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.violin \
import fake_vmem_client as vmemclient
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v7000_common
from cinder.volume.drivers.violin import v7000_fcp
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant",
'wwpns': ['50014380186b3f65', '50014380186b3f67'],
}
FC_TARGET_WWPNS = [
'31000024ff45fb22', '21000024ff45fb23',
'51000024ff45f1be', '41000024ff45f1bf'
]
FC_INITIATOR_WWPNS = [
'50014380186b3f65', '50014380186b3f67'
]
FC_FABRIC_MAP = {
'fabricA':
{'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
'fabricB':
{'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
}
FC_INITIATOR_TARGET_MAP = {
FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
PHY_DEVICES_RESPONSE = {
'data':
{'physical_devices':
[{'availsize': 1099504287744,
'availsize_mb': 524284,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '3cc4d6dd-166d-77d2-4967-00005463f597',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN01 v1.0',
'is_foreign': True,
'name': 'BKSC:OTHDISK-MFCN01.000',
'object_id': '84b834fb-1f4d-5d3b-b7ae-5796f9868151',
'owner': 'example.com',
'pool': None,
'product': 'OTHDISK-MFCN01',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '0',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 1048569,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN01 v1.0-0-0-00'},
{'availsize': 1099504287744,
'availsize_mb': 524284,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '283b2694-192b-4745-6768-00005463f673',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN08 v1.0',
'is_foreign': False,
'name': 'BKSC:OTHDISK-MFCN08.000',
'object_id': '8555b888-bf43-5083-a433-f0c7b0282370',
'owner': 'example.com',
'pool':
{'name': 'mga-pool',
'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
'product': 'OTHDISK-MFCN08',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '11',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 1048569,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN08 v1.0-0-0-00'},
{'availsize': 1099504287744,
'availsize_mb': 1048569,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '7f47db19-019c-707d-0df1-00005463f949',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN09 v1.0',
'is_foreign': False,
'name': 'BKSC:OTHDISK-MFCN09.000',
'object_id': '62a98898-f8b8-5837-af2b-764f5a72e291',
'owner': 'a.b.c.d',
'pool':
{'name': 'mga-pool',
'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
'product': 'OTHDISK-MFCN09',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '12',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 524284,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN09 v1.0-0-0-00'}],
'total_physical_devices': 3},
'msg': 'Successful',
'success': True
}
# The FC_INFO dict returned by the backend is keyed on
# object_id of the FC adapter and the values are the
# wwmns
FC_INFO = {
'1a3cdb6a-383d-5ba6-a50b-4ba598074510': ['2100001b9745e25e'],
'4a6bc10a-5547-5cc0-94f2-76222a8f8dff': ['2100001b9745e230'],
'b21bfff5-d89e-51ff-9920-d990a061d722': ['2100001b9745e25f'],
'b508cc6b-f78a-51f9-81cf-47c1aaf53dd1': ['2100001b9745e231']
}
CLIENT_INFO = {
'FCPolicy':
{'AS400enabled': False,
'VSAenabled': False,
'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
'50-01-43-80-18-6b-3f-64']},
'FibreChannelDevices':
[{'access': 'ReadWrite',
'id': 'v0000004',
'initiatorWWPN': '*',
'lun': '8',
'name': 'abcdabcd-1234-abcd-1234-abcdeffedcba',
'sizeMB': 10240,
'targetWWPN': '*',
'type': 'SAN'}]
}
CLIENT_INFO1 = {
'FCPolicy':
{'AS400enabled': False,
'VSAenabled': False,
'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
'50-01-43-80-18-6b-3f-64']},
'FibreChannelDevices': []
}
class V7000FCPDriverTestCase(test.TestCase):
"""Test cases for VMEM FCP driver."""
def setUp(self):
super(V7000FCPDriverTestCase, self).setUp()
self.conf = self.setup_configuration()
self.driver = v7000_fcp.V7000FCPDriver(configuration=self.conf)
self.driver.common.container = 'myContainer'
self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
self.stats = {}
self.driver.set_initialized()
def setup_configuration(self):
config = mock.Mock(spec=conf.Configuration)
config.volume_backend_name = 'v7000_fcp'
config.san_ip = '8.8.8.8'
config.san_login = 'admin'
config.san_password = ''
config.san_thin_provision = False
config.san_is_local = False
config.request_timeout = 300
config.container = 'myContainer'
return config
def setup_mock_concerto(self, m_conf=None):
"""Create a fake Concerto communication object."""
_m_concerto = mock.Mock(name='Concerto',
version='1.1.1',
spec=vmemclient.mock_client_conf)
if m_conf:
_m_concerto.configure_mock(**m_conf)
return _m_concerto
@mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
def test_check_for_setup_error(self, m_setup_func):
"""No setup errors are found."""
result = self.driver.check_for_setup_error()
m_setup_func.assert_called_with()
self.assertIsNone(result)
@mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
"""No wwns were found during setup."""
self.driver.gateway_fc_wwns = []
failure = exception.ViolinInvalidBackendConfig
self.assertRaises(failure, self.driver.check_for_setup_error)
def test_create_volume(self):
"""Volume created successfully."""
self.driver.common._create_lun = mock.Mock()
result = self.driver.create_volume(VOLUME)
self.driver.common._create_lun.assert_called_with(VOLUME)
self.assertIsNone(result)
def test_create_volume_from_snapshot(self):
self.driver.common._create_volume_from_snapshot = mock.Mock()
result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
self.driver.common._create_volume_from_snapshot.assert_called_with(
SNAPSHOT, VOLUME)
self.assertIsNone(result)
def test_create_cloned_volume(self):
self.driver.common._create_lun_from_lun = mock.Mock()
result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
self.driver.common._create_lun_from_lun.assert_called_with(
SRC_VOL, VOLUME)
self.assertIsNone(result)
def test_delete_volume(self):
"""Volume deleted successfully."""
self.driver.common._delete_lun = mock.Mock()
result = self.driver.delete_volume(VOLUME)
self.driver.common._delete_lun.assert_called_with(VOLUME)
self.assertIsNone(result)
def test_extend_volume(self):
"""Volume extended successfully."""
new_size = 10
self.driver.common._extend_lun = mock.Mock()
result = self.driver.extend_volume(VOLUME, new_size)
self.driver.common._extend_lun.assert_called_with(VOLUME, new_size)
self.assertIsNone(result)
def test_create_snapshot(self):
self.driver.common._create_lun_snapshot = mock.Mock()
result = self.driver.create_snapshot(SNAPSHOT)
self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertIsNone(result)
def test_delete_snapshot(self):
self.driver.common._delete_lun_snapshot = mock.Mock()
result = self.driver.delete_snapshot(SNAPSHOT)
self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertIsNone(result)
def test_get_volume_stats(self):
self.driver._update_volume_stats = mock.Mock()
self.driver._update_volume_stats()
result = self.driver.get_volume_stats(True)
self.driver._update_volume_stats.assert_called_with()
self.assertEqual(self.driver.stats, result)
@mock.patch('socket.gethostbyaddr')
def test_update_volume_stats(self, mock_gethost):
"""Test Update Volume Stats.
Makes a mock query to the backend to collect stats on all physical
devices.
"""
def gethostbyaddr(addr):
if addr == '8.8.8.8' or addr == 'example.com':
return ('example.com', [], ['8.8.8.8'])
else:
return ('a.b.c.d', [], addr)
mock_gethost.side_effect = gethostbyaddr
backend_name = self.conf.volume_backend_name
vendor_name = "Violin Memory, Inc."
tot_gb = 2046
free_gb = 1022
phy_devices = "/batch/physicalresource/physicaldevice"
conf = {
'basic.get.side_effect': [PHY_DEVICES_RESPONSE, ],
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
result = self.driver._update_volume_stats()
calls = [mock.call(phy_devices)]
self.driver.common.vmem_mg.basic.get.assert_has_calls(calls)
self.assertEqual(tot_gb, self.driver.stats['total_capacity_gb'])
self.assertEqual(free_gb, self.driver.stats['free_capacity_gb'])
self.assertEqual(backend_name,
self.driver.stats['volume_backend_name'])
self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
self.assertIsNone(result)
def test_get_active_fc_targets(self):
"""Test Get Active FC Targets.
Makes a mock query to the backend to collect all the physical
adapters and extract the WWNs.
"""
conf = {
'adapter.get_fc_info.return_value': FC_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
result = self.driver._get_active_fc_targets()
self.assertEqual({'2100001b9745e230', '2100001b9745e25f',
'2100001b9745e231', '2100001b9745e25e'},
set(result))
def test_initialize_connection(self):
lun_id = 1
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
conf = {
'client.create_client.return_value': None,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.driver._export_lun = mock.Mock(return_value=lun_id)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver.common.vmem_mg.client.create_client.assert_called_with(
name=CONNECTOR['host'], proto='FC', fc_wwns=CONNECTOR['wwpns'])
self.driver._export_lun.assert_called_with(VOLUME, CONNECTOR)
self.driver._build_initiator_target_map.assert_called_with(
CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertTrue(props['data']['target_discovered'])
self.assertEqual(self.driver.gateway_fc_wwns,
props['data']['target_wwn'])
self.assertEqual(lun_id, props['data']['target_lun'])
def test_terminate_connection(self):
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver._unexport_lun = mock.Mock()
self.driver._is_initiator_connected_to_array = mock.Mock(
return_value=False)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver._unexport_lun.assert_called_with(VOLUME, CONNECTOR)
self.driver._is_initiator_connected_to_array.assert_called_with(
CONNECTOR)
self.driver._build_initiator_target_map.assert_called_with(
CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertEqual(target_wwns, props['data']['target_wwn'])
self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
def test_export_lun(self):
lun_id = '1'
response = {'success': True, 'msg': 'Assign SAN client successfully'}
conf = {
'client.get_client_info.return_value': CLIENT_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.driver.common._send_cmd_and_verify = mock.Mock(
return_value=response)
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
result = self.driver._export_lun(VOLUME, CONNECTOR)
self.driver.common._send_cmd_and_verify.assert_called_with(
self.driver.common.vmem_mg.lun.assign_lun_to_client,
self.driver._is_lun_id_ready,
'Assign SAN client successfully',
[VOLUME['id'], CONNECTOR['host'], "ReadWrite"],
[VOLUME['id'], CONNECTOR['host']])
self.driver._get_lun_id.assert_called_with(
VOLUME['id'], CONNECTOR['host'])
self.assertEqual(lun_id, result)
def test_export_lun_fails_with_exception(self):
lun_id = '1'
response = {'status': False, 'msg': 'Generic error'}
failure = exception.ViolinBackendErr
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver.common._send_cmd_and_verify = mock.Mock(
side_effect=exception.ViolinBackendErr(response['msg']))
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
self.assertRaises(failure, self.driver._export_lun, VOLUME, CONNECTOR)
def test_unexport_lun(self):
response = {'success': True, 'msg': 'Unassign SAN client successfully'}
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver.common._send_cmd = mock.Mock(
return_value=response)
result = self.driver._unexport_lun(VOLUME, CONNECTOR)
self.driver.common._send_cmd.assert_called_with(
self.driver.common.vmem_mg.lun.unassign_client_lun,
"Unassign SAN client successfully",
VOLUME['id'], CONNECTOR['host'], True)
self.assertIsNone(result)
def test_get_lun_id(self):
conf = {
'client.get_client_info.return_value': CLIENT_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host'])
self.assertEqual(8, result)
def test_is_lun_id_ready(self):
lun_id = '1'
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
result = self.driver._is_lun_id_ready(
VOLUME['id'], CONNECTOR['host'])
self.assertTrue(result)
def test_build_initiator_target_map(self):
"""Successfully build a map when zoning is enabled."""
expected_targ_wwns = FC_TARGET_WWPNS
self.driver.lookup_service = mock.Mock()
(self.driver.lookup_service.get_device_mapping_from_network.
return_value) = FC_FABRIC_MAP
result = self.driver._build_initiator_target_map(CONNECTOR)
(targ_wwns, init_targ_map) = result
(self.driver.lookup_service.get_device_mapping_from_network.
assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns))
self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
i = FC_INITIATOR_WWPNS[0]
self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i])
self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i])
self.assertEqual(2, len(init_targ_map[i]))
i = FC_INITIATOR_WWPNS[1]
self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i])
self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i])
self.assertEqual(2, len(init_targ_map[i]))
self.assertEqual(2, len(init_targ_map))
def test_build_initiator_target_map_no_lookup_service(self):
"""Successfully build a map when zoning is disabled."""
expected_targ_wwns = FC_TARGET_WWPNS
expected_init_targ_map = {
CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
}
self.driver.lookup_service = None
targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
CONNECTOR)
self.assertEqual(expected_targ_wwns, targ_wwns)
self.assertEqual(expected_init_targ_map, init_targ_map)
def test_is_initiator_connected_to_array(self):
"""Successfully finds an initiator with remaining active session."""
conf = {
'client.get_client_info.return_value': CLIENT_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.assertTrue(self.driver._is_initiator_connected_to_array(
CONNECTOR))
self.driver.common.vmem_mg.client.get_client_info.assert_called_with(
CONNECTOR['host'])
def test_is_initiator_connected_to_array_empty_response(self):
"""Successfully finds no initiators with remaining active sessions."""
conf = {
'client.get_client_info.return_value': CLIENT_INFO1
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.assertFalse(self.driver._is_initiator_connected_to_array(
CONNECTOR))
| apache-2.0 |
CyanogenMod/android_kernel_zte_msm8994 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some event type classes, which could
# be used by other scripts to analyze perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type, but by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
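# Indicative dispatch (sizes as documented above; a sketch only):
#
#   ev = create_event("cycles", "bash", "/bin/bash", "main", "\0" * 144)
#   ev.show()  # a PebsEvent, because the raw buffer is exactly 144 bytes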
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
| gpl-2.0 |
omarkhan/ansible-modules-core | cloud/openstack/os_floating_ip.py | 103 | 6540 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Author: Davide Guerri <davide.guerri@hp.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_floating_ip
version_added: "2.0"
short_description: Add/Remove floating IP from an instance
extends_documentation_fragment: openstack
description:
- Add or Remove a floating IP to an instance
options:
server:
description:
- The name or ID of the instance to which the IP address
should be assigned.
required: true
network:
description:
- The name or ID of a neutron external network or a nova pool name.
required: false
floating_ip_address:
description:
- A floating IP address to attach or to detach. Required only if state
is absent. When state is present can be used to specify a IP address
to attach.
required: false
reuse:
description:
- When state is present, and floating_ip_address is not present,
this parameter can be used to specify whether we should try to reuse
a floating IP address already allocated to the project.
required: false
default: false
fixed_address:
description:
        - The fixed IP address of the server to which the floating IP
          should be attached.
required: false
wait:
description:
- When attaching a floating IP address, specify whether we should
wait for it to appear as attached.
required: false
default: false
timeout:
description:
- Time to wait for an IP address to appear as attached. See wait.
required: false
default: 60
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
required: false
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Assign a floating IP to the first interface of `cattle001` from an existing
# external network or nova pool. A new floating IP from the first available
# external network is allocated to the project.
- os_floating_ip:
cloud: dguerri
server: cattle001
# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
# `cattle001`. If a free floating IP is already allocated to the project, it is
# reused; if not, a new one is created.
- os_floating_ip:
cloud: dguerri
state: present
reuse: yes
server: cattle001
network: ext_net
fixed_address: 192.0.2.3
wait: true
timeout: 180
# Detach a floating IP address from a server
- os_floating_ip:
cloud: dguerri
state: absent
floating_ip_address: 203.0.113.2
server: cattle001
'''
def _get_floating_ip(cloud, floating_ip_address):
f_ips = cloud.search_floating_ips(
filters={'floating_ip_address': floating_ip_address})
if not f_ips:
return None
return f_ips[0]
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
network=dict(required=False),
floating_ip_address=dict(required=False),
reuse=dict(required=False, type='bool', default=False),
fixed_address=dict(required=False),
wait=dict(required=False, type='bool', default=False),
timeout=dict(required=False, type='int', default=60),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
server_name_or_id = module.params['server']
state = module.params['state']
network = module.params['network']
floating_ip_address = module.params['floating_ip_address']
reuse = module.params['reuse']
fixed_address = module.params['fixed_address']
wait = module.params['wait']
timeout = module.params['timeout']
cloud = shade.openstack_cloud(**module.params)
try:
server = cloud.get_server(server_name_or_id)
if server is None:
module.fail_json(
msg="server {0} not found".format(server_name_or_id))
if state == 'present':
if floating_ip_address is None:
if reuse:
f_ip = cloud.available_floating_ip(network=network)
else:
f_ip = cloud.create_floating_ip(network=network)
else:
f_ip = _get_floating_ip(cloud, floating_ip_address)
if f_ip is None:
module.fail_json(
msg="floating IP {0} not found".format(
floating_ip_address))
cloud.attach_ip_to_server(
server_id=server['id'], floating_ip_id=f_ip['id'],
fixed_address=fixed_address, wait=wait, timeout=timeout)
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
module.exit_json(changed=True, floating_ip=f_ip)
elif state == 'absent':
if floating_ip_address is None:
module.fail_json(msg="floating_ip_address is required")
            f_ip = _get_floating_ip(cloud, floating_ip_address)
            if f_ip is None:
                module.fail_json(
                    msg="floating IP {0} not found".format(
                        floating_ip_address))
cloud.detach_ip_from_server(
server_id=server['id'], floating_ip_id=f_ip['id'])
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
module.exit_json(changed=True, floating_ip=f_ip)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
bigdatauniversity/edx-platform | common/lib/xmodule/xmodule/x_module.py | 10 | 70810 | import logging
import os
import sys
import time
import yaml
from contracts import contract, new_contract
from functools import partial
from lxml import etree
from collections import namedtuple
from pkg_resources import (
resource_exists,
resource_listdir,
resource_string,
resource_isdir,
)
from webob import Response
from webob.multidict import MultiDict
from lazy import lazy
from xblock.core import XBlock, XBlockAside
from xblock.fields import (
Scope, Integer, Float, List,
String, Dict, ScopeIds, Reference, ReferenceList,
ReferenceValueDict, UserScope
)
from xblock.fragment import Fragment
from xblock.runtime import Runtime, IdReader, IdGenerator
from xmodule import course_metadata_utils
from xmodule.fields import RelativeTime
from xmodule.errortracker import exc_info_to_str
from xmodule.modulestore.exceptions import ItemNotFoundError
from opaque_keys.edx.keys import UsageKey
from opaque_keys.edx.asides import AsideUsageKeyV1, AsideDefinitionKeyV1
from xmodule.exceptions import UndefinedContext
import dogstats_wrapper as dog_stats_api
log = logging.getLogger(__name__)
XMODULE_METRIC_NAME = 'edxapp.xmodule'
XMODULE_DURATION_METRIC_NAME = XMODULE_METRIC_NAME + '.duration'
XMODULE_METRIC_SAMPLE_RATE = 0.1
# Stats event sent to DataDog in order to determine if old XML parsing can be deprecated.
DEPRECATION_VSCOMPAT_EVENT = 'deprecation.vscompat'
# xblock view names
# This is the view that will be rendered to display the XBlock in the LMS.
# It will also be used to render the block in "preview" mode in Studio, unless
# the XBlock also implements author_view.
STUDENT_VIEW = 'student_view'
# An optional view of the XBlock similar to student_view, but with possible inline
# editing capabilities. This view differs from studio_view in that it should be as similar to student_view
# as possible. When previewing XBlocks within Studio, Studio will prefer author_view to student_view.
AUTHOR_VIEW = 'author_view'
# The view used to render an editor in Studio. The editor rendering can be completely different
# from the LMS student_view, and it is only shown when the author selects "Edit".
STUDIO_VIEW = 'studio_view'
# Views that present a "preview" view of an xblock (as opposed to an editing view).
PREVIEW_VIEWS = [STUDENT_VIEW, AUTHOR_VIEW]
class OpaqueKeyReader(IdReader):
"""
IdReader for :class:`DefinitionKey` and :class:`UsageKey`s.
"""
def get_definition_id(self, usage_id):
"""Retrieve the definition that a usage is derived from.
Args:
usage_id: The id of the usage to query
Returns:
The `definition_id` the usage is derived from
"""
raise NotImplementedError("Specific Modulestores must implement get_definition_id")
def get_block_type(self, def_id):
"""Retrieve the block_type of a particular definition
Args:
def_id: The id of the definition to query
Returns:
The `block_type` of the definition
"""
return def_id.block_type
def get_usage_id_from_aside(self, aside_id):
"""
Retrieve the XBlock `usage_id` associated with this aside usage id.
Args:
aside_id: The usage id of the XBlockAside.
Returns:
The `usage_id` of the usage the aside is commenting on.
"""
return aside_id.usage_key
def get_definition_id_from_aside(self, aside_id):
"""
Retrieve the XBlock `definition_id` associated with this aside definition id.
Args:
aside_id: The usage id of the XBlockAside.
Returns:
The `definition_id` of the usage the aside is commenting on.
"""
return aside_id.definition_key
def get_aside_type_from_usage(self, aside_id):
"""
Retrieve the XBlockAside `aside_type` associated with this aside
usage id.
Args:
aside_id: The usage id of the XBlockAside.
Returns:
The `aside_type` of the aside.
"""
return aside_id.aside_type
def get_aside_type_from_definition(self, aside_id):
"""
Retrieve the XBlockAside `aside_type` associated with this aside
definition id.
Args:
aside_id: The definition id of the XBlockAside.
Returns:
The `aside_type` of the aside.
"""
return aside_id.aside_type
class AsideKeyGenerator(IdGenerator):
"""
An :class:`.IdGenerator` that only provides facilities for constructing new XBlockAsides.
"""
def create_aside(self, definition_id, usage_id, aside_type):
"""
Make a new aside definition and usage ids, indicating an :class:`.XBlockAside` of type `aside_type`
commenting on an :class:`.XBlock` usage `usage_id`
Returns:
(aside_definition_id, aside_usage_id)
"""
def_key = AsideDefinitionKeyV1(definition_id, aside_type)
usage_key = AsideUsageKeyV1(usage_id, aside_type)
return (def_key, usage_key)
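        # For example (hypothetical ids): create_aside(def_id, usage_id, 'tagging')
        # returns (AsideDefinitionKeyV1(def_id, 'tagging'),
        #          AsideUsageKeyV1(usage_id, 'tagging')).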
def create_usage(self, def_id):
"""Make a usage, storing its definition id.
Returns the newly-created usage id.
"""
raise NotImplementedError("Specific Modulestores must provide implementations of create_usage")
def create_definition(self, block_type, slug=None):
"""Make a definition, storing its block type.
If `slug` is provided, it is a suggestion that the definition id
incorporate the slug somehow.
Returns the newly-created definition id.
"""
raise NotImplementedError("Specific Modulestores must provide implementations of create_definition")
def dummy_track(_event_type, _event):
pass
class HTMLSnippet(object):
"""
A base class defining an interface for an object that is able to present an
html snippet, along with associated javascript and css
"""
js = {}
js_module_name = None
css = {}
@classmethod
def get_javascript(cls):
"""
Return a dictionary containing some of the following keys:
coffee: A list of coffeescript fragments that should be compiled and
placed on the page
js: A list of javascript fragments that should be included on the
page
All of these will be loaded onto the page in the CMS
"""
# cdodge: We've moved the xmodule.coffee script from an outside directory into the xmodule area of common
# this means we need to make sure that all xmodules include this dependency which had been previously implicitly
# fulfilled in a different area of code
coffee = cls.js.setdefault('coffee', [])
js = cls.js.setdefault('js', [])
# Added xmodule.js separately to enforce 000 prefix for this only.
cls.js.setdefault('xmodule_js', resource_string(__name__, 'js/src/xmodule.js'))
return cls.js
@classmethod
def get_css(cls):
"""
Return a dictionary containing some of the following keys:
css: A list of css fragments that should be applied to the html
contents of the snippet
sass: A list of sass fragments that should be applied to the html
contents of the snippet
scss: A list of scss fragments that should be applied to the html
contents of the snippet
"""
return cls.css
def get_html(self):
"""
Return the html used to display this snippet
"""
raise NotImplementedError(
"get_html() must be provided by specific modules - not present in {0}"
.format(self.__class__))
def shim_xmodule_js(block, fragment):
"""
Set up the XBlock -> XModule shim on the supplied :class:`xblock.fragment.Fragment`
"""
if not fragment.js_init_fn:
fragment.initialize_js('XBlockToXModuleShim')
fragment.json_init_args = {'xmodule-type': block.js_module_name}
class XModuleFields(object):
"""
Common fields for XModules.
"""
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_with_default for those
default=None
)
class XModuleMixin(XModuleFields, XBlock):
"""
Fields and methods used by XModules internally.
Adding this Mixin to an :class:`XBlock` allows it to cooperate with old-style :class:`XModules`
"""
# Attributes for inspection of the descriptor
# This indicates whether the xmodule is a problem-type.
# It should respond to max_score() and grade(). It can be graded or ungraded
# (like a practice problem).
has_score = False
# Whether this module can be displayed in read-only mode. It is safe to set this to True if
# all user state is handled through the FieldData API.
show_in_read_only_mode = False
# Class level variable
# True if this descriptor always requires recalculation of grades, for
    # example if the score can change via an external service, not just when the
# student interacts with the module on the page. A specific example is
# FoldIt, which posts grade-changing updates through a separate API.
always_recalculate_grades = False
# The default implementation of get_icon_class returns the icon_class
# attribute of the class
#
# This attribute can be overridden by subclasses, and
# the function can also be overridden if the icon class depends on the data
# in the module
icon_class = 'other'
def __init__(self, *args, **kwargs):
self.xmodule_runtime = None
super(XModuleMixin, self).__init__(*args, **kwargs)
@property
def runtime(self):
return CombinedSystem(self.xmodule_runtime, self._runtime)
@runtime.setter
def runtime(self, value):
self._runtime = value
@property
def system(self):
"""
Return the XBlock runtime (backwards compatibility alias provided for XModules).
"""
return self.runtime
@property
def course_id(self):
return self.location.course_key
@property
def category(self):
return self.scope_ids.block_type
@property
def location(self):
return self.scope_ids.usage_id
@location.setter
def location(self, value):
assert isinstance(value, UsageKey)
self.scope_ids = self.scope_ids._replace(
def_id=value,
usage_id=value,
)
@property
def url_name(self):
return course_metadata_utils.url_name_for_course_location(self.location)
@property
def display_name_with_default(self):
"""
Return a display name for the module: use display_name if defined in
metadata, otherwise convert the url name.
"""
return course_metadata_utils.display_name_with_default(self)
@property
def xblock_kvs(self):
"""
Retrieves the internal KeyValueStore for this XModule.
Should only be used by the persistence layer. Use with caution.
"""
# if caller wants kvs, caller's assuming it's up to date; so, decache it
self.save()
return self._field_data._kvs # pylint: disable=protected-access
@lazy
def _unwrapped_field_data(self):
"""
        This property holds the value of _field_data before we wrap it in
the LmsFieldData or OverrideFieldData classes.
"""
return self._field_data
def get_explicitly_set_fields_by_scope(self, scope=Scope.content):
"""
Get a dictionary of the fields for the given scope which are set explicitly on this xblock. (Including
any set to None.)
"""
result = {}
for field in self.fields.values():
if field.scope == scope and field.is_set_on(self):
result[field.name] = field.read_json(self)
return result
def has_children_at_depth(self, depth):
r"""
Returns true if self has children at the given depth. depth==0 returns
false if self is a leaf, true otherwise.
SELF
|
[child at depth 0]
/ \
[depth 1] [depth 1]
/ \
[depth 2] [depth 2]
So the example above would return True for `has_children_at_depth(2)`, and False
for depth > 2
"""
if depth < 0:
raise ValueError("negative depth argument is invalid")
elif depth == 0:
return bool(self.get_children())
else:
return any(child.has_children_at_depth(depth - 1) for child in self.get_children())
def get_content_titles(self):
r"""
Returns list of content titles for all of self's children.
SEQUENCE
|
VERTICAL
/ \
SPLIT_TEST DISCUSSION
/ \
VIDEO A VIDEO B
Essentially, this function returns a list of display_names (e.g. content titles)
for all of the leaf nodes. In the diagram above, calling get_content_titles on
SEQUENCE would return the display_names of `VIDEO A`, `VIDEO B`, and `DISCUSSION`.
This is most obviously useful for sequence_modules, which need this list to display
tooltips to users, though in theory this should work for any tree that needs
the display_names of all its leaf nodes.
"""
if self.has_children:
return sum((child.get_content_titles() for child in self.get_children()), [])
else:
return [self.display_name_with_default]
def get_children(self, usage_id_filter=None, usage_key_filter=None): # pylint: disable=arguments-differ
"""Returns a list of XBlock instances for the children of
this module"""
# Be backwards compatible with callers using usage_key_filter
if usage_id_filter is None and usage_key_filter is not None:
usage_id_filter = usage_key_filter
return [
child
for child
in super(XModuleMixin, self).get_children(usage_id_filter)
if child is not None
]
def get_child(self, usage_id):
"""
Return the child XBlock identified by ``usage_id``, or ``None`` if there
is an error while retrieving the block.
"""
try:
child = super(XModuleMixin, self).get_child(usage_id)
except ItemNotFoundError:
log.warning(u'Unable to load item %s, skipping', usage_id)
dog_stats_api.increment(
"xmodule.item_not_found_error",
tags=[
u"course_id:{}".format(usage_id.course_key),
u"block_type:{}".format(usage_id.block_type),
u"parent_block_type:{}".format(self.location.block_type),
]
)
return None
if child is None:
return None
child.runtime.export_fs = self.runtime.export_fs
return child
def get_required_module_descriptors(self):
"""Returns a list of XModuleDescriptor instances upon which this module depends, but are
not children of this module"""
return []
def get_display_items(self):
"""
Returns a list of descendent module instances that will display
immediately inside this module.
"""
items = []
for child in self.get_children():
items.extend(child.displayable_items())
return items
def displayable_items(self):
"""
Returns list of displayable modules contained by this module. If this
module is visible, should return [self].
"""
return [self]
def get_child_by(self, selector):
"""
Return a child XBlock that matches the specified selector
"""
for child in self.get_children():
if selector(child):
return child
return None
def get_icon_class(self):
"""
Return a css class identifying this module in the context of an icon
"""
return self.icon_class
def has_dynamic_children(self):
"""
Returns True if this descriptor has dynamic children for a given
student when the module is created.
Returns False if the children of this descriptor are the same
children that the module will return for any student.
"""
return False
# Functions used in the LMS
def get_score(self):
"""
Score the student received on the problem, or None if there is no
score.
Returns:
dictionary
{'score': integer, from 0 to get_max_score(),
'total': get_max_score()}
NOTE (vshnayder): not sure if this was the intended return value, but
that's what it's doing now. I suspect that we really want it to just
return a number. Would need to change (at least) capa to match if we did that.
"""
return None
def max_score(self):
""" Maximum score. Two notes:
        * This is generic; in the abstract, a problem could be 3/5 points on one
randomization, and 5/7 on another
* In practice, this is a Very Bad Idea, and (a) will break some code
in place (although that code should get fixed), and (b) break some
analytics we plan to put in place.
"""
return None
def get_progress(self):
""" Return a progress.Progress object that represents how far the
student has gone in this module. Must be implemented to get correct
progress tracking behavior in nesting modules like sequence and
vertical.
If this module has no notion of progress, return None.
"""
return None
def bind_for_student(self, xmodule_runtime, user_id, wrappers=None):
"""
Set up this XBlock to act as an XModule instead of an XModuleDescriptor.
Arguments:
xmodule_runtime (:class:`ModuleSystem'): the runtime to use when accessing student facing methods
user_id: The user_id to set in scope_ids
            wrappers: These are a list of functions that put a wrapper, such as
LmsFieldData or OverrideFieldData, around the field_data.
Note that the functions will be applied in the order in
which they're listed. So [f1, f2] -> f2(f1(field_data))
"""
# pylint: disable=attribute-defined-outside-init
        # Skip rebinding if we're already bound to a user, and it's this user.
if self.scope_ids.user_id is not None and user_id == self.scope_ids.user_id:
if getattr(xmodule_runtime, 'position', None):
self.position = xmodule_runtime.position # update the position of the tab
return
# If we are switching users mid-request, save the data from the old user.
self.save()
# Update scope_ids to point to the new user.
self.scope_ids = self.scope_ids._replace(user_id=user_id)
# Clear out any cached instantiated children.
self.clear_child_cache()
# Clear out any cached field data scoped to the old user.
for field in self.fields.values():
if field.scope in (Scope.parent, Scope.children):
continue
if field.scope.user == UserScope.ONE:
field._del_cached_value(self) # pylint: disable=protected-access
# not the most elegant way of doing this, but if we're removing
# a field from the module's field_data_cache, we should also
# remove it from its _dirty_fields
if field in self._dirty_fields:
del self._dirty_fields[field]
# Set the new xmodule_runtime and field_data (which are user-specific)
self.xmodule_runtime = xmodule_runtime
if wrappers is None:
wrappers = []
wrapped_field_data = self._unwrapped_field_data
for wrapper in wrappers:
wrapped_field_data = wrapper(wrapped_field_data)
self._field_data = wrapped_field_data
@property
def non_editable_metadata_fields(self):
"""
Return the list of fields that should not be editable in Studio.
When overriding, be sure to append to the superclasses' list.
"""
# We are not allowing editing of xblock tag and name fields at this time (for any component).
return [XBlock.tags, XBlock.name]
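    # Illustrative override (hypothetical subclass, following the docstring's
    # instruction to append to the superclasses' list):
    #
    #     @property
    #     def non_editable_metadata_fields(self):
    #         non_editable = super(MyBlock, self).non_editable_metadata_fields
    #         non_editable.append(MyBlock.due)
    #         return non_editable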
@property
def editable_metadata_fields(self):
"""
Returns the metadata fields to be edited in Studio. These are fields with scope `Scope.settings`.
Can be limited by extending `non_editable_metadata_fields`.
"""
metadata_fields = {}
# Only use the fields from this class, not mixins
fields = getattr(self, 'unmixed_class', self.__class__).fields
for field in fields.values():
if field in self.non_editable_metadata_fields:
continue
if field.scope not in (Scope.settings, Scope.content):
continue
metadata_fields[field.name] = self._create_metadata_editor_info(field)
return metadata_fields
def _create_metadata_editor_info(self, field):
"""
Creates the information needed by the metadata editor for a specific field.
"""
def jsonify_value(field, json_choice):
"""
Convert field value to JSON, if needed.
"""
if isinstance(json_choice, dict):
new_json_choice = dict(json_choice) # make a copy so below doesn't change the original
if 'display_name' in json_choice:
new_json_choice['display_name'] = get_text(json_choice['display_name'])
if 'value' in json_choice:
new_json_choice['value'] = field.to_json(json_choice['value'])
else:
new_json_choice = field.to_json(json_choice)
return new_json_choice
def get_text(value):
"""Localize a text value that might be None."""
if value is None:
return None
else:
return self.runtime.service(self, "i18n").ugettext(value)
# gets the 'default_value' and 'explicitly_set' attrs
metadata_field_editor_info = self.runtime.get_field_provenance(self, field)
metadata_field_editor_info['field_name'] = field.name
metadata_field_editor_info['display_name'] = get_text(field.display_name)
metadata_field_editor_info['help'] = get_text(field.help)
metadata_field_editor_info['value'] = field.read_json(self)
# We support the following editors:
# 1. A select editor for fields with a list of possible values (includes Booleans).
# 2. Number editors for integers and floats.
# 3. A generic string editor for anything else (editing JSON representation of the value).
editor_type = "Generic"
values = field.values
if "values_provider" in field.runtime_options:
values = field.runtime_options['values_provider'](self)
if isinstance(values, (tuple, list)) and len(values) > 0:
editor_type = "Select"
values = [jsonify_value(field, json_choice) for json_choice in values]
elif isinstance(field, Integer):
editor_type = "Integer"
elif isinstance(field, Float):
editor_type = "Float"
elif isinstance(field, List):
editor_type = "List"
elif isinstance(field, Dict):
editor_type = "Dict"
elif isinstance(field, RelativeTime):
editor_type = "RelativeTime"
elif isinstance(field, String) and field.name == "license":
editor_type = "License"
metadata_field_editor_info['type'] = editor_type
metadata_field_editor_info['options'] = [] if values is None else values
return metadata_field_editor_info
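        # For example (illustrative): an Integer settings field named
        # 'max_attempts' currently set to 3 would yield, in addition to the
        # provenance keys above, something like:
        #     {'field_name': 'max_attempts', 'display_name': ..., 'help': ...,
        #      'value': 3, 'type': 'Integer', 'options': []}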
class ProxyAttribute(object):
"""
A (python) descriptor that proxies attribute access.
For example:
class Foo(object):
def __init__(self, value):
self.foo_attr = value
class Bar(object):
foo = Foo('x')
foo_attr = ProxyAttribute('foo', 'foo_attr')
bar = Bar()
assert bar.foo_attr == 'x'
bar.foo_attr = 'y'
assert bar.foo.foo_attr == 'y'
del bar.foo_attr
assert not hasattr(bar.foo, 'foo_attr')
"""
def __init__(self, source, name):
"""
:param source: The name of the attribute to proxy to
:param name: The name of the attribute to proxy
"""
self._source = source
self._name = name
def __get__(self, instance, owner):
if instance is None:
return self
return getattr(getattr(instance, self._source), self._name)
def __set__(self, instance, value):
setattr(getattr(instance, self._source), self._name, value)
def __delete__(self, instance):
delattr(getattr(instance, self._source), self._name)
module_attr = partial(ProxyAttribute, '_xmodule') # pylint: disable=invalid-name
descriptor_attr = partial(ProxyAttribute, 'descriptor') # pylint: disable=invalid-name
module_runtime_attr = partial(ProxyAttribute, 'xmodule_runtime') # pylint: disable=invalid-name
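# Illustrative use (mirroring XModuleDescriptor below): declaring
#     get_score = module_attr('get_score')
# on a descriptor class makes `descriptor.get_score` delegate to
# `descriptor._xmodule.get_score` via ProxyAttribute.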
@XBlock.needs("i18n")
class XModule(HTMLSnippet, XModuleMixin):
""" Implements a generic learning module.
Subclasses must at a minimum provide a definition for get_html in order
to be displayed to users.
See the HTML module for a simple example.
"""
entry_point = "xmodule.v1"
has_score = descriptor_attr('has_score')
show_in_read_only_mode = descriptor_attr('show_in_read_only_mode')
_field_data_cache = descriptor_attr('_field_data_cache')
_field_data = descriptor_attr('_field_data')
_dirty_fields = descriptor_attr('_dirty_fields')
def __init__(self, descriptor, *args, **kwargs):
"""
Construct a new xmodule
runtime: An XBlock runtime allowing access to external resources
descriptor: the XModuleDescriptor that this module is an instance of.
field_data: A dictionary-like object that maps field names to values
for those fields.
"""
# Set the descriptor first so that we can proxy to it
self.descriptor = descriptor
self._runtime = None
super(XModule, self).__init__(*args, **kwargs)
self.runtime.xmodule_instance = self
@property
def runtime(self):
return CombinedSystem(self._runtime, self.descriptor._runtime) # pylint: disable=protected-access
@runtime.setter
def runtime(self, value): # pylint: disable=arguments-differ
self._runtime = value
def __unicode__(self):
return u'<x_module(id={0})>'.format(self.id)
def handle_ajax(self, _dispatch, _data):
""" dispatch is last part of the URL.
data is a dictionary-like object with the content of the request"""
return u""
@XBlock.handler
def xmodule_handler(self, request, suffix=None):
"""
XBlock handler that wraps `handle_ajax`
"""
class FileObjForWebobFiles(object):
"""
Turn Webob cgi.FieldStorage uploaded files into pure file objects.
Webob represents uploaded files as cgi.FieldStorage objects, which
have a .file attribute. We wrap the FieldStorage object, delegating
attribute access to the .file attribute. But the files have no
name, so we carry the FieldStorage .filename attribute as the .name.
"""
def __init__(self, webob_file):
self.file = webob_file.file
self.name = webob_file.filename
def __getattr__(self, name):
return getattr(self.file, name)
# WebOb requests have multiple entries for uploaded files. handle_ajax
# expects a single entry as a list.
request_post = MultiDict(request.POST)
for key in set(request.POST.iterkeys()):
if hasattr(request.POST[key], "file"):
request_post[key] = map(FileObjForWebobFiles, request.POST.getall(key))
response_data = self.handle_ajax(suffix, request_post)
return Response(response_data, content_type='application/json')
def get_child(self, usage_id):
if usage_id in self._child_cache:
return self._child_cache[usage_id]
# Take advantage of the children cache that the descriptor might have
child_descriptor = self.descriptor.get_child(usage_id)
child_block = None
if child_descriptor is not None:
child_block = self.system.get_module(child_descriptor)
self._child_cache[usage_id] = child_block
return child_block
def get_child_descriptors(self):
"""
Returns the descriptors of the child modules
Overriding this changes the behavior of get_children and
anything that uses get_children, such as get_display_items.
This method will not instantiate the modules of the children
unless absolutely necessary, so it is cheaper to call than get_children
These children will be the same children returned by the
descriptor unless descriptor.has_dynamic_children() is true.
"""
return self.descriptor.get_children()
def displayable_items(self):
"""
Returns list of displayable modules contained by this module. If this
module is visible, should return [self].
"""
return [self.descriptor]
# ~~~~~~~~~~~~~~~ XBlock API Wrappers ~~~~~~~~~~~~~~~~
def student_view(self, context):
"""
Return a fragment with the html from this XModule
Doesn't yet add any of the javascript to the fragment, nor the css.
Also doesn't expect any javascript binding, yet.
Makes no use of the context parameter
"""
return Fragment(self.get_html())
def policy_key(location):
"""
Get the key for a location in a policy file. (Since the policy file is
specific to a course, it doesn't need the full location url).
"""
return u'{cat}/{name}'.format(cat=location.category, name=location.name)
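# For example (hypothetical location): a location with category 'problem' and
# name 'intro_1' yields the policy key u'problem/intro_1'.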
Template = namedtuple("Template", "metadata data children")
class ResourceTemplates(object):
"""
Gets the templates associated w/ a containing cls. The cls must have a 'template_dir_name' attribute.
    It finds the templates in that directory under 'templates'.
"""
template_packages = [__name__]
@classmethod
def templates(cls):
"""
Returns a list of dictionary field: value objects that describe possible templates that can be used
to seed a module of this type.
Expects a class attribute template_dir_name that defines the directory
inside the 'templates' resource directory to pull templates from
"""
templates = []
dirname = cls.get_template_dir()
if dirname is not None:
for pkg in cls.template_packages:
if not resource_isdir(pkg, dirname):
continue
for template_file in resource_listdir(pkg, dirname):
if not template_file.endswith('.yaml'):
log.warning("Skipping unknown template file %s", template_file)
continue
template_content = resource_string(pkg, os.path.join(dirname, template_file))
template = yaml.safe_load(template_content)
template['template_id'] = template_file
templates.append(template)
return templates
@classmethod
def get_template_dir(cls):
if getattr(cls, 'template_dir_name', None):
dirname = os.path.join('templates', cls.template_dir_name)
if not resource_isdir(__name__, dirname):
log.warning(u"No resource directory {dir} found when loading {cls_name} templates".format(
dir=dirname,
cls_name=cls.__name__,
))
return None
else:
return dirname
else:
return None
@classmethod
def get_template(cls, template_id):
"""
Get a single template by the given id (which is the file name identifying it w/in the class's
template_dir_name)
"""
dirname = cls.get_template_dir()
if dirname is not None:
path = os.path.join(dirname, template_id)
for pkg in cls.template_packages:
if resource_exists(pkg, path):
template_content = resource_string(pkg, path)
template = yaml.safe_load(template_content)
template['template_id'] = template_id
return template
@XBlock.needs("i18n")
class XModuleDescriptor(HTMLSnippet, ResourceTemplates, XModuleMixin):
"""
An XModuleDescriptor is a specification for an element of a course. This
could be a problem, an organizational element (a group of content), or a
segment of video, for example.
XModuleDescriptors are independent and agnostic to the current student state
on a problem. They handle the editing interface used by instructors to
create a problem, and can generate XModules (which do know about student
state).
"""
entry_point = "xmodule.v1"
module_class = XModule
# VS[compat]. Backwards compatibility code that can go away after
# importing 2012 courses.
# A set of metadata key conversions that we want to make
metadata_translations = {
'slug': 'url_name',
'name': 'display_name',
}
# ============================= STRUCTURAL MANIPULATION ===================
def __init__(self, *args, **kwargs):
"""
Construct a new XModuleDescriptor. The only required arguments are the
system, used for interaction with external resources, and the
definition, which specifies all the data needed to edit and display the
problem (but none of the associated metadata that handles recordkeeping
around the problem).
This allows for maximal flexibility to add to the interface while
preserving backwards compatibility.
runtime: A DescriptorSystem for interacting with external resources
field_data: A dictionary-like object that maps field names to values
for those fields.
XModuleDescriptor.__init__ takes the same arguments as xblock.core:XBlock.__init__
"""
super(XModuleDescriptor, self).__init__(*args, **kwargs)
        # update_version is the version which last updated this xblock; previous_version is the penultimate updater
# leaving off original_version since it complicates creation w/o any obv value yet and is computable
# by following previous until None
# definition_locator is only used by mongostores which separate definitions from blocks
self.previous_version = self.update_version = self.definition_locator = None
self.xmodule_runtime = None
@classmethod
def _translate(cls, key):
'VS[compat]'
if key in cls.metadata_translations:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmodule_descriptor_translate"]
)
return cls.metadata_translations.get(key, key)
# ================================= XML PARSING ============================
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""
Interpret the parsed XML in `node`, creating an XModuleDescriptor.
"""
# It'd be great to not reserialize and deserialize the xml
xml = etree.tostring(node)
block = cls.from_xml(xml, runtime, id_generator)
return block
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses.
Args:
xml_data (str): A string of xml that will be translated into data and children
for this module
system (:class:`.XMLParsingSystem):
id_generator (:class:`xblock.runtime.IdGenerator`): Used to generate the
usage_ids and definition_ids when loading this xml
"""
raise NotImplementedError('Modules must implement from_xml to be parsable from xml')
def add_xml_to_node(self, node):
"""
Export this :class:`XModuleDescriptor` as XML, by setting attributes on the provided
`node`.
"""
xml_string = self.export_to_xml(self.runtime.export_fs)
exported_node = etree.fromstring(xml_string)
node.tag = exported_node.tag
node.text = exported_node.text
node.tail = exported_node.tail
for key, value in exported_node.items():
node.set(key, value)
node.extend(list(exported_node))
def export_to_xml(self, resource_fs):
"""
Returns an xml string representing this module, and all modules
underneath it. May also write required resources out to resource_fs.
Assumes that modules have single parentage (that no module appears twice
in the same course), and that it is thus safe to nest modules as xml
children as appropriate.
The returned XML should be able to be parsed back into an identical
XModuleDescriptor using the from_xml method with the same system, org,
and course
"""
raise NotImplementedError('Modules must implement export_to_xml to enable xml export')
def editor_saved(self, user, old_metadata, old_content):
"""
This method is called when "Save" is pressed on the Studio editor.
Note that after this method is called, the modulestore update_item method will
be called on this xmodule. Therefore, any modifications to the xmodule that are
performed in editor_saved will automatically be persisted (calling update_item
from implementors of this method is not necessary).
Args:
user: the user who requested the save (as obtained from the request)
old_metadata (dict): the values of the fields with Scope.settings before the save was performed
old_content (dict): the values of the fields with Scope.content before the save was performed.
This will include 'data'.
"""
pass
# =============================== BUILTIN METHODS ==========================
def __eq__(self, other):
return (hasattr(other, 'scope_ids') and
self.scope_ids == other.scope_ids and
self.fields.keys() == other.fields.keys() and
all(getattr(self, field.name) == getattr(other, field.name)
for field in self.fields.values()))
def __repr__(self):
return (
"{0.__class__.__name__}("
"{0.runtime!r}, "
"{0._field_data!r}, "
"{0.scope_ids!r}"
")".format(self)
)
# ~~~~~~~~~~~~~~~ XModule Indirection ~~~~~~~~~~~~~~~~
@property
def _xmodule(self):
"""
Returns the XModule corresponding to this descriptor. Expects that the system
already supports all of the attributes needed by xmodules
"""
if self.xmodule_runtime is None:
raise UndefinedContext()
assert self.xmodule_runtime.error_descriptor_class is not None
if self.xmodule_runtime.xmodule_instance is None:
try:
self.xmodule_runtime.construct_xblock_from_class(
self.module_class,
descriptor=self,
scope_ids=self.scope_ids,
field_data=self._field_data,
for_parent=self.get_parent() if self.has_cached_parent else None
)
self.xmodule_runtime.xmodule_instance.save()
except Exception: # pylint: disable=broad-except
# xmodule_instance is set by the XModule.__init__. If we had an error after that,
# we need to clean it out so that we can set up the ErrorModule instead
self.xmodule_runtime.xmodule_instance = None
if isinstance(self, self.xmodule_runtime.error_descriptor_class):
log.exception('Error creating an ErrorModule from an ErrorDescriptor')
raise
log.exception('Error creating xmodule')
descriptor = self.xmodule_runtime.error_descriptor_class.from_descriptor(
self,
error_msg=exc_info_to_str(sys.exc_info())
)
descriptor.xmodule_runtime = self.xmodule_runtime
self.xmodule_runtime.xmodule_instance = descriptor._xmodule # pylint: disable=protected-access
return self.xmodule_runtime.xmodule_instance
course_id = module_attr('course_id')
displayable_items = module_attr('displayable_items')
get_display_items = module_attr('get_display_items')
get_icon_class = module_attr('get_icon_class')
get_progress = module_attr('get_progress')
get_score = module_attr('get_score')
handle_ajax = module_attr('handle_ajax')
max_score = module_attr('max_score')
student_view = module_attr(STUDENT_VIEW)
get_child_descriptors = module_attr('get_child_descriptors')
xmodule_handler = module_attr('xmodule_handler')
# ~~~~~~~~~~~~~~~ XBlock API Wrappers ~~~~~~~~~~~~~~~~
def studio_view(self, _context):
"""
Return a fragment with the html from this XModuleDescriptor's editing view
Doesn't yet add any of the javascript to the fragment, nor the css.
Also doesn't expect any javascript binding, yet.
Makes no use of the context parameter
"""
return Fragment(self.get_html())
class ConfigurableFragmentWrapper(object):
"""
Runtime mixin that allows for composition of many `wrap_xblock` wrappers
"""
def __init__(self, wrappers=None, **kwargs):
"""
:param wrappers: A list of wrappers, where each wrapper is:
def wrapper(block, view, frag, context):
...
return wrapped_frag
"""
super(ConfigurableFragmentWrapper, self).__init__(**kwargs)
if wrappers is not None:
self.wrappers = wrappers
else:
self.wrappers = []
def wrap_xblock(self, block, view, frag, context):
"""
See :func:`Runtime.wrap_child`
"""
for wrapper in self.wrappers:
frag = wrapper(block, view, frag, context)
return frag
# This function exists to give applications (LMS/CMS) a place to monkey-patch until
# we can refactor modulestore to split out the FieldData half of its interface from
# the Runtime part of its interface. This function mostly matches the
# Runtime.handler_url interface.
#
# The monkey-patching happens in (lms|cms)/startup.py
def descriptor_global_handler_url(block, handler_name, suffix='', query='', thirdparty=False): # pylint: disable=unused-argument
"""
See :meth:`xblock.runtime.Runtime.handler_url`.
"""
raise NotImplementedError("Applications must monkey-patch this function before using handler_url for studio_view")
# This function exists to give applications (LMS/CMS) a place to monkey-patch until
# we can refactor modulestore to split out the FieldData half of its interface from
# the Runtime part of its interface. This function matches the Runtime.local_resource_url interface
#
# The monkey-patching happens in (lms|cms)/startup.py
def descriptor_global_local_resource_url(block, uri): # pylint: disable=invalid-name, unused-argument
"""
See :meth:`xblock.runtime.Runtime.local_resource_url`.
"""
raise NotImplementedError("Applications must monkey-patch this function before using local_resource_url for studio_view")
class MetricsMixin(object):
"""
Mixin for adding metric logging for render and handle methods in the DescriptorSystem and ModuleSystem.
"""
def render(self, block, view_name, context=None):
start_time = time.time()
try:
status = "success"
return super(MetricsMixin, self).render(block, view_name, context=context)
except:
status = "failure"
raise
finally:
end_time = time.time()
duration = end_time - start_time
course_id = getattr(self, 'course_id', '')
tags = [
u'view_name:{}'.format(view_name),
u'action:render',
u'action_status:{}'.format(status),
u'course_id:{}'.format(course_id),
u'block_type:{}'.format(block.scope_ids.block_type),
u'block_family:{}'.format(block.entry_point),
]
dog_stats_api.increment(XMODULE_METRIC_NAME, tags=tags, sample_rate=XMODULE_METRIC_SAMPLE_RATE)
dog_stats_api.histogram(
XMODULE_DURATION_METRIC_NAME,
duration,
tags=tags,
sample_rate=XMODULE_METRIC_SAMPLE_RATE,
)
log.debug(
"%.3fs - render %s.%s (%s)",
duration,
block.__class__.__name__,
view_name,
getattr(block, 'location', ''),
)
def handle(self, block, handler_name, request, suffix=''):
start_time = time.time()
try:
status = "success"
return super(MetricsMixin, self).handle(block, handler_name, request, suffix=suffix)
except:
status = "failure"
raise
finally:
end_time = time.time()
duration = end_time - start_time
course_id = getattr(self, 'course_id', '')
tags = [
u'handler_name:{}'.format(handler_name),
u'action:handle',
u'action_status:{}'.format(status),
u'course_id:{}'.format(course_id),
u'block_type:{}'.format(block.scope_ids.block_type),
u'block_family:{}'.format(block.entry_point),
]
dog_stats_api.increment(XMODULE_METRIC_NAME, tags=tags, sample_rate=XMODULE_METRIC_SAMPLE_RATE)
dog_stats_api.histogram(
XMODULE_DURATION_METRIC_NAME,
duration,
tags=tags,
sample_rate=XMODULE_METRIC_SAMPLE_RATE
)
log.debug(
"%.3fs - handle %s.%s (%s)",
duration,
block.__class__.__name__,
handler_name,
getattr(block, 'location', ''),
)
class DescriptorSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
"""
Base class for :class:`Runtime`s to be used with :class:`XModuleDescriptor`s
"""
# pylint: disable=bad-continuation
def __init__(
self, load_item, resources_fs, error_tracker, get_policy=None, disabled_xblock_types=(), **kwargs
):
"""
load_item: Takes a Location and returns an XModuleDescriptor
resources_fs: A Filesystem object that contains all of the
resources needed for the course
error_tracker: A hook for tracking errors in loading the descriptor.
Used for example to get a list of all non-fatal problems on course
load, and display them to the user.
A function of (error_msg). errortracker.py provides a
handy make_error_tracker() function.
Patterns for using the error handler:
try:
x = access_some_resource()
check_some_format(x)
except SomeProblem as err:
msg = 'Grommet {0} is broken: {1}'.format(x, str(err))
log.warning(msg) # don't rely on tracker to log
# NOTE: we generally don't want content errors logged as errors
self.system.error_tracker(msg)
# work around
                    return "Oops, couldn't load grommet"
OR, if not in an exception context:
if not check_something(thingy):
msg = "thingy {0} is broken".format(thingy)
log.critical(msg)
self.system.error_tracker(msg)
NOTE: To avoid duplication, do not call the tracker on errors
that you're about to re-raise---let the caller track them.
get_policy: a function that takes a usage id and returns a dict of
policy to apply.
local_resource_url: an implementation of :meth:`xblock.runtime.Runtime.local_resource_url`
"""
kwargs.setdefault('id_reader', OpaqueKeyReader())
kwargs.setdefault('id_generator', AsideKeyGenerator())
super(DescriptorSystem, self).__init__(**kwargs)
# This is used by XModules to write out separate files during xml export
self.export_fs = None
self.load_item = load_item
self.resources_fs = resources_fs
self.error_tracker = error_tracker
if get_policy:
self.get_policy = get_policy
else:
self.get_policy = lambda u: {}
self.disabled_xblock_types = disabled_xblock_types
def get_block(self, usage_id, for_parent=None):
"""See documentation for `xblock.runtime:Runtime.get_block`"""
return self.load_item(usage_id, for_parent=for_parent)
def load_block_type(self, block_type):
"""
Returns a subclass of :class:`.XBlock` that corresponds to the specified `block_type`.
"""
if block_type in self.disabled_xblock_types:
return self.default_class
return super(DescriptorSystem, self).load_block_type(block_type)
def get_field_provenance(self, xblock, field):
"""
For the given xblock, return a dict for the field's current state:
{
'default_value': what json'd value will take effect if field is unset: either the field default or
inherited value,
'explicitly_set': boolean for whether the current value is set v default/inherited,
}
:param xblock:
:param field:
"""
# pylint: disable=protected-access
# in runtime b/c runtime contains app-specific xblock behavior. Studio's the only app
# which needs this level of introspection right now. runtime also is 'allowed' to know
# about the kvs, dbmodel, etc.
result = {}
result['explicitly_set'] = xblock._field_data.has(xblock, field.name)
try:
result['default_value'] = xblock._field_data.default(xblock, field.name)
except KeyError:
result['default_value'] = field.to_json(field.default)
return result
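        # For example (illustrative): for a field left unset whose default is
        # 3600, this returns {'explicitly_set': False, 'default_value': 3600}.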
def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):
# Currently, Modulestore is responsible for instantiating DescriptorSystems
# This means that LMS/CMS don't have a way to define a subclass of DescriptorSystem
# that implements the correct handler url. So, for now, instead, we will reference a
# global function that the application can override.
return descriptor_global_handler_url(block, handler_name, suffix, query, thirdparty)
def local_resource_url(self, block, uri):
"""
See :meth:`xblock.runtime.Runtime:local_resource_url` for documentation.
"""
# Currently, Modulestore is responsible for instantiating DescriptorSystems
# This means that LMS/CMS don't have a way to define a subclass of DescriptorSystem
# that implements the correct local_resource_url. So, for now, instead, we will reference a
# global function that the application can override.
return descriptor_global_local_resource_url(block, uri)
def applicable_aside_types(self, block):
"""
See :meth:`xblock.runtime.Runtime:applicable_aside_types` for documentation.
"""
potential_set = set(super(DescriptorSystem, self).applicable_aside_types(block))
if getattr(block, 'xmodule_runtime', None) is not None:
application_set = set(block.xmodule_runtime.applicable_aside_types(block))
return list(potential_set.intersection(application_set))
return list(potential_set)
def resource_url(self, resource):
"""
See :meth:`xblock.runtime.Runtime:resource_url` for documentation.
"""
raise NotImplementedError("edX Platform doesn't currently implement XBlock resource urls")
def add_block_as_child_node(self, block, node):
child = etree.SubElement(node, "unknown")
child.set('url_name', block.url_name)
block.add_xml_to_node(child)
def publish(self, block, event_type, event):
# A stub publish method that doesn't emit any events from XModuleDescriptors.
pass
new_contract('DescriptorSystem', DescriptorSystem)
class XMLParsingSystem(DescriptorSystem):
def __init__(self, process_xml, **kwargs):
"""
process_xml: Takes an xml string, and returns a XModuleDescriptor
created from that xml
"""
super(XMLParsingSystem, self).__init__(**kwargs)
self.process_xml = process_xml
def _usage_id_from_node(self, node, parent_id, id_generator=None):
"""Create a new usage id from an XML dom node.
Args:
node (lxml.etree.Element): The DOM node to interpret.
parent_id: The usage ID of the parent block
id_generator (IdGenerator): The :class:`.IdGenerator` to use
for creating ids
Returns:
UsageKey: the usage key for the new xblock
"""
return self.xblock_from_node(node, parent_id, id_generator).scope_ids.usage_id
def xblock_from_node(self, node, parent_id, id_generator=None):
"""
Create an XBlock instance from XML data.
Args:
xml_data (string): A string containing valid xml.
system (XMLParsingSystem): The :class:`.XMLParsingSystem` used to connect the block
to the outside world.
id_generator (IdGenerator): An :class:`~xblock.runtime.IdGenerator` that
will be used to construct the usage_id and definition_id for the block.
Returns:
XBlock: The fully instantiated :class:`~xblock.core.XBlock`.
"""
id_generator = id_generator or self.id_generator
# leave next line commented out - useful for low-level debugging
# log.debug('[_usage_id_from_node] tag=%s, class=%s' % (node.tag, xblock_class))
block_type = node.tag
# remove xblock-family from elements
node.attrib.pop('xblock-family', None)
url_name = node.get('url_name') # difference from XBlock.runtime
def_id = id_generator.create_definition(block_type, url_name)
usage_id = id_generator.create_usage(def_id)
keys = ScopeIds(None, block_type, def_id, usage_id)
block_class = self.mixologist.mix(self.load_block_type(block_type))
self.parse_asides(node, def_id, usage_id, id_generator)
block = block_class.parse_xml(node, self, keys, id_generator)
self._convert_reference_fields_to_keys(block) # difference from XBlock.runtime
block.parent = parent_id
block.save()
return block
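        # Illustrative input (assumed block type): a node such as
        #     <html url_name="intro" xblock-family="xblock.v1">...</html>
        # has its xblock-family attribute stripped, gets definition/usage ids
        # derived from 'html'/'intro', and is parented to parent_id.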
def parse_asides(self, node, def_id, usage_id, id_generator):
"""pull the asides out of the xml payload and instantiate them"""
aside_children = []
for child in node.iterchildren():
# get xblock-family from node
xblock_family = child.attrib.pop('xblock-family', None)
if xblock_family:
xblock_family = self._family_id_to_superclass(xblock_family)
if issubclass(xblock_family, XBlockAside):
aside_children.append(child)
# now process them & remove them from the xml payload
for child in aside_children:
self._aside_from_xml(child, def_id, usage_id, id_generator)
node.remove(child)
def _make_usage_key(self, course_key, value):
"""
Makes value into a UsageKey inside the specified course.
If value is already a UsageKey, returns that.
"""
if isinstance(value, UsageKey):
return value
return course_key.make_usage_key_from_deprecated_string(value)
def _convert_reference_fields_to_keys(self, xblock):
"""
Find all fields of type reference and convert the payload into UsageKeys
"""
course_key = xblock.scope_ids.usage_id.course_key
for field in xblock.fields.itervalues():
if field.is_set_on(xblock):
field_value = getattr(xblock, field.name)
if field_value is None:
continue
elif isinstance(field, Reference):
setattr(xblock, field.name, self._make_usage_key(course_key, field_value))
elif isinstance(field, ReferenceList):
setattr(xblock, field.name, [self._make_usage_key(course_key, ele) for ele in field_value])
elif isinstance(field, ReferenceValueDict):
for key, subvalue in field_value.iteritems():
assert isinstance(subvalue, basestring)
field_value[key] = self._make_usage_key(course_key, subvalue)
setattr(xblock, field.name, field_value)
class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
"""
    This is an abstraction such that x_modules can function independently
of the courseware (e.g. import into other types of courseware, LMS,
or if we want to have a sandbox server for user-contributed content)
ModuleSystem objects are passed to x_modules to provide access to system
functionality.
Note that these functions can be closures over e.g. a django request
and user, or other environment-specific info.
"""
@contract(descriptor_runtime='DescriptorSystem')
def __init__(
self, static_url, track_function, get_module, render_template,
replace_urls, descriptor_runtime, user=None, filestore=None,
debug=False, hostname="", xqueue=None, publish=None, node_path="",
anonymous_student_id='', course_id=None,
cache=None, can_execute_unsafe_code=None, replace_course_urls=None,
replace_jump_to_id_urls=None, error_descriptor_class=None, get_real_user=None,
field_data=None, get_user_role=None, rebind_noauth_module_to_user=None,
user_location=None, get_python_lib_zip=None, **kwargs):
"""
Create a closure around the system environment.
static_url - the base URL to static assets
track_function - function of (event_type, event), intended for logging
or otherwise tracking the event.
TODO: Not used, and has inconsistent args in different
files. Update or remove.
get_module - function that takes a descriptor and returns a corresponding
module instance object. If the current user does not have
access to that location, returns None.
render_template - a function that takes (template_file, context), and
returns rendered html.
user - The user to base the random number generator seed off of for this
request
        filestore - A filestore object. Defaults to an instance of OSFS based
at settings.DATA_DIR.
xqueue - Dict containing XqueueInterface object, as well as parameters
for the specific StudentModule:
xqueue = {'interface': XQueueInterface object,
'callback_url': Callback into the LMS,
'queue_name': Target queuename in Xqueue}
replace_urls - TEMPORARY - A function like static_replace.replace_urls
that capa_module can use to fix up the static urls in
ajax results.
descriptor_runtime - A `DescriptorSystem` to use for loading xblocks by id
anonymous_student_id - Used for tracking modules with student id
course_id - the course_id containing this module
publish(event) - A function that allows XModules to publish events (such as grade changes)
cache - A cache object with two methods:
.get(key) returns an object from the cache or None.
.set(key, value, timeout_secs=None) stores a value in the cache with a timeout.
can_execute_unsafe_code - A function returning a boolean, whether or
not to allow the execution of unsafe, unsandboxed code.
get_python_lib_zip - A function returning a bytestring or None. The
bytestring is the contents of a zip file that should be importable
by other Python code running in the module.
error_descriptor_class - The class to use to render XModules with errors
get_real_user - function that takes `anonymous_student_id` and returns real user_id,
associated with `anonymous_student_id`.
get_user_role - A function that returns user role. Implementation is different
for LMS and Studio.
field_data - the `FieldData` to use for backing XBlock storage.
rebind_noauth_module_to_user - rebinds module bound to AnonymousUser to a real user...used in LTI
modules, which have an anonymous handler, to set legitimate users' data
"""
# Usage_store is unused, and field_data is often supplanted with an
# explicit field_data during construct_xblock.
kwargs.setdefault('id_reader', getattr(descriptor_runtime, 'id_reader', OpaqueKeyReader()))
kwargs.setdefault('id_generator', getattr(descriptor_runtime, 'id_generator', AsideKeyGenerator()))
super(ModuleSystem, self).__init__(field_data=field_data, **kwargs)
self.STATIC_URL = static_url
self.xqueue = xqueue
self.track_function = track_function
self.filestore = filestore
self.get_module = get_module
self.render_template = render_template
self.DEBUG = self.debug = debug
self.HOSTNAME = self.hostname = hostname
self.seed = user.id if user is not None else 0
self.replace_urls = replace_urls
self.node_path = node_path
self.anonymous_student_id = anonymous_student_id
self.course_id = course_id
self.user_is_staff = user is not None and user.is_staff
if publish:
self.publish = publish
self.cache = cache or DoNothingCache()
self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
self.get_python_lib_zip = get_python_lib_zip or (lambda: None)
self.replace_course_urls = replace_course_urls
self.replace_jump_to_id_urls = replace_jump_to_id_urls
self.error_descriptor_class = error_descriptor_class
self.xmodule_instance = None
self.get_real_user = get_real_user
self.user_location = user_location
self.get_user_role = get_user_role
self.descriptor_runtime = descriptor_runtime
self.rebind_noauth_module_to_user = rebind_noauth_module_to_user
if user:
self.user_id = user.id
def get(self, attr):
""" provide uniform access to attributes (like etree)."""
return self.__dict__.get(attr)
def set(self, attr, val):
"""provide uniform access to attributes (like etree)"""
self.__dict__[attr] = val
def __repr__(self):
kwargs = self.__dict__.copy()
# Remove value set transiently by XBlock
kwargs.pop('_view_name')
return "{}{}".format(self.__class__.__name__, kwargs)
@property
def ajax_url(self):
"""
The url prefix to be used by XModules to call into handle_ajax
"""
assert self.xmodule_instance is not None
return self.handler_url(self.xmodule_instance, 'xmodule_handler', '', '').rstrip('/?')
def get_block(self, block_id, for_parent=None):
return self.get_module(self.descriptor_runtime.get_block(block_id, for_parent=for_parent))
def resource_url(self, resource):
raise NotImplementedError("edX Platform doesn't currently implement XBlock resource urls")
def publish(self, block, event_type, event):
pass
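# Illustrative sketch (added; names are hypothetical, not part of the original
# API surface): the get()/set() helpers above mirror plain attribute access,
# so a caller could do:
#
#   system.set('render_template', my_renderer)  # same effect as attribute assignment
#   system.get('render_template')               # -> my_renderer
#   system.get('no_such_attr')                  # -> None, instead of raising AttributeError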
class CombinedSystem(object):
"""
This class is a shim to allow both pure XBlocks and XModuleDescriptors
that have been bound as XModules to access both the attributes of ModuleSystem
and of DescriptorSystem as a single runtime.
"""
__slots__ = ('_module_system', '_descriptor_system')
# This system doesn't override a number of methods that are provided by ModuleSystem and DescriptorSystem,
# namely handler_url, local_resource_url, query, and resource_url.
#
    # At runtime, the ModuleSystem and/or DescriptorSystem will define those methods.
#
def __init__(self, module_system, descriptor_system):
# These attributes are set directly to __dict__ below to avoid a recursion in getattr/setattr.
self._module_system = module_system
self._descriptor_system = descriptor_system
def _get_student_block(self, block):
"""
If block is an XModuleDescriptor that has been bound to a student, return
the corresponding XModule, instead of the XModuleDescriptor.
Otherwise, return block.
"""
if isinstance(block, XModuleDescriptor) and block.xmodule_runtime:
return block._xmodule # pylint: disable=protected-access
else:
return block
def render(self, block, view_name, context=None):
"""
Render a block by invoking its view.
Finds the view named `view_name` on `block`. The default view will be
        used if a specific view hasn't been registered. If there is no default
view, an exception will be raised.
The view is invoked, passing it `context`. The value returned by the
view is returned, with possible modifications by the runtime to
integrate it into a larger whole.
"""
context = context or {}
if view_name in PREVIEW_VIEWS:
block = self._get_student_block(block)
return self.__getattr__('render')(block, view_name, context)
def service(self, block, service_name):
"""Return a service, or None.
Services are objects implementing arbitrary other interfaces. They are
        requested by agreed-upon names; see [XXX TODO] for a list of possible
services. The object returned depends on the service requested.
XBlocks must announce their intention to request services with the
`XBlock.needs` or `XBlock.wants` decorators. Use `needs` if you assume
that the service is available, or `wants` if your code is flexible and
can accept a None from this method.
Runtimes can override this method if they have different techniques for
finding and delivering services.
Arguments:
block (an XBlock): this block's class will be examined for service
decorators.
service_name (string): the name of the service requested.
Returns:
An object implementing the requested service, or None.
"""
service = None
if self._module_system:
service = self._module_system.service(block, service_name)
if service is None:
service = self._descriptor_system.service(block, service_name)
return service
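    # Illustrative example of the needs/wants contract described above
    # (hypothetical block class; 'user' is assumed to be a registered service):
    #
    #   @XBlock.needs('field-data')
    #   @XBlock.wants('user')
    #   class MyBlock(XBlock):
    #       ...
    #
    #   user_service = runtime.service(my_block, 'user')  # may be None ('wants')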
def __getattr__(self, name):
"""
If the ModuleSystem doesn't have an attribute, try returning the same attribute from the
DescriptorSystem, instead. This allows XModuleDescriptors that are bound as XModules
to still function as XModuleDescriptors.
"""
# First we try a lookup in the module system...
try:
return getattr(self._module_system, name)
except AttributeError:
return getattr(self._descriptor_system, name)
def __setattr__(self, name, value):
"""
If the ModuleSystem is set, set the attr on it.
Always set the attr on the DescriptorSystem.
"""
if name in self.__slots__:
return super(CombinedSystem, self).__setattr__(name, value)
if self._module_system:
setattr(self._module_system, name, value)
setattr(self._descriptor_system, name, value)
def __delattr__(self, name):
"""
If the ModuleSystem is set, delete the attribute from it.
Always delete the attribute from the DescriptorSystem.
"""
if self._module_system:
delattr(self._module_system, name)
delattr(self._descriptor_system, name)
def __repr__(self):
return "CombinedSystem({!r}, {!r})".format(self._module_system, self._descriptor_system)
class DoNothingCache(object):
"""A duck-compatible object to use in ModuleSystem when there's no cache."""
def get(self, _key):
return None
def set(self, key, value, timeout=None):
pass
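# Sketch (illustrative): DoNothingCache satisfies the two-method cache
# interface documented on ModuleSystem, and is the fallback when no cache is
# supplied (see `self.cache = cache or DoNothingCache()` above):
#
#   cache = DoNothingCache()
#   assert cache.get('key') is None         # never a hit
#   cache.set('key', 'value', timeout=60)   # silently discarded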
| agpl-3.0 |
dkulikovsky/graphite-ch-web | webapp/graphite/events/views.py | 1 | 2767 | import datetime
import time
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.utils.timezone import localtime, now
from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime
from django.core.urlresolvers import get_script_prefix
def to_timestamp(dt):
return time.mktime(dt.timetuple())
class EventEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return to_timestamp(obj)
return json.JSONEncoder.default(self, obj)
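# Example (illustrative): EventEncoder serializes datetimes as Unix
# timestamps. The exact float depends on the local timezone, since
# to_timestamp() uses time.mktime():
#
#   >>> json.dumps({'when': datetime.datetime(2013, 1, 1)}, cls=EventEncoder)
#   '{"when": 1356998400.0}'   # value shown assumes a UTC machine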
def view_events(request):
if request.method == "GET":
context = { 'events' : fetch(request),
'slash' : get_script_prefix()
}
return render_to_response("events.html", context)
else:
return post_event(request)
def detail(request, event_id):
e = get_object_or_404(models.Event, pk=event_id)
context = { 'event' : e,
'slash' : get_script_prefix()
}
return render_to_response("event.html", context)
def post_event(request):
if request.method == 'POST':
event = json.loads(request.body)
assert isinstance(event, dict)
values = {}
values["what"] = event["what"]
values["tags"] = event.get("tags", None)
values["when"] = datetime.datetime.fromtimestamp(
event.get("when", time.time()))
if "data" in event:
values["data"] = event["data"]
e = models.Event(**values)
e.save()
return HttpResponse(status=200)
else:
return HttpResponse(status=405)
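# Example request body accepted by post_event() above (illustrative; the URL
# depends on the deployment's routing):
#
#   POST /events/
#   {"what": "deploy", "tags": "release prod",
#    "when": 1361234567, "data": "build 1234"}
#
# Only "what" is required; "when" defaults to the current time, and
# "tags"/"data" are optional.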
def get_data(request):
if 'jsonp' in request.REQUEST:
response = HttpResponse(
"%s(%s)" % (request.REQUEST.get('jsonp'),
json.dumps(fetch(request), cls=EventEncoder)),
content_type='text/javascript')
else:
response = HttpResponse(
json.dumps(fetch(request), cls=EventEncoder),
content_type="application/json")
return response
def fetch(request):
#XXX we need to move to USE_TZ=True to get rid of localtime() conversions
if request.GET.get("from", None) is not None:
time_from = localtime(parseATTime(request.GET["from"])).replace(tzinfo=None)
else:
time_from = datetime.datetime.fromtimestamp(0)
if request.GET.get("until", None) is not None:
time_until = localtime(parseATTime(request.GET["until"])).replace(tzinfo=None)
else:
time_until = now()
tags = request.GET.get("tags", None)
if tags is not None:
tags = request.GET.get("tags").split(" ")
return [x.as_dict() for x in
models.Event.find_events(time_from, time_until, tags=tags)]
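# Illustrative query handled by fetch() (parameter names from the code above;
# the URL prefix depends on the deployment):
#
#   GET /events/get_data?from=-2days&until=now&tags=deploy%20prod
#
# "from" and "until" accept Graphite at-time expressions; omitting "from"
# means "since the epoch", omitting "until" means "now", and "tags" is split
# on spaces.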
| apache-2.0 |
nitzmahone/ansible | lib/ansible/plugins/lookup/together.py | 100 | 2152 | # (c) 2013, Bradley Young <young.bradley@gmail.com>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: together
author: Bradley Young <young.bradley@gmail.com>
version_added: '1.3'
short_description: merges lists into synchronized list
description:
- Creates a list with the iterated elements of the supplied lists
- "To clarify with an example, [ 'a', 'b' ] and [ 1, 2 ] turn into [ ('a',1), ('b', 2) ]"
- This is basically the same as the 'zip_longest' filter and Python function
- Any 'unbalanced' elements will be substituted with 'None'
options:
_terms:
description: list of lists to merge
required: True
"""
EXAMPLES = """
- name: item.0 returns from the 'a' list, item.1 returns from the '1' list
debug:
msg: "{{ item.0 }} and {{ item.1 }}"
with_together:
- ['a', 'b', 'c', 'd']
- [1, 2, 3, 4]
"""
RETURN = """
_list:
description: synchronized list
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import zip_longest
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
"""
Transpose a list of arrays:
[1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
    Replace any empty spots in the 2nd array with None:
[1, 2], [3] -> [1, 3], [2, None]
"""
def _lookup_variables(self, terms):
results = []
for x in terms:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
raise AnsibleError("with_together requires at least one element in each list")
return [self._flatten(x) for x in zip_longest(*my_list, fillvalue=None)]
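# Underlying behaviour, for illustration (the plugin's _flatten() then joins
# each tuple into a flat list):
#
#   >>> from ansible.module_utils.six.moves import zip_longest
#   >>> list(zip_longest(['a', 'b'], [1], fillvalue=None))
#   [('a', 1), ('b', None)]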
| gpl-3.0 |
hcorg/thrift | test/py/TSimpleJSONProtocolTest.py | 43 | 3951 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from ThriftTest.ttypes import Bonk, VersioningTestV1, VersioningTestV2
from thrift.protocol import TJSONProtocol
from thrift.transport import TTransport
import json
import unittest
class SimpleJSONProtocolTest(unittest.TestCase):
protocol_factory = TJSONProtocol.TSimpleJSONProtocolFactory()
def _assertDictEqual(self, a, b, msg=None):
if hasattr(self, 'assertDictEqual'):
            # assertDictEqual is only available in Python 2.7+, so this depends on the local Python version.
self.assertDictEqual(a, b, msg)
return
        # This substitute implementation is not as good as the unittest library's.
self.assertEquals(len(a), len(b), msg)
for k, v in a.iteritems():
self.assertTrue(k in b, msg)
self.assertEquals(b.get(k), v, msg)
def _serialize(self, obj):
trans = TTransport.TMemoryBuffer()
prot = self.protocol_factory.getProtocol(trans)
obj.write(prot)
return trans.getvalue()
def _deserialize(self, objtype, data):
prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
ret = objtype()
ret.read(prot)
return ret
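    # Note (added for clarity): TSimpleJSONProtocol is write-only, so
    # _deserialize() only serves to assert that reading raises
    # NotImplementedError (see testWriteOnly below).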
def testWriteOnly(self):
self.assertRaises(NotImplementedError,
self._deserialize, VersioningTestV1, b'{}')
def testSimpleMessage(self):
v1obj = VersioningTestV1(
begin_in_both=12345,
old_string='aaa',
end_in_both=54321)
expected = dict(begin_in_both=v1obj.begin_in_both,
old_string=v1obj.old_string,
end_in_both=v1obj.end_in_both)
actual = json.loads(self._serialize(v1obj).decode('ascii'))
self._assertDictEqual(expected, actual)
def testComplicated(self):
v2obj = VersioningTestV2(
begin_in_both=12345,
newint=1,
newbyte=2,
newshort=3,
newlong=4,
newdouble=5.0,
newstruct=Bonk(message="Hello!", type=123),
newlist=[7, 8, 9],
newset=set([42, 1, 8]),
newmap={1: 2, 2: 3},
newstring="Hola!",
end_in_both=54321)
expected = dict(begin_in_both=v2obj.begin_in_both,
newint=v2obj.newint,
newbyte=v2obj.newbyte,
newshort=v2obj.newshort,
newlong=v2obj.newlong,
newdouble=v2obj.newdouble,
newstruct=dict(message=v2obj.newstruct.message,
type=v2obj.newstruct.type),
newlist=v2obj.newlist,
newset=list(v2obj.newset),
newmap=v2obj.newmap,
newstring=v2obj.newstring,
end_in_both=v2obj.end_in_both)
# Need to load/dump because map keys get escaped.
expected = json.loads(json.dumps(expected))
actual = json.loads(self._serialize(v2obj).decode('ascii'))
self._assertDictEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
m-lab/mlab-metrics-api | metrics_computation_system/weekly_refresh.py | 2 | 1997 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Dylan Curley
"""This module contains logic for triggering weekly refreshes of metric data.
"""
import httplib
import logging
from google.appengine.ext import webapp
import server
def HANDLERS():
"""Returns a list of URL handlers for this application.
Returns:
(list) A list of (string, fn) tuples where the first element is a target
URL and the second is a function that handles requests at that URL.
"""
return [
('/cron/weekly_refresh', WeeklyRefreshHandler),
]
class WeeklyRefreshHandler(webapp.RequestHandler):
"""Handle a request to send a metrics refresh request to the receiver.
"""
def get(self):
"""Handles "get" requests to send a metrics refresh request.
"""
host = self.request.headers['Host']
logging.info('Requesting weekly refresh of all metrics.')
self._SendRequest(host, '/refresh?metric=*')
logging.info('Requesting weekly update of all locales.')
self._SendRequest(host, '/relocate')
def _SendRequest(self, host, path):
logging.debug('Sending request to: %s%s' % (host, path))
conn = httplib.HTTPConnection(host, timeout=20)
conn.request('GET', path)
res = conn.getresponse()
logging.debug('Request response: %s %s' % (res.status, res.reason))
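# Illustrative flow (assuming the cron URL wiring in HANDLERS() above):
#
#   GET /cron/weekly_refresh
#     -> GET <host>/refresh?metric=*   # refresh all metrics
#     -> GET <host>/relocate           # update all locales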
if __name__ == '__main__':
server.start(HANDLERS())
| apache-2.0 |
xiangel/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/db/backends/postgis/creation.py | 117 | 4498 | from django.conf import settings
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_ops = 'GIST_GEOMETRY_OPS'
geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
@cached_property
def template_postgis(self):
template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
cursor = self.connection.cursor()
cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
if cursor.fetchone():
return template_postgis
return None
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography or self.connection.ops.geometry:
# Geography and Geometry (PostGIS 2.0+) columns are
# created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
                # PostGIS 2.0 does not support GIST_GEOMETRY_OPS, so on 1.5
                # we use GIST_GEOMETRY_OPS, while on 2.0 we use either the
                # "nd" ops, which are fast for multidimensional cases, or a
                # plain GiST index for the 2D case.
if f.geography:
index_ops = ''
elif self.connection.ops.geometry:
if f.dim > 2:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
else:
index_ops = ''
else:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
return output
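    # For illustration (hypothetical 2D geometry field "geom" on table
    # "myapp_place", PostGIS < 2.0), the SQL emitted above resembles:
    #
    #   SELECT AddGeometryColumn('myapp_place', 'geom', 4326, 'POINT', 2);
    #   ALTER TABLE "myapp_place" ALTER "geom" SET NOT NULL;
    #   CREATE INDEX "myapp_place_geom_id" ON "myapp_place"
    #       USING GIST ( "geom" GIST_GEOMETRY_OPS );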
def sql_table_creation_suffix(self):
if self.template_postgis is not None:
return ' TEMPLATE %s' % (
self.connection.ops.quote_name(self.template_postgis),)
return ''
def _create_test_db(self, verbosity, autoclobber):
test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber)
if self.template_postgis is None:
# Connect to the test database in order to create the postgis extension
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
cursor = self.connection.cursor()
cursor.execute("CREATE EXTENSION postgis")
cursor.connection.commit()
return test_database_name
| apache-2.0 |
ANR-kamoulox/Telemeta | telemeta/south_migrations/0022_auto__chg_field_mediaitem_file.py | 2 | 48378 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MediaItem.file'
db.alter_column('media_items', 'filename', self.gf('telemeta.models.core.FileField')(max_length=255, db_column='filename'))
def backwards(self, orm):
# Changing field 'MediaItem.file'
db.alter_column('media_items', 'filename', self.gf('telemeta.models.core.FileField')(max_length=100, db_column='filename'))
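    # Typical South usage for this migration (illustrative commands):
    #
    #   ./manage.py migrate telemeta 0022   # apply forwards()
    #   ./manage.py migrate telemeta 0021   # roll back via backwards()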
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'telemeta.acquisitionmode': {
'Meta': {'ordering': "['value']", 'object_name': 'AcquisitionMode', 'db_table': "'acquisition_modes'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.adconversion': {
'Meta': {'ordering': "['value']", 'object_name': 'AdConversion', 'db_table': "'ad_conversions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.contextkeyword': {
'Meta': {'ordering': "['value']", 'object_name': 'ContextKeyword', 'db_table': "'context_keywords'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.criteria': {
'Meta': {'object_name': 'Criteria', 'db_table': "'search_criteria'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'value': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.ethnicgroup': {
'Meta': {'ordering': "['value']", 'object_name': 'EthnicGroup', 'db_table': "'ethnic_groups'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.ethnicgroupalias': {
'Meta': {'ordering': "['ethnic_group__value']", 'unique_together': "(('ethnic_group', 'value'),)", 'object_name': 'EthnicGroupAlias', 'db_table': "'ethnic_group_aliases'"},
'ethnic_group': ('telemeta.models.core.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['telemeta.EthnicGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.format': {
'Meta': {'object_name': 'Format', 'db_table': "'media_formats'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'format'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['telemeta.MediaItem']", 'blank': 'True', 'null': 'True'}),
'original_audio_quality': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'original_channels': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.NumberOfChannels']"}),
'original_code': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'original_comments': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'original_location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'format'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['telemeta.Location']", 'blank': 'True', 'null': 'True'}),
'original_number': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'original_state': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'original_status': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'physical_format': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PhysicalFormat']"}),
'recording_system': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'sticker_presence': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'tape_reference': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'tape_speed': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.TapeSpeed']"}),
'tape_thickness': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'tape_vendor': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.TapeVendor']"}),
'tape_wheel_diameter': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.TapeWheelDiameter']"})
},
'telemeta.genericstyle': {
'Meta': {'ordering': "['value']", 'object_name': 'GenericStyle', 'db_table': "'generic_styles'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.instrument': {
'Meta': {'object_name': 'Instrument', 'db_table': "'instruments'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.instrumentalias': {
'Meta': {'object_name': 'InstrumentAlias', 'db_table': "'instrument_aliases'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.instrumentaliasrelation': {
'Meta': {'unique_together': "(('alias', 'instrument'),)", 'object_name': 'InstrumentAliasRelation', 'db_table': "'instrument_alias_relations'"},
'alias': ('telemeta.models.core.ForeignKey', [], {'related_name': "'other_name'", 'to': "orm['telemeta.InstrumentAlias']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('telemeta.models.core.ForeignKey', [], {'related_name': "'relation'", 'to': "orm['telemeta.InstrumentAlias']"})
},
'telemeta.instrumentrelation': {
'Meta': {'unique_together': "(('instrument', 'parent_instrument'),)", 'object_name': 'InstrumentRelation', 'db_table': "'instrument_relations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('telemeta.models.core.ForeignKey', [], {'related_name': "'parent_relation'", 'to': "orm['telemeta.Instrument']"}),
'parent_instrument': ('telemeta.models.core.ForeignKey', [], {'related_name': "'child_relation'", 'to': "orm['telemeta.Instrument']"})
},
'telemeta.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language', 'db_table': "'languages'"},
'comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'part1': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '1', 'blank': 'True'}),
'part2B': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'part2T': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'scope': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '1', 'blank': 'True'}),
'type': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '1', 'blank': 'True'})
},
'telemeta.legalright': {
'Meta': {'ordering': "['value']", 'object_name': 'LegalRight', 'db_table': "'legal_rights'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "'locations'"},
'complete_type': ('telemeta.models.core.ForeignKey', [], {'related_name': "'locations'", 'to': "orm['telemeta.LocationType']"}),
'current_location': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'past_names'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.Location']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_authoritative': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'latitude': ('telemeta.models.core.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'longitude': ('telemeta.models.core.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '150'}),
'type': ('telemeta.models.core.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'})
},
'telemeta.locationalias': {
'Meta': {'ordering': "['alias']", 'unique_together': "(('location', 'alias'),)", 'object_name': 'LocationAlias', 'db_table': "'location_aliases'"},
'alias': ('telemeta.models.core.CharField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_authoritative': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['telemeta.Location']"})
},
'telemeta.locationrelation': {
'Meta': {'ordering': "['ancestor_location__name']", 'unique_together': "(('location', 'ancestor_location'),)", 'object_name': 'LocationRelation', 'db_table': "'location_relations'"},
'ancestor_location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'descendant_relations'", 'to': "orm['telemeta.Location']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_authoritative': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'is_direct': ('telemeta.models.core.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'ancestor_relations'", 'to': "orm['telemeta.Location']"})
},
'telemeta.locationtype': {
'Meta': {'ordering': "['name']", 'object_name': 'LocationType', 'db_table': "'location_types'"},
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'max_length': '150'})
},
'telemeta.mediacollection': {
'Meta': {'ordering': "['code']", 'object_name': 'MediaCollection', 'db_table': "'media_collections'"},
'a_informer_07_03': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'acquisition_mode': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.AcquisitionMode']"}),
'ad_conversion': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.AdConversion']"}),
'alt_ids': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'alt_title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'approx_duration': ('telemeta.models.core.DurationField', [], {'default': "'0'", 'blank': 'True'}),
'booklet_author': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'booklet_description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'cnrs_contributor': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'}),
'collector': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'collector_is_creator': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'conservation_site': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'creator': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'doctype_code': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'external_references': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'items_done': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'legal_rights': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.LegalRight']"}),
'metadata_author': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.MetadataAuthor']"}),
'metadata_writer': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.MetadataWriter']"}),
'old_code': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'physical_format': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PhysicalFormat']"}),
'physical_items_num': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'publisher': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.Publisher']"}),
'publisher_collection': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PublisherCollection']"}),
'publisher_serial': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'publishing_status': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PublishingStatus']"}),
'recorded_from_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'recorded_to_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'recording_context': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.RecordingContext']"}),
'reference': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'travail': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'year_published': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'telemeta.mediacollectionrelated': {
'Meta': {'object_name': 'MediaCollectionRelated', 'db_table': "'media_collection_related'"},
'collection': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaCollection']"}),
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediacorpus': {
'Meta': {'object_name': 'MediaCorpus', 'db_table': "'media_corpus'"},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'corpus'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['telemeta.MediaCollection']"}),
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'}),
'description': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'recorded_from_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'recorded_to_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'reference': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.mediacorpusrelated': {
'Meta': {'object_name': 'MediaCorpusRelated', 'db_table': "'media_corpus_related'"},
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'resource': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaCorpus']"}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediafonds': {
'Meta': {'object_name': 'MediaFonds', 'db_table': "'media_fonds'"},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'fonds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['telemeta.MediaCorpus']"}),
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'}),
'description': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'reference': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.mediafondsrelated': {
'Meta': {'object_name': 'MediaFondsRelated', 'db_table': "'media_fonds_related'"},
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'resource': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaFonds']"}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediaitem': {
'Meta': {'object_name': 'MediaItem', 'db_table': "'media_items'"},
'alt_title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'approx_duration': ('telemeta.models.core.DurationField', [], {'default': "'0'", 'blank': 'True'}),
'author': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'code': ('telemeta.models.core.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '250', 'blank': 'True'}),
'collection': ('telemeta.models.core.ForeignKey', [], {'related_name': "'items'", 'to': "orm['telemeta.MediaCollection']"}),
'collector': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'collector_from_collection': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'collector_selection': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'context_comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'contributor': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'copied_from_item': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'copies'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.MediaItem']"}),
'creator_reference': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'cultural_area': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'depositor': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'digitalist': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'digitization_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'ethnic_group': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'items'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.EthnicGroup']"}),
'external_references': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'generic_style': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'items'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.GenericStyle']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'language_iso': ('telemeta.models.core.ForeignKey', [], {'related_name': "'items'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['telemeta.Language']", 'blank': 'True', 'null': 'True'}),
'location': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Location']", 'null': 'True', 'blank': 'True'}),
'location_comment': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'moda_execut': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'old_code': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'organization': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Organization']", 'null': 'True', 'blank': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'publishing_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recorded_from_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recorded_to_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recordist': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'rights': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Rights']", 'null': 'True', 'blank': 'True'}),
'scientist': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'summary': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'topic': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Topic']", 'null': 'True', 'blank': 'True'}),
'track': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'vernacular_style': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'items'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.VernacularStyle']"})
},
'telemeta.mediaitemanalysis': {
'Meta': {'ordering': "['name']", 'object_name': 'MediaItemAnalysis', 'db_table': "'media_analysis'"},
'analyzer_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'analysis'", 'to': "orm['telemeta.MediaItem']"}),
'name': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'unit': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'})
},
'telemeta.mediaitemkeyword': {
'Meta': {'unique_together': "(('item', 'keyword'),)", 'object_name': 'MediaItemKeyword', 'db_table': "'media_item_keywords'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'keyword_relations'", 'to': "orm['telemeta.MediaItem']"}),
'keyword': ('telemeta.models.core.ForeignKey', [], {'related_name': "'item_relations'", 'to': "orm['telemeta.ContextKeyword']"})
},
'telemeta.mediaitemmarker': {
'Meta': {'object_name': 'MediaItemMarker', 'db_table': "'media_markers'"},
'author': ('telemeta.models.core.ForeignKey', [], {'related_name': "'markers'", 'to': "orm['auth.User']"}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'markers'", 'to': "orm['telemeta.MediaItem']"}),
'public_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'time': ('telemeta.models.core.FloatField', [], {'default': '0', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'})
},
'telemeta.mediaitemperformance': {
'Meta': {'object_name': 'MediaItemPerformance', 'db_table': "'media_item_performances'"},
'alias': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'performances'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.InstrumentAlias']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'performances'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.Instrument']"}),
'instruments_num': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'media_item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'performances'", 'to': "orm['telemeta.MediaItem']"}),
'musicians': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'})
},
'telemeta.mediaitemrelated': {
'Meta': {'object_name': 'MediaItemRelated', 'db_table': "'media_item_related'"},
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaItem']"}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediaitemtranscodingflag': {
'Meta': {'object_name': 'MediaItemTranscodingFlag', 'db_table': "'media_transcoding'"},
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'transcoding'", 'to': "orm['telemeta.MediaItem']"}),
'mime_type': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'value': ('telemeta.models.core.BooleanField', [], {'default': 'False'})
},
'telemeta.mediapart': {
'Meta': {'object_name': 'MediaPart', 'db_table': "'media_parts'"},
'end': ('telemeta.models.core.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['telemeta.MediaItem']"}),
'start': ('telemeta.models.core.FloatField', [], {}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.metadataauthor': {
'Meta': {'ordering': "['value']", 'object_name': 'MetadataAuthor', 'db_table': "'metadata_authors'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.metadatawriter': {
'Meta': {'ordering': "['value']", 'object_name': 'MetadataWriter', 'db_table': "'metadata_writers'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.numberofchannels': {
'Meta': {'ordering': "['value']", 'object_name': 'NumberOfChannels', 'db_table': "'original_channel_number'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.organization': {
'Meta': {'ordering': "['value']", 'object_name': 'Organization', 'db_table': "'organization'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.physicalformat': {
'Meta': {'ordering': "['value']", 'object_name': 'PhysicalFormat', 'db_table': "'physical_formats'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.playlist': {
'Meta': {'object_name': 'Playlist', 'db_table': "'playlists'"},
'author': ('telemeta.models.core.ForeignKey', [], {'related_name': "'playlists'", 'db_column': "'author'", 'to': "orm['auth.User']"}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.playlistresource': {
'Meta': {'object_name': 'PlaylistResource', 'db_table': "'playlist_resources'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'playlist': ('telemeta.models.core.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['telemeta.Playlist']"}),
'public_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'resource_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'resource_type': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.publisher': {
'Meta': {'ordering': "['value']", 'object_name': 'Publisher', 'db_table': "'publishers'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.publishercollection': {
'Meta': {'ordering': "['value']", 'object_name': 'PublisherCollection', 'db_table': "'publisher_collections'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publisher': ('telemeta.models.core.ForeignKey', [], {'related_name': "'publisher_collections'", 'to': "orm['telemeta.Publisher']"}),
'value': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.publishingstatus': {
'Meta': {'ordering': "['value']", 'object_name': 'PublishingStatus', 'db_table': "'publishing_status'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.recordingcontext': {
'Meta': {'ordering': "['value']", 'object_name': 'RecordingContext', 'db_table': "'recording_contexts'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.revision': {
'Meta': {'object_name': 'Revision', 'db_table': "'revisions'"},
'change_type': ('telemeta.models.core.CharField', [], {'max_length': '16'}),
'element_id': ('telemeta.models.core.IntegerField', [], {}),
'element_type': ('telemeta.models.core.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('telemeta.models.core.ForeignKey', [], {'related_name': "'revisions'", 'db_column': "'username'", 'to': "orm['auth.User']"})
},
'telemeta.rights': {
'Meta': {'ordering': "['value']", 'object_name': 'Rights', 'db_table': "'rights'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.search': {
'Meta': {'ordering': "['-date']", 'object_name': 'Search', 'db_table': "'searches'"},
'criteria': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'search'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['telemeta.Criteria']"}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('telemeta.models.core.ForeignKey', [], {'related_name': "'searches'", 'db_column': "'username'", 'to': "orm['auth.User']"})
},
'telemeta.tapelength': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeLength', 'db_table': "'tape_length'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapespeed': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeSpeed', 'db_table': "'tape_speed'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapevendor': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeVendor', 'db_table': "'tape_vendor'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapewheeldiameter': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeWheelDiameter', 'db_table': "'tape_wheel_diameter'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapewidth': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeWidth', 'db_table': "'tape_width'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.topic': {
'Meta': {'ordering': "['value']", 'object_name': 'Topic', 'db_table': "'topic'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'profiles'"},
'address': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'attachment': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'department': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'expiration_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'function': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'telephone': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'user': ('telemeta.models.core.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'telemeta.vernacularstyle': {
'Meta': {'ordering': "['value']", 'object_name': 'VernacularStyle', 'db_table': "'vernacular_styles'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
}
}
complete_apps = ['telemeta'] | agpl-3.0 |
MSM8226-Samsung/android_kernel_samsung_ms01lte | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
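# A minimal, self-contained Python 3 sketch of the same pattern used above
# (a background printer thread plus a shared counter); hypothetical and not
# part of perf -- shown only to illustrate the periodic-totals idea:
#
#     import threading, time
#     from collections import Counter
#
#     counts = Counter()
#
#     def print_totals(interval):
#         while True:
#             time.sleep(interval)
#             for key, val in counts.most_common():
#                 print("%-40s %10d" % (key, val))
#             counts.clear()
#
#     threading.Thread(target=print_totals, args=(3,), daemon=True).start()
#     # ... event handlers then simply do: counts[event_id] += 1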
| gpl-2.0 |
jat255/hyperspy | hyperspy/tests/misc/test_utils.py | 1 | 2016 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from hyperspy.misc.utils import slugify, parse_quantity, is_hyperspy_signal
from hyperspy import signals
import numpy as np
def test_slugify():
assert slugify('a') == 'a'
assert slugify('1a') == '1a'
assert slugify('1') == '1'
assert slugify('a a') == 'a_a'
assert slugify('a', valid_variable_name=True) == 'a'
assert slugify('1a', valid_variable_name=True) == 'Number_1a'
assert slugify('1', valid_variable_name=True) == 'Number_1'
assert slugify('a', valid_variable_name=False) == 'a'
assert slugify('1a', valid_variable_name=False) == '1a'
assert slugify('1', valid_variable_name=False) == '1'
def test_parse_quantity():
# From the metadata specification, the quantity is defined as
# "name (units)" without backets in the name of the quantity
assert parse_quantity('a (b)') == ('a', 'b')
assert parse_quantity('a (b/(c))') == ('a', 'b/(c)')
assert parse_quantity('a (c) (b/(c))') == ('a (c)', 'b/(c)')
assert parse_quantity('a [b]') == ('a [b]', '')
assert parse_quantity('a [b]', opening = '[', closing = ']') == ('a', 'b')
def test_is_hyperspy_signal():
s = signals.Signal1D(np.zeros((5, 5, 5)))
p = object()
assert is_hyperspy_signal(s) is True
assert is_hyperspy_signal(p) is False
| gpl-3.0 |
odejesush/tensorflow | tensorflow/contrib/graph_editor/edit.py | 158 | 8583 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function for graph editing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.graph_editor import reroute
from tensorflow.contrib.graph_editor import select
from tensorflow.contrib.graph_editor import subgraph
from tensorflow.contrib.graph_editor import util
from tensorflow.python.ops import array_ops as tf_array_ops
__all__ = [
"detach_control_inputs",
"detach_control_outputs",
"detach_inputs",
"detach_outputs",
"detach",
"connect",
"bypass",
]
def detach_control_inputs(sgv):
"""Detach all the external control inputs of the subgraph sgv.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
for op in sgv.ops:
cops = [cop for cop in op.control_inputs if cop not in sgv.ops]
reroute.remove_control_inputs(op, cops)
def detach_control_outputs(sgv, control_outputs):
"""Detach all the external control outputs of the subgraph sgv.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
control_outputs: a util.ControlOutputs instance.
"""
if not isinstance(control_outputs, util.ControlOutputs):
raise TypeError("Expected a util.ControlOutputs, got: {}",
type(control_outputs))
control_outputs.update()
sgv = subgraph.make_view(sgv)
for op in sgv.ops:
for cop in control_outputs.get(op):
if cop not in sgv.ops:
reroute.remove_control_inputs(cop, op)
def detach_inputs(sgv, control_inputs=False):
"""Detach the inputs of a subgraph view.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
control_inputs: if True control_inputs are also detached.
Returns:
A tuple `(sgv, input_placeholders)` where
`sgv` is a new subgraph view of the detached subgraph;
`input_placeholders` is a list of the created input placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
with sgv.graph.as_default():
input_placeholders = [
tf_array_ops.placeholder(
dtype=input_t.dtype, name=util.placeholder_name(input_t))
for input_t in sgv.inputs
]
reroute.swap_inputs(sgv, input_placeholders)
if control_inputs:
detach_control_inputs(sgv)
return sgv, input_placeholders
def detach_outputs(sgv, control_outputs=None):
"""Detach the output of a subgraph view.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
control_outputs: a util.ControlOutputs instance or None. If not None the
control outputs are also detached.
Returns:
A tuple `(sgv, output_placeholders)` where
`sgv` is a new subgraph view of the detached subgraph;
`output_placeholders` is a list of the created output placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
# only select outputs with consumers
sgv_ = sgv.remap_outputs([output_id
for output_id, output_t in enumerate(sgv.outputs)
if output_t.consumers()])
# create consumer subgraph and remap
consumers_sgv = subgraph.SubGraphView(sgv_.consumers())
consumers_sgv = consumers_sgv.remap_inputs(
[input_id for input_id, input_t in enumerate(consumers_sgv.inputs)
if input_t in sgv_.outputs])
with sgv_.graph.as_default():
output_placeholders = [
util.make_placeholder_from_tensor(input_t)
for input_t in consumers_sgv.inputs
]
reroute.swap_outputs(sgv_, output_placeholders)
if control_outputs is not None:
detach_control_outputs(sgv_, control_outputs)
return sgv_, output_placeholders
def detach(sgv, control_inputs=False, control_outputs=None, control_ios=None):
"""Detach both the inputs and the outputs of a subgraph view.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
control inputs and control outputs are enabled. This is equivalent to set
control_inputs to True and control_outputs to the util.ControlOutputs
instance.
Returns:
A tuple `(sgv, detached_inputs, detached_outputs)` where:
`sgv` is a new subgraph view of the detached subgraph;
    `detached_inputs` is a list of the created input placeholders;
    `detached_outputs` is a list of the created output placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
"""
control_inputs, control_outputs = select.check_cios(control_inputs,
control_outputs,
control_ios)
_, detached_inputs = detach_inputs(sgv, control_inputs)
_, detached_outputs = detach_outputs(sgv, control_outputs)
return sgv, detached_inputs, detached_outputs
def connect(sgv0, sgv1, disconnect_first=False):
"""Connect the outputs of sgv0 to the inputs of sgv1.
Args:
sgv0: the first subgraph to have its outputs swapped. This argument is
converted to a subgraph using the same rules as the function
subgraph.make_view.
Note that sgv0 is modified in place.
    sgv1: the second subgraph to have its inputs swapped. This argument is
converted to a subgraph using the same rules as the function
subgraph.make_view.
Note that sgv1 is modified in place.
disconnect_first: if True the current outputs of sgv0 are disconnected.
Returns:
A tuple `(sgv0, sgv1)` of the now connected subgraphs.
Raises:
StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
"""
sgv0 = subgraph.make_view(sgv0)
sgv1 = subgraph.make_view(sgv1)
util.check_graphs(sgv0, sgv1)
if disconnect_first:
detach_outputs(sgv0)
sgv0_outputs = subgraph.SubGraphView(passthrough_ts=sgv0.outputs)
reroute.reroute_inputs(sgv0_outputs, sgv1)
return sgv0, sgv1
def bypass(sgv):
"""Bypass the given subgraph by connecting its inputs to its outputs.
Args:
sgv: the subgraph view to be bypassed. This argument is converted to a
      subgraph using the same rules as the function subgraph.make_view.
Note that sgv is modified in place.
Returns:
A tuple `(sgv, detached_inputs)` where:
`sgv` is a new subgraph view of the bypassed subgraph;
`detached_inputs` is a list of the created input placeholders.
Raises:
StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
"""
# TODO(fkp): allows to plug sgv.inputs to individual sgv.outputs consumers
sgv = subgraph.make_view(sgv)
sgv_inputs = list(sgv.inputs)
sgv, detached_inputs = detach_inputs(sgv)
reroute.reroute_ts(sgv_inputs, sgv.outputs)
return sgv, detached_inputs
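# A hedged usage sketch of the editors above (assumes TensorFlow 1.x with
# tf.contrib.graph_editor importable; the op names are illustrative only):
#
#     import tensorflow as tf
#     from tensorflow.contrib import graph_editor as ge
#
#     a = tf.constant(1.0, name="a")
#     b = tf.add(a, 2.0, name="b")
#     # Swap b's inputs for fresh placeholders; `a` no longer feeds `b`.
#     sgv, input_placeholders = ge.detach_inputs(ge.sgv(b.op))
#     # `bypass(sgv)` would instead wire a subgraph's inputs straight to the
#     # consumers of its outputs.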
| apache-2.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/sphinx/websupport/storage/sqlalchemy_db.py | 11 | 7477 | # -*- coding: utf-8 -*-
"""
sphinx.websupport.storage.sqlalchemy_db
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SQLAlchemy table and mapper definitions used by the
:class:`sphinx.websupport.storage.sqlalchemystorage.SQLAlchemyStorage`.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from datetime import datetime
from sqlalchemy import Column, Integer, Text, String, Boolean, \
ForeignKey, DateTime
from sqlalchemy.orm import relation, sessionmaker, aliased
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Session = sessionmaker()
db_prefix = 'sphinx_'
class Node(Base):
"""Data about a Node in a doctree."""
__tablename__ = db_prefix + 'nodes'
id = Column(String(32), primary_key=True)
document = Column(String(256), nullable=False)
source = Column(Text, nullable=False)
def nested_comments(self, username, moderator):
"""Create a tree of comments. First get all comments that are
descendants of this node, then convert them to a tree form.
:param username: the name of the user to get comments for.
        :param moderator: whether the user is a moderator.
"""
session = Session()
if username:
# If a username is provided, create a subquery to retrieve all
# votes by this user. We will outerjoin with the comment query
# with this subquery so we have a user's voting information.
sq = session.query(CommentVote).\
filter(CommentVote.username == username).subquery()
cvalias = aliased(CommentVote, sq)
q = session.query(Comment, cvalias.value).outerjoin(cvalias)
else:
# If a username is not provided, we don't need to join with
# CommentVote.
q = session.query(Comment)
# Filter out all comments not descending from this node.
q = q.filter(Comment.path.like(str(self.id) + '.%'))
# Filter out all comments that are not moderated yet.
if not moderator:
q = q.filter(Comment.displayed == True)
# Retrieve all results. Results must be ordered by Comment.path
# so that we can easily transform them from a flat list to a tree.
results = q.order_by(Comment.path).all()
session.close()
return self._nest_comments(results, username)
def _nest_comments(self, results, username):
"""Given the flat list of results, convert the list into a
tree.
:param results: the flat list of comments
:param username: the name of the user requesting the comments.
"""
comments = []
list_stack = [comments]
for r in results:
if username:
comment, vote = r
else:
comment, vote = (r, 0)
inheritance_chain = comment.path.split('.')[1:]
if len(inheritance_chain) == len(list_stack) + 1:
parent = list_stack[-1][-1]
list_stack.append(parent['children'])
elif len(inheritance_chain) < len(list_stack):
while len(inheritance_chain) < len(list_stack):
list_stack.pop()
list_stack[-1].append(comment.serializable(vote=vote))
return comments
def __init__(self, id, document, source):
self.id = id
self.document = document
self.source = source
class CommentVote(Base):
"""A vote a user has made on a Comment."""
__tablename__ = db_prefix + 'commentvote'
username = Column(String(64), primary_key=True)
comment_id = Column(Integer, ForeignKey(db_prefix + 'comments.id'),
primary_key=True)
# -1 if downvoted, +1 if upvoted, 0 if voted then unvoted.
value = Column(Integer, nullable=False)
def __init__(self, comment_id, username, value):
self.comment_id = comment_id
self.username = username
self.value = value
class Comment(Base):
"""An individual Comment being stored."""
__tablename__ = db_prefix + 'comments'
id = Column(Integer, primary_key=True)
rating = Column(Integer, nullable=False)
time = Column(DateTime, nullable=False)
text = Column(Text, nullable=False)
displayed = Column(Boolean, index=True, default=False)
username = Column(String(64))
proposal = Column(Text)
proposal_diff = Column(Text)
path = Column(String(256), index=True)
node_id = Column(String, ForeignKey(db_prefix + 'nodes.id'))
node = relation(Node, backref="comments")
votes = relation(CommentVote, backref="comment",
cascade="all")
def __init__(self, text, displayed, username, rating, time,
proposal, proposal_diff):
self.text = text
self.displayed = displayed
self.username = username
self.rating = rating
self.time = time
self.proposal = proposal
self.proposal_diff = proposal_diff
def set_path(self, node_id, parent_id):
"""Set the materialized path for this comment."""
# This exists because the path can't be set until the session has
# been flushed and this Comment has an id.
if node_id:
self.node_id = node_id
self.path = '%s.%s' % (node_id, self.id)
else:
session = Session()
parent_path = session.query(Comment.path).\
filter(Comment.id == parent_id).one().path
session.close()
self.node_id = parent_path.split('.')[0]
self.path = '%s.%s' % (parent_path, self.id)
def serializable(self, vote=0):
"""Creates a serializable representation of the comment. This is
converted to JSON, and used on the client side.
"""
delta = datetime.now() - self.time
time = {'year': self.time.year,
'month': self.time.month,
'day': self.time.day,
'hour': self.time.hour,
'minute': self.time.minute,
'second': self.time.second,
'iso': self.time.isoformat(),
'delta': self.pretty_delta(delta)}
path = self.path.split('.')
node = path[0]
if len(path) > 2:
parent = path[-2]
else:
parent = None
return {'text': self.text,
'username': self.username or 'Anonymous',
'id': self.id,
'node': node,
'parent': parent,
'rating': self.rating,
'displayed': self.displayed,
'age': delta.seconds,
'time': time,
'vote': vote or 0,
'proposal_diff': self.proposal_diff,
'children': []}
def pretty_delta(self, delta):
"""Create a pretty representation of the Comment's age.
(e.g. 2 minutes).
"""
days = delta.days
seconds = delta.seconds
hours = seconds / 3600
minutes = seconds / 60
if days == 0:
if hours == 0:
dt = (minutes, 'minute')
else:
dt = (hours, 'hour')
else:
dt = (days, 'day')
if dt[0] == 1:
ret = '%s %s ago' % dt
else:
ret = '%s %ss ago' % dt
return ret
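# The tree building in Node._nest_comments relies only on the materialized
# `path` strings ("<node_id>.<id>.<child_id>...") arriving in sorted order.
# A small pure-Python sketch of the same stack-based reconstruction, using
# made-up paths (not part of Sphinx):
#
#     paths = ['n1.1', 'n1.1.2', 'n1.1.3', 'n1.4']
#     tree, stack = [], []
#     for p in sorted(paths):
#         depth = len(p.split('.')) - 1       # the node id is not a level
#         while len(stack) >= depth:
#             stack.pop()
#         target = stack[-1]['children'] if stack else tree
#         node = {'id': p, 'children': []}
#         target.append(node)
#         stack.append(node)
#     # tree == [{'id': 'n1.1', 'children': [<n1.1.2>, <n1.1.3>]},
#     #          {'id': 'n1.4', 'children': []}]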
| bsd-3-clause |
hifly/OpenUpgrade | addons/l10n_ma/l10n_ma.py | 336 | 1952 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class l10n_ma_report(osv.osv):
_name = 'l10n.ma.report'
_description = 'Report for l10n_ma_kzc'
_columns = {
'code': fields.char('Code', size=64),
'name': fields.char('Name'),
'line_ids': fields.one2many('l10n.ma.line', 'report_id', 'Lines', copy=True),
}
_sql_constraints = [
        ('code_uniq', 'unique (code)', 'The report code must be unique!')
]
class l10n_ma_line(osv.osv):
_name = 'l10n.ma.line'
_description = 'Report Lines for l10n_ma'
_columns = {
'code': fields.char('Variable Name', size=64),
'definition': fields.char('Definition'),
'name': fields.char('Name'),
'report_id': fields.many2one('l10n.ma.report', 'Report'),
}
_sql_constraints = [
        ('code_uniq', 'unique (code)', 'The variable name must be unique!')
]
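# A hedged sketch of how these models are typically filled through the old
# OpenERP ORM API (the report code, names and the balance() formula below
# are made up for illustration):
#
#     report_obj = self.pool.get('l10n.ma.report')
#     report_obj.create(cr, uid, {
#         'code': 'CPC',
#         'name': 'Compte de Produits et Charges',
#         'line_ids': [(0, 0, {'code': 'X1',
#                              'name': 'Ventes',
#                              'definition': 'balance("7")'})],
#     })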
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mmind/linux-es600 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
nmayorov/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the data
distribution and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray_assignment.py | 59 | 1982 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import run_module_suite
ndims = 2
size = 10
shape = tuple([size] * ndims)
def _indices_for_nelems(nelems):
"""Returns slices of length nelems, from start onwards, in direction sign."""
if nelems == 0:
return [size // 2] # int index
res = []
for step in (1, 2):
for sign in (-1, 1):
start = size // 2 - nelems * step * sign // 2
stop = start + nelems * step * sign
res.append(slice(start, stop, step * sign))
return res
def _indices_for_axis():
"""Returns (src, dst) pairs of indices."""
res = []
for nelems in (0, 2, 3):
ind = _indices_for_nelems(nelems)
# no itertools.product available in Py2.4
res.extend([(a, b) for a in ind for b in ind]) # all assignments of size "nelems"
return res
def _indices(ndims):
"""Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
ind = _indices_for_axis()
# no itertools.product available in Py2.4
res = [[]]
for i in range(ndims):
newres = []
for elem in ind:
for others in res:
newres.append([elem] + others)
res = newres
return res
def _check_assignment(srcidx, dstidx):
"""Check assignment arr[dstidx] = arr[srcidx] works."""
arr = np.arange(np.product(shape)).reshape(shape)
cpy = arr.copy()
cpy[dstidx] = arr[srcidx]
arr[dstidx] = arr[srcidx]
assert np.all(arr == cpy), 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)
def test_overlapping_assignments():
"""Test automatically generated assignments which overlap in memory."""
inds = _indices(ndims)
for ind in inds:
srcidx = tuple([a[0] for a in ind])
dstidx = tuple([a[1] for a in ind])
yield _check_assignment, srcidx, dstidx
if __name__ == "__main__":
run_module_suite()
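# The guarantee exercised above: an overlapping slice assignment behaves as
# if the right-hand side were copied out first. A tiny concrete case:
#
#     >>> a = np.arange(6)       # array([0, 1, 2, 3, 4, 5])
#     >>> a[1:] = a[:-1]
#     >>> a
#     array([0, 0, 1, 2, 3, 4])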
| mit |
blowmage/gcloud-python | gcloud/storage/demo/__init__.py | 1 | 1054 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from gcloud import storage
__all__ = ['create_bucket', 'list_buckets', 'PROJECT_ID']
PROJECT_ID = os.getenv('GCLOUD_TESTS_PROJECT_ID')
def list_buckets(connection):
return list(storage.list_buckets(project=PROJECT_ID,
connection=connection))
def create_bucket(bucket_name, connection):
return storage.create_bucket(bucket_name, PROJECT_ID,
connection=connection)
| apache-2.0 |
HeinleinSupport/check_mk | python-chardet/lib/python/chardet/latin1prober.py | 290 | 5370 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
def __init__(self):
super(Latin1Prober, self).__init__()
self._last_char_class = None
self._freq_counter = None
self.reset()
def reset(self):
self._last_char_class = OTH
self._freq_counter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
@property
def charset_name(self):
return "ISO-8859-1"
@property
def language(self):
return ""
def feed(self, byte_str):
byte_str = self.filter_with_english_letters(byte_str)
for c in byte_str:
char_class = Latin1_CharToClass[c]
freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM)
+ char_class]
if freq == 0:
self._state = ProbingState.NOT_ME
break
self._freq_counter[freq] += 1
self._last_char_class = char_class
return self.state
def get_confidence(self):
if self.state == ProbingState.NOT_ME:
return 0.01
total = sum(self._freq_counter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0)
/ total)
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.73
return confidence
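# A hedged usage sketch (assumes this file is importable as part of the
# chardet package; the byte string is illustrative Latin-1 data):
#
#     from chardet.latin1prober import Latin1Prober
#
#     prober = Latin1Prober()
#     prober.feed(b'na\xefve caf\xe9')    # "naïve café" encoded as Latin-1
#     print(prober.charset_name, prober.get_confidence())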
| gpl-2.0 |
gaolichuang/py-essential | essential/report/generators/conf.py | 2 | 1404 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides OpenStack config generators
This module defines a class for configuration
generators for generating the model in
:mod:`essential.report.models.conf`.
"""
from essential.config import cfg
import essential.report.models.conf as cm
class ConfigReportGenerator(object):
"""A Configuration Data Generator
This generator returns
:class:`essential.report.models.conf.ConfigModel` ,
by default using the configuration options stored
in :attr:`essential.config.cfg.CONF`, which is where
OpenStack stores everything.
:param cnf: the configuration option object
:type cnf: :class:`essential.config.cfg.ConfigOpts`
"""
def __init__(self, cnf=cfg.CONF):
self.conf_obj = cnf
def __call__(self):
return cm.ConfigModel(self.conf_obj)
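# A hedged usage sketch (assumes the application has already registered its
# options on cfg.CONF before the generator runs):
#
#     generator = ConfigReportGenerator()
#     config_model = generator()   # an essential.report.models.conf.ConfigModel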
| apache-2.0 |
vaiski/checksum | src/checksum/checksum.py | 1 | 4815 | # -*- coding: utf-8 -*-
'''
Checksum
========
Provides an extendable checksum calculation and validation library for
different checksum algorithms.
'''
class ChecksumStrategy(object):
'''
An interface class for checksum algorithm classes.
'''
def checksum(self, body):
''' Calculates a checksum for the body string provided. '''
        raise NotImplementedError('Checksum calculation is not implemented '
                                  'for this checksum strategy.')
def is_valid(self, value, checksum=None):
'''
Validates a string against the checksum.
This abstract base class provides an elementary checksum validation
method. Advanced validation methods should be implemented in
subclasses when possible.
'''
body = value
if checksum is None:
(body, checksum) = self.split(value)
return self.checksum(body) == checksum
def split(self, value):
'''
Splits the string including a checksum according to the checksum
algorithm used.
'''
raise NotImplementedError('Splitting is not implemented for this '
'checksum strategy.')
def _prepare(self, body):
''' Method to prepare the body string for checksum calculation. '''
return [int(d) for d in str(body)]
class Checksum(object):
'''
Checksum context class. Provides different checksum calculation and
verification algorithms by acting as a factory class.
'''
_strategies = {}
def __init__(self, strategy=None, body=None):
'''
Checksum context class constructor.
:param strategy : name of the used checksum algorithm
:param body : string that the checksum is calculated for
'''
self._strategy = None
self._body = None
self.strategy = strategy
self.body = body
# Setters and getters
# -------------------
@property
def body(self):
''' Getter for the body property. '''
return self._body
@body.setter
def body(self, value):
''' Setter for the body property. '''
if value is not None:
self._body = value
else:
self._body = ''
@property
def strategy(self):
''' Getter for the strategy property. '''
return self._strategy
@strategy.setter
def strategy(self, value):
''' Setter for the strategy property. '''
if value is None:
return
if value in self._strategies:
strategy = self._strategies[value]()
else:
raise NotImplementedError('Checksum strategy %s is not '
'implemented.' % value)
        if (isinstance(strategy, ChecksumStrategy) and
                type(strategy) is not ChecksumStrategy):
self._strategy = strategy
else:
raise TypeError(
'Strategy requires a subclass of ChecksumStrategy.'
' Got instead %s.' % type(strategy))
def checksum(self):
'''
Calculates the checksum using selected algorithm for the body string.
'''
if self.strategy is not None:
return self.strategy.checksum(self._body)
def is_valid(self, value, checksum=None):
'''
Validates either a string containing a checksum or a body string and
a against separately provided checksum.
'''
if self.strategy is not None:
return self.strategy.is_valid(value, checksum)
def split(self, value):
'''
Splits a string containing a body and a checksum according to the
conventions of selected checksum algorithm.
'''
if self.strategy is not None:
return self.strategy.split(value)
def type(self):
'''
Returns the name of used checksum algorithm.
'''
if self.strategy is not None:
return self.strategy.name
else:
return None
@classmethod
def register_strategy(cls, strategy_cls):
'''
Registers a checksum strategy class in the available checksum
strategies.
'''
strategy = strategy_cls()
        if (isinstance(strategy, ChecksumStrategy) and
                type(strategy) is not ChecksumStrategy):
cls._strategies[strategy_cls.name] = strategy_cls
else:
raise TypeError(
'Strategy requires a subclass of ChecksumStrategy.'
' Got instead %s.' % type(strategy))
@classmethod
def list_strategies(cls):
'''
Lists all the available strategies for checksum calculation.
'''
return cls._strategies.keys()
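# A hedged sketch of a concrete strategy: a Luhn mod-10 implementation is
# assumed here purely for illustration (it does not ship with this module):
#
#     class LuhnStrategy(ChecksumStrategy):
#         name = 'luhn'
#
#         def checksum(self, body):
#             digits = self._prepare(body)[::-1]
#             total = sum(d * 2 - 9 if i % 2 == 0 and d > 4 else
#                         d * 2 if i % 2 == 0 else d
#                         for i, d in enumerate(digits))
#             return (10 - total % 10) % 10
#
#         def split(self, value):
#             return value[:-1], int(value[-1])
#
#     Checksum.register_strategy(LuhnStrategy)
#     Checksum('luhn', '7992739871').checksum()    # -> 3
#     Checksum('luhn').is_valid('79927398713')     # -> True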
| mit |
ycaihua/kbengine | kbe/res/scripts/common/Lib/test/test_import.py | 60 | 40179 | # We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import unittest
import unittest.mock as mock
import textwrap
import errno
import shutil
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE)
from test import script_helper
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyo",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with script_helper.temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
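# Typical use of the helper above (a sketch; "spam" is the default name):
#
#     with _ready_to_import("spam", "x = 42") as (name, path):
#         mod = __import__(name)
#         assert mod.x == 42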
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
pyo = TESTFN + ".pyo"
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
unlink(pyo)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc (or .pyo).
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertIn(ext, ('.pyc', '.pyo'))
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno != getattr(errno, 'EOVERFLOW', None):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc/.pyo files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
if __debug__:
bytecode_only = path + "c"
else:
bytecode_only = path + "o"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(12)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from . import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(SystemError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
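# For "from . import x", the import system resolves the anchor package from
# __package__ when it is a valid string (falling back to __name__), which is
# why a bogus __package__ alone is enough to break resolution above.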
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147 related behaviors.
tag = sys.implementation.cache_tag
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertTrue(os.path.exists(os.path.join(
'__pycache__', '{}.{}.py{}'.format(
TESTFN, self.tag, 'c' if __debug__ else 'o'))))
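# e.g. with cache_tag 'cpython-34', importing TESTFN creates
# '__pycache__/{TESTFN}.cpython-34.pyc' (illustrative tag; it varies by
# interpreter, and running with -O switches the suffix to .pyo here).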
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
self.assertFalse(os.path.exists(os.path.join(
'__pycache__', '{}.{}.pyc'.format(TESTFN, self.tag))))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
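# (The pyc header records both the source mtime and the source size, so a
# same-second edit that changes the size still invalidates the cache.)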
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertTrue(os.path.isdir(self.package_name))
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.FileFinder, mod.FileFinder)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap._get_sourcefile() as used by the C API.
Because of this function's peculiar use case, these tests are
deliberately whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap._path_isfile') as _path_isfile:
_path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap._path_isfile') as _path_isfile:
_path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
| lgpl-3.0 |
donspaulding/adspygoogle | examples/adspygoogle/dfp/v201206/make_test_network.py | 2 | 2023 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a test network.
You do not need to have a DFP account to
run this example, but you do need to have a new Google account (created at
http://www.google.com/accounts/newaccount) that is not associated with any
other DFP networks (including old sandbox networks). Once this network is
created, you can supply the network code in your settings to make calls to
other services.
Please see the following URL for more information:
https://developers.google.com/doubleclick-publishers/docs/signup
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If the module was installed via the "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
network_service = client.GetService('NetworkService', version='v201206')
# Get the current network.
network = network_service.MakeTestNetwork()[0]
# Display results.
print ('Test network with network code \'%s\' and display name \'%s\' created.'
% (network['networkCode'], network['displayName']))
print ('You may now sign in at http://www.google.com/dfp/main?networkCode=%s' %
network['networkCode'])
| apache-2.0 |
RadonX-ROM/external_skia | tools/svn.py | 84 | 7044 | '''
Copyright 2011 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
import fnmatch
import os
import re
import subprocess
import threading
PROPERTY_MIMETYPE = 'svn:mime-type'
# Status types for GetFilesWithStatus()
STATUS_ADDED = 0x01
STATUS_DELETED = 0x02
STATUS_MODIFIED = 0x04
STATUS_NOT_UNDER_SVN_CONTROL = 0x08
if os.name == 'nt':
SVN = 'svn.bat'
else:
SVN = 'svn'
def Cat(svn_url):
"""Returns the contents of the file at the given svn_url.
@param svn_url URL of the file to read
"""
proc = subprocess.Popen([SVN, 'cat', svn_url],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
exitcode = proc.wait()
if exitcode != 0:
raise Exception('Could not retrieve %s. Verify that the URL is valid '
'and check your connection.' % svn_url)
return proc.communicate()[0]
class Svn:
def __init__(self, directory):
"""Set up to manipulate SVN control within the given directory.
The resulting object is thread-safe: access to all methods is
synchronized (if one thread is currently executing any of its methods,
all other threads must wait before executing any of its methods).
@param directory
"""
self._directory = directory
# This must be a reentrant lock, so that it can be held by both
# _RunCommand() and (some of) the methods that call it.
self._rlock = threading.RLock()
def _RunCommand(self, args):
"""Run a command (from self._directory) and return stdout as a single
string.
@param args a list of arguments
"""
with self._rlock:
print 'RunCommand: %s' % args
proc = subprocess.Popen(args, cwd=self._directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise Exception('command "%s" failed in dir "%s": %s' %
(args, self._directory, stderr))
return stdout
def GetInfo(self):
"""Run "svn info" and return a dictionary containing its output.
"""
output = self._RunCommand([SVN, 'info'])
svn_info = {}
for line in output.split('\n'):
if ':' in line:
(key, value) = line.split(':', 1)
svn_info[key.strip()] = value.strip()
return svn_info
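# Illustrative sketch of what GetInfo() parses; typical "svn info" output:
#   URL: http://example.com/svn/trunk
#   Revision: 1234
# would yield {'URL': 'http://example.com/svn/trunk', 'Revision': '1234'}.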
def Checkout(self, url, path):
"""Check out a working copy from a repository.
Returns stdout as a single string.
@param url URL from which to check out the working copy
@param path path (within self._directory) where the local copy will be
written
"""
return self._RunCommand([SVN, 'checkout', url, path])
def Update(self, path, revision='HEAD'):
"""Update the working copy.
Returns stdout as a single string.
@param path path (within self._directory) within which to run
"svn update"
@param revision revision to update to
"""
return self._RunCommand([SVN, 'update', path, '--revision', revision])
def ListSubdirs(self, url):
"""Returns a list of all subdirectories (not files) within a given SVN
url.
@param url remote directory to list subdirectories of
"""
subdirs = []
filenames = self._RunCommand([SVN, 'ls', url]).split('\n')
for filename in filenames:
if filename.endswith('/'):
subdirs.append(filename.strip('/'))
return subdirs
def GetNewFiles(self):
"""Return a list of files which are in this directory but NOT under
SVN control.
"""
return self.GetFilesWithStatus(STATUS_NOT_UNDER_SVN_CONTROL)
def GetNewAndModifiedFiles(self):
"""Return a list of files in this dir which are newly added or modified,
including those that are not (yet) under SVN control.
"""
return self.GetFilesWithStatus(
STATUS_ADDED | STATUS_MODIFIED | STATUS_NOT_UNDER_SVN_CONTROL)
def GetFilesWithStatus(self, status):
"""Return a list of files in this dir with the given SVN status.
@param status bitfield combining one or more STATUS_xxx values
"""
status_types_string = ''
if status & STATUS_ADDED:
status_types_string += 'A'
if status & STATUS_DELETED:
status_types_string += 'D'
if status & STATUS_MODIFIED:
status_types_string += 'M'
if status & STATUS_NOT_UNDER_SVN_CONTROL:
status_types_string += '\?'
status_regex_string = '^[%s].....\s+(.+)$' % status_types_string
stdout = self._RunCommand([SVN, 'status']).replace('\r', '')
status_regex = re.compile(status_regex_string, re.MULTILINE)
files = status_regex.findall(stdout)
return files
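# The STATUS_xxx values form a bitfield, so callers can combine them, e.g.:
#   svn.GetFilesWithStatus(STATUS_ADDED | STATUS_MODIFIED)
# returns files that are either newly added or locally modified.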
def AddFiles(self, filenames):
"""Adds these files to SVN control.
@param filenames files to add to SVN control
"""
self._RunCommand([SVN, 'add'] + filenames)
def SetProperty(self, filenames, property_name, property_value):
"""Sets a svn property for these files.
@param filenames files to set property on
@param property_name property_name to set for each file
@param property_value what to set the property_name to
"""
if filenames:
self._RunCommand(
[SVN, 'propset', property_name, property_value] + filenames)
def SetPropertyByFilenamePattern(self, filename_pattern,
property_name, property_value):
"""Sets a svn property for all files matching filename_pattern.
@param filename_pattern set the property for all files whose names match
this Unix-style filename pattern (e.g., '*.jpg')
@param property_name property_name to set for each file
@param property_value what to set the property_name to
"""
with self._rlock:
all_files = os.listdir(self._directory)
matching_files = sorted(fnmatch.filter(all_files, filename_pattern))
self.SetProperty(matching_files, property_name, property_value)
def ExportBaseVersionOfFile(self, file_within_repo, dest_path):
"""Retrieves a copy of the base version (what you would get if you ran
'svn revert') of a file within the repository.
@param file_within_repo path to the file within the repo whose base
version you wish to obtain
@param dest_path destination to which to write the base content
"""
self._RunCommand([SVN, 'export', '--revision', 'BASE', '--force',
file_within_repo, dest_path])
| bsd-3-clause |
Nebucatnetzer/tamagotchi | pygame/lib/python3.4/site-packages/pip/_vendor/lockfile/mkdirlockfile.py | 536 | 3096 | from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = MkdirLockFile('somefile')
>>> lock = MkdirLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
self.tname,
self.pid))
def acquire(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
# Someone else has the lock.
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed("failed to create %s" % self.lock_file)
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not os.path.exists(self.unique_name):
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
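# Usage sketch (illustrative only): the atomicity of os.mkdir() is what makes
# this lock safe across processes -- exactly one caller can create the
# directory at a time.
#   lock = MkdirLockFile('/tmp/somefile')
#   lock.acquire(timeout=10)
#   try:
#       pass  # critical section
#   finally:
#       lock.release()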
| gpl-2.0 |
marc-sensenich/ansible | test/units/modules/network/netscaler/test_netscaler_service.py | 68 | 14309 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat.mock import patch, Mock, MagicMock, call
import sys
if sys.version_info[:2] != (2, 6):
import requests
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
class TestNetscalerServiceModule(TestModule):
@classmethod
def setUpClass(cls):
m = MagicMock()
cls.service_mock = MagicMock()
cls.service_mock.__class__ = MagicMock()
cls.service_lbmonitor_binding_mock = MagicMock()
cls.lbmonitor_service_binding_mock = MagicMock()
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.basic': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service.service': cls.service_mock,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding': cls.service_lbmonitor_binding_mock,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding.service_lbmonitor_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding.lbmonitor_service_binding': cls.lbmonitor_service_binding_mock,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
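# The patch.dict(sys.modules, ...) trick above is a generic way to satisfy
# imports of an SDK that is absent at test time; a minimal illustration
# (module name hypothetical):
#   with patch.dict(sys.modules, {'missing_sdk': MagicMock()}):
#       import missing_sdk  # resolves to the mock, no ImportError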
def set_module_state(self, state):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state=state,
))
def setUp(self):
super(TestNetscalerServiceModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerServiceModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
self.set_module_state('present')
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_service
self.module = netscaler_service
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_service.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_service.nitro_exception', MockException):
self.module = netscaler_service
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_service
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_service
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_create_non_existing_service(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[False, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_has_calls([call.add()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_update_service_when_service_differs(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, True])
service_identical_mock = Mock(side_effect=[False, True])
monitor_bindings_identical_mock = Mock(side_effect=[True, True])
all_identical_mock = Mock(side_effect=[False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
service_identical=service_identical_mock,
monitor_bindings_identical=monitor_bindings_identical_mock,
all_identical=all_identical_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_has_calls([call.update()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_update_service_when_monitor_bindings_differ(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, True])
service_identical_mock = Mock(side_effect=[True, True])
monitor_bindings_identical_mock = Mock(side_effect=[False, True])
all_identical_mock = Mock(side_effect=[False])
sync_monitor_bindings_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
service_identical=service_identical_mock,
monitor_bindings_identical=monitor_bindings_identical_mock,
all_identical=all_identical_mock,
sync_monitor_bindings=sync_monitor_bindings_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
# poor man's assert_called_once since python3.5 does not implement that mock method
self.assertEqual(len(sync_monitor_bindings_mock.mock_calls), 1, msg='sync monitor bindings not called once')
self.assertTrue(result['changed'], msg='Change not recorded')
def test_no_change_to_module_when_all_identical(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, True])
service_identical_mock = Mock(side_effect=[True, True])
monitor_bindings_identical_mock = Mock(side_effect=[True, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
service_identical=service_identical_mock,
monitor_bindings_identical=monitor_bindings_identical_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
self.assertFalse(result['changed'], msg='Erroneous changed status update')
def test_absent_operation(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_has_calls([call.delete()])
self.assertTrue(result['changed'], msg='Changed status not set correctly')
def test_absent_operation_no_change(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[False, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_not_called()
self.assertFalse(result['changed'], msg='Changed status not set correctly')
def test_graceful_nitro_exception_operation_present(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
service_exists=m,
nitro_exception=MockException
):
self.module = netscaler_service
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation present'
)
def test_graceful_nitro_exception_operation_absent(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
service_exists=m,
nitro_exception=MockException
):
self.module = netscaler_service
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
| gpl-3.0 |
jovencoda/evoca-v2 | evoca_v2/core/migrations/0022_auto_20170820_0036.py | 1 | 1202 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-20 00:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0021_channel_image'),
]
operations = [
migrations.CreateModel(
name='ChannelTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uniqueID', models.UUIDField(default=uuid.uuid4, editable=False)),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(blank=True, null=True)),
],
),
migrations.AlterField(
model_name='channel',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='static/img/'),
),
migrations.AddField(
model_name='channeltag',
name='related_channel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_channel', to='core.Channel'),
),
]
| gpl-3.0 |
palerdot/calibre | src/calibre/gui2/dialogs/plugin_updater.py | 3 | 37726 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Grant Drake <grant.drake@gmail.com>'
__docformat__ = 'restructuredtext en'
import re, datetime, traceback
from lxml import html
from PyQt4.Qt import (Qt, QUrl, QFrame, QVBoxLayout, QLabel, QBrush, QTextEdit,
QComboBox, QAbstractItemView, QHBoxLayout, QDialogButtonBox,
QAbstractTableModel, QVariant, QTableView, QModelIndex,
QSortFilterProxyModel, QAction, QIcon, QDialog,
QFont, QPixmap, QSize)
from PyQt4.QtGui import QLineEdit
from calibre import browser, prints
from calibre.constants import numeric_version, iswindows, isosx, DEBUG, __appname__, __version__
from calibre.customize.ui import (
initialized_plugins, is_disabled, remove_plugin, add_plugin, enable_plugin, disable_plugin,
NameConflict, has_external_plugins)
from calibre.gui2 import error_dialog, question_dialog, info_dialog, NONE, open_url, gprefs
from calibre.gui2.preferences.plugins import ConfigWidget
from calibre.utils.date import UNDEFINED_DATE, format_date
SERVER = 'http://plugins.calibre-ebook.com/'
INDEX_URL = '%splugins.json.bz2' % SERVER
FILTER_ALL = 0
FILTER_INSTALLED = 1
FILTER_UPDATE_AVAILABLE = 2
FILTER_NOT_INSTALLED = 3
def get_plugin_updates_available(raise_error=False):
'''
API exposed to read whether there are updates available for any
of the installed user plugins.
Returns None if no updates found
Returns list(DisplayPlugin) of plugins installed that have a new version
'''
if not has_external_plugins():
return None
display_plugins = read_available_plugins(raise_error=raise_error)
if display_plugins:
update_plugins = filter(filter_upgradeable_plugins, display_plugins)
if len(update_plugins) > 0:
return update_plugins
return None
def filter_upgradeable_plugins(display_plugin):
return display_plugin.is_upgrade_available()
def filter_not_installed_plugins(display_plugin):
return not display_plugin.is_installed()
def read_available_plugins(raise_error=False):
import json, bz2
display_plugins = []
br = browser()
try:
raw = br.open_novisit(INDEX_URL).read()
if not raw:
return
raw = json.loads(bz2.decompress(raw))
except:
if raise_error:
raise
traceback.print_exc()
return
for plugin in raw.itervalues():
try:
display_plugin = DisplayPlugin(plugin)
get_installed_plugin_status(display_plugin)
display_plugins.append(display_plugin)
except:
if DEBUG:
prints('======= Plugin Parse Error =======')
traceback.print_exc()
import pprint
pprint.pprint(plugin)
display_plugins = sorted(display_plugins, key=lambda k: k.name)
return display_plugins
def get_installed_plugin_status(display_plugin):
display_plugin.installed_version = None
display_plugin.plugin = None
for plugin in initialized_plugins():
if plugin.name == display_plugin.name and plugin.plugin_path is not None:
display_plugin.plugin = plugin
display_plugin.installed_version = plugin.version
break
if display_plugin.uninstall_plugins:
# Plugin requires a specific plugin name to be uninstalled first
# This could occur when a plugin is renamed (Kindle Collections)
# or multiple plugins deprecated into a newly named one.
# Check whether user has the previous version(s) installed
plugins_to_remove = list(display_plugin.uninstall_plugins)
for plugin_to_uninstall in plugins_to_remove:
found = False
for plugin in initialized_plugins():
if plugin.name == plugin_to_uninstall:
found = True
break
if not found:
display_plugin.uninstall_plugins.remove(plugin_to_uninstall)
class ImageTitleLayout(QHBoxLayout):
'''
A reusable layout widget displaying an image followed by a title
'''
def __init__(self, parent, icon_name, title):
QHBoxLayout.__init__(self)
title_font = QFont()
title_font.setPointSize(16)
title_image_label = QLabel(parent)
pixmap = QPixmap()
pixmap.load(I(icon_name))
if pixmap.isNull():
error_dialog(parent, _('Restart required'),
_('You must restart Calibre before using this plugin!'), show=True)
else:
title_image_label.setPixmap(pixmap)
title_image_label.setMaximumSize(32, 32)
title_image_label.setScaledContents(True)
self.addWidget(title_image_label)
shelf_label = QLabel(title, parent)
shelf_label.setFont(title_font)
self.addWidget(shelf_label)
self.insertStretch(-1)
class SizePersistedDialog(QDialog):
'''
This dialog is a base class for any dialogs that want their size/position
restored when they are next opened.
'''
initial_extra_size = QSize(0, 0)
def __init__(self, parent, unique_pref_name):
QDialog.__init__(self, parent)
self.unique_pref_name = unique_pref_name
self.geom = gprefs.get(unique_pref_name, None)
self.finished.connect(self.dialog_closing)
def resize_dialog(self):
if self.geom is None:
self.resize(self.sizeHint()+self.initial_extra_size)
else:
self.restoreGeometry(self.geom)
def dialog_closing(self, result):
geom = bytearray(self.saveGeometry())
gprefs[self.unique_pref_name] = geom
class VersionHistoryDialog(SizePersistedDialog):
def __init__(self, parent, plugin_name, html):
SizePersistedDialog.__init__(self, parent, 'Plugin Updater plugin:version history dialog')
self.setWindowTitle(_('Version History for %s')%plugin_name)
layout = QVBoxLayout(self)
self.setLayout(layout)
self.notes = QTextEdit(html, self)
self.notes.setReadOnly(True)
layout.addWidget(self.notes)
self.button_box = QDialogButtonBox(QDialogButtonBox.Close)
self.button_box.rejected.connect(self.reject)
layout.addWidget(self.button_box)
# Cause our dialog size to be restored from prefs or created on first usage
self.resize_dialog()
class PluginFilterComboBox(QComboBox):
def __init__(self, parent):
QComboBox.__init__(self, parent)
items = [_('All'), _('Installed'), _('Update available'), _('Not installed')]
self.addItems(items)
class DisplayPlugin(object):
def __init__(self, plugin):
self.name = plugin['index_name']
self.forum_link = plugin['thread_url']
self.zip_url = SERVER + plugin['file']
self.installed_version = None
self.description = plugin['description']
self.donation_link = plugin['donate']
self.available_version = tuple(plugin['version'])
self.release_date = datetime.datetime(*tuple(map(int, re.split(r'\D', plugin['last_modified'])))[:6]).date()
self.calibre_required_version = plugin['minimum_calibre_version']
self.author = plugin['author']
self.platforms = plugin['supported_platforms']
self.uninstall_plugins = plugin['uninstall'] or []
self.has_changelog = plugin['history']
self.is_deprecated = plugin['deprecated']
def is_disabled(self):
if self.plugin is None:
return False
return is_disabled(self.plugin)
def is_installed(self):
return self.installed_version is not None
def name_matches_filter(self, filter_text):
# filter_text is already lowercase @set_filter_text
return filter_text in icu_lower(self.name) # case-insensitive filtering
def is_upgrade_available(self):
return self.is_installed() and (self.installed_version < self.available_version
or self.is_deprecated)
def is_valid_platform(self):
if iswindows:
return 'windows' in self.platforms
if isosx:
return 'osx' in self.platforms
return 'linux' in self.platforms
def is_valid_calibre(self):
return numeric_version >= self.calibre_required_version
def is_valid_to_install(self):
return self.is_valid_platform() and self.is_valid_calibre() and not self.is_deprecated
class DisplayPluginSortFilterModel(QSortFilterProxyModel):
def __init__(self, parent):
QSortFilterProxyModel.__init__(self, parent)
self.setSortRole(Qt.UserRole)
self.setSortCaseSensitivity(Qt.CaseInsensitive)
self.filter_criteria = FILTER_ALL
self.filter_text = ""
def filterAcceptsRow(self, sourceRow, sourceParent):
index = self.sourceModel().index(sourceRow, 0, sourceParent)
display_plugin = self.sourceModel().display_plugins[index.row()]
if self.filter_criteria == FILTER_ALL:
return not (display_plugin.is_deprecated and not display_plugin.is_installed()) and display_plugin.name_matches_filter(self.filter_text)
if self.filter_criteria == FILTER_INSTALLED:
return display_plugin.is_installed() and display_plugin.name_matches_filter(self.filter_text)
if self.filter_criteria == FILTER_UPDATE_AVAILABLE:
return display_plugin.is_upgrade_available() and display_plugin.name_matches_filter(self.filter_text)
if self.filter_criteria == FILTER_NOT_INSTALLED:
return not display_plugin.is_installed() and not display_plugin.is_deprecated and display_plugin.name_matches_filter(self.filter_text)
return False
def set_filter_criteria(self, filter_value):
self.filter_criteria = filter_value
self.invalidateFilter()
def set_filter_text(self, filter_text_value):
self.filter_text = icu_lower(unicode(filter_text_value))
self.invalidateFilter()
class DisplayPluginModel(QAbstractTableModel):
def __init__(self, display_plugins):
QAbstractTableModel.__init__(self)
self.display_plugins = display_plugins
self.headers = map(QVariant, [_('Plugin Name'), _('Donate'), _('Status'), _('Installed'),
_('Available'), _('Released'), _('Calibre'), _('Author')])
def rowCount(self, *args):
return len(self.display_plugins)
def columnCount(self, *args):
return len(self.headers)
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
return self.headers[section]
return NONE
def data(self, index, role):
if not index.isValid():
return NONE
row, col = index.row(), index.column()
if row < 0 or row >= self.rowCount():
return NONE
display_plugin = self.display_plugins[row]
if role in [Qt.DisplayRole, Qt.UserRole]:
if col == 0:
return QVariant(display_plugin.name)
if col == 1:
if display_plugin.donation_link:
return QVariant(_('PayPal'))
if col == 2:
return self._get_status(display_plugin)
if col == 3:
return QVariant(self._get_display_version(display_plugin.installed_version))
if col == 4:
return QVariant(self._get_display_version(display_plugin.available_version))
if col == 5:
if role == Qt.UserRole:
return self._get_display_release_date(display_plugin.release_date, 'yyyyMMdd')
else:
return self._get_display_release_date(display_plugin.release_date)
if col == 6:
return QVariant(self._get_display_version(display_plugin.calibre_required_version))
if col == 7:
return QVariant(display_plugin.author)
elif role == Qt.DecorationRole:
if col == 0:
return self._get_status_icon(display_plugin)
if col == 1:
if display_plugin.donation_link:
return QIcon(I('donate.png'))
elif role == Qt.ToolTipRole:
if col == 1 and display_plugin.donation_link:
return QVariant(_('This plugin is FREE but you can reward the developer for their effort\n'
'by donating to them via PayPal.\n\n'
'Right-click and choose Donate to reward: ')+display_plugin.author)
else:
return self._get_status_tooltip(display_plugin)
elif role == Qt.ForegroundRole:
if col != 1: # Never change colour of the donation column
if display_plugin.is_deprecated:
return QVariant(QBrush(Qt.blue))
if display_plugin.is_disabled():
return QVariant(QBrush(Qt.gray))
return NONE
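# data() above follows the usual QAbstractTableModel contract: the view calls
# it once per (index, role) pair, so each role branch returns quickly and
# anything unhandled falls through to NONE (an empty QVariant).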
def plugin_to_index(self, display_plugin):
for i, p in enumerate(self.display_plugins):
if display_plugin == p:
return self.index(i, 0, QModelIndex())
return QModelIndex()
def refresh_plugin(self, display_plugin):
idx = self.plugin_to_index(display_plugin)
self.dataChanged.emit(idx, idx)
def _get_display_release_date(self, date_value, format='dd MMM yyyy'):
if date_value and date_value != UNDEFINED_DATE:
return QVariant(format_date(date_value, format))
return NONE
def _get_display_version(self, version):
if version is None:
return ''
return '.'.join([str(v) for v in list(version)])
def _get_status(self, display_plugin):
if not display_plugin.is_valid_platform():
return _('Platform unavailable')
if not display_plugin.is_valid_calibre():
return _('Calibre upgrade required')
if display_plugin.is_installed():
if display_plugin.is_deprecated:
return _('Plugin deprecated')
elif display_plugin.is_upgrade_available():
return _('New version available')
else:
return _('Latest version installed')
return _('Not installed')
def _get_status_icon(self, display_plugin):
if display_plugin.is_deprecated:
icon_name = 'plugin_deprecated.png'
elif display_plugin.is_disabled():
if display_plugin.is_upgrade_available():
if display_plugin.is_valid_to_install():
icon_name = 'plugin_disabled_valid.png'
else:
icon_name = 'plugin_disabled_invalid.png'
else:
icon_name = 'plugin_disabled_ok.png'
elif display_plugin.is_installed():
if display_plugin.is_upgrade_available():
if display_plugin.is_valid_to_install():
icon_name = 'plugin_upgrade_valid.png'
else:
icon_name = 'plugin_upgrade_invalid.png'
else:
icon_name = 'plugin_upgrade_ok.png'
else: # A plugin available not currently installed
if display_plugin.is_valid_to_install():
icon_name = 'plugin_new_valid.png'
else:
icon_name = 'plugin_new_invalid.png'
return QIcon(I('plugins/' + icon_name))
def _get_status_tooltip(self, display_plugin):
if display_plugin.is_deprecated:
return QVariant(_('This plugin has been deprecated and should be uninstalled')+'\n\n'+
_('Right-click to see more options'))
if not display_plugin.is_valid_platform():
return QVariant(_('This plugin can only be installed on: %s') %
', '.join(display_plugin.platforms)+'\n\n'+
_('Right-click to see more options'))
if numeric_version < display_plugin.calibre_required_version:
return QVariant(_('You must upgrade to at least Calibre %s before installing this plugin') %
self._get_display_version(display_plugin.calibre_required_version)+'\n\n'+
_('Right-click to see more options'))
if display_plugin.installed_version < display_plugin.available_version:
if display_plugin.installed_version is None:
return QVariant(_('You can install this plugin')+'\n\n'+
_('Right-click to see more options'))
else:
return QVariant(_('A new version of this plugin is available')+'\n\n'+
_('Right-click to see more options'))
return QVariant(_('This plugin is installed and up-to-date')+'\n\n'+
_('Right-click to see more options'))
class PluginUpdaterDialog(SizePersistedDialog):
initial_extra_size = QSize(350, 100)
forum_label_text = _('Plugin homepage')
def __init__(self, gui, initial_filter=FILTER_UPDATE_AVAILABLE):
SizePersistedDialog.__init__(self, gui, 'Plugin Updater plugin:plugin updater dialog')
self.gui = gui
self.forum_link = None
self.zip_url = None
self.model = None
self.do_restart = False
self._initialize_controls()
self._create_context_menu()
display_plugins = read_available_plugins()
if display_plugins:
self.model = DisplayPluginModel(display_plugins)
self.proxy_model = DisplayPluginSortFilterModel(self)
self.proxy_model.setSourceModel(self.model)
self.plugin_view.setModel(self.proxy_model)
self.plugin_view.resizeColumnsToContents()
self.plugin_view.selectionModel().currentRowChanged.connect(self._plugin_current_changed)
self.plugin_view.doubleClicked.connect(self.install_button.click)
self.filter_combo.setCurrentIndex(initial_filter)
self._select_and_focus_view()
else:
error_dialog(self.gui, _('Update Check Failed'),
_('Unable to reach the plugin index page.'),
det_msg=INDEX_URL, show=True)
self.filter_combo.setEnabled(False)
# Cause our dialog size to be restored from prefs or created on first usage
self.resize_dialog()
def _initialize_controls(self):
self.setWindowTitle(_('User plugins'))
self.setWindowIcon(QIcon(I('plugins/plugin_updater.png')))
layout = QVBoxLayout(self)
self.setLayout(layout)
title_layout = ImageTitleLayout(self, 'plugins/plugin_updater.png',
_('User Plugins'))
layout.addLayout(title_layout)
header_layout = QHBoxLayout()
layout.addLayout(header_layout)
self.filter_combo = PluginFilterComboBox(self)
self.filter_combo.setMinimumContentsLength(20)
self.filter_combo.currentIndexChanged[int].connect(self._filter_combo_changed)
header_layout.addWidget(QLabel(_('Filter list of plugins')+':', self))
header_layout.addWidget(self.filter_combo)
header_layout.addStretch(10)
# filter plugins by name
header_layout.addWidget(QLabel(_('Filter by name')+':', self))
self.filter_by_name_lineedit = QLineEdit(self)
self.filter_by_name_lineedit.setText("")
self.filter_by_name_lineedit.textChanged.connect(self._filter_name_lineedit_changed)
header_layout.addWidget(self.filter_by_name_lineedit)
self.plugin_view = QTableView(self)
self.plugin_view.horizontalHeader().setStretchLastSection(True)
self.plugin_view.setSelectionBehavior(QAbstractItemView.SelectRows)
self.plugin_view.setSelectionMode(QAbstractItemView.SingleSelection)
self.plugin_view.setAlternatingRowColors(True)
self.plugin_view.setSortingEnabled(True)
self.plugin_view.setIconSize(QSize(28, 28))
layout.addWidget(self.plugin_view)
details_layout = QHBoxLayout()
layout.addLayout(details_layout)
forum_label = self.forum_label = QLabel('')
forum_label.setTextInteractionFlags(Qt.LinksAccessibleByMouse | Qt.LinksAccessibleByKeyboard)
forum_label.linkActivated.connect(self._forum_label_activated)
details_layout.addWidget(QLabel(_('Description')+':', self), 0, Qt.AlignLeft)
details_layout.addWidget(forum_label, 1, Qt.AlignRight)
self.description = QLabel(self)
self.description.setFrameStyle(QFrame.Panel | QFrame.Sunken)
self.description.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.description.setMinimumHeight(40)
self.description.setWordWrap(True)
layout.addWidget(self.description)
self.button_box = QDialogButtonBox(QDialogButtonBox.Close)
self.button_box.rejected.connect(self.reject)
self.finished.connect(self._finished)
self.install_button = self.button_box.addButton(_('&Install'), QDialogButtonBox.AcceptRole)
self.install_button.setToolTip(_('Install the selected plugin'))
self.install_button.clicked.connect(self._install_clicked)
self.install_button.setEnabled(False)
self.configure_button = self.button_box.addButton(' '+_('&Customize plugin ')+' ', QDialogButtonBox.ResetRole)
self.configure_button.setToolTip(_('Customize the options for this plugin'))
self.configure_button.clicked.connect(self._configure_clicked)
self.configure_button.setEnabled(False)
layout.addWidget(self.button_box)
def update_forum_label(self):
txt = ''
if self.forum_link:
txt = '<a href="%s">%s</a>' % (self.forum_link, self.forum_label_text)
self.forum_label.setText(txt)
def _create_context_menu(self):
self.plugin_view.setContextMenuPolicy(Qt.ActionsContextMenu)
self.install_action = QAction(QIcon(I('plugins/plugin_upgrade_ok.png')), _('&Install'), self)
self.install_action.setToolTip(_('Install the selected plugin'))
self.install_action.triggered.connect(self._install_clicked)
self.install_action.setEnabled(False)
self.plugin_view.addAction(self.install_action)
self.history_action = QAction(QIcon(I('chapters.png')), _('Version &History'), self)
self.history_action.setToolTip(_('Show history of changes to this plugin'))
self.history_action.triggered.connect(self._history_clicked)
self.history_action.setEnabled(False)
self.plugin_view.addAction(self.history_action)
self.forum_action = QAction(QIcon(I('plugins/mobileread.png')), _('Plugin &Forum Thread'), self)
self.forum_action.triggered.connect(self._forum_label_activated)
self.forum_action.setEnabled(False)
self.plugin_view.addAction(self.forum_action)
sep1 = QAction(self)
sep1.setSeparator(True)
self.plugin_view.addAction(sep1)
self.toggle_enabled_action = QAction(_('Enable/&Disable plugin'), self)
self.toggle_enabled_action.setToolTip(_('Enable or disable this plugin'))
self.toggle_enabled_action.triggered.connect(self._toggle_enabled_clicked)
self.toggle_enabled_action.setEnabled(False)
self.plugin_view.addAction(self.toggle_enabled_action)
self.uninstall_action = QAction(_('&Remove plugin'), self)
self.uninstall_action.setToolTip(_('Uninstall the selected plugin'))
self.uninstall_action.triggered.connect(self._uninstall_clicked)
self.uninstall_action.setEnabled(False)
self.plugin_view.addAction(self.uninstall_action)
sep2 = QAction(self)
sep2.setSeparator(True)
self.plugin_view.addAction(sep2)
self.donate_enabled_action = QAction(QIcon(I('donate.png')), _('Donate to developer'), self)
self.donate_enabled_action.setToolTip(_('Donate to the developer of this plugin'))
self.donate_enabled_action.triggered.connect(self._donate_clicked)
self.donate_enabled_action.setEnabled(False)
self.plugin_view.addAction(self.donate_enabled_action)
sep3 = QAction(self)
sep3.setSeparator(True)
self.plugin_view.addAction(sep3)
self.configure_action = QAction(QIcon(I('config.png')), _('&Customize plugin'), self)
self.configure_action.setToolTip(_('Customize the options for this plugin'))
self.configure_action.triggered.connect(self._configure_clicked)
self.configure_action.setEnabled(False)
self.plugin_view.addAction(self.configure_action)
def _finished(self, *args):
if self.model:
            # Wrap in list() so len() works whether filter() returns a list
            # (Python 2) or an iterator (Python 3).
            update_plugins = list(filter(filter_upgradeable_plugins, self.model.display_plugins))
self.gui.recalc_update_label(len(update_plugins))
def _plugin_current_changed(self, current, previous):
if current.isValid():
actual_idx = self.proxy_model.mapToSource(current)
display_plugin = self.model.display_plugins[actual_idx.row()]
self.description.setText(display_plugin.description)
self.forum_link = display_plugin.forum_link
self.zip_url = display_plugin.zip_url
self.forum_action.setEnabled(bool(self.forum_link))
self.install_button.setEnabled(display_plugin.is_valid_to_install())
self.install_action.setEnabled(self.install_button.isEnabled())
self.uninstall_action.setEnabled(display_plugin.is_installed())
self.history_action.setEnabled(display_plugin.has_changelog)
self.configure_button.setEnabled(display_plugin.is_installed())
self.configure_action.setEnabled(self.configure_button.isEnabled())
self.toggle_enabled_action.setEnabled(display_plugin.is_installed())
self.donate_enabled_action.setEnabled(bool(display_plugin.donation_link))
else:
self.description.setText('')
self.forum_link = None
self.zip_url = None
self.forum_action.setEnabled(False)
self.install_button.setEnabled(False)
self.install_action.setEnabled(False)
self.uninstall_action.setEnabled(False)
self.history_action.setEnabled(False)
self.configure_button.setEnabled(False)
self.configure_action.setEnabled(False)
self.toggle_enabled_action.setEnabled(False)
self.donate_enabled_action.setEnabled(False)
self.update_forum_label()
def _donate_clicked(self):
plugin = self._selected_display_plugin()
if plugin and plugin.donation_link:
open_url(QUrl(plugin.donation_link))
def _select_and_focus_view(self, change_selection=True):
if change_selection and self.plugin_view.model().rowCount() > 0:
self.plugin_view.selectRow(0)
else:
idx = self.plugin_view.selectionModel().currentIndex()
self._plugin_current_changed(idx, 0)
self.plugin_view.setFocus()
def _filter_combo_changed(self, idx):
self.filter_by_name_lineedit.setText("") # clear the name filter text when a different group was selected
self.proxy_model.set_filter_criteria(idx)
if idx == FILTER_NOT_INSTALLED:
self.plugin_view.sortByColumn(5, Qt.DescendingOrder)
else:
self.plugin_view.sortByColumn(0, Qt.AscendingOrder)
self._select_and_focus_view()
def _filter_name_lineedit_changed(self, text):
self.proxy_model.set_filter_text(text) # set the filter text for filterAcceptsRow
def _forum_label_activated(self):
if self.forum_link:
open_url(QUrl(self.forum_link))
def _selected_display_plugin(self):
idx = self.plugin_view.selectionModel().currentIndex()
actual_idx = self.proxy_model.mapToSource(idx)
return self.model.display_plugins[actual_idx.row()]
def _uninstall_plugin(self, name_to_remove):
if DEBUG:
prints('Removing plugin: ', name_to_remove)
remove_plugin(name_to_remove)
# Make sure that any other plugins that required this plugin
# to be uninstalled first have the requirement removed
for display_plugin in self.model.display_plugins:
# Make sure we update the status and display of the
# plugin we just uninstalled
if name_to_remove in display_plugin.uninstall_plugins:
if DEBUG:
prints('Removing uninstall dependency for: ', display_plugin.name)
display_plugin.uninstall_plugins.remove(name_to_remove)
if display_plugin.name == name_to_remove:
if DEBUG:
prints('Resetting plugin to uninstalled status: ', display_plugin.name)
display_plugin.installed_version = None
display_plugin.plugin = None
display_plugin.uninstall_plugins = []
if self.proxy_model.filter_criteria not in [FILTER_INSTALLED, FILTER_UPDATE_AVAILABLE]:
self.model.refresh_plugin(display_plugin)
def _uninstall_clicked(self):
display_plugin = self._selected_display_plugin()
if not question_dialog(self, _('Are you sure?'), '<p>'+
_('Are you sure you want to uninstall the <b>%s</b> plugin?')%display_plugin.name,
show_copy_button=False):
return
self._uninstall_plugin(display_plugin.name)
if self.proxy_model.filter_criteria in [FILTER_INSTALLED, FILTER_UPDATE_AVAILABLE]:
self.model.reset()
self._select_and_focus_view()
else:
self._select_and_focus_view(change_selection=False)
def _install_clicked(self):
display_plugin = self._selected_display_plugin()
if not question_dialog(self, _('Install %s')%display_plugin.name, '<p>' +
_('Installing plugins is a <b>security risk</b>. '
                'Plugins can contain viruses or malware. '
                'Only install a plugin if you got it from a trusted source.'
' Are you sure you want to proceed?'),
show_copy_button=False):
return
if display_plugin.uninstall_plugins:
uninstall_names = list(display_plugin.uninstall_plugins)
if DEBUG:
prints('Uninstalling plugin: ', ', '.join(uninstall_names))
for name_to_remove in uninstall_names:
self._uninstall_plugin(name_to_remove)
plugin_zip_url = display_plugin.zip_url
if DEBUG:
prints('Downloading plugin zip attachment: ', plugin_zip_url)
self.gui.status_bar.showMessage(_('Downloading plugin zip attachment: %s') % plugin_zip_url)
zip_path = self._download_zip(plugin_zip_url)
if DEBUG:
prints('Installing plugin: ', zip_path)
self.gui.status_bar.showMessage(_('Installing plugin: %s') % zip_path)
do_restart = False
try:
try:
plugin = add_plugin(zip_path)
except NameConflict as e:
return error_dialog(self.gui, _('Already exists'),
unicode(e), show=True)
# Check for any toolbars to add to.
widget = ConfigWidget(self.gui)
widget.gui = self.gui
widget.check_for_add_to_toolbars(plugin)
self.gui.status_bar.showMessage(_('Plugin installed: %s') % display_plugin.name)
d = info_dialog(self.gui, _('Success'),
_('Plugin <b>{0}</b> successfully installed under <b>'
' {1} plugins</b>. You may have to restart calibre '
'for the plugin to take effect.').format(plugin.name, plugin.type),
show_copy_button=False)
b = d.bb.addButton(_('Restart calibre now'), d.bb.AcceptRole)
b.setIcon(QIcon(I('lt.png')))
d.do_restart = False
def rf():
d.do_restart = True
b.clicked.connect(rf)
d.set_details('')
d.exec_()
b.clicked.disconnect()
do_restart = d.do_restart
display_plugin.plugin = plugin
# We cannot read the 'actual' version information as the plugin will not be loaded yet
display_plugin.installed_version = display_plugin.available_version
except:
if DEBUG:
prints('ERROR occurred while installing plugin: %s'%display_plugin.name)
traceback.print_exc()
error_dialog(self.gui, _('Install Plugin Failed'),
_('A problem occurred while installing this plugin.'
' This plugin will now be uninstalled.'
' Please post the error message in details below into'
' the forum thread for this plugin and restart Calibre.'),
det_msg=traceback.format_exc(), show=True)
if DEBUG:
prints('Due to error now uninstalling plugin: %s'%display_plugin.name)
remove_plugin(display_plugin.name)
display_plugin.plugin = None
display_plugin.uninstall_plugins = []
if self.proxy_model.filter_criteria in [FILTER_NOT_INSTALLED, FILTER_UPDATE_AVAILABLE]:
self.model.reset()
self._select_and_focus_view()
else:
self.model.refresh_plugin(display_plugin)
self._select_and_focus_view(change_selection=False)
if do_restart:
self.do_restart = True
self.accept()
def _history_clicked(self):
display_plugin = self._selected_display_plugin()
text = self._read_version_history_html(display_plugin.forum_link)
if text:
dlg = VersionHistoryDialog(self, display_plugin.name, text)
dlg.exec_()
else:
return error_dialog(self, _('Version history missing'),
_('Unable to find the version history for %s')%display_plugin.name,
show=True)
def _configure_clicked(self):
display_plugin = self._selected_display_plugin()
plugin = display_plugin.plugin
if not plugin.is_customizable():
return info_dialog(self, _('Plugin not customizable'),
_('Plugin: %s does not need customization')%plugin.name, show=True)
from calibre.customize import InterfaceActionBase
if isinstance(plugin, InterfaceActionBase) and not getattr(plugin,
'actual_iaction_plugin_loaded', False):
return error_dialog(self, _('Must restart'),
_('You must restart calibre before you can'
' configure the <b>%s</b> plugin')%plugin.name, show=True)
plugin.do_user_config(self.parent())
def _toggle_enabled_clicked(self):
display_plugin = self._selected_display_plugin()
plugin = display_plugin.plugin
if not plugin.can_be_disabled:
return error_dialog(self,_('Plugin cannot be disabled'),
_('The plugin: %s cannot be disabled')%plugin.name, show=True)
if is_disabled(plugin):
enable_plugin(plugin)
else:
disable_plugin(plugin)
self.model.refresh_plugin(display_plugin)
def _read_version_history_html(self, forum_link):
br = browser()
br.set_handle_gzip(True)
try:
raw = br.open_novisit(forum_link).read()
if not raw:
return None
except:
traceback.print_exc()
return None
raw = raw.decode('utf-8', errors='replace')
root = html.fromstring(raw)
spoiler_nodes = root.xpath('//div[@class="smallfont" and strong="Spoiler"]')
for spoiler_node in spoiler_nodes:
try:
if spoiler_node.getprevious() is None:
# This is a spoiler node that has been indented using [INDENT]
# Need to go up to parent div, then previous node to get header
heading_node = spoiler_node.getparent().getprevious()
else:
# This is a spoiler node after a BR tag from the heading
heading_node = spoiler_node.getprevious().getprevious()
if heading_node is None:
continue
if heading_node.text_content().lower().find('version history') != -1:
div_node = spoiler_node.xpath('div')[0]
text = html.tostring(div_node, method='html', encoding=unicode)
                    return re.sub(r'<div\s.*?>', '<div>', text)
except:
if DEBUG:
prints('======= MobileRead Parse Error =======')
traceback.print_exc()
prints(html.tostring(spoiler_node))
return None
def _download_zip(self, plugin_zip_url):
from calibre.ptempfile import PersistentTemporaryFile
br = browser(user_agent='%s %s' % (__appname__, __version__))
raw = br.open_novisit(plugin_zip_url).read()
with PersistentTemporaryFile('.zip') as pt:
pt.write(raw)
return pt.name
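# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of calibre). It shows,
# on a hypothetical approximation of the MobileRead forum markup, the spoiler
# xpath used by _read_version_history_html() above; the HTML fragment is an
# assumption, not a capture of the live site. It reuses this module's lxml
# `html` import and only runs when the module is executed directly.
if __name__ == '__main__':
    _page = html.fromstring(
        '<div><b>Version History</b><br/>'
        '<div class="smallfont"><strong>Spoiler</strong>'
        '<div>1.0.1 - fixed an update bug</div></div></div>')
    for _spoiler in _page.xpath('//div[@class="smallfont" and strong="Spoiler"]'):
        # The heading sits two siblings back (past the <br/>), matching the
        # "spoiler node after a BR tag" branch above.
        _heading = _spoiler.getprevious().getprevious()
        print('%s -> %s' % (_heading.text_content(),
                            html.tostring(_spoiler.xpath('div')[0])))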
| gpl-3.0 |
skosukhin/spack | var/spack/repos/builtin/packages/r-alsace/package.py | 1 | 2082 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAlsace(RPackage):
"""Alternating Least Squares (or Multivariate Curve Resolution)
for analytical chemical data, in particular hyphenated data where
the first direction is a retention time axis, and the second a
spectral axis. Package builds on the basic als function from the
ALS package and adds functionality for high-throughput analysis,
including definition of time windows, clustering of profiles,
retention time correction, etcetera."""
homepage = "https://www.bioconductor.org/packages/alsace/"
url = "https://git.bioconductor.org/packages/alsace"
version('1.12.0', git='https://git.bioconductor.org/packages/alsace', commit='1364c65bbff05786d05c02799fd44fd57748fae3')
depends_on('r-als', type=('build', 'run'))
depends_on('r-ptw', type=('build', 'run'))
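    # Typical usage, assuming a configured Spack installation (an illustrative
    # invocation, not taken from the package's own docs):
    #     spack install r-alsace@1.12.0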
| lgpl-2.1 |
KaranToor/MA450 | google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/gs/test_resumable_uploads.py | 25 | 25809 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests of Google Cloud Storage resumable uploads.
"""
import StringIO
import errno
import random
import os
import time
import boto
from boto import storage_uri
from boto.gs.resumable_upload_handler import ResumableUploadHandler
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from cb_test_harness import CallbackTestHarness
from tests.integration.gs.testcase import GSTestCase
SMALL_KEY_SIZE = 2 * 1024 # 2 KB.
LARGE_KEY_SIZE = 500 * 1024 # 500 KB.
LARGEST_KEY_SIZE = 1024 * 1024 # 1 MB.
class ResumableUploadTests(GSTestCase):
"""Resumable upload test suite."""
def build_input_file(self, size):
buf = []
# I manually construct the random data here instead of calling
# os.urandom() because I want to constrain the range of data (in
        # this case to '0'..'9') so the test
# code can easily overwrite part of the StringIO file with
# known-to-be-different values.
for i in range(size):
buf.append(str(random.randint(0, 9)))
file_as_string = ''.join(buf)
return (file_as_string, StringIO.StringIO(file_as_string))
def make_small_file(self):
return self.build_input_file(SMALL_KEY_SIZE)
def make_large_file(self):
return self.build_input_file(LARGE_KEY_SIZE)
def make_tracker_file(self, tmpdir=None):
if not tmpdir:
tmpdir = self._MakeTempDir()
tracker_file = os.path.join(tmpdir, 'tracker')
return tracker_file
def test_non_resumable_upload(self):
"""
Tests that non-resumable uploads work
"""
small_src_file_as_string, small_src_file = self.make_small_file()
        # Seek to end in case it's the first test.
small_src_file.seek(0, os.SEEK_END)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(small_src_file)
self.fail("should fail as need to rewind the filepointer")
except AttributeError:
pass
# Now try calling with a proper rewind.
dst_key.set_contents_from_file(small_src_file, rewind=True)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_upload_without_persistent_tracker(self):
"""
Tests a single resumable upload, with no tracker URI persistence
"""
res_upload_handler = ResumableUploadHandler()
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_failed_upload_with_persistent_tracker(self):
"""
Tests that failed resumable upload leaves a correct tracker URI file
"""
harness = CallbackTestHarness()
tracker_file_name = self.make_tracker_file()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=0)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException as e:
# We'll get a ResumableUploadException at this point because
# of CallbackTestHarness (above). Check that the tracker file was
# created correctly.
self.assertEqual(e.disposition,
ResumableTransferDisposition.ABORT_CUR_PROCESS)
self.assertTrue(os.path.exists(tracker_file_name))
f = open(tracker_file_name)
uri_from_file = f.readline().strip()
f.close()
self.assertEqual(uri_from_file,
res_upload_handler.get_tracker_uri())
def test_retryable_exception_recovery(self):
"""
Tests handling of a retryable exception
"""
# Test one of the RETRYABLE_EXCEPTIONS.
exception = ResumableUploadHandler.RETRYABLE_EXCEPTIONS[0]
harness = CallbackTestHarness(exception=exception)
res_upload_handler = ResumableUploadHandler(num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_broken_pipe_recovery(self):
"""
Tests handling of a Broken Pipe (which interacts with an httplib bug)
"""
exception = IOError(errno.EPIPE, "Broken pipe")
harness = CallbackTestHarness(exception=exception)
res_upload_handler = ResumableUploadHandler(num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_non_retryable_exception_handling(self):
"""
Tests a resumable upload that fails with a non-retryable exception
"""
harness = CallbackTestHarness(
exception=OSError(errno.EACCES, 'Permission denied'))
res_upload_handler = ResumableUploadHandler(num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected OSError')
except OSError as e:
# Ensure the error was re-raised.
self.assertEqual(e.errno, 13)
def test_failed_and_restarted_upload_with_persistent_tracker(self):
"""
Tests resumable upload that fails once and then completes, with tracker
file
"""
harness = CallbackTestHarness()
tracker_file_name = self.make_tracker_file()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
# Ensure tracker file deleted.
self.assertFalse(os.path.exists(tracker_file_name))
def test_multiple_in_process_failures_then_succeed(self):
"""
Tests resumable upload that fails twice in one process, then completes
"""
res_upload_handler = ResumableUploadHandler(num_retries=3)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_multiple_in_process_failures_then_succeed_with_tracker_file(self):
"""
Tests resumable upload that fails completely in one process,
then when restarted completes, using a tracker file
"""
# Set up test harness that causes more failures than a single
# ResumableUploadHandler instance will handle, writing enough data
# before the first failure that some of it survives that process run.
harness = CallbackTestHarness(
fail_after_n_bytes=LARGE_KEY_SIZE/2, num_times_to_fail=2)
tracker_file_name = self.make_tracker_file()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=1)
larger_src_file_as_string, larger_src_file = self.make_large_file()
larger_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException as e:
self.assertEqual(e.disposition,
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Ensure a tracker file survived.
self.assertTrue(os.path.exists(tracker_file_name))
# Try it one more time; this time should succeed.
larger_src_file.seek(0)
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.assertEqual(LARGE_KEY_SIZE, dst_key.size)
self.assertEqual(larger_src_file_as_string,
dst_key.get_contents_as_string())
self.assertFalse(os.path.exists(tracker_file_name))
# Ensure some of the file was uploaded both before and after failure.
self.assertTrue(len(harness.transferred_seq_before_first_failure) > 1
and
len(harness.transferred_seq_after_first_failure) > 1)
    def test_upload_with_initial_partial_upload_before_failure(self):
"""
Tests resumable upload that successfully uploads some content
before it fails, then restarts and completes
"""
# Set up harness to fail upload after several hundred KB so upload
# server will have saved something before we retry.
harness = CallbackTestHarness(
fail_after_n_bytes=LARGE_KEY_SIZE/2)
res_upload_handler = ResumableUploadHandler(num_retries=1)
larger_src_file_as_string, larger_src_file = self.make_large_file()
larger_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(LARGE_KEY_SIZE, dst_key.size)
self.assertEqual(larger_src_file_as_string,
dst_key.get_contents_as_string())
# Ensure some of the file was uploaded both before and after failure.
self.assertTrue(len(harness.transferred_seq_before_first_failure) > 1
and
len(harness.transferred_seq_after_first_failure) > 1)
def test_empty_file_upload(self):
"""
Tests uploading an empty file (exercises boundary conditions).
"""
res_upload_handler = ResumableUploadHandler()
empty_src_file = StringIO.StringIO('')
empty_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
empty_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(0, dst_key.size)
def test_upload_retains_metadata(self):
"""
Tests that resumable upload correctly sets passed metadata
"""
res_upload_handler = ResumableUploadHandler()
headers = {'Content-Type' : 'text/plain', 'x-goog-meta-abc' : 'my meta',
'x-goog-acl' : 'public-read'}
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, headers=headers,
res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
dst_key.open_read()
self.assertEqual('text/plain', dst_key.content_type)
self.assertTrue('abc' in dst_key.metadata)
self.assertEqual('my meta', str(dst_key.metadata['abc']))
acl = dst_key.get_acl()
for entry in acl.entries.entry_list:
if str(entry.scope) == '<AllUsers>':
                # Check the permission on the entry we actually matched.
                self.assertEqual('READ', str(entry.permission))
return
self.fail('No <AllUsers> scope found')
def test_upload_with_file_size_change_between_starts(self):
"""
        Tests resumable upload on a file that changes size between the initial
upload start and restart
"""
harness = CallbackTestHarness(
fail_after_n_bytes=LARGE_KEY_SIZE/2)
tracker_file_name = self.make_tracker_file()
# Set up first process' ResumableUploadHandler not to do any
# retries (initial upload request will establish expected size to
# upload server).
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=0)
larger_src_file_as_string, larger_src_file = self.make_large_file()
larger_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException as e:
# First abort (from harness-forced failure) should be
# ABORT_CUR_PROCESS.
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Ensure a tracker file survived.
self.assertTrue(os.path.exists(tracker_file_name))
# Try it again, this time with different size source file.
# Wait 1 second between retry attempts, to give upload server a
# chance to save state so it can respond to changed file size with
# 500 response in the next attempt.
time.sleep(1)
try:
largest_src_file = self.build_input_file(LARGEST_KEY_SIZE)[1]
largest_src_file.seek(0)
dst_key.set_contents_from_file(
largest_src_file, res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException as e:
# This abort should be a hard abort (file size changing during
# transfer).
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(e.message.find('file size changed'), -1, e.message)
def test_upload_with_file_size_change_during_upload(self):
"""
        Tests resumable upload on a file that changes size while the upload
        is in progress
"""
# Create a file we can change during the upload.
test_file_size = 500 * 1024 # 500 KB.
test_file = self.build_input_file(test_file_size)[1]
harness = CallbackTestHarness(fp_to_change=test_file,
fp_change_pos=test_file_size)
res_upload_handler = ResumableUploadHandler(num_retries=1)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
test_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException as e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(
e.message.find('File changed during upload'), -1)
def test_upload_with_file_content_change_during_upload(self):
"""
Tests resumable upload on a file that changes one byte of content
        (so, size stays the same) while the upload is in progress.
"""
def Execute():
res_upload_handler = ResumableUploadHandler(num_retries=1)
dst_key = self._MakeKey(set_contents=False)
bucket_uri = storage_uri('gs://' + dst_key.bucket.name)
dst_key_uri = bucket_uri.clone_replace_name(dst_key.name)
try:
dst_key.set_contents_from_file(
test_file, cb=harness.call,
res_upload_handler=res_upload_handler)
return False
except ResumableUploadException as e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
# Ensure the file size didn't change.
test_file.seek(0, os.SEEK_END)
self.assertEqual(test_file_size, test_file.tell())
self.assertNotEqual(
e.message.find('md5 signature doesn\'t match etag'), -1)
# Ensure the bad data wasn't left around.
try:
dst_key_uri.get_key()
self.fail('Did not get expected InvalidUriError')
except InvalidUriError as e:
pass
return True
test_file_size = 500 * 1024 # 500 KB
# The sizes of all the blocks written, except the final block, must be a
# multiple of 256K bytes. We need to trigger a failure after the first
# 256K bytes have been uploaded so that at least one block of data is
# written on the server.
# See https://developers.google.com/storage/docs/concepts-techniques#resumable
# for more information about chunking of uploads.
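        # Worked numbers for the values chosen below (exposition only): with
        # 256 KB blocks, failing after 300 KB means exactly one full block is
        # committed server-side before the harness raises:
        #   (300 * 1024) // (256 * 1024) == 1 committed block (262144 bytes)
        #   (300 * 1024) %  (256 * 1024) == 45056 bytes re-sent on resume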
n_bytes = 300 * 1024 # 300 KB
delay = 0
# First, try the test without a delay. If that fails, try it with a
# 15-second delay. The first attempt may fail to recognize that the
# server has a block if the server hasn't yet committed that block
# when we resume the transfer. This would cause a restarted upload
# instead of a resumed upload.
for attempt in range(2):
test_file = self.build_input_file(test_file_size)[1]
harness = CallbackTestHarness(
fail_after_n_bytes=n_bytes,
fp_to_change=test_file,
# Write to byte 1, as the CallbackTestHarness writes
# 3 bytes. This will result in the data on the server
# being different than the local file.
fp_change_pos=1,
delay_after_change=delay)
if Execute():
break
if (attempt == 0 and
0 in harness.transferred_seq_after_first_failure):
# We can confirm the upload was restarted instead of resumed
# by determining if there is an entry of 0 in the
# transferred_seq_after_first_failure list.
# In that case, try again with a 15 second delay.
delay = 15
continue
self.fail('Did not get expected ResumableUploadException')
def test_upload_with_content_length_header_set(self):
"""
Tests resumable upload on a file when the user supplies a
Content-Length header. This is used by gsutil, for example,
to set the content length when gzipping a file.
"""
res_upload_handler = ResumableUploadHandler()
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler,
headers={'Content-Length' : SMALL_KEY_SIZE})
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException as e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(
e.message.find('Attempt to specify Content-Length header'), -1)
def test_upload_with_syntactically_invalid_tracker_uri(self):
"""
Tests resumable upload with a syntactically invalid tracker URI
"""
tmp_dir = self._MakeTempDir()
syntactically_invalid_tracker_file_name = os.path.join(tmp_dir,
'synt_invalid_uri_tracker')
with open(syntactically_invalid_tracker_file_name, 'w') as f:
f.write('ftp://example.com')
res_upload_handler = ResumableUploadHandler(
tracker_file_name=syntactically_invalid_tracker_file_name)
small_src_file_as_string, small_src_file = self.make_small_file()
        # An error should be printed about the invalid URI, but then the
        # upload should run successfully.
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_upload_with_invalid_upload_id_in_tracker_file(self):
"""
Tests resumable upload with invalid upload ID
"""
invalid_upload_id = ('http://pub.storage.googleapis.com/?upload_id='
'AyzB2Uo74W4EYxyi5dp_-r68jz8rtbvshsv4TX7srJVkJ57CxTY5Dw2')
tmpdir = self._MakeTempDir()
invalid_upload_id_tracker_file_name = os.path.join(tmpdir,
'invalid_upload_id_tracker')
with open(invalid_upload_id_tracker_file_name, 'w') as f:
f.write(invalid_upload_id)
res_upload_handler = ResumableUploadHandler(
tracker_file_name=invalid_upload_id_tracker_file_name)
small_src_file_as_string, small_src_file = self.make_small_file()
        # An error should occur, but then the tracker URI should be
        # regenerated and the upload should succeed.
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
self.assertNotEqual(invalid_upload_id,
res_upload_handler.get_tracker_uri())
def test_upload_with_unwritable_tracker_file(self):
"""
Tests resumable upload with an unwritable tracker file
"""
# Make dir where tracker_file lives temporarily unwritable.
tmp_dir = self._MakeTempDir()
tracker_file_name = self.make_tracker_file(tmp_dir)
save_mod = os.stat(tmp_dir).st_mode
try:
os.chmod(tmp_dir, 0)
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name)
except ResumableUploadException as e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(
e.message.find('Couldn\'t write URI tracker file'), -1)
finally:
# Restore original protection of dir where tracker_file lives.
os.chmod(tmp_dir, save_mod)
| apache-2.0 |
111t8e/h2o-2 | py/testdir_multi_jvm/test_parse_with_cancel.py | 8 | 3409 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_jobs
DELETE_KEYS = True
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_with_cancel(self):
mustWait = 10
importFolderPath = 'standard'
timeoutSecs = 500
csvFilenameList = [
("standard", "covtype.data", 54),
("manyfiles-nflx-gz", "file_1.dat.gz", 378),
("standard", "covtype20x.data", 54),
("manyfiles-nflx-gz", "file_[100-109].dat.gz", 378),
]
# just loop on the same file. If remnants exist and are locked, we will blow up?
# Maybe try to do an inspect to see if either the source key or parse key exist and cause stack traces
for (importFolderPath, csvFilename, response) in csvFilenameList:
# creates csvFilename.hex from file in importFolder dir
csvPathname = importFolderPath + "/" + csvFilename
hex_key = csvFilename + ".hex"
(importResult, importPattern) = h2i.import_only(bucket='home-0xdiag-datasets', path=csvPathname, timeoutSecs=50)
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key,
timeoutSecs=500, noPoll=True, doSummary=False)
job_key = parseResult['job_key']
# give it a little time to start
time.sleep(3)
h2o.nodes[0].jobs_cancel(key=job_key)
# now wait until the job cancels, and we're idle
h2o_jobs.pollWaitJobs(timeoutSecs=30)
elapsed = time.time() - start
print "Cancelled parse completed in", elapsed, "seconds."
h2o.check_sandbox_for_errors()
            # Get a list of keys from StoreView (view=100 is plenty; shouldn't be many,
            # since we putfile rather than import a folder, though importing the whole
            # "standard" folder can add a lot). Find the ones that pattern-match
            # csvFilename and inspect them. There might be none.
storeViewResult = h2o_cmd.runStoreView(timeoutSecs=timeoutSecs, view=100)
keys = storeViewResult['keys']
for k in keys:
keyName = k['key']
print "kevin:", keyName
if csvFilename in keyName:
h2o_cmd.runInspect(key=keyName)
h2o.check_sandbox_for_errors()
# This will tell h2o to delete using the key name from the import file, whatever pattern matches to csvFilename
# we shouldn't have to do this..the import/parse should be able to overwrite without deleting.
# h2i.delete_keys_from_import_result(pattern=csvFilename, importResult=importResult)
# If you cancel a parse, you aren't allowed to reparse the same file or import a directory with that file,
# or cause the key name that the parse would have used, for 5 seconds after the cancel request gets a json
# response
print "Waiting", mustWait, "seconds before next reparse-cancel."
time.sleep(mustWait)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
moutai/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 56 | 3596 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
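# The three lists above are positionally aligned, one entry per learning
# strategy; a small sanity check, added here for exposition:
assert len(params) == len(labels) == len(plot_args)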
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
doksu/TA-centralops | bin/splunklib/client.py | 3 | 143072 | # Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# The purpose of this module is to provide a friendlier domain interface to
# various Splunk endpoints. The approach here is to leverage the binding
# layer to capture endpoint context and provide objects and methods that
# offer simplified access their corresponding endpoints. The design avoids
# caching resource state. From the perspective of this module, the 'policy'
# for caching resource state belongs in the application or a higher level
# framework, and its the purpose of this module to provide simplified
# access to that resource state.
#
# A side note: the objects below that provide helper methods for updating,
# e.g. Entity state, are written so that they may be used in a fluent style.
#
"""The **splunklib.client** module provides a Pythonic interface to the
`Splunk REST API <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTcontents>`_,
allowing you to programmatically access Splunk's resources.
**splunklib.client** wraps a Pythonic layer around the wire-level
binding of the **splunklib.binding** module. The core of the library is the
:class:`Service` class, which encapsulates a connection to the server, and
provides access to the various aspects of Splunk's functionality, which are
exposed via the REST API. Typically you connect to a running Splunk instance
with the :func:`connect` function::
import splunklib.client as client
service = client.connect(host='localhost', port=8089,
username='admin', password='...')
assert isinstance(service, client.Service)
:class:`Service` objects have fields for the various Splunk resources (such as apps,
jobs, saved searches, inputs, and indexes). All of these fields are
:class:`Collection` objects::
appcollection = service.apps
my_app = appcollection.create('my_app')
my_app = appcollection['my_app']
appcollection.delete('my_app')
The individual elements of the collection, in this case *applications*,
are subclasses of :class:`Entity`. An ``Entity`` object has fields for its
attributes, and methods that are specific to each kind of entity. For example::
print my_app['author'] # Or: print my_app.author
my_app.package() # Creates a compressed package of this application
"""
import contextlib
import datetime
import json
import logging
import socket
from datetime import datetime, timedelta
from time import sleep
from splunklib import six
from splunklib.six.moves import urllib
from . import data
from .binding import (AuthenticationError, Context, HTTPError, UrlEncoded,
_encode, _make_cookie_header, _NoAuthenticationToken,
namespace)
from .data import record
__all__ = [
"connect",
"NotSupportedError",
"OperationError",
"IncomparableException",
"Service",
"namespace"
]
PATH_APPS = "apps/local/"
PATH_CAPABILITIES = "authorization/capabilities/"
PATH_CONF = "configs/conf-%s/"
PATH_PROPERTIES = "properties/"
PATH_DEPLOYMENT_CLIENTS = "deployment/client/"
PATH_DEPLOYMENT_TENANTS = "deployment/tenants/"
PATH_DEPLOYMENT_SERVERS = "deployment/server/"
PATH_DEPLOYMENT_SERVERCLASSES = "deployment/serverclass/"
PATH_EVENT_TYPES = "saved/eventtypes/"
PATH_FIRED_ALERTS = "alerts/fired_alerts/"
PATH_INDEXES = "data/indexes/"
PATH_INPUTS = "data/inputs/"
PATH_JOBS = "search/jobs/"
PATH_LOGGER = "/services/server/logger/"
PATH_MESSAGES = "messages/"
PATH_MODULAR_INPUTS = "data/modular-inputs"
PATH_ROLES = "authorization/roles/"
PATH_SAVED_SEARCHES = "saved/searches/"
PATH_STANZA = "configs/conf-%s/%s" # (file, stanza)
PATH_USERS = "authentication/users/"
PATH_RECEIVERS_STREAM = "/services/receivers/stream"
PATH_RECEIVERS_SIMPLE = "/services/receivers/simple"
PATH_STORAGE_PASSWORDS = "storage/passwords"
XNAMEF_ATOM = "{http://www.w3.org/2005/Atom}%s"
XNAME_ENTRY = XNAMEF_ATOM % "entry"
XNAME_CONTENT = XNAMEF_ATOM % "content"
MATCH_ENTRY_CONTENT = "%s/%s/*" % (XNAME_ENTRY, XNAME_CONTENT)
class IllegalOperationException(Exception):
"""Thrown when an operation is not possible on the Splunk instance that a
:class:`Service` object is connected to."""
pass
class IncomparableException(Exception):
"""Thrown when trying to compare objects (using ``==``, ``<``, ``>``, and
so on) of a type that doesn't support it."""
pass
class AmbiguousReferenceException(ValueError):
"""Thrown when the name used to fetch an entity matches more than one entity."""
pass
class InvalidNameException(Exception):
"""Thrown when the specified name contains characters that are not allowed
in Splunk entity names."""
pass
class NoSuchCapability(Exception):
"""Thrown when the capability that has been referred to doesn't exist."""
pass
class OperationError(Exception):
"""Raised for a failed operation, such as a time out."""
pass
class NotSupportedError(Exception):
"""Raised for operations that are not supported on a given object."""
pass
def _trailing(template, *targets):
"""Substring of *template* following all *targets*.
**Example**::
template = "this is a test of the bunnies."
_trailing(template, "is", "est", "the") == " bunnies"
Each target is matched successively in the string, and the string
remaining after the last target is returned. If one of the targets
fails to match, a ValueError is raised.
:param template: Template to extract a trailing string from.
:type template: ``string``
:param targets: Strings to successively match in *template*.
:type targets: list of ``string``s
:return: Trailing string after all targets are matched.
:rtype: ``string``
:raises ValueError: Raised when one of the targets does not match.
"""
s = template
for t in targets:
n = s.find(t)
if n == -1:
raise ValueError("Target " + t + " not found in template.")
s = s[n + len(t):]
return s
# Filter the given state content record according to the given arg list.
def _filter_content(content, *args):
if len(args) > 0:
return record((k, content[k]) for k in args)
return record((k, v) for k, v in six.iteritems(content)
if k not in ['eai:acl', 'eai:attributes', 'type'])
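# For exposition, with a hypothetical content record: given
#     content = {'author': 'admin', 'eai:acl': {'owner': 'admin'}, 'type': 'app'}
# _filter_content(content, 'author') keeps only the 'author' field, while
# _filter_content(content) keeps everything except the 'eai:acl',
# 'eai:attributes' and 'type' noise fields.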
# Construct a resource path from the given base path + resource name
def _path(base, name):
if not base.endswith('/'): base = base + '/'
return base + name
# Load an atom record from the body of the given response
# this will ultimately be sent to an xml ElementTree so we
# should use the xmlcharrefreplace option
def _load_atom(response, match=None):
return data.load(response.body.read()
.decode('utf-8', 'xmlcharrefreplace'), match)
# Load an array of atom entries from the body of the given response
def _load_atom_entries(response):
r = _load_atom(response)
if 'feed' in r:
# Need this to handle a random case in the REST API
if r.feed.get('totalResults') in [0, '0']:
return []
entries = r.feed.get('entry', None)
if entries is None: return None
return entries if isinstance(entries, list) else [entries]
# Unlike most other endpoints, the jobs endpoint does not return
# its state wrapped in another element, but at the top level.
# For example, in XML, it returns <entry>...</entry> instead of
# <feed><entry>...</entry></feed>.
else:
entries = r.get('entry', None)
if entries is None: return None
return entries if isinstance(entries, list) else [entries]
# Load the sid from the body of the given response
def _load_sid(response):
return _load_atom(response).response.sid
# Parse the given atom entry record into a generic entity state record
def _parse_atom_entry(entry):
title = entry.get('title', None)
elink = entry.get('link', [])
elink = elink if isinstance(elink, list) else [elink]
links = record((link.rel, link.href) for link in elink)
# Retrieve entity content values
content = entry.get('content', {})
# Host entry metadata
metadata = _parse_atom_metadata(content)
# Filter some of the noise out of the content record
content = record((k, v) for k, v in six.iteritems(content)
if k not in ['eai:acl', 'eai:attributes'])
if 'type' in content:
if isinstance(content['type'], list):
content['type'] = [t for t in content['type'] if t != 'text/xml']
# Unset type if it was only 'text/xml'
if len(content['type']) == 0:
content.pop('type', None)
# Flatten 1 element list
if len(content['type']) == 1:
content['type'] = content['type'][0]
else:
content.pop('type', None)
return record({
'title': title,
'links': links,
'access': metadata.access,
'fields': metadata.fields,
'content': content,
'updated': entry.get("updated")
})
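# For exposition (a hypothetical entry, not from a live server), an atom
# entry such as
#     {'title': 'my_app',
#      'link': [{'rel': 'alternate', 'href': '/services/apps/local/my_app'}],
#      'content': {'eai:acl': {'owner': 'admin'}, 'author': 'me'}}
# parses to a record with the ACL hoisted out of 'content':
#     {'title': 'my_app',
#      'links': {'alternate': '/services/apps/local/my_app'},
#      'access': {'owner': 'admin'}, 'fields': record(...),
#      'content': {'author': 'me'}, 'updated': None}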
# Parse the metadata fields out of the given atom entry content record
def _parse_atom_metadata(content):
# Hoist access metadata
access = content.get('eai:acl', None)
# Hoist content metadata (and cleanup some naming)
attributes = content.get('eai:attributes', {})
fields = record({
'required': attributes.get('requiredFields', []),
'optional': attributes.get('optionalFields', []),
'wildcard': attributes.get('wildcardFields', [])})
return record({'access': access, 'fields': fields})
# kwargs: scheme, host, port, app, owner, username, password
def connect(**kwargs):
"""This function connects and logs in to a Splunk instance.
This function is a shorthand for :meth:`Service.login`.
The ``connect`` function makes one round trip to the server (for logging in).
:param host: The host name (the default is "localhost").
:type host: ``string``
:param port: The port number (the default is 8089).
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
    :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional).
:type owner: ``string``
:param `app`: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode for the namespace (the default is "user").
:type sharing: "global", "system", "app", or "user"
:param `token`: The current session token (optional). Session tokens can be
shared across multiple service instances.
:type token: ``string``
:param cookie: A session cookie. When provided, you don't need to call :meth:`login`.
This parameter is only supported for Splunk 6.2+.
:type cookie: ``string``
:param autologin: When ``True``, automatically tries to log in again if the
session terminates.
:type autologin: ``boolean``
:param `username`: The Splunk account username, which is used to
authenticate the Splunk instance.
:type username: ``string``
:param `password`: The password for the Splunk account.
:type password: ``string``
:return: An initialized :class:`Service` connection.
**Example**::
import splunklib.client as client
s = client.connect(...)
a = s.apps["my_app"]
...
"""
s = Service(**kwargs)
s.login()
return s
# In preparation for adding Storm support, we added an
# intermediary class between Service and Context. Storm's
# API is not going to be the same as enterprise Splunk's
# API, so we will derive both Service (for enterprise Splunk)
# and StormService for (Splunk Storm) from _BaseService, and
# put any shared behavior on it.
class _BaseService(Context):
pass
class Service(_BaseService):
"""A Pythonic binding to Splunk instances.
A :class:`Service` represents a binding to a Splunk instance on an
HTTP or HTTPS port. It handles the details of authentication, wire
formats, and wraps the REST API endpoints into something more
Pythonic. All of the low-level operations on the instance from
:class:`splunklib.binding.Context` are also available in case you need
to do something beyond what is provided by this class.
After creating a ``Service`` object, you must call its :meth:`login`
method before you can issue requests to Splunk.
Alternately, use the :func:`connect` function to create an already
authenticated :class:`Service` object, or provide a session token
when creating the :class:`Service` object explicitly (the same
token may be shared by multiple :class:`Service` objects).
:param host: The host name (the default is "localhost").
:type host: ``string``
:param port: The port number (the default is 8089).
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
    :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional; use "-" for wildcard).
:type owner: ``string``
:param `app`: The app context of the namespace (optional; use "-" for wildcard).
:type app: ``string``
:param `token`: The current session token (optional). Session tokens can be
shared across multiple service instances.
:type token: ``string``
:param cookie: A session cookie. When provided, you don't need to call :meth:`login`.
This parameter is only supported for Splunk 6.2+.
:type cookie: ``string``
:param `username`: The Splunk account username, which is used to
authenticate the Splunk instance.
:type username: ``string``
:param `password`: The password, which is used to authenticate the Splunk
instance.
:type password: ``string``
:return: A :class:`Service` instance.
**Example**::
import splunklib.client as client
s = client.Service(username="boris", password="natasha", ...)
s.login()
# Or equivalently
s = client.connect(username="boris", password="natasha")
# Or if you already have a session token
s = client.Service(token="atg232342aa34324a")
# Or if you already have a valid cookie
s = client.Service(cookie="splunkd_8089=...")
"""
def __init__(self, **kwargs):
super(Service, self).__init__(**kwargs)
self._splunk_version = None
@property
def apps(self):
"""Returns the collection of applications that are installed on this instance of Splunk.
:return: A :class:`Collection` of :class:`Application` entities.
"""
return Collection(self, PATH_APPS, item=Application)
@property
def confs(self):
"""Returns the collection of configuration files for this Splunk instance.
:return: A :class:`Configurations` collection of
:class:`ConfigurationFile` entities.
"""
return Configurations(self)
@property
def capabilities(self):
"""Returns the list of system capabilities.
:return: A ``list`` of capabilities.
"""
response = self.get(PATH_CAPABILITIES)
return _load_atom(response, MATCH_ENTRY_CONTENT).capabilities
@property
def event_types(self):
"""Returns the collection of event types defined in this Splunk instance.
:return: A :class:`Collection` of event types.
"""
return Collection(self, PATH_EVENT_TYPES)
@property
def fired_alerts(self):
"""Returns the collection of alerts that have been fired on the Splunk
instance, grouped by saved search.
:return: A :class:`Collection` of :class:`AlertGroup` entities.
"""
return Collection(self, PATH_FIRED_ALERTS, item=AlertGroup)
@property
def indexes(self):
"""Returns the collection of indexes for this Splunk instance.
:return: An :class:`Indexes` collection of :class:`Index` entities.
"""
return Indexes(self, PATH_INDEXES, item=Index)
@property
def info(self):
"""Returns the information about this instance of Splunk.
:return: The system information, as key-value pairs.
:rtype: ``dict``
"""
response = self.get("/services/server/info")
return _filter_content(_load_atom(response, MATCH_ENTRY_CONTENT))
def input(self, path, kind=None):
"""Retrieves an input by path, and optionally kind.
:return: An :class:`Input` object.
"""
return Input(self, path, kind=kind).refresh()
@property
def inputs(self):
"""Returns the collection of inputs configured on this Splunk instance.
:return: An :class:`Inputs` collection of :class:`Input` entities.
"""
return Inputs(self)
def job(self, sid):
"""Retrieves a search job by sid.
:return: A :class:`Job` object.
"""
return Job(self, sid).refresh()
@property
def jobs(self):
"""Returns the collection of current search jobs.
:return: A :class:`Jobs` collection of :class:`Job` entities.
"""
return Jobs(self)
@property
def loggers(self):
"""Returns the collection of logging level categories and their status.
:return: A :class:`Loggers` collection of logging levels.
"""
return Loggers(self)
@property
def messages(self):
"""Returns the collection of service messages.
:return: A :class:`Collection` of :class:`Message` entities.
"""
return Collection(self, PATH_MESSAGES, item=Message)
@property
def modular_input_kinds(self):
"""Returns the collection of the modular input kinds on this Splunk instance.
:return: A :class:`ReadOnlyCollection` of :class:`ModularInputKind` entities.
"""
if self.splunk_version >= (5,):
return ReadOnlyCollection(self, PATH_MODULAR_INPUTS, item=ModularInputKind)
else:
raise IllegalOperationException("Modular inputs are not supported before Splunk version 5.")
@property
def storage_passwords(self):
"""Returns the collection of the storage passwords on this Splunk instance.
:return: A :class:`ReadOnlyCollection` of :class:`StoragePasswords` entities.
"""
return StoragePasswords(self)
# kwargs: enable_lookups, reload_macros, parse_only, output_mode
def parse(self, query, **kwargs):
"""Parses a search query and returns a semantic map of the search.
:param query: The search query to parse.
:type query: ``string``
:param kwargs: Arguments to pass to the ``search/parser`` endpoint
(optional). Valid arguments are:
* "enable_lookups" (``boolean``): If ``True``, performs reverse lookups
to expand the search expression.
* "output_mode" (``string``): The output format (XML or JSON).
* "parse_only" (``boolean``): If ``True``, disables the expansion of
search due to evaluation of subsearches, time term expansion,
lookups, tags, eventtypes, and sourcetype alias.
* "reload_macros" (``boolean``): If ``True``, reloads macro
definitions from macros.conf.
:type kwargs: ``dict``
:return: A semantic map of the parsed search query.
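**Example** (a minimal sketch; the query and arguments shown are
illustrative)::
import splunklib.client as client
s = client.connect(...)
response = s.parse("search error | head 5", parse_only=True)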
"""
return self.get("search/parser", q=query, **kwargs)
def restart(self, timeout=None):
"""Restarts this Splunk instance.
The service is unavailable until it has successfully restarted.
If a *timeout* value is specified, ``restart`` blocks until the service
resumes or the timeout period has been exceeded. Otherwise, ``restart`` returns
immediately.
:param timeout: A timeout period, in seconds.
:type timeout: ``integer``
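**Example** (the timeout value shown is illustrative)::
import splunklib.client as client
s = client.connect(...)
s.restart(timeout=300) # Block for up to five minutes.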
"""
msg = { "value": "Restart requested by " + self.username + "via the Splunk SDK for Python"}
# This message will be deleted once the server actually restarts.
self.messages.create(name="restart_required", **msg)
result = self.post("/services/server/control/restart")
if timeout is None:
return result
start = datetime.now()
diff = timedelta(seconds=timeout)
while datetime.now() - start < diff:
try:
self.login()
if not self.restart_required:
return result
except Exception:
sleep(1)
raise Exception("Operation timed out.")
@property
def restart_required(self):
"""Indicates whether splunkd is in a state that requires a restart.
:return: A ``boolean`` that indicates whether a restart is required.
"""
response = self.get("messages").body.read()
messages = data.load(response)['feed']
if 'entry' not in messages:
result = False
else:
if isinstance(messages['entry'], dict):
titles = [messages['entry']['title']]
else:
titles = [x['title'] for x in messages['entry']]
result = 'restart_required' in titles
return result
@property
def roles(self):
"""Returns the collection of user roles.
:return: A :class:`Roles` collection of :class:`Role` entities.
"""
return Roles(self)
def search(self, query, **kwargs):
"""Runs a search using a search query and any optional arguments you
provide, and returns a `Job` object representing the search.
:param query: A search query.
:type query: ``string``
:param kwargs: Arguments for the search (optional):
* "output_mode" (``string``): Specifies the output format of the
results.
* "earliest_time" (``string``): Specifies the earliest time in the
time range to
search. The time string can be a UTC time (with fractional
seconds), a relative time specifier (to now), or a formatted
time string.
* "latest_time" (``string``): Specifies the latest time in the time
range to
search. The time string can be a UTC time (with fractional
seconds), a relative time specifier (to now), or a formatted
time string.
* "rf" (``string``): Specifies one or more fields to add to the
search.
:type kwargs: ``dict``
:rtype: :class:`Job`
:returns: An object representing the created job.
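**Example** (a minimal sketch; the query and time range are
illustrative)::
import splunklib.client as client
s = client.connect(...)
job = s.search("search * | head 5", earliest_time="-1h")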
"""
return self.jobs.create(query, **kwargs)
@property
def saved_searches(self):
"""Returns the collection of saved searches.
:return: A :class:`SavedSearches` collection of :class:`SavedSearch`
entities.
"""
return SavedSearches(self)
@property
def settings(self):
"""Returns the configuration settings for this instance of Splunk.
:return: A :class:`Settings` object containing configuration settings.
"""
return Settings(self)
@property
def splunk_version(self):
"""Returns the version of the splunkd instance this object is attached
to.
The version is returned as a tuple of the version components as
integers (for example, `(4,3,3)` or `(5,)`).
:return: A ``tuple`` of ``integers``.
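**Example** (tuple comparison; the version guard shown is
illustrative)::
if service.splunk_version >= (5,):
... # Use features introduced in Splunk 5.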
"""
if self._splunk_version is None:
self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')])
return self._splunk_version
@property
def kvstore(self):
"""Returns the collection of KV Store collections.
:return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities.
"""
return KVStoreCollections(self)
@property
def users(self):
"""Returns the collection of users.
:return: A :class:`Users` collection of :class:`User` entities.
"""
return Users(self)
class Endpoint(object):
"""This class represents individual Splunk resources in the Splunk REST API.
An ``Endpoint`` object represents a URI, such as ``/services/saved/searches``.
This class provides the common functionality of :class:`Collection` and
:class:`Entity` (essentially HTTP GET and POST methods).
"""
def __init__(self, service, path):
self.service = service
self.path = path if path.endswith('/') else path + '/'
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
This method is named to match the HTTP method. This method makes at least
one roundtrip to the server, one additional round trip for
each 303 status returned, plus at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method takes a
default namespace from the :class:`Service` object for this :class:`Endpoint`.
All other keyword arguments are included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Service`` is not logged in.
:raises HTTPError: Raised when an error in the request occurs.
:param path_segment: A path segment relative to this endpoint.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode for the namespace (optional).
:type sharing: "global", "system", "app", or "user"
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
import splunklib.client as client
s = client.connect(...)
apps = s.apps
apps.get() == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
apps.get('nonexistent/path') # raises HTTPError
s.logout()
apps.get() # raises AuthenticationError
"""
# self.path to the Endpoint is relative in the SDK, so passing
# owner, app, sharing, etc. along will produce the correct
# namespace in the final request.
if path_segment.startswith('/'):
path = path_segment
else:
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
# That doesn't work, because self.path may be UrlEncoded.
return self.service.get(path,
owner=owner, app=app, sharing=sharing,
**query)
def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a POST operation on the path segment relative to this endpoint.
This method is named to match the HTTP method. This method makes at least
one roundtrip to the server, one additional round trip for
each 303 status returned, plus at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method takes a
default namespace from the :class:`Service` object for this :class:`Endpoint`.
All other keyword arguments are included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Service`` is not logged in.
:raises HTTPError: Raised when an error in the request occurs.
:param path_segment: A path segment relative to this endpoint.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
import splunklib.client as client
s = client.connect(...)
apps = s.apps
apps.post(name='boris') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '2908'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 18:34:50 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'Created',
'status': 201}
apps.get('nonexistent/path') # raises HTTPError
s.logout()
apps.get() # raises AuthenticationError
"""
if path_segment.startswith('/'):
path = path_segment
else:
path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing)
return self.service.post(path, owner=owner, app=app, sharing=sharing, **query)
# kwargs: path, app, owner, sharing, state
class Entity(Endpoint):
"""This class is a base class for Splunk entities in the REST API, such as
saved searches, jobs, indexes, and inputs.
``Entity`` provides the majority of functionality required by entities.
Subclasses only implement the special cases for individual entities.
For example, for deployment serverclasses, the subclass makes whitelists and
blacklists into Python lists.
An ``Entity`` is addressed like a dictionary, with a few extensions,
so the following all work::
ent['email.action']
ent['disabled']
ent['whitelist']
Many endpoints have values that share a prefix, such as
``email.to``, ``email.action``, and ``email.subject``. You can access
each such field by its full name, or use the key ``email`` to get a
dictionary of all the subelements. That is, ``ent['email']`` returns a
dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
there are multiple levels of dots, each level is made into a
subdictionary, so ``email.body.salutation`` can be accessed at
``ent['email']['body']['salutation']`` or
``ent['email.body.salutation']``.
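For example, with a hypothetical entity ``ent`` that has an
``email.body.salutation`` field, the following are equivalent::
ent['email.body.salutation']
ent['email']['body']['salutation']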
You can also access the fields as though they were the fields of a Python
object, as in::
ent.email.action
ent.disabled
ent.whitelist
However, because some of the field names are not valid Python identifiers,
the dictionary-like syntax is preferable.
The state of an :class:`Entity` object is cached, so accessing a field
does not contact the server. If you think the values on the
server have changed, call the :meth:`Entity.refresh` method.
"""
# Not every endpoint in the API is an Entity or a Collection. For
# example, a saved search at saved/searches/{name} has an additional
# method saved/searches/{name}/scheduled_times, but this isn't an
# entity in its own right. In these cases, subclasses should
# implement a method that uses the get and post methods inherited
# from Endpoint, calls the _load_atom function (it's elsewhere in
# client.py, but not a method of any object) to read the
# information, and returns the extracted data in a Pythonesque form.
#
# The primary use of subclasses of Entity is to handle specially
# named fields in the Entity. If you only need to provide a default
# value for an optional field, subclass Entity and define a
# dictionary ``defaults``. For instance,::
#
# class Hypothetical(Entity):
# defaults = {'anOptionalField': 'foo',
# 'anotherField': 'bar'}
#
# If you have to do more than provide a default, such as rename or
# actually process values, then define a new method with the
# ``@property`` decorator.
#
# class Hypothetical(Entity):
# @property
# def foobar(self):
# return self.content['foo'] + "-" + self.content["bar"]
# Subclasses can override ``defaults``, the default values for
# optional fields. See above.
defaults = {}
def __init__(self, service, path, **kwargs):
Endpoint.__init__(self, service, path)
self._state = None
if not kwargs.get('skip_refresh', False):
self.refresh(kwargs.get('state', None)) # "Prefresh"
return
def __contains__(self, item):
try:
self[item]
return True
except (KeyError, AttributeError):
return False
def __eq__(self, other):
"""Raises IncomparableException.
Since Entity objects are point-in-time snapshots of state on the
server, no simple definition of equality will suffice beyond
instance equality, and instance equality leads to strange situations
such as::
import splunklib.client as client
c = client.connect(...)
saved_searches = c.saved_searches
x = saved_searches['asearch']
but then ``x != saved_searches['asearch']``, whether or not there was
a change on the server. Rather than try to do something fancy, we
simply declare that equality is undefined for Entities.
Makes no roundtrips to the server.
"""
raise IncomparableException(
"Equality is undefined for objects of class %s" % \
self.__class__.__name__)
def __getattr__(self, key):
# Called when an attribute was not found by the normal method. In this
# case we try to find it in self.content and then self.defaults.
if key in self.state.content:
return self.state.content[key]
elif key in self.defaults:
return self.defaults[key]
else:
raise AttributeError(key)
def __getitem__(self, key):
# getattr attempts to find a field on the object in the normal way,
# then calls __getattr__ if it cannot.
return getattr(self, key)
# Load the Atom entry record from the given response - this is a method
# because the "entry" record varies slightly by entity and this allows
# for a subclass to override and handle any special cases.
def _load_atom_entry(self, response):
elem = _load_atom(response, XNAME_ENTRY)
if isinstance(elem, list):
raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
else:
return elem.entry
# Load the entity state record from the given response
def _load_state(self, response):
entry = self._load_atom_entry(response)
return _parse_atom_entry(entry)
def _run_action(self, path_segment, **kwargs):
"""Run a method and return the content Record from the returned XML.
A method is a relative path from an Entity that is not itself
an Entity. _run_action assumes that the returned XML is an
Atom field containing one Entry, and the contents of Entry is
what should be the return value. This is right in enough cases
to make this method useful.
"""
response = self.get(path_segment, **kwargs)
data = self._load_atom_entry(response)
rec = _parse_atom_entry(data)
return rec.content
def _proper_namespace(self, owner=None, app=None, sharing=None):
"""Produce a namespace sans wildcards for use in entity requests.
This method tries to fill in the fields of the namespace which are `None`
or wildcard (`'-'`) from the entity's namespace. If that fails, it uses
the service's namespace.
:param owner: The owner context of the namespace (optional).
:param app: The app context of the namespace (optional).
:param sharing: The sharing mode for the namespace (optional).
:return: A tuple ``(owner, app, sharing)``.
"""
if owner is None and app is None and sharing is None: # No namespace provided
if self._state is not None and 'access' in self._state:
return (self._state.access.owner,
self._state.access.app,
self._state.access.sharing)
else:
return (self.service.namespace['owner'],
self.service.namespace['app'],
self.service.namespace['sharing'])
else:
return (owner, app, sharing)
def delete(self):
owner, app, sharing = self._proper_namespace()
return self.service.delete(self.path, owner=owner, app=app, sharing=sharing)
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
owner, app, sharing = self._proper_namespace(owner, app, sharing)
return super(Entity, self).get(path_segment, owner=owner, app=app, sharing=sharing, **query)
def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
owner, app, sharing = self._proper_namespace(owner, app, sharing)
return super(Entity, self).post(path_segment, owner=owner, app=app, sharing=sharing, **query)
def refresh(self, state=None):
"""Refreshes the state of this entity.
If *state* is provided, load it as the new state for this
entity. Otherwise, make a roundtrip to the server (by calling
the :meth:`read` method of ``self``) to fetch an updated state,
plus at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param state: Entity-specific arguments (optional).
:type state: ``dict``
:raises EntityDeletedException: Raised if the entity no longer exists on
the server.
**Example**::
import splunklib.client as client
s = client.connect(...)
search = s.apps['search']
search.refresh()
"""
if state is not None:
self._state = state
else:
self._state = self.read(self.get())
return self
@property
def access(self):
"""Returns the access metadata for this entity.
:return: A :class:`splunklib.data.Record` object with three keys:
``owner``, ``app``, and ``sharing``.
"""
return self.state.access
@property
def content(self):
"""Returns the contents of the entity.
:return: A ``dict`` containing values.
"""
return self.state.content
def disable(self):
"""Disables the entity at this endpoint."""
self.post("disable")
if self.service.restart_required:
self.service.restart(120)
return self
def enable(self):
"""Enables the entity at this endpoint."""
self.post("enable")
return self
@property
def fields(self):
"""Returns the content metadata for this entity.
:return: A :class:`splunklib.data.Record` object with three keys:
``required``, ``optional``, and ``wildcard``.
"""
return self.state.fields
@property
def links(self):
"""Returns a dictionary of related resources.
:return: A ``dict`` with keys and corresponding URLs.
"""
return self.state.links
@property
def name(self):
"""Returns the entity name.
:return: The entity name.
:rtype: ``string``
"""
return self.state.title
def read(self, response):
""" Reads the current state of the entity from the server. """
results = self._load_state(response)
# In lower layers of the SDK, we end up trying to URL encode
# text to be dispatched via HTTP. However, these links are already
# URL encoded when they arrive, and we need to mark them as such.
unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True))
for k,v in six.iteritems(results['links'])])
results['links'] = unquoted_links
return results
def reload(self):
"""Reloads the entity."""
self.post("_reload")
return self
@property
def state(self):
"""Returns the entity's state record.
:return: A ``dict`` containing fields and metadata for the entity.
"""
if self._state is None: self.refresh()
return self._state
def update(self, **kwargs):
"""Updates the server with any changes you've made to the current entity
along with any additional arguments you specify.
**Note**: You cannot update the ``name`` field of an entity.
Many of the fields in the REST API are not valid Python
identifiers, which means you cannot pass them as keyword
arguments. That is, Python will fail to parse the following::
# This fails
x.update(check-new=False, email.to='boris@utopia.net')
However, you can always explicitly use a dictionary to pass
such keys::
# This works
x.update(**{'check-new': False, 'email.to': 'boris@utopia.net'})
:param kwargs: Additional entity-specific arguments (optional).
:type kwargs: ``dict``
:return: The entity this method is called on.
:rtype: :class:`Entity`
"""
# The peculiarity in question: the REST API creates a new
# Entity if we pass name in the dictionary, instead of the
# expected behavior of updating this Entity. Therefore we
# check for 'name' in kwargs and throw an error if it is
# there.
if 'name' in kwargs:
raise IllegalOperationException('Cannot update the name of an Entity via the REST API.')
self.post(**kwargs)
return self
class ReadOnlyCollection(Endpoint):
"""This class represents a read-only collection of entities in the Splunk
instance.
"""
def __init__(self, service, path, item=Entity):
Endpoint.__init__(self, service, path)
self.item = item # Item accessor
self.null_count = -1
def __contains__(self, name):
"""Is there at least one entry called *name* in this collection?
Makes a single roundtrip to the server, plus at most two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
"""
try:
self[name]
return True
except KeyError:
return False
except AmbiguousReferenceException:
return True
def __getitem__(self, key):
"""Fetch an item named *key* from this collection.
A name is not a unique identifier in a collection. The unique
identifier is a name plus a namespace. For example, there can
be a saved search named ``'mysearch'`` with sharing ``'app'``
in application ``'search'``, and another with sharing
``'user'`` with owner ``'boris'`` and application
``'search'``. If the ``Collection`` is attached to a
``Service`` that has ``'-'`` (wildcard) as user and app in its
namespace, then both of these may be visible under the same
name.
Where there is no conflict, ``__getitem__`` will fetch the
entity given just the name. If there is a conflict and you
pass just a name, it will raise a ``ValueError``. In that
case, add the namespace as a second argument.
This function makes a single roundtrip to the server, plus at
most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param key: The name to fetch, or a tuple (name, namespace).
:return: An :class:`Entity` object.
:raises KeyError: Raised if *key* does not exist.
:raises ValueError: Raised if no namespace is specified and *key*
does not refer to a unique name.
**Example**::
s = client.connect(...)
saved_searches = s.saved_searches
x1 = saved_searches.create(
'mysearch', 'search * | head 1',
owner='admin', app='search', sharing='app')
x2 = saved_searches.create(
'mysearch', 'search * | head 1',
owner='admin', app='search', sharing='user')
# Raises ValueError:
saved_searches['mysearch']
# Fetches x1
saved_searches[
'mysearch',
client.namespace(sharing='app', app='search')]
# Fetches x2
saved_searches[
'mysearch',
client.namespace(sharing='user', owner='admin', app='search')]
"""
try:
if isinstance(key, tuple) and len(key) == 2:
# x[a,b] is translated to x.__getitem__( (a,b) ), so we
# have to extract values out.
key, ns = key
key = UrlEncoded(key, encode_slash=True)
response = self.get(key, owner=ns.owner, app=ns.app)
else:
key = UrlEncoded(key, encode_slash=True)
response = self.get(key)
entries = self._load_list(response)
if len(entries) > 1:
raise AmbiguousReferenceException("Found multiple entities named '%s'; please specify a namespace." % key)
elif len(entries) == 0:
raise KeyError(key)
else:
return entries[0]
except HTTPError as he:
if he.status == 404: # No entity matching key and namespace.
raise KeyError(key)
else:
raise
def __iter__(self, **kwargs):
"""Iterate over the entities in the collection.
:param kwargs: Additional arguments.
:type kwargs: ``dict``
:rtype: iterator over entities.
Implemented to give Collection a listish interface. This
function always makes a roundtrip to the server, plus at most
two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
**Example**::
import splunklib.client as client
c = client.connect(...)
saved_searches = c.saved_searches
for entity in saved_searches:
print "Saved search named %s" % entity.name
"""
for item in self.iter(**kwargs):
yield item
def __len__(self):
"""Enable ``len(...)`` for ``Collection`` objects.
Implemented for consistency with a listish interface. No
further failure modes beyond those possible for any method on
an Endpoint.
This function always makes a round trip to the server, plus at
most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
**Example**::
import splunklib.client as client
c = client.connect(...)
saved_searches = c.saved_searches
n = len(saved_searches)
"""
return len(self.list())
def _entity_path(self, state):
"""Calculate the path to an entity to be returned.
*state* should be the dictionary returned by
:func:`_parse_atom_entry`. :func:`_entity_path` extracts the
link to this entity from *state*, and strips all the namespace
prefixes from it to leave only the relative path of the entity
itself, sans namespace.
:rtype: ``string``
:return: an absolute path
"""
# This has been factored out so that it can be easily
# overloaded by Configurations, which has to switch its
# entities' endpoints from its own properties/ to configs/.
raw_path = urllib.parse.unquote(state.links.alternate)
if 'servicesNS/' in raw_path:
return _trailing(raw_path, 'servicesNS/', '/', '/')
elif 'services/' in raw_path:
return _trailing(raw_path, 'services/')
else:
return raw_path
def _load_list(self, response):
"""Converts *response* to a list of entities.
*response* is assumed to be a :class:`Record` containing an
HTTP response, of the form::
{'status': 200,
'headers': [('content-length', '232642'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Tue, 29 May 2012 15:27:08 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'body': ...a stream implementing .read()...}
The ``'body'`` key refers to a stream containing an Atom feed,
that is, an XML document with a toplevel element ``<feed>``,
and within that element one or more ``<entry>`` elements.
"""
# Some subclasses of Collection have to override this because
# splunkd returns something that doesn't match
# <feed><entry></entry></feed>.
entries = _load_atom_entries(response)
if entries is None: return []
entities = []
for entry in entries:
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
self._entity_path(state),
state=state)
entities.append(entity)
return entities
def itemmeta(self):
"""Returns metadata for members of the collection.
Makes a single roundtrip to the server, plus two more at most if
the ``autologin`` field of :func:`connect` is set to ``True``.
:return: A :class:`splunklib.data.Record` object containing the metadata.
**Example**::
import splunklib.client as client
import pprint
s = client.connect(...)
pprint.pprint(s.apps.itemmeta())
{'access': {'app': 'search',
'can_change_perms': '1',
'can_list': '1',
'can_share_app': '1',
'can_share_global': '1',
'can_share_user': '1',
'can_write': '1',
'modifiable': '1',
'owner': 'admin',
'perms': {'read': ['*'], 'write': ['admin']},
'removable': '0',
'sharing': 'user'},
'fields': {'optional': ['author',
'configured',
'description',
'label',
'manageable',
'template',
'visible'],
'required': ['name'], 'wildcard': []}}
"""
response = self.get("_new")
content = _load_atom(response, MATCH_ENTRY_CONTENT)
return _parse_atom_metadata(content)
def iter(self, offset=0, count=None, pagesize=None, **kwargs):
"""Iterates over the collection.
This method is equivalent to the :meth:`list` method, but
it returns an iterator and can load a certain number of entities at a
time from the server.
:param offset: The index of the first entity to return (optional).
:type offset: ``integer``
:param count: The maximum number of entities to return (optional).
:type count: ``integer``
:param pagesize: The number of entities to load (optional).
:type pagesize: ``integer``
:param kwargs: Additional arguments (optional):
- "search" (``string``): The search query to filter responses.
- "sort_dir" (``string``): The direction to sort returned items:
"asc" or "desc".
- "sort_key" (``string``): The field to use for sorting (optional).
- "sort_mode" (``string``): The collating sequence for sorting
returned items: "auto", "alpha", "alpha_case", or "num".
:type kwargs: ``dict``
**Example**::
import splunklib.client as client
s = client.connect(...)
for saved_search in s.saved_searches.iter(pagesize=10):
# Loads 10 saved searches at a time from the
# server.
...
"""
assert pagesize is None or pagesize > 0
if count is None:
count = self.null_count
fetched = 0
while count == self.null_count or fetched < count:
response = self.get(count=pagesize or count, offset=offset, **kwargs)
items = self._load_list(response)
N = len(items)
fetched += N
for item in items:
yield item
if pagesize is None or N < pagesize:
break
offset += N
logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
def list(self, count=None, **kwargs):
"""Retrieves a list of entities in this collection.
The entire collection is loaded at once and is returned as a list. This
function makes a single roundtrip to the server, plus at most two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
There is no caching--every call makes at least one round trip.
:param count: The maximum number of entities to return (optional).
:type count: ``integer``
:param kwargs: Additional arguments (optional):
- "offset" (``integer``): The offset of the first item to return.
- "search" (``string``): The search query to filter responses.
- "sort_dir" (``string``): The direction to sort returned items:
"asc" or "desc".
- "sort_key" (``string``): The field to use for sorting (optional).
- "sort_mode" (``string``): The collating sequence for sorting
returned items: "auto", "alpha", "alpha_case", or "num".
:type kwargs: ``dict``
:return: A ``list`` of entities.
"""
# response = self.get(count=count, **kwargs)
# return self._load_list(response)
return list(self.iter(count=count, **kwargs))
class Collection(ReadOnlyCollection):
"""A collection of entities.
Splunk provides a number of different collections of distinct
entity types: applications, saved searches, fired alerts, and a
number of others. Each particular type is available separately
from the Splunk instance, and the entities of that type are
returned in a :class:`Collection`.
The interface for :class:`Collection` does not quite match either
``list`` or ``dict`` in Python, because there are enough semantic
mismatches with either to make its behavior surprising. A unique
element in a :class:`Collection` is defined by a string giving its
name plus namespace (although the namespace is optional if the name is
unique).
**Example**::
import splunklib.client as client
service = client.connect(...)
mycollection = service.saved_searches
mysearch = mycollection['my_search', client.namespace(owner='boris', app='natasha', sharing='user')]
# Or if there is only one search visible named 'my_search'
mysearch = mycollection['my_search']
Similarly, ``name`` in ``mycollection`` works as you might expect (though
you cannot currently pass a namespace to the ``in`` operator), as does
``len(mycollection)``.
However, as an aggregate, :class:`Collection` behaves more like a
list. If you iterate over a :class:`Collection`, you get an
iterator over the entities, not the names and namespaces.
**Example**::
for entity in mycollection:
assert isinstance(entity, client.Entity)
Use the :meth:`create` and :meth:`delete` methods to create and delete
entities in this collection. To view the access control list and other
metadata of the collection, use the :meth:`ReadOnlyCollection.itemmeta` method.
:class:`Collection` does no caching. Each call makes at least one
round trip to the server to fetch data.
"""
def create(self, name, **params):
"""Creates a new entity in this collection.
This function makes either one or two roundtrips to the
server, depending on the type of entities in this
collection, plus at most two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param name: The name of the entity to create.
:type name: ``string``
:param namespace: A namespace, as created by the :func:`splunklib.binding.namespace`
function (optional). You can also set ``owner``, ``app``, and
``sharing`` in ``params``.
:type namespace: A :class:`splunklib.data.Record` object with keys ``owner``, ``app``,
and ``sharing``.
:param params: Additional entity-specific arguments (optional).
:type params: ``dict``
:return: The new entity.
:rtype: A subclass of :class:`Entity`, chosen by the collection's ``item`` field.
**Example**::
import splunklib.client as client
s = client.connect(...)
applications = s.apps
new_app = applications.create("my_fake_app")
"""
if not isinstance(name, six.string_types):
raise InvalidNameException("%s is not a valid name for an entity." % name)
if 'namespace' in params:
namespace = params.pop('namespace')
params['owner'] = namespace.owner
params['app'] = namespace.app
params['sharing'] = namespace.sharing
response = self.post(name=name, **params)
atom = _load_atom(response, XNAME_ENTRY)
if atom is None:
# This endpoint doesn't return the content of the new
# item. We have to go fetch it ourselves.
return self[name]
else:
entry = atom.entry
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
self._entity_path(state),
state=state)
return entity
def delete(self, name, **params):
"""Deletes a specified entity from the collection.
:param name: The name of the entity to delete.
:type name: ``string``
:return: The collection.
:rtype: ``self``
This method is implemented for consistency with the REST API's DELETE
method.
If there is no *name* entity on the server, a ``KeyError`` is
thrown. This function always makes a roundtrip to the server.
**Example**::
import splunklib.client as client
c = client.connect(...)
saved_searches = c.saved_searches
saved_searches.create('my_saved_search',
'search * | head 1')
assert 'my_saved_search' in saved_searches
saved_searches.delete('my_saved_search')
assert 'my_saved_search' not in saved_searches
"""
name = UrlEncoded(name, encode_slash=True)
if 'namespace' in params:
namespace = params.pop('namespace')
params['owner'] = namespace.owner
params['app'] = namespace.app
params['sharing'] = namespace.sharing
try:
self.service.delete(_path(self.path, name), **params)
except HTTPError as he:
# An HTTPError with status code 404 means that the entity
# has already been deleted, and we reraise it as a
# KeyError.
if he.status == 404:
raise KeyError("No such entity %s" % name)
else:
raise
return self
def get(self, name="", owner=None, app=None, sharing=None, **query):
"""Performs a GET request to the server on the collection.
If *owner*, *app*, and *sharing* are omitted, this method takes a
default namespace from the :class:`Service` object for this :class:`Endpoint`.
All other keyword arguments are included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Service`` is not logged in.
:raises HTTPError: Raised when an error in the request occurs.
:param name: The name of the entity to retrieve, relative to this
collection's endpoint.
:type name: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode for the namespace (optional).
:type sharing: "global", "system", "app", or "user"
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
import splunklib.client as client
s = client.connect(...)
saved_searches = s.saved_searches
saved_searches.get("my/saved/search") == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
saved_searches.get('nonexistent/search') # raises HTTPError
s.logout()
saved_searches.get() # raises AuthenticationError
"""
name = UrlEncoded(name, encode_slash=True)
return super(Collection, self).get(name, owner, app, sharing, **query)
class ConfigurationFile(Collection):
"""This class contains all of the stanzas from one configuration file.
"""
# __init__'s arguments must match those of an Entity, not a
# Collection, since it is being created as the elements of a
# Configurations, which is a Collection subclass.
def __init__(self, service, path, **kwargs):
Collection.__init__(self, service, path, item=Stanza)
self.name = kwargs['state']['title']
class Configurations(Collection):
"""This class provides access to the configuration files from this Splunk
instance. Retrieve this collection using :meth:`Service.confs`.
Splunk's configuration is divided into files, and each file into
stanzas. This collection is unusual in that the values in it are
themselves collections of :class:`ConfigurationFile` objects.
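**Example** (a minimal sketch; ``props`` is one of the standard
configuration files)::
import splunklib.client as client
s = client.connect(...)
props = s.confs['props']
for stanza in props:
print(stanza.name)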
"""
def __init__(self, service):
Collection.__init__(self, service, PATH_PROPERTIES, item=ConfigurationFile)
if self.service.namespace.owner == '-' or self.service.namespace.app == '-':
raise ValueError("Configurations cannot have wildcards in namespace.")
def __getitem__(self, key):
# The superclass implementation is designed for collections that contain
# entities. This collection (Configurations) contains collections
# (ConfigurationFile).
#
# The configurations endpoint returns multiple entities when we ask for a single file.
# This screws up the default implementation of __getitem__ from Collection, which thinks
# that multiple entities means a name collision, so we have to override it here.
try:
response = self.get(key)
return ConfigurationFile(self.service, PATH_CONF % key, state={'title': key})
except HTTPError as he:
if he.status == 404: # No entity matching key
raise KeyError(key)
else:
raise
def __contains__(self, key):
# configs/conf-{name} never returns a 404. We have to request
# properties/{name} in order to find out if a configuration exists.
try:
response = self.get(key)
return True
except HTTPError as he:
if he.status == 404: # No entity matching key
return False
else:
raise
def create(self, name):
""" Creates a configuration file named *name*.
If there is already a configuration file with that name,
the existing file is returned.
:param name: The name of the configuration file.
:type name: ``string``
:return: The :class:`ConfigurationFile` object.
"""
# This has to be overridden to handle the plumbing of creating
# a ConfigurationFile (which is a Collection) instead of some
# Entity.
if not isinstance(name, six.string_types):
raise ValueError("Invalid name: %s" % repr(name))
response = self.post(__conf=name)
if response.status == 303:
return self[name]
elif response.status == 201:
return ConfigurationFile(self.service, PATH_CONF % name, item=Stanza, state={'title': name})
else:
raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status)
def delete(self, key):
"""Raises `IllegalOperationException`."""
raise IllegalOperationException("Cannot delete configuration files from the REST API.")
def _entity_path(self, state):
# Overridden to make all the ConfigurationFile objects
# returned refer to the configs/ path instead of the
# properties/ path used by Configurations.
return PATH_CONF % state['title']
class Stanza(Entity):
"""This class contains a single configuration stanza."""
def submit(self, stanza):
"""Adds keys to the current configuration stanza as a
dictionary of key-value pairs.
:param stanza: A dictionary of key-value pairs for the stanza.
:type stanza: ``dict``
:return: The :class:`Stanza` object.
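**Example** (a minimal sketch; the file, stanza, and key shown are
illustrative)::
import splunklib.client as client
s = client.connect(...)
stanza = s.confs['props']['my_sourcetype']
stanza.submit({'SHOULD_LINEMERGE': 'false'})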
"""
body = _encode(**stanza)
self.service.post(self.path, body=body)
return self
def __len__(self):
# The stanza endpoint returns all the keys at the same level in the XML as the eai information
# and 'disabled', so to get an accurate length, we have to filter those out and have just
# the stanza keys.
return len([x for x in self._state.content.keys()
if not x.startswith('eai') and x != 'disabled'])
class StoragePassword(Entity):
"""This class contains a storage password.
"""
def __init__(self, service, path, **kwargs):
state = kwargs.get('state', None)
kwargs['skip_refresh'] = kwargs.get('skip_refresh', state is not None)
super(StoragePassword, self).__init__(service, path, **kwargs)
self._state = state
@property
def clear_password(self):
return self.content.get('clear_password')
@property
def encrypted_password(self):
return self.content.get('encr_password')
@property
def realm(self):
return self.content.get('realm')
@property
def username(self):
return self.content.get('username')
class StoragePasswords(Collection):
"""This class provides access to the storage passwords from this Splunk
instance. Retrieve this collection using :meth:`Service.storage_passwords`.
"""
def __init__(self, service):
if service.namespace.owner == '-' or service.namespace.app == '-':
raise ValueError("StoragePasswords cannot have wildcards in namespace.")
super(StoragePasswords, self).__init__(service, PATH_STORAGE_PASSWORDS, item=StoragePassword)
def create(self, password, username, realm=None):
""" Creates a storage password.
A `StoragePassword` can be identified by <username>, or by <realm>:<username> if the
optional realm parameter is also provided.
:param password: The password for the credentials. This is the only part
of the credentials that is stored securely.
:type password: ``string``
:param username: The username for the credentials.
:type username: ``string``
:param realm: The credential realm (optional).
:type realm: ``string``
:return: The :class:`StoragePassword` object created.
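**Example** (the credentials shown are illustrative)::
sp = service.storage_passwords.create("changeme", "api_user", realm="myrealm")
print(sp.username) # "api_user"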
"""
if not isinstance(username, six.string_types):
raise ValueError("Invalid name: %s" % repr(username))
if realm is None:
response = self.post(password=password, name=username)
else:
response = self.post(password=password, realm=realm, name=username)
if response.status != 201:
raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status)
entries = _load_atom_entries(response)
state = _parse_atom_entry(entries[0])
storage_password = StoragePassword(self.service, self._entity_path(state), state=state, skip_refresh=True)
return storage_password
def delete(self, username, realm=None):
"""Delete a storage password by username and/or realm.
The identifier can be passed in through the username parameter as
<username> or <realm>:<username>, but the preferred way is by
passing in the username and realm parameters.
:param username: The username for the credentials, or <realm>:<username>
if the realm parameter is omitted.
:type username: ``string``
:param realm: The credential realm (optional).
:type realm: ``string``
:return: The `StoragePassword` collection.
:rtype: ``self``
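**Example** (the credentials shown are illustrative)::
service.storage_passwords.delete("api_user", realm="myrealm")
# Or, equivalently, pass the already-encoded full name:
service.storage_passwords.delete("myrealm:api_user")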
"""
if realm is None:
# This case makes the username optional, so
# the full name can be passed in as realm.
# Assume it's already encoded.
name = username
else:
# Encode each component separately
name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True)
# Append the : expected at the end of the name
if name[-1] != ":":
name = name + ":"
return Collection.delete(self, name)
class AlertGroup(Entity):
"""This class represents a group of fired alerts for a saved search. Access
it using the :meth:`alerts` property."""
def __init__(self, service, path, **kwargs):
Entity.__init__(self, service, path, **kwargs)
def __len__(self):
return self.count
@property
def alerts(self):
"""Returns a collection of triggered alerts.
:return: A :class:`Collection` of triggered alerts.
"""
return Collection(self.service, self.path)
@property
def count(self):
"""Returns the count of triggered alerts.
:return: The triggered alert count.
:rtype: ``integer``
"""
return int(self.content.get('triggered_alert_count', 0))
class Indexes(Collection):
"""This class contains the collection of indexes in this Splunk instance.
Retrieve this collection using :meth:`Service.indexes`.
"""
def get_default(self):
""" Returns the name of the default index.
:return: The name of the default index.
"""
index = self['_audit']
return index['defaultDatabase']
def delete(self, name):
""" Deletes a given index.
**Note**: This method is only supported in Splunk 5.0 and later.
:param name: The name of the index to delete.
:type name: ``string``
"""
if self.service.splunk_version >= (5,):
Collection.delete(self, name)
else:
raise IllegalOperationException("Deleting indexes via the REST API is "
"not supported before Splunk version 5.")
class Index(Entity):
"""This class represents an index and provides different operations, such as
cleaning the index, writing to the index, and so forth."""
def __init__(self, service, path, **kwargs):
Entity.__init__(self, service, path, **kwargs)
def attach(self, host=None, source=None, sourcetype=None):
"""Opens a stream (a writable socket) for writing events to the index.
:param host: The host value for events written to the stream.
:type host: ``string``
:param source: The source value for events written to the stream.
:type source: ``string``
:param sourcetype: The sourcetype value for events written to the
stream.
:type sourcetype: ``string``
:return: A writable socket.
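**Example** (a minimal sketch; the index name and event text are
illustrative)::
import splunklib.client as client
s = client.connect(...)
index = s.indexes['some_index']
sock = index.attach(sourcetype='test')
sock.send('Test event\\r\\n')
sock.close()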
"""
args = { 'index': self.name }
if host is not None: args['host'] = host
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.parse.urlencode(args), skip_encode=True)
cookie_or_auth_header = "Authorization: Splunk %s\r\n" % \
(self.service.token if self.service.token is _NoAuthenticationToken
else self.service.token.replace("Splunk ", ""))
# If we have cookie(s), use them instead of "Authorization: ..."
if self.service.has_cookies():
cookie_or_auth_header = "Cookie: %s\r\n" % _make_cookie_header(self.service.get_cookies().items())
# Since we need to stream to the index connection, we have to keep
# the connection open and use the Splunk extension headers to note
# the input mode
sock = self.service.connect()
headers = [("POST %s HTTP/1.1\r\n" % str(self.service._abspath(path))).encode('utf-8'),
("Host: %s:%s\r\n" % (self.service.host, int(self.service.port))).encode('utf-8'),
b"Accept-Encoding: identity\r\n",
cookie_or_auth_header.encode('utf-8'),
b"X-Splunk-Input-Mode: Streaming\r\n",
b"\r\n"]
for h in headers:
sock.write(h)
return sock
@contextlib.contextmanager
def attached_socket(self, *args, **kwargs):
"""Opens a raw socket in a ``with`` block to write data to Splunk.
The arguments are identical to those for :meth:`attach`. The socket is
automatically closed at the end of the ``with`` block, even if an
exception is raised in the block.
:param host: The host value for events written to the stream.
:type host: ``string``
:param source: The source value for events written to the stream.
:type source: ``string``
:param sourcetype: The sourcetype value for events written to the
stream.
:type sourcetype: ``string``
:returns: Nothing.
**Example**::
import splunklib.client as client
s = client.connect(...)
index = s.indexes['some_index']
with index.attached_socket(sourcetype='test') as sock:
sock.send('Test event\\r\\n')
"""
try:
sock = self.attach(*args, **kwargs)
yield sock
finally:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
def clean(self, timeout=60):
"""Deletes the contents of the index.
This method blocks until the index is empty, because it needs to restore
values at the end of the operation.
:param timeout: The time-out period for the operation, in seconds (the
default is 60).
:type timeout: ``integer``
:return: The :class:`Index`.
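**Example** (blocking; the index name and timeout shown are
illustrative)::
import splunklib.client as client
s = client.connect(...)
s.indexes['some_index'].clean(timeout=120)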
"""
self.refresh()
tds = self['maxTotalDataSizeMB']
ftp = self['frozenTimePeriodInSecs']
was_disabled_initially = self.disabled
try:
if (not was_disabled_initially and \
self.service.splunk_version < (5,)):
# Need to disable the index first on Splunk 4.x,
# but it doesn't work to disable it on 5.0.
self.disable()
self.update(maxTotalDataSizeMB=1, frozenTimePeriodInSecs=1)
self.roll_hot_buckets()
# Wait until event count goes to 0.
start = datetime.now()
diff = timedelta(seconds=timeout)
while self.content.totalEventCount != '0' and datetime.now() < start+diff:
sleep(1)
self.refresh()
if self.content.totalEventCount != '0':
raise OperationError("Cleaning index %s took longer than %s seconds; timing out." % (self.name, timeout))
finally:
# Restore original values
self.update(maxTotalDataSizeMB=tds, frozenTimePeriodInSecs=ftp)
if (not was_disabled_initially and \
self.service.splunk_version < (5,)):
# Re-enable the index if it was originally enabled and we messed with it.
self.enable()
return self
def roll_hot_buckets(self):
"""Performs rolling hot buckets for this index.
:return: The :class:`Index`.
"""
self.post("roll-hot-buckets")
return self
def submit(self, event, host=None, source=None, sourcetype=None):
"""Submits a single event to the index using ``HTTP POST``.
:param event: The event to submit.
:type event: ``string``
:param `host`: The host value of the event.
:type host: ``string``
:param `source`: The source value of the event.
:type source: ``string``
:param `sourcetype`: The sourcetype value of the event.
:type sourcetype: ``string``
:return: The :class:`Index`.
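**Example** (the event text and metadata shown are illustrative)::
import splunklib.client as client
s = client.connect(...)
s.indexes['some_index'].submit('my event', sourcetype='my_sourcetype')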
"""
args = { 'index': self.name }
if host is not None: args['host'] = host
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
# The reason we use service.request directly rather than POST
# is that we are not sending a POST request encoded using
# x-www-form-urlencoded (as we do not have a key=value body),
# because we aren't really sending a "form".
self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args)
return self
# kwargs: host, host_regex, host_segment, rename-source, sourcetype
def upload(self, filename, **kwargs):
"""Uploads a file for immediate indexing.
**Note**: The file must be locally accessible from the server.
:param filename: The name of the file to upload. The file can be a
plain, compressed, or archived file.
:type filename: ``string``
:param kwargs: Additional arguments (optional). For more about the
available parameters, see `Index parameters <http://dev.splunk.com/view/SP-CAAAEE6#indexparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The :class:`Index`.
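**Example** (the path shown is illustrative; the file must exist on the
server)::
import splunklib.client as client
s = client.connect(...)
s.indexes['some_index'].upload('/tmp/mydata.log')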
"""
kwargs['index'] = self.name
path = 'data/inputs/oneshot'
self.service.post(path, name=filename, **kwargs)
return self
class Input(Entity):
"""This class represents a Splunk input. This class is the base for all
typed input classes and is also used when the client does not recognize an
input kind.
"""
def __init__(self, service, path, kind=None, **kwargs):
# kind can be omitted (in which case it is inferred from the path)
# Otherwise, valid values are the paths from data/inputs ("udp",
# "monitor", "tcp/raw"), or two special cases: "tcp" (which is "tcp/raw")
# and "splunktcp" (which is "tcp/cooked").
Entity.__init__(self, service, path, **kwargs)
if kind is None:
path_segments = path.split('/')
i = path_segments.index('inputs') + 1
if path_segments[i] == 'tcp':
self.kind = path_segments[i] + '/' + path_segments[i+1]
else:
self.kind = path_segments[i]
else:
self.kind = kind
# Handle old input kind names.
if self.kind == 'tcp':
self.kind = 'tcp/raw'
if self.kind == 'splunktcp':
self.kind = 'tcp/cooked'
def update(self, **kwargs):
"""Updates the server with any changes you've made to the current input
along with any additional arguments you specify.
:param kwargs: Additional arguments (optional). For more about the
available parameters, see `Input parameters <http://dev.splunk.com/view/SP-CAAAEE6#inputparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The input this method was called on.
:rtype: :class:`Input`
"""
# UDP and TCP inputs require special handling due to their restrictToHost
# field. For all other inputs kinds, we can dispatch to the superclass method.
if self.kind not in ['tcp', 'splunktcp', 'tcp/raw', 'tcp/cooked', 'udp']:
return super(Input, self).update(**kwargs)
else:
# The behavior of restrictToHost is inconsistent across input kinds and versions of Splunk.
# In Splunk 4.x, the name of the entity is only the port, independent of the value of
# restrictToHost. In Splunk 5.0 this changed so the name will be of the form <restrictToHost>:<port>.
# In 5.0 and 5.0.1, if you don't supply the restrictToHost value on every update, it will
# remove the host restriction from the input. As of 5.0.2 you simply can't change restrictToHost
# on an existing input.
# The logic to handle all these cases:
# - Throw an exception if the user tries to set restrictToHost on an existing input
# for *any* version of Splunk.
# - Set the existing restrictToHost value on the update args internally so we don't
# cause it to change in Splunk 5.0 and 5.0.1.
to_update = kwargs.copy()
if 'restrictToHost' in kwargs:
raise IllegalOperationException("Cannot set restrictToHost on an existing input with the SDK.")
elif 'restrictToHost' in self._state.content and self.kind != 'udp':
to_update['restrictToHost'] = self._state.content['restrictToHost']
# Do the actual update operation.
return super(Input, self).update(**to_update)
# Inputs is a "kinded" collection: a heterogeneous collection in which
# each item is tagged with a kind, providing a single merged view of all
# input kinds.
class Inputs(Collection):
"""This class represents a collection of inputs. The collection is
heterogeneous and each member of the collection contains a *kind* property
that indicates the specific type of input.
Retrieve this collection using :meth:`Service.inputs`."""
def __init__(self, service, kindmap=None):
Collection.__init__(self, service, PATH_INPUTS, item=Input)
def __getitem__(self, key):
# The key used to retrieve the input needs its parentheses URL-encoded,
# per the REST API for inputs
# <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTinput>
if isinstance(key, tuple) and len(key) == 2:
# Fetch a single kind
key, kind = key
key = UrlEncoded(key, encode_slash=True)
try:
response = self.get(self.kindpath(kind) + "/" + key)
entries = self._load_list(response)
if len(entries) > 1:
raise AmbiguousReferenceException("Found multiple inputs of kind %s named %s." % (kind, key))
elif len(entries) == 0:
raise KeyError((key, kind))
else:
return entries[0]
except HTTPError as he:
if he.status == 404: # No entity matching kind and key
raise KeyError((key, kind))
else:
raise
else:
# Iterate over all the kinds looking for matches.
kind = None
candidate = None
key = UrlEncoded(key, encode_slash=True)
for kind in self.kinds:
try:
response = self.get(kind + "/" + key)
entries = self._load_list(response)
if len(entries) > 1:
raise AmbiguousReferenceException("Found multiple inputs of kind %s named %s." % (kind, key))
elif len(entries) == 0:
pass
else:
if candidate is not None: # Already found at least one candidate
raise AmbiguousReferenceException("Found multiple inputs named %s, please specify a kind" % key)
candidate = entries[0]
except HTTPError as he:
if he.status == 404:
pass # Just carry on to the next kind.
else:
raise
if candidate is None:
raise KeyError(key) # Never found a match.
else:
return candidate
def __contains__(self, key):
if isinstance(key, tuple) and len(key) == 2:
# If we specify a kind, this will shortcut properly
try:
self.__getitem__(key)
return True
except KeyError:
return False
else:
# Without a kind, we want to minimize the number of round trips to the server, so we
# reimplement some of the behavior of __getitem__ in order to be able to stop searching
# on the first hit.
for kind in self.kinds:
try:
response = self.get(self.kindpath(kind) + "/" + key)
entries = self._load_list(response)
if len(entries) > 0:
return True
else:
pass
except HTTPError as he:
if he.status == 404:
pass # Just carry on to the next kind.
else:
raise
return False
def create(self, name, kind, **kwargs):
"""Creates an input of a specific kind in this collection, with any
arguments you specify.
:param `name`: The input name.
:type name: ``string``
:param `kind`: The kind of input:
- "ad": Active Directory
- "monitor": Files and directories
- "registry": Windows Registry
- "script": Scripts
- "splunktcp": TCP, processed
- "tcp": TCP, unprocessed
- "udp": UDP
- "win-event-log-collections": Windows event log
- "win-perfmon": Performance monitoring
- "win-wmi-collections": WMI
:type kind: ``string``
:param `kwargs`: Additional arguments (optional). For more about the
available parameters, see `Input parameters <http://dev.splunk.com/view/SP-CAAAEE6#inputparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The new :class:`Input`.
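**Example** (a sketch; the port number is arbitrary)::
import splunklib.client as client
service = client.connect(...)
tcp_input = service.inputs.create("9999", "tcp", sourcetype="unknown")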
"""
kindpath = self.kindpath(kind)
self.post(kindpath, name=name, **kwargs)
# If we created an input with restrictToHost set, then
# its path will be <restrictToHost>:<name>, not just <name>,
# and we have to adjust accordingly.
# Url encodes the name of the entity.
name = UrlEncoded(name, encode_slash=True)
path = _path(
self.path + kindpath,
'%s:%s' % (kwargs['restrictToHost'], name) \
if 'restrictToHost' in kwargs else name
)
return Input(self.service, path, kind)
def delete(self, name, kind=None):
"""Removes an input from the collection.
:param `kind`: The kind of input:
- "ad": Active Directory
- "monitor": Files and directories
- "registry": Windows Registry
- "script": Scripts
- "splunktcp": TCP, processed
- "tcp": TCP, unprocessed
- "udp": UDP
- "win-event-log-collections": Windows event log
- "win-perfmon": Performance monitoring
- "win-wmi-collections": WMI
:type kind: ``string``
:param name: The name of the input to remove.
:type name: ``string``
:return: The :class:`Inputs` collection.
"""
if kind is None:
self.service.delete(self[name].path)
else:
self.service.delete(self[name, kind].path)
return self
def itemmeta(self, kind):
"""Returns metadata for the members of a given kind.
:param `kind`: The kind of input:
- "ad": Active Directory
- "monitor": Files and directories
- "registry": Windows Registry
- "script": Scripts
- "splunktcp": TCP, processed
- "tcp": TCP, unprocessed
- "udp": UDP
- "win-event-log-collections": Windows event log
- "win-perfmon": Performance monitoring
- "win-wmi-collections": WMI
:type kind: ``string``
:return: The metadata.
:rtype: :class:`splunklib.data.Record`
"""
# Note: this class never retains the kindmap passed to __init__, so we
# resolve the endpoint through kindpath() instead.
response = self.get("%s/_new" % self.kindpath(kind))
content = _load_atom(response, MATCH_ENTRY_CONTENT)
return _parse_atom_metadata(content)
def _get_kind_list(self, subpath=None):
if subpath is None:
subpath = []
kinds = []
response = self.get('/'.join(subpath))
content = _load_atom_entries(response)
for entry in content:
this_subpath = subpath + [entry.title]
# The "all" endpoint doesn't work yet.
# The "tcp/ssl" endpoint is not a real input collection.
if entry.title == 'all' or this_subpath == ['tcp','ssl']:
continue
elif 'create' in [x.rel for x in entry.link]:
path = '/'.join(subpath + [entry.title])
kinds.append(path)
else:
subkinds = self._get_kind_list(subpath + [entry.title])
kinds.extend(subkinds)
return kinds
@property
def kinds(self):
"""Returns the input kinds on this Splunk instance.
:return: The list of input kinds.
:rtype: ``list``
"""
return self._get_kind_list()
def kindpath(self, kind):
"""Returns a path to the resources for a given input kind.
:param `kind`: The kind of input:
- "ad": Active Directory
- "monitor": Files and directories
- "registry": Windows Registry
- "script": Scripts
- "splunktcp": TCP, processed
- "tcp": TCP, unprocessed
- "udp": UDP
- "win-event-log-collections": Windows event log
- "win-perfmon": Performance monitoring
- "win-wmi-collections": WMI
:type kind: ``string``
:return: The relative endpoint path.
:rtype: ``string``
"""
if kind == 'tcp':
return UrlEncoded('tcp/raw', skip_encode=True)
elif kind == 'splunktcp':
return UrlEncoded('tcp/cooked', skip_encode=True)
else:
return UrlEncoded(kind, skip_encode=True)
def list(self, *kinds, **kwargs):
"""Returns a list of inputs that are in the :class:`Inputs` collection.
You can also filter by one or more input kinds.
Unless you specify a single input kind, this function iterates over all
input kinds. Because the :class:`Inputs` collection is the union of the inputs of each
kind, parameters such as "count", "search", and so
on are applied at the Python level once all the data has been fetched. When you
specify a single input kind, this method instead makes a single request
with the usual server-side semantics for those parameters.
:param kinds: The input kinds to return (optional).
- "ad": Active Directory
- "monitor": Files and directories
- "registry": Windows Registry
- "script": Scripts
- "splunktcp": TCP, processed
- "tcp": TCP, unprocessed
- "udp": UDP
- "win-event-log-collections": Windows event log
- "win-perfmon": Performance monitoring
- "win-wmi-collections": WMI
:type kinds: ``string``
:param kwargs: Additional arguments (optional):
- "count" (``integer``): The maximum number of items to return.
- "offset" (``integer``): The offset of the first item to return.
- "search" (``string``): The search query to filter responses.
- "sort_dir" (``string``): The direction to sort returned items:
"asc" or "desc".
- "sort_key" (``string``): The field to use for sorting (optional).
- "sort_mode" (``string``): The collating sequence for sorting
returned items: "auto", "alpha", "alpha_case", or "num".
:type kwargs: ``dict``
:return: A list of inputs.
:rtype: ``list``
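**Example** (a sketch assuming a reachable Splunk instance)::
import splunklib.client as client
service = client.connect(...)
for inp in service.inputs.list('monitor', 'tcp'):
print(inp.name, inp.kind)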
"""
if len(kinds) == 0:
kinds = self.kinds
if len(kinds) == 1:
kind = kinds[0]
logging.debug("Inputs.list taking short circuit branch for single kind.")
path = self.kindpath(kind)
logging.debug("Path for inputs: %s", path)
try:
path = UrlEncoded(path, skip_encode=True)
response = self.get(path, **kwargs)
except HTTPError as he:
if he.status == 404: # No inputs of this kind
return []
else:
raise
entities = []
entries = _load_atom_entries(response)
if entries is None:
return [] # An empty collection comes back with no feed or entry in the XML
for entry in entries:
state = _parse_atom_entry(entry)
# Unquote the URL, since everything URL-encoded in the SDK
# should be of type UrlEncoded, and plain str values should
# not be URL-encoded.
path = urllib.parse.unquote(state.links.alternate)
entity = Input(self.service, path, kind, state=state)
entities.append(entity)
return entities
search = kwargs.get('search', '*')
entities = []
for kind in kinds:
response = None
try:
kind = UrlEncoded(kind, skip_encode=True)
response = self.get(self.kindpath(kind), search=search)
except HTTPError as e:
if e.status == 404:
continue # No inputs of this kind
else:
raise
entries = _load_atom_entries(response)
if entries is None: continue # No inputs to process
for entry in entries:
state = _parse_atom_entry(entry)
# Unquote the URL, since everything URL-encoded in the SDK
# should be of type UrlEncoded, and plain str values should
# not be URL-encoded.
path = urllib.parse.unquote(state.links.alternate)
entity = Input(self.service, path, kind, state=state)
entities.append(entity)
if 'offset' in kwargs:
entities = entities[kwargs['offset']:]
if 'count' in kwargs:
entities = entities[:kwargs['count']]
if kwargs.get('sort_mode', None) == 'alpha':
sort_field = kwargs.get('sort_field', 'name')
if sort_field == 'name':
f = lambda x: x.name.lower()
else:
f = lambda x: x[sort_field].lower()
entities = sorted(entities, key=f)
if kwargs.get('sort_mode', None) == 'alpha_case':
sort_field = kwargs.get('sort_field', 'name')
if sort_field == 'name':
f = lambda x: x.name
else:
f = lambda x: x[sort_field]
entities = sorted(entities, key=f)
if kwargs.get('sort_dir', 'asc') == 'desc':
entities = list(reversed(entities))
return entities
def __iter__(self, **kwargs):
for item in self.iter(**kwargs):
yield item
def iter(self, **kwargs):
""" Iterates over the collection of inputs.
:param kwargs: Additional arguments (optional):
- "count" (``integer``): The maximum number of items to return.
- "offset" (``integer``): The offset of the first item to return.
- "search" (``string``): The search query to filter responses.
- "sort_dir" (``string``): The direction to sort returned items:
"asc" or "desc".
- "sort_key" (``string``): The field to use for sorting (optional).
- "sort_mode" (``string``): The collating sequence for sorting
returned items: "auto", "alpha", "alpha_case", or "num".
:type kwargs: ``dict``
"""
for item in self.list(**kwargs):
yield item
def oneshot(self, path, **kwargs):
""" Creates a oneshot data input, which is an upload of a single file
for one-time indexing.
:param path: The path and filename.
:type path: ``string``
:param kwargs: Additional arguments (optional). For more about the
available parameters, see `Input parameters <http://dev.splunk.com/view/SP-CAAAEE6#inputparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
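**Example** (a sketch; the path is hypothetical and must be readable by splunkd)::
import splunklib.client as client
service = client.connect(...)
service.inputs.oneshot("/var/log/messages", index="main")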
"""
self.post('oneshot', name=path, **kwargs)
class Job(Entity):
"""This class represents a search job."""
def __init__(self, service, sid, **kwargs):
path = PATH_JOBS + sid
Entity.__init__(self, service, path, skip_refresh=True, **kwargs)
self.sid = sid
# The Job entry record is returned at the root of the response
def _load_atom_entry(self, response):
return _load_atom(response).entry
def cancel(self):
"""Stops the current search and deletes the results cache.
:return: The :class:`Job`.
"""
try:
self.post("control", action="cancel")
except HTTPError as he:
if he.status == 404:
# The job has already been cancelled, so
# cancelling it twice is a nop.
pass
else:
raise
return self
def disable_preview(self):
"""Disables preview for this job.
:return: The :class:`Job`.
"""
self.post("control", action="disablepreview")
return self
def enable_preview(self):
"""Enables preview for this job.
**Note**: Enabling preview might slow search considerably.
:return: The :class:`Job`.
"""
self.post("control", action="enablepreview")
return self
def events(self, **kwargs):
"""Returns a streaming handle to this job's events.
:param kwargs: Additional parameters (optional). For a list of valid
parameters, see `GET search/jobs/{search_id}/events
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fevents>`_
in the REST API documentation.
:type kwargs: ``dict``
:return: The ``InputStream`` IO handle to this job's events.
"""
kwargs['segmentation'] = kwargs.get('segmentation', 'none')
return self.get("events", **kwargs).body
def finalize(self):
"""Stops the job and provides intermediate results for retrieval.
:return: The :class:`Job`.
"""
self.post("control", action="finalize")
return self
def is_done(self):
"""Indicates whether this job finished running.
:return: ``True`` if the job is done, ``False`` if not.
:rtype: ``boolean``
"""
if not self.is_ready():
return False
done = (self._state.content['isDone'] == '1')
return done
def is_ready(self):
"""Indicates whether this job is ready for querying.
:return: ``True`` if the job is ready, ``False`` if not.
:rtype: ``boolean``
"""
response = self.get()
if response.status == 204:
return False
self._state = self.read(response)
ready = self._state.content['dispatchState'] not in ['QUEUED', 'PARSING']
return ready
@property
def name(self):
"""Returns the name of the search job, which is the search ID (SID).
:return: The search ID.
:rtype: ``string``
"""
return self.sid
def pause(self):
"""Suspends the current search.
:return: The :class:`Job`.
"""
self.post("control", action="pause")
return self
def results(self, **query_params):
"""Returns a streaming handle to this job's search results. To get a
nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
as in::
import splunklib.client as client
import splunklib.results as results
from time import sleep
service = client.connect(...)
job = service.jobs.create("search * | head 5")
while not job.is_done():
sleep(.2)
rr = results.ResultsReader(job.results())
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print('%s: %s' % (result.type, result.message))
elif isinstance(result, dict):
# Normal events are returned as dicts
print(result)
assert not rr.is_preview
Results are not available until the job has finished. If called on
an unfinished job, the result is an empty event set.
This method makes a single roundtrip
to the server, plus at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param query_params: Additional parameters (optional). For a list of valid
parameters, see `GET search/jobs/{search_id}/results
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults>`_.
:type query_params: ``dict``
:return: The ``InputStream`` IO handle to this job's results.
"""
query_params['segmentation'] = query_params.get('segmentation', 'none')
return self.get("results", **query_params).body
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
Unlike the :meth:`results` method, which requires a job to be finished to
return any results, the ``preview`` method returns any results that have
been generated so far, whether the job is running or not. The
returned search results are the raw data from the server. Pass
the handle returned to :class:`splunklib.results.ResultsReader` to get a
nice, Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
rr = results.ResultsReader(job.preview())
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print('%s: %s' % (result.type, result.message))
elif isinstance(result, dict):
# Normal events are returned as dicts
print(result)
if rr.is_preview:
print("Preview of a running search job.")
else:
print("Job is finished. Results are final.")
This method makes one roundtrip to the server, plus at most
two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param query_params: Additional parameters (optional). For a list of valid
parameters, see `GET search/jobs/{search_id}/results_preview
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults_preview>`_
in the REST API documentation.
:type query_params: ``dict``
:return: The ``InputStream`` IO handle to this job's preview results.
"""
query_params['segmentation'] = query_params.get('segmentation', 'none')
return self.get("results_preview", **query_params).body
def searchlog(self, **kwargs):
"""Returns a streaming handle to this job's search log.
:param `kwargs`: Additional parameters (optional). For a list of valid
parameters, see `GET search/jobs/{search_id}/search.log
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fsearch.log>`_
in the REST API documentation.
:type kwargs: ``dict``
:return: The ``InputStream`` IO handle to this job's search log.
"""
return self.get("search.log", **kwargs).body
def set_priority(self, value):
"""Sets this job's search priority in the range of 0-10.
Higher numbers indicate higher priority. Unless splunkd is
running as *root*, you can only decrease the priority of a running job.
:param `value`: The search priority.
:type value: ``integer``
:return: The :class:`Job`.
"""
self.post('control', action="setpriority", priority=value)
return self
def summary(self, **kwargs):
"""Returns a streaming handle to this job's summary.
:param `kwargs`: Additional parameters (optional). For a list of valid
parameters, see `GET search/jobs/{search_id}/summary
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fsummary>`_
in the REST API documentation.
:type kwargs: ``dict``
:return: The ``InputStream`` IO handle to this job's summary.
"""
return self.get("summary", **kwargs).body
def timeline(self, **kwargs):
"""Returns a streaming handle to this job's timeline results.
:param `kwargs`: Additional timeline arguments (optional). For a list of valid
parameters, see `GET search/jobs/{search_id}/timeline
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Ftimeline>`_
in the REST API documentation.
:type kwargs: ``dict``
:return: The ``InputStream`` IO handle to this job's timeline.
"""
return self.get("timeline", **kwargs).body
def touch(self):
"""Extends the expiration time of the search to the current time (now) plus
the time-to-live (ttl) value.
:return: The :class:`Job`.
"""
self.post("control", action="touch")
return self
def set_ttl(self, value):
"""Set the job's time-to-live (ttl) value, which is the time before the
search job expires and is still available.
:param `value`: The ttl value, in seconds.
:type value: ``integer``
:return: The :class:`Job`.
"""
self.post("control", action="setttl", ttl=value)
return self
def unpause(self):
"""Resumes the current search, if paused.
:return: The :class:`Job`.
"""
self.post("control", action="unpause")
return self
class Jobs(Collection):
"""This class represents a collection of search jobs. Retrieve this
collection using :meth:`Service.jobs`."""
def __init__(self, service):
Collection.__init__(self, service, PATH_JOBS, item=Job)
# The count value meaning "list all the contents of this
# Collection" is 0, not -1 as it is on most collections.
self.null_count = 0
def _load_list(self, response):
# Overridden because Job takes a sid instead of a path.
entries = _load_atom_entries(response)
if entries is None: return []
entities = []
for entry in entries:
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
entry['content']['sid'],
state=state)
entities.append(entity)
return entities
def create(self, query, **kwargs):
""" Creates a search using a search query and any additional parameters
you provide.
:param query: The search query.
:type query: ``string``
:param kwargs: Additional parameters (optional). For a list of available
parameters, see `Search job parameters
<http://dev.splunk.com/view/SP-CAAAEE5#searchjobparams>`_
on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The :class:`Job`.
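**Example** (a sketch assuming a reachable Splunk instance)::
import splunklib.client as client
from time import sleep
service = client.connect(...)
job = service.jobs.create("search * | head 5", earliest_time="-1h")
while not job.is_done():
sleep(.2)
print(job.sid)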
"""
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
sid = _load_sid(response)
return Job(self.service, sid)
def export(self, query, **params):
"""Runs a search and immediately starts streaming preview events.
This method returns a streaming handle to this job's events as an XML
document from the server. To parse this stream into usable Python objects,
pass the handle to :class:`splunklib.results.ResultsReader`::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
rr = results.ResultsReader(service.jobs.export("search * | head 5"))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print('%s: %s' % (result.type, result.message))
elif isinstance(result, dict):
# Normal events are returned as dicts
print(result)
assert not rr.is_preview
Running an export search is more efficient as it streams the results
directly to you, rather than having to write them out to disk and make
them available later. As soon as results are ready, you will receive
them.
The ``export`` method makes a single roundtrip to the server (as opposed
to two for :meth:`create` followed by :meth:`preview`), plus at most two
more if the ``autologin`` field of :func:`connect` is set to ``True``.
:raises `ValueError`: Raised for invalid queries.
:param query: The search query.
:type query: ``string``
:param params: Additional arguments (optional). For a list of valid
parameters, see `GET search/jobs/export
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#search.2Fjobs.2Fexport>`_
in the REST API documentation.
:type params: ``dict``
:return: The ``InputStream`` IO handle to raw XML returned from the server.
"""
if "exec_mode" in params:
raise TypeError("Cannot specify an exec_mode to export.")
params['segmentation'] = params.get('segmentation', 'none')
return self.post(path_segment="export",
search=query,
**params).body
def itemmeta(self):
"""There is no metadata available for class:``Jobs``.
Any call to this method raises a class:``NotSupportedError``.
:raises: class:``NotSupportedError``
"""
raise NotSupportedError()
def oneshot(self, query, **params):
"""Run a oneshot search and returns a streaming handle to the results.
The ``InputStream`` object streams XML fragments from the server. To
parse this stream into usable Python objects,
pass the handle to :class:`splunklib.results.ResultsReader`::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print('%s: %s' % (result.type, result.message))
elif isinstance(result, dict):
# Normal events are returned as dicts
print(result)
assert not rr.is_preview
The ``oneshot`` method makes a single roundtrip to the server (as opposed
to two for :meth:`create` followed by :meth:`results`), plus at most two more
if the ``autologin`` field of :func:`connect` is set to ``True``.
:raises ValueError: Raised for invalid queries.
:param query: The search query.
:type query: ``string``
:param params: Additional arguments (optional):
- "output_mode": Specifies the output format of the results (XML,
JSON, or CSV).
- "earliest_time": Specifies the earliest time in the time range to
search. The time string can be a UTC time (with fractional seconds),
a relative time specifier (to now), or a formatted time string.
- "latest_time": Specifies the latest time in the time range to
search. The time string can be a UTC time (with fractional seconds),
a relative time specifier (to now), or a formatted time string.
- "rf": Specifies one or more fields to add to the search.
:type params: ``dict``
:return: The ``InputStream`` IO handle to raw XML returned from the server.
"""
if "exec_mode" in params:
raise TypeError("Cannot specify an exec_mode to oneshot.")
params['segmentation'] = params.get('segmentation', 'none')
return self.post(search=query,
exec_mode="oneshot",
**params).body
class Loggers(Collection):
"""This class represents a collection of service logging categories.
Retrieve this collection using :meth:`Service.loggers`."""
def __init__(self, service):
Collection.__init__(self, service, PATH_LOGGER)
def itemmeta(self):
"""There is no metadata available for class:``Loggers``.
Any call to this method raises a class:``NotSupportedError``.
:raises: class:``NotSupportedError``
"""
raise NotSupportedError()
class Message(Entity):
def __init__(self, service, path, **kwargs):
Entity.__init__(self, service, path, **kwargs)
@property
def value(self):
"""Returns the message value.
:return: The message value.
:rtype: ``string``
"""
return self[self.name]
class ModularInputKind(Entity):
"""This class contains the different types of modular inputs. Retrieve this
collection using :meth:`Service.modular_input_kinds`.
"""
def __contains__(self, name):
args = self.state.content['endpoint']['args']
if name in args:
return True
else:
return Entity.__contains__(self, name)
def __getitem__(self, name):
args = self.state.content['endpoint']['args']
if name in args:
return args[name]
else:
return Entity.__getitem__(self, name)
@property
def arguments(self):
"""A dictionary of all the arguments supported by this modular input kind.
The keys in the dictionary are the names of the arguments. The values are
another dictionary giving the metadata about that argument. The possible
keys in that dictionary are ``"title"``, ``"description"``, ``"required_on_create"``,
``"required_on_edit"``, ``"data_type"``. Each value is a string. It should be one
of ``"true"`` or ``"false"`` for ``"required_on_create"`` and ``"required_on_edit"``,
and one of ``"boolean"``, ``"string"``, or ``"number"`` for ``"data_type"``.
:return: A dictionary describing the arguments this modular input kind takes.
:rtype: ``dict``
"""
return self.state.content['endpoint']['args']
def update(self, **kwargs):
"""Raises an error. Modular input kinds are read only."""
raise IllegalOperationException("Modular input kinds cannot be updated via the REST API.")
class SavedSearch(Entity):
"""This class represents a saved search."""
def __init__(self, service, path, **kwargs):
Entity.__init__(self, service, path, **kwargs)
def acknowledge(self):
"""Acknowledges the suppression of alerts from this saved search and
resumes alerting.
:return: The :class:`SavedSearch`.
"""
self.post("acknowledge")
return self
@property
def alert_count(self):
"""Returns the number of alerts fired by this saved search.
:return: The number of alerts fired by this saved search.
:rtype: ``integer``
"""
return int(self._state.content.get('triggered_alert_count', 0))
def dispatch(self, **kwargs):
"""Runs the saved search and returns the resulting search job.
:param `kwargs`: Additional dispatch arguments (optional). For details,
see the `POST saved/searches/{name}/dispatch
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#POST_saved.2Fsearches.2F.7Bname.7D.2Fdispatch>`_
endpoint in the REST API documentation.
:type kwargs: ``dict``
:return: The :class:`Job`.
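**Example** (a sketch; the saved search name is hypothetical)::
import splunklib.client as client
service = client.connect(...)
saved = service.saved_searches['Errors in the last hour']
job = saved.dispatch()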
"""
response = self.post("dispatch", **kwargs)
sid = _load_sid(response)
return Job(self.service, sid)
@property
def fired_alerts(self):
"""Returns the collection of fired alerts (a fired alert group)
corresponding to this saved search's alerts.
:raises IllegalOperationException: Raised when the search is not scheduled.
:return: A collection of fired alerts.
:rtype: :class:`AlertGroup`
"""
if self['is_scheduled'] == '0':
raise IllegalOperationException('Unscheduled saved searches have no alerts.')
c = Collection(
self.service,
self.service._abspath(PATH_FIRED_ALERTS + self.name,
owner=self._state.access.owner,
app=self._state.access.app,
sharing=self._state.access.sharing),
item=AlertGroup)
return c
def history(self):
"""Returns a list of search jobs corresponding to this saved search.
:return: A list of :class:`Job` objects.
"""
response = self.get("history")
entries = _load_atom_entries(response)
if entries is None: return []
jobs = []
for entry in entries:
job = Job(self.service, entry.title)
jobs.append(job)
return jobs
def update(self, search=None, **kwargs):
"""Updates the server with any changes you've made to the current saved
search along with any additional arguments you specify.
:param `search`: The search query (optional).
:type search: ``string``
:param `kwargs`: Additional arguments (optional). For a list of available
parameters, see `Saved search parameters
<http://dev.splunk.com/view/SP-CAAAEE5#savedsearchparams>`_
on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The :class:`SavedSearch`.
"""
# Updates to a saved search *require* that the search string be
# passed, so we pass the current search string if a value wasn't
# provided by the caller.
if search is None: search = self.content.search
Entity.update(self, search=search, **kwargs)
return self
def scheduled_times(self, earliest_time='now', latest_time='+1h'):
"""Returns the times when this search is scheduled to run.
By default this method returns the times in the next hour. For different
time ranges, set *earliest_time* and *latest_time*. For example,
for all times in the last day use "earliest_time=-1d" and
"latest_time=now".
:param earliest_time: The earliest time.
:type earliest_time: ``string``
:param latest_time: The latest time.
:type latest_time: ``string``
:return: The list of search times.
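**Example** (a sketch; the saved search name is hypothetical and assumed
to be scheduled)::
import splunklib.client as client
service = client.connect(...)
saved = service.saved_searches['Errors in the last hour']
for t in saved.scheduled_times(earliest_time='now', latest_time='+6h'):
print(t)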
"""
response = self.get("scheduled_times",
earliest_time=earliest_time,
latest_time=latest_time)
data = self._load_atom_entry(response)
rec = _parse_atom_entry(data)
times = [datetime.fromtimestamp(int(t))
for t in rec.content.scheduled_times]
return times
def suppress(self, expiration):
"""Skips any scheduled runs of this search in the next *expiration*
number of seconds.
:param expiration: The expiration period, in seconds.
:type expiration: ``integer``
:return: The :class:`SavedSearch`.
"""
self.post("suppress", expiration=expiration)
return self
@property
def suppressed(self):
"""Returns the number of seconds that this search is blocked from running
(possibly 0).
:return: The number of seconds.
:rtype: ``integer``
"""
r = self._run_action("suppress")
if r.suppressed == "1":
return int(r.expiration)
else:
return 0
def unsuppress(self):
"""Cancels suppression and makes this search run as scheduled.
:return: The :class:`SavedSearch`.
"""
self.post("suppress", expiration="0")
return self
class SavedSearches(Collection):
"""This class represents a collection of saved searches. Retrieve this
collection using :meth:`Service.saved_searches`."""
def __init__(self, service):
Collection.__init__(
self, service, PATH_SAVED_SEARCHES, item=SavedSearch)
def create(self, name, search, **kwargs):
""" Creates a saved search.
:param name: The name for the saved search.
:type name: ``string``
:param search: The search query.
:type search: ``string``
:param kwargs: Additional arguments (optional). For a list of available
parameters, see `Saved search parameters
<http://dev.splunk.com/view/SP-CAAAEE5#savedsearchparams>`_
on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The :class:`SavedSearches` collection.
"""
return Collection.create(self, name, search=search, **kwargs)
class Settings(Entity):
"""This class represents configuration settings for a Splunk service.
Retrieve this collection using :meth:`Service.settings`."""
def __init__(self, service, **kwargs):
Entity.__init__(self, service, "/services/server/settings", **kwargs)
# Updates on the settings endpoint are POSTed to server/settings/settings.
def update(self, **kwargs):
"""Updates the settings on the server using the arguments you provide.
:param kwargs: Additional arguments. For a list of valid arguments, see
`POST server/settings/{name}
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsystem#POST_server.2Fsettings.2F.7Bname.7D>`_
in the REST API documentation.
:type kwargs: ``dict``
:return: The :class:`Settings` entity.
"""
self.service.post("/services/server/settings/settings", **kwargs)
return self
class User(Entity):
"""This class represents a Splunk user.
"""
@property
def role_entities(self):
"""Returns a list of roles assigned to this user.
:return: The list of roles.
:rtype: ``list``
"""
return [self.service.roles[name] for name in self.content.roles]
# Splunk automatically lowercases new user names so we need to match that
# behavior here to ensure that the subsequent member lookup works correctly.
class Users(Collection):
"""This class represents the collection of Splunk users for this instance of
Splunk. Retrieve this collection using :meth:`Service.users`.
"""
def __init__(self, service):
Collection.__init__(self, service, PATH_USERS, item=User)
def __getitem__(self, key):
return Collection.__getitem__(self, key.lower())
def __contains__(self, name):
return Collection.__contains__(self, name.lower())
def create(self, username, password, roles, **params):
"""Creates a new user.
This function makes two roundtrips to the server, plus at most
two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param username: The username.
:type username: ``string``
:param password: The password.
:type password: ``string``
:param roles: A single role or list of roles for the user.
:type roles: ``string`` or ``list``
:param params: Additional arguments (optional). For a list of available
parameters, see `User authentication parameters
<http://dev.splunk.com/view/SP-CAAAEJ6#userauthparams>`_
on Splunk Developer Portal.
:type params: ``dict``
:return: The new user.
:rtype: :class:`User`
**Example**::
import splunklib.client as client
c = client.connect(...)
users = c.users
boris = users.create("boris", "securepassword", roles="user")
hilda = users.create("hilda", "anotherpassword", roles=["user","power"])
"""
if not isinstance(username, six.string_types):
raise ValueError("Invalid username: %s" % str(username))
username = username.lower()
self.post(name=username, password=password, roles=roles, **params)
# splunkd doesn't return the user in the POST response body,
# so we have to make a second round trip to fetch it.
response = self.get(username)
entry = _load_atom(response, XNAME_ENTRY).entry
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
urllib.parse.unquote(state.links.alternate),
state=state)
return entity
def delete(self, name):
""" Deletes the user and returns the resulting collection of users.
:param name: The name of the user to delete.
:type name: ``string``
:return: The :class:`Users` collection.
:rtype: :class:`Users`
"""
return Collection.delete(self, name.lower())
class Role(Entity):
"""This class represents a user role.
"""
def grant(self, *capabilities_to_grant):
"""Grants additional capabilities to this role.
:param capabilities_to_grant: Zero or more capabilities to grant this
role. For a list of capabilities, see
`Capabilities <http://dev.splunk.com/view/SP-CAAAEJ6#capabilities>`_
on Splunk Developer Portal.
:type capabilities_to_grant: ``string`` or ``list``
:return: The :class:`Role`.
**Example**::
service = client.connect(...)
role = service.roles['somerole']
role.grant('change_own_password', 'search')
"""
possible_capabilities = self.service.capabilities
for capability in capabilities_to_grant:
if capability not in possible_capabilities:
raise NoSuchCapability(capability)
new_capabilities = self['capabilities'] + list(capabilities_to_grant)
self.post(capabilities=new_capabilities)
return self
def revoke(self, *capabilities_to_revoke):
"""Revokes zero or more capabilities from this role.
:param capabilities_to_revoke: Zero or more capabilities to revoke from
this role. For a list of capabilities, see
`Capabilities <http://dev.splunk.com/view/SP-CAAAEJ6#capabilities>`_
on Splunk Developer Portal.
:type capabilities_to_revoke: ``string`` or ``list``
:return: The :class:`Role`.
**Example**::
service = client.connect(...)
role = service.roles['somerole']
role.revoke('change_own_password', 'search')
"""
possible_capabilities = self.service.capabilities
for capability in capabilities_to_revoke:
if capability not in possible_capabilities:
raise NoSuchCapability(capability)
old_capabilities = self['capabilities']
new_capabilities = []
for c in old_capabilities:
if c not in capabilities_to_revoke:
new_capabilities.append(c)
if new_capabilities == []:
new_capabilities = '' # Empty lists don't get passed in the body, so we have to force an empty argument.
self.post(capabilities=new_capabilities)
return self
class Roles(Collection):
"""This class represents the collection of roles in the Splunk instance.
Retrieve this collection using :meth:`Service.roles`."""
def __init__(self, service):
Collection.__init__(self, service, PATH_ROLES, item=Role)
def __getitem__(self, key):
return Collection.__getitem__(self, key.lower())
def __contains__(self, name):
return Collection.__contains__(self, name.lower())
def create(self, name, **params):
"""Creates a new role.
This function makes two roundtrips to the server, plus at most
two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param name: Name for the role.
:type name: ``string``
:param params: Additional arguments (optional). For a list of available
parameters, see `Roles parameters
<http://dev.splunk.com/view/SP-CAAAEJ6#rolesparams>`_
on Splunk Developer Portal.
:type params: ``dict``
:return: The new role.
:rtype: :class:`Role`
**Example**::
import splunklib.client as client
c = client.connect(...)
roles = c.roles
paltry = roles.create("paltry", imported_roles="user", defaultApp="search")
"""
if not isinstance(name, six.string_types):
raise ValueError("Invalid role name: %s" % str(name))
name = name.lower()
self.post(name=name, **params)
# splunkd doesn't return the role in the POST response body,
# so we have to make a second round trip to fetch it.
response = self.get(name)
entry = _load_atom(response, XNAME_ENTRY).entry
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
urllib.parse.unquote(state.links.alternate),
state=state)
return entity
def delete(self, name):
""" Deletes the role and returns the resulting collection of roles.
:param name: The name of the role to delete.
:type name: ``string``
:return: The :class:`Roles` collection.
:rtype: :class:`Roles`
"""
return Collection.delete(self, name.lower())
class Application(Entity):
"""Represents a locally-installed Splunk app."""
@property
def setupInfo(self):
"""Returns the setup information for the app.
:return: The setup information.
"""
return self.content.get('eai:setup', None)
def package(self):
""" Creates a compressed package of the app for archiving."""
return self._run_action("package")
def updateInfo(self):
"""Returns any update information that is available for the app."""
return self._run_action("update")
class KVStoreCollections(Collection):
def __init__(self, service):
Collection.__init__(self, service, 'storage/collections/config', item=KVStoreCollection)
def create(self, name, indexes=None, fields=None, **kwargs):
"""Creates a KV Store Collection.
:param name: name of collection to create
:type name: ``string``
:param indexes: dictionary of index definitions
:type indexes: ``dict``
:param fields: dictionary of field definitions
:type fields: ``dict``
:param kwargs: a dictionary of additional parameters specifying indexes and field definitions
:type kwargs: ``dict``
:return: Result of POST request
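**Example** (a sketch; the collection, index, and field names are
hypothetical)::
import splunklib.client as client
service = client.connect(...)
service.kvstore.create('sessions',
indexes={'idx_user': {'user': 1}},
fields={'user': 'string', 'expires': 'time'})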
"""
# Guard against the shared-mutable-default pitfall.
if indexes is None: indexes = {}
if fields is None: fields = {}
for k, v in six.iteritems(indexes):
if isinstance(v, dict):
v = json.dumps(v)
kwargs['index.' + k] = v
for k, v in six.iteritems(fields):
kwargs['field.' + k] = v
return self.post(name=name, **kwargs)
class KVStoreCollection(Entity):
@property
def data(self):
"""Returns data object for this Collection.
:rtype: :class:`KVStoreCollectionData`
"""
return KVStoreCollectionData(self)
def update_index(self, name, value):
"""Changes the definition of a KV Store index.
:param name: name of index to change
:type name: ``string``
:param value: new index definition
:type value: ``dict`` or ``string``
:return: Result of POST request
"""
kwargs = {}
kwargs['index.' + name] = value if isinstance(value, six.string_types) else json.dumps(value)
return self.post(**kwargs)
def update_field(self, name, value):
"""Changes the definition of a KV Store field.
:param name: name of field to change
:type name: ``string``
:param value: new field definition
:type value: ``string``
:return: Result of POST request
"""
kwargs = {}
kwargs['field.' + name] = value
return self.post(**kwargs)
class KVStoreCollectionData(object):
"""This class represents the data endpoint for a KVStoreCollection.
Retrieve using :meth:`KVStoreCollection.data`
"""
JSON_HEADER = [('Content-Type', 'application/json')]
def __init__(self, collection):
self.service = collection.service
self.collection = collection
self.owner, self.app, self.sharing = collection._proper_namespace()
self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/'
def _get(self, url, **kwargs):
return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
def _post(self, url, **kwargs):
return self.service.post(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
def _delete(self, url, **kwargs):
return self.service.delete(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
def query(self, **query):
"""
Gets the results of a query, with optional parameters sort, limit, skip, and fields.
:param query: Optional parameters. Valid options are sort, limit, skip, and fields
:type query: ``dict``
:return: Array of documents retrieved by query.
:rtype: ``array``
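**Example** (a sketch; the collection name and query are hypothetical)::
import json
import splunklib.client as client
service = client.connect(...)
data = service.kvstore['sessions'].data
docs = data.query(query=json.dumps({'user': 'boris'}), limit=10)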
"""
return json.loads(self._get('', **query).body.read().decode('utf-8'))
def query_by_id(self, id):
"""
Returns object with _id = id.
:param id: Value for ID. If not a string, it will be coerced to a string.
:type id: ``string``
:return: Document with id
:rtype: ``dict``
"""
return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8'))
def insert(self, data):
"""
Inserts item into this collection. An _id field will be generated if not assigned in the data.
:param data: Document to insert
:type data: ``string``
:return: _id of inserted object
:rtype: ``dict``
"""
return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def delete(self, query=None):
"""
Deletes all data in collection if query is absent. Otherwise, deletes all data matched by query.
:param query: Query to select documents to delete
:type query: ``string``
:return: Result of DELETE request
"""
return self._delete('', **({'query': query} if query else {}))
def delete_by_id(self, id):
"""
Deletes document that has _id = id.
:param id: id of document to delete
:type id: ``string``
:return: Result of DELETE request
"""
return self._delete(UrlEncoded(str(id)))
def update(self, id, data):
"""
Replaces document with _id = id with data.
:param id: _id of document to update
:type id: ``string``
:param data: the new document to insert
:type data: ``string``
:return: id of replaced document
:rtype: ``dict``
"""
return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_find(self, *dbqueries):
"""
Returns array of results from queries dbqueries.
:param dbqueries: Array of individual queries as dictionaries
:type dbqueries: ``array`` of ``dict``
:return: Results of each query
:rtype: ``array`` of ``array``
"""
if len(dbqueries) < 1:
raise Exception('Must have at least one query.')
data = json.dumps(dbqueries)
return json.loads(self._post('batch_find', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_save(self, *documents):
"""
Inserts or updates every document specified in documents.
:param documents: Array of documents to save as dictionaries
:type documents: ``array`` of ``dict``
:return: Results of update operation as overall stats
:rtype: ``dict``
"""
if len(documents) < 1:
raise Exception('Must have at least one document.')
data = json.dumps(documents)
return json.loads(self._post('batch_save', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
| mit |
ptcrypto/p2pool-adaptive | p2pool/util/variable.py | 270 | 2541 | import itertools
import weakref
from twisted.internet import defer, reactor
from twisted.python import failure, log
class Event(object):
def __init__(self):
self.observers = {}
self.id_generator = itertools.count()
self._once = None
self.times = 0
def run_and_watch(self, func):
func()
return self.watch(func)
def watch_weakref(self, obj, func):
# func must not contain a reference to obj!
watch_id = self.watch(lambda *args: func(obj_ref(), *args))
obj_ref = weakref.ref(obj, lambda _: self.unwatch(watch_id))
def watch(self, func):
id = self.id_generator.next()
self.observers[id] = func
return id
def unwatch(self, id):
self.observers.pop(id)
@property
def once(self):
res = self._once
if res is None:
res = self._once = Event()
return res
def happened(self, *event):
self.times += 1
once, self._once = self._once, None
for id, func in sorted(self.observers.iteritems()):
try:
func(*event)
except:
log.err(None, "Error while processing Event callbacks:")
if once is not None:
once.happened(*event)
def get_deferred(self, timeout=None):
once = self.once
df = defer.Deferred()
id1 = once.watch(lambda *event: df.callback(event))
if timeout is not None:
def do_timeout():
df.errback(failure.Failure(defer.TimeoutError('in Event.get_deferred')))
once.unwatch(id1)
once.unwatch(x)
delay = reactor.callLater(timeout, do_timeout)
x = once.watch(lambda *event: delay.cancel())
return df
class Variable(object):
def __init__(self, value):
self.value = value
self.changed = Event()
self.transitioned = Event()
def set(self, value):
if value == self.value:
return
oldvalue = self.value
self.value = value
self.changed.happened(value)
self.transitioned.happened(oldvalue, value)
@defer.inlineCallbacks
def get_when_satisfies(self, func):
while True:
if func(self.value):
defer.returnValue(self.value)
yield self.changed.once.get_deferred()
def get_not_none(self):
return self.get_when_satisfies(lambda val: val is not None)
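# A minimal usage sketch (hypothetical values; assumes a running Twisted
# reactor for the deferred-based helpers):
#
# v = Variable(0)
# watch_id = v.changed.watch(lambda value: log.msg('value is now %r' % (value,)))
# v.set(1) # fires 'changed' with (1,) and 'transitioned' with (0, 1)
# d = v.get_when_satisfies(lambda val: val >= 5) # Deferred fires once val >= 5
# v.changed.unwatch(watch_id)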
| gpl-3.0 |
lancms/lancms2 | fabfile.py | 1 | 5721 | from fabric.api import *
from fabric.colors import green, red
from fabric.contrib import files
import datetime
import os
def _environment ():
env.release = datetime.datetime.now().strftime ("%Y-%m-%d-%H%M%S")
env.project_name = 'lancms2'
# FIXME: hardcoded path:
env.path_home = '/opt/lancms2/'
env.path_root = os.path.join (env.path_home, 'deployment/')
env.path_current = os.path.join (env.path_root, 'current')
env.path_releases = os.path.join (env.path_root, 'releases/')
env.path_full_release = os.path.join (env.path_releases, env.release)
env.path_full_release_local_settings = os.path.join (env.path_full_release, 'lancms2/local_settings.py')
env.path_full_release_local_sqlite = os.path.join (env.path_full_release, 'lancms2.sql')
env.path_apache2_sites_available = '/etc/apache2/sites-available/'
env.filename_apacheconf = 'apache2-wsgi-virtualhost.conf'
env.virenv = 'source %s/virtualenv/bin/activate' % env.path_root
# FIXME: hardcoded user and group:
env.owner_user = 'www-data'
env.owner_group = 'lancms2'
def _upload_and_unpack ():
# local is on local host
local ('bzr export --format=tgz %s.tar.gz' % env.release);
# run is on remote host!
run ('mkdir -p %s' % env.path_full_release)
# put places local file on remote server
put ('%s.tar.gz' % env.release, env.path_releases, mode=0750)
local ('rm -f %s.tar.gz' % env.release)
with cd ('%s' % env.path_releases):
run ('tar -xzf %s.tar.gz' % env.release)
run ('rm %s.tar.gz' % env.release)
print (green ('Uploaded and unpacked'))
def _create_virtualenv ():
with cd ('%s' % env.path_root):
run ('virtualenv virtualenv -p python3')
run ('source %svirtualenv/bin/activate' % env.path_root)
print (green ('Created (or recreated) virtual environment'))
def _set_release_permissions ():
sudo ('chown %s:%s -R %s' % (env.owner_user, env.owner_group, env.path_full_release), shell=False)
sudo ('chmod g+w -R %s' % (env.path_full_release), shell=False)
print (green ('Set permissions for www-data on %s' % env.path_full_release))
def _install_requirements ():
with cd ('%s' % env.path_full_release):
run ('source %svirtualenv/bin/activate; pip install -r requirements.txt' % env.path_root)
print (green ('Installed requirements in virtual environment'))
def _symlink_local_settings ():
path_file = os.path.join (env.path_home, 'LOCAL_SETTINGS.py')
if files.exists (path_file):
run ('ln -s %s %s' % (path_file, env.path_full_release_local_settings))
print (green ('Symlinked local_settings'))
def _symlink_local_sqlite ():
path_file = os.path.join (env.path_home, 'LANCMS2.sql')
if files.exists (path_file):
run ('ln -s %s %s' % (path_file, env.path_full_release_local_sqlite))
print (green ('Symlinked local sqlite'))
def _symlink_current_release ():
if files.exists (env.path_current):
run ('rm -f %s' % env.path_current)
print (red ('Removed symlink for previous release'))
run ('ln -s %s %s' % (env.path_full_release, env.path_current))
print (green ('Symlinked current release %s to %s' % (env.release, env.path_current)))
def _check_hosts ():
if not env.hosts or env.hosts == "":
import sys
print ""
print red("Missing hosts. Printing helptext.")
help ()
sys.exit ()
def _install_local_requirements ():
path_file = os.path.join (env.path_home, 'REQUIREMENTS.txt')
if files.exists (path_file):
with cd ('%s' % env.path_full_release):
run ('source %svirtualenv/bin/activate; pip install -r %s' % (env.path_root, path_file))
print (green ('Installed local requirements (%s) in virtual environment' % path_file))
else:
print (red ('No local requirements (%s)' % path_file))
def _syncdb ():
with cd (env.path_current):
run ('source %svirtualenv/bin/activate; ./manage.py syncdb --noinput' % env.path_root)
print (green ('Ran syncdb'))
def _migrate ():
with cd (env.path_current):
run ('source %svirtualenv/bin/activate; ./manage.py migrate' % env.path_root)
print (green ('Ran migrate'))
def _restart_webserver ():
# FIXME: this could be too Debian specific for real reuse. I don't know, haven't used anything but Debian in a long while. :-)
sudo ('/usr/sbin/service apache2 restart', shell=False)
print (green ('Restarted apache2'))
def _configure_webserver ():
path_sfile = os.path.join (env.path_current, env.filename_apacheconf)
if files.exists (path_sfile):
path_dfile = os.path.join (env.path_apache2_sites_available, env.project_name)
sudo ('/bin/cp -f %s %s' % (path_sfile, path_dfile), shell=False)
sudo ('/usr/sbin/a2ensite %s' % env.project_name, shell=False)
print (green ('Configured apache2 and activated site'))
else:
print (red ("Didn't configure apache2, no config file found."))
def _collectstatic ():
with cd (env.path_current):
run ('source %svirtualenv/bin/activate; ./manage.py collectstatic --noinput' % env.path_root)
print (green ('Ran collectstatic'))
def _put_revision_number ():
local ('bzr revno > /tmp/%s' % env.release)
put ('/tmp/%s' % env.release, '%s/.bzr_rev' % env.path_full_release, mode=0750)
local ('rm /tmp/%s' % env.release)
def deploy ():
_check_hosts ()
_environment ()
_upload_and_unpack ()
_create_virtualenv ()
_install_requirements ()
_install_local_requirements ()
_symlink_local_settings ()
_symlink_local_sqlite ()
_symlink_current_release ()
_syncdb ()
_migrate ()
_collectstatic ()
_configure_webserver ()
_restart_webserver ()
_put_revision_number ()
_set_release_permissions ()
def help ():
print ""
print "deployment script for lancms2"
print ""
print "Only available command is 'deploy'."
print "Remember to define host (-H username@127.0.0.1)"
print "Please don't use this if you don't know what it does! No warranties!"
| gpl-2.0 |
leafclick/intellij-community | plugins/hg4idea/testData/bin/mercurial/archival.py | 94 | 10394 | # archival.py - revision archival for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
from node import hex
import match as matchmod
import cmdutil
import scmutil, util, encoding
import cStringIO, os, tarfile, time, zipfile
import zlib, gzip
import struct
import error
# from unzip source code:
_UNX_IFREG = 0x8000
_UNX_IFLNK = 0xa000
def tidyprefix(dest, kind, prefix):
'''choose prefix to use for names in archive. make sure prefix is
safe for consumers.'''
if prefix:
prefix = util.normpath(prefix)
else:
if not isinstance(dest, str):
raise ValueError('dest must be string if no prefix')
prefix = os.path.basename(dest)
lower = prefix.lower()
for sfx in exts.get(kind, []):
if lower.endswith(sfx):
prefix = prefix[:-len(sfx)]
break
lpfx = os.path.normpath(util.localpath(prefix))
prefix = util.pconvert(lpfx)
if not prefix.endswith('/'):
prefix += '/'
if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
raise util.Abort(_('archive prefix contains illegal components'))
return prefix
exts = {
'tar': ['.tar'],
'tbz2': ['.tbz2', '.tar.bz2'],
'tgz': ['.tgz', '.tar.gz'],
'zip': ['.zip'],
}
def guesskind(dest):
for kind, extensions in exts.iteritems():
if util.any(dest.endswith(ext) for ext in extensions):
return kind
return None
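# For example, guesskind('project-1.0.tar.gz') returns 'tgz', while an
# unrecognized extension such as 'project-1.0.cpio' yields None.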
class tarit(object):
'''write archive to tar file or stream. can write uncompressed,
or compress with gzip or bzip2.'''
class GzipFileWithTime(gzip.GzipFile):
def __init__(self, *args, **kw):
timestamp = None
if 'timestamp' in kw:
timestamp = kw.pop('timestamp')
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
gzip.GzipFile.__init__(self, *args, **kw)
def _write_gzip_header(self):
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
# Python 2.6 introduced self.name and deprecated self.filename
try:
fname = self.name
except AttributeError:
fname = self.filename
if fname and fname.endswith('.gz'):
fname = fname[:-3]
flags = 0
if fname:
flags = gzip.FNAME
self.fileobj.write(chr(flags))
gzip.write32u(self.fileobj, long(self.timestamp))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
def __init__(self, dest, mtime, kind=''):
self.mtime = mtime
self.fileobj = None
def taropen(name, mode, fileobj=None):
if kind == 'gz':
mode = mode[0]
if not fileobj:
fileobj = open(name, mode + 'b')
gzfileobj = self.GzipFileWithTime(name, mode + 'b',
zlib.Z_BEST_COMPRESSION,
fileobj, timestamp=mtime)
self.fileobj = gzfileobj
return tarfile.TarFile.taropen(name, mode, gzfileobj)
else:
return tarfile.open(name, mode + kind, fileobj)
if isinstance(dest, str):
self.z = taropen(dest, mode='w:')
else:
# Python 2.5-2.5.1 have a regression that requires a name arg
self.z = taropen(name='', mode='w|', fileobj=dest)
def addfile(self, name, mode, islink, data):
i = tarfile.TarInfo(name)
i.mtime = self.mtime
i.size = len(data)
if islink:
i.type = tarfile.SYMTYPE
i.mode = 0777
i.linkname = data
data = None
i.size = 0
else:
i.mode = mode
data = cStringIO.StringIO(data)
self.z.addfile(i, data)
def done(self):
self.z.close()
if self.fileobj:
self.fileobj.close()
class tellable(object):
'''provide tell method for zipfile.ZipFile when writing to http
response file object.'''
def __init__(self, fp):
self.fp = fp
self.offset = 0
def __getattr__(self, key):
return getattr(self.fp, key)
def write(self, s):
self.fp.write(s)
self.offset += len(s)
def tell(self):
return self.offset
class zipit(object):
'''write archive to zip file or stream. can write uncompressed,
or compressed with deflate.'''
def __init__(self, dest, mtime, compress=True):
if not isinstance(dest, str):
try:
dest.tell()
except (AttributeError, IOError):
dest = tellable(dest)
self.z = zipfile.ZipFile(dest, 'w',
compress and zipfile.ZIP_DEFLATED or
zipfile.ZIP_STORED)
# Python's zipfile module emits deprecation warnings if we try
# to store files with a date before 1980.
epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
if mtime < epoch:
mtime = epoch
self.mtime = mtime
self.date_time = time.gmtime(mtime)[:6]
def addfile(self, name, mode, islink, data):
i = zipfile.ZipInfo(name, self.date_time)
i.compress_type = self.z.compression
# unzip will not honor unix file modes unless file creator is
# set to unix (id 3).
i.create_system = 3
ftype = _UNX_IFREG
if islink:
mode = 0777
ftype = _UNX_IFLNK
i.external_attr = (mode | ftype) << 16L
# add "extended-timestamp" extra block, because zip archives
# without this will be extracted with unexpected timestamp,
# if TZ is not configured as GMT
i.extra += struct.pack('<hhBl',
0x5455, # block type: "extended-timestamp"
1 + 4, # size of this block
1, # "modification time is present"
int(self.mtime)) # last modification (UTC)
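# Layout of the block packed above ('<hhBl' = little-endian short, short,
# unsigned byte, signed long):
# 0x5455 -> extra-field header ID, stored as the bytes "UT"
# 5 -> data size: one flags byte plus a 4-byte mtime
# 1 -> flags: bit 0 set, i.e. "modification time is present"
# mtime -> last modification time, seconds since the epoch (UTC)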
self.z.writestr(i, data)
def done(self):
self.z.close()
class fileit(object):
'''write archive as files in directory.'''
def __init__(self, name, mtime):
self.basedir = name
self.opener = scmutil.opener(self.basedir)
def addfile(self, name, mode, islink, data):
if islink:
self.opener.symlink(data, name)
return
f = self.opener(name, "w", atomictemp=True)
f.write(data)
f.close()
destfile = os.path.join(self.basedir, name)
os.chmod(destfile, mode)
def done(self):
pass
archivers = {
'files': fileit,
'tar': tarit,
'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
'uzip': lambda name, mtime: zipit(name, mtime, False),
'zip': zipit,
}
def archive(repo, dest, node, kind, decode=True, matchfn=None,
prefix=None, mtime=None, subrepos=False):
'''create archive of repo as it was at node.
dest can be name of directory, name of archive file, or file
object to write archive to.
kind is type of archive to create.
decode tells whether to put files through decode filters from
hgrc.
matchfn is function to filter names of files to write to archive.
prefix is name of path to put before every archive member.'''
if kind == 'files':
if prefix:
raise util.Abort(_('cannot give prefix when archiving to files'))
else:
prefix = tidyprefix(dest, kind, prefix)
def write(name, mode, islink, getdata):
data = getdata()
if decode:
data = repo.wwritedata(name, data)
archiver.addfile(prefix + name, mode, islink, data)
if kind not in archivers:
raise util.Abort(_("unknown archive type '%s'") % kind)
ctx = repo[node]
archiver = archivers[kind](dest, mtime or ctx.date()[0])
if repo.ui.configbool("ui", "archivemeta", True):
def metadata():
base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch()))
tags = ''.join('tag: %s\n' % t for t in ctx.tags()
if repo.tagtype(t) == 'global')
if not tags:
repo.ui.pushbuffer()
opts = {'template': '{latesttag}\n{latesttagdistance}',
'style': '', 'patch': None, 'git': None}
cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
ltags, dist = repo.ui.popbuffer().split('\n')
tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
tags += 'latesttagdistance: %s\n' % dist
return base + tags
name = '.hg_archival.txt'
if not matchfn or matchfn(name):
write(name, 0644, False, metadata)
if matchfn:
files = [f for f in ctx.manifest().keys() if matchfn(f)]
else:
files = ctx.manifest().keys()
total = len(files)
if total:
files.sort()
repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
for i, f in enumerate(files):
ff = ctx.flags(f)
write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
repo.ui.progress(_('archiving'), i + 1, item=f,
unit=_('files'), total=total)
repo.ui.progress(_('archiving'), None)
if subrepos:
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
submatch = matchmod.narrowmatcher(subpath, matchfn)
total += sub.archive(repo.ui, archiver, prefix, submatch)
if total == 0:
raise error.Abort(_('no files match the archive pattern'))
archiver.done()
return total
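# Illustrative call (hypothetical repo object, names for intuition only):
# archive(repo, 'snapshot.tar.gz', repo['tip'].node(), 'tgz', prefix='snapshot/')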
| apache-2.0 |
TalShafir/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_igw_facts.py | 44 | 4662 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_igw_facts
short_description: Gather facts about internet gateways in AWS
description:
- Gather facts about internet gateways in AWS.
version_added: "2.3"
requirements: [ boto3 ]
author: "Nick Aslanidis (@naslanidis)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
internet_gateway_ids:
description:
- Get details of specific Internet Gateway ID. Provide this value as a list.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather facts about all Internet Gateways for an account or profile
ec2_vpc_igw_facts:
region: ap-southeast-2
profile: production
register: igw_facts
- name: Gather facts about a filtered list of Internet Gateways
ec2_vpc_igw_facts:
region: ap-southeast-2
profile: production
filters:
"tag:Name": "igw-123"
register: igw_facts
- name: Gather facts about a specific internet gateway by InternetGatewayId
ec2_vpc_igw_facts:
region: ap-southeast-2
profile: production
internet_gateway_ids: igw-c1231234
register: igw_facts
'''
RETURN = '''
internet_gateways:
description: The internet gateways for the account.
returned: always
type: list
sample: [
{
"attachments": [
{
"state": "available",
"vpc_id": "vpc-02123b67"
}
],
"internet_gateway_id": "igw-2123634d",
"tags": [
{
"key": "Name",
"value": "test-vpc-20-igw"
}
]
}
]
changed:
description: True if listing the internet gateways succeeds.
type: bool
returned: always
sample: "false"
'''
try:
import botocore
except ImportError:
pass # will be captured by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3)
def get_internet_gateway_info(internet_gateway):
internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
'Attachments': internet_gateway['Attachments'],
'Tags': internet_gateway['Tags']}
return internet_gateway_info
def list_internet_gateways(client, module):
params = dict()
params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
if module.params.get("internet_gateway_ids"):
params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
try:
all_internet_gateways = client.describe_internet_gateways(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
return [camel_dict_to_snake_dict(get_internet_gateway_info(igw))
for igw in all_internet_gateways['InternetGateways']]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(type='dict', default=dict()),
internet_gateway_ids=dict(type='list', default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore and boto3 are required.')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - " + str(e))
# call your function here
results = list_internet_gateways(connection, module)
module.exit_json(internet_gateways=results)
if __name__ == '__main__':
main()
| gpl-3.0 |
vlachoudis/sl4a | python/src/Lib/lib2to3/pgen2/grammar.py | 52 | 4947 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is an (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print "s2n"
pprint(self.symbol2number)
print "n2s"
pprint(self.number2symbol)
print "states"
pprint(self.states)
print "dfas"
pprint(self.dfas)
print "labels"
pprint(self.labels)
print "start", self.start
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
| apache-2.0 |
pypa/warehouse | warehouse/config.py | 1 | 19675 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import distutils.util
import enum
import os
import shlex
import transaction
from pyramid import renderers
from pyramid.config import Configurator as _Configurator
from pyramid.response import Response
from pyramid.security import Allow, Authenticated
from pyramid.tweens import EXCVIEW
from pyramid_rpc.xmlrpc import XMLRPCRenderer
from warehouse.errors import BasicAuthBreachedPassword
from warehouse.utils.static import ManifestCacheBuster
from warehouse.utils.wsgi import HostRewrite, ProxyFixer, VhmRootRemover
class Environment(enum.Enum):
production = "production"
development = "development"
class Configurator(_Configurator):
def add_wsgi_middleware(self, middleware, *args, **kwargs):
middlewares = self.get_settings().setdefault("wsgi.middlewares", [])
middlewares.append((middleware, args, kwargs))
def make_wsgi_app(self, *args, **kwargs):
# Get the WSGI application from the underlying configurator
app = super().make_wsgi_app(*args, **kwargs)
# Look to see if we have any WSGI middlewares configured.
for middleware, args, kw in self.get_settings()["wsgi.middlewares"]:
app = middleware(app, *args, **kw)
# Finally, return our now wrapped app
return app
class RootFactory:
__parent__ = None
__name__ = None
__acl__ = [
(Allow, "group:admins", "admin"),
(Allow, "group:moderators", "moderator"),
(Allow, Authenticated, "manage:user"),
]
def __init__(self, request):
pass
def require_https_tween_factory(handler, registry):
if not registry.settings.get("enforce_https", True):
return handler
def require_https_tween(request):
# If we have an :action URL and we're not using HTTPS, then we want to
# return a 403 error.
if request.params.get(":action", None) and request.scheme != "https":
resp = Response("SSL is required.", status=403, content_type="text/plain")
resp.status = "403 SSL is required"
resp.headers["X-Fastly-Error"] = "803"
return resp
return handler(request)
return require_https_tween
def activate_hook(request):
if request.path.startswith(("/_debug_toolbar/", "/static/")):
return False
return True
def commit_veto(request, response):
# By default pyramid_tm will veto the commit anytime request.exc_info is not None,
# we are going to copy that logic with one difference, we are still going to commit
# if the exception was for a BreachedPassword.
# TODO: We should probably use a registry or something instead of hardcoding this.
exc_info = getattr(request, "exc_info", None)
if exc_info is not None and not isinstance(exc_info[1], BasicAuthBreachedPassword):
return True
def template_view(config, name, route, template, route_kw=None, view_kw=None):
if route_kw is None:
route_kw = {}
if view_kw is None:
view_kw = {}
config.add_route(name, route, **route_kw)
config.add_view(renderer=template, route_name=name, **view_kw)
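# Typical use of the directive registered later as `add_template_view`
# (illustrative route and template names only):
# config.add_template_view("help", "/help/", "help.html")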
def maybe_set(settings, name, envvar, coercer=None, default=None):
if envvar in os.environ:
value = os.environ[envvar]
if coercer is not None:
value = coercer(value)
settings.setdefault(name, value)
elif default is not None:
settings.setdefault(name, default)
def maybe_set_compound(settings, base, name, envvar):
if envvar in os.environ:
value = shlex.split(os.environ[envvar])
kwargs = {k: v for k, v in (i.split("=") for i in value[1:])}
settings[".".join([base, name])] = value[0]
for key, value in kwargs.items():
settings[".".join([base, key])] = value
def configure(settings=None):
if settings is None:
settings = {}
# Add information about the current copy of the code.
maybe_set(settings, "warehouse.commit", "SOURCE_COMMIT", default="null")
# Set the environment from an environment variable, if one hasn't already
# been set.
maybe_set(
settings,
"warehouse.env",
"WAREHOUSE_ENV",
Environment,
default=Environment.production,
)
# Pull in default configuration from the environment.
maybe_set(settings, "warehouse.token", "WAREHOUSE_TOKEN")
maybe_set(settings, "warehouse.num_proxies", "WAREHOUSE_NUM_PROXIES", int)
maybe_set(settings, "warehouse.theme", "WAREHOUSE_THEME")
maybe_set(settings, "warehouse.domain", "WAREHOUSE_DOMAIN")
maybe_set(settings, "forklift.domain", "FORKLIFT_DOMAIN")
maybe_set(settings, "warehouse.legacy_domain", "WAREHOUSE_LEGACY_DOMAIN")
maybe_set(settings, "site.name", "SITE_NAME", default="Warehouse")
maybe_set(settings, "aws.key_id", "AWS_ACCESS_KEY_ID")
maybe_set(settings, "aws.secret_key", "AWS_SECRET_ACCESS_KEY")
maybe_set(settings, "aws.region", "AWS_REGION")
maybe_set(settings, "gcloud.credentials", "GCLOUD_CREDENTIALS")
maybe_set(settings, "gcloud.project", "GCLOUD_PROJECT")
maybe_set(
settings, "warehouse.release_files_table", "WAREHOUSE_RELEASE_FILES_TABLE"
)
maybe_set(settings, "warehouse.trending_table", "WAREHOUSE_TRENDING_TABLE")
maybe_set(settings, "celery.broker_url", "BROKER_URL")
maybe_set(settings, "celery.result_url", "REDIS_URL")
maybe_set(settings, "celery.scheduler_url", "REDIS_URL")
maybe_set(settings, "database.url", "DATABASE_URL")
maybe_set(settings, "elasticsearch.url", "ELASTICSEARCH_URL")
maybe_set(settings, "elasticsearch.url", "ELASTICSEARCH_SIX_URL")
maybe_set(settings, "sentry.dsn", "SENTRY_DSN")
maybe_set(settings, "sentry.frontend_dsn", "SENTRY_FRONTEND_DSN")
maybe_set(settings, "sentry.transport", "SENTRY_TRANSPORT")
maybe_set(settings, "sessions.url", "REDIS_URL")
maybe_set(settings, "ratelimit.url", "REDIS_URL")
maybe_set(settings, "sessions.secret", "SESSION_SECRET")
maybe_set(settings, "camo.url", "CAMO_URL")
maybe_set(settings, "camo.key", "CAMO_KEY")
maybe_set(settings, "docs.url", "DOCS_URL")
maybe_set(settings, "ga.tracking_id", "GA_TRACKING_ID")
maybe_set(settings, "statuspage.url", "STATUSPAGE_URL")
maybe_set(settings, "token.password.secret", "TOKEN_PASSWORD_SECRET")
maybe_set(settings, "token.email.secret", "TOKEN_EMAIL_SECRET")
maybe_set(settings, "token.two_factor.secret", "TOKEN_TWO_FACTOR_SECRET")
maybe_set(
settings,
"warehouse.xmlrpc.search.enabled",
"WAREHOUSE_XMLRPC_SEARCH",
coercer=distutils.util.strtobool,
default=True,
)
maybe_set(settings, "warehouse.xmlrpc.cache.url", "REDIS_URL")
maybe_set(
settings,
"warehouse.xmlrpc.client.ratelimit_string",
"XMLRPC_RATELIMIT_STRING",
default="3600 per hour",
)
maybe_set(settings, "token.password.max_age", "TOKEN_PASSWORD_MAX_AGE", coercer=int)
maybe_set(settings, "token.email.max_age", "TOKEN_EMAIL_MAX_AGE", coercer=int)
maybe_set(
settings,
"token.two_factor.max_age",
"TOKEN_TWO_FACTOR_MAX_AGE",
coercer=int,
default=300,
)
maybe_set(
settings,
"token.default.max_age",
"TOKEN_DEFAULT_MAX_AGE",
coercer=int,
default=21600, # 6 hours
)
maybe_set_compound(settings, "files", "backend", "FILES_BACKEND")
maybe_set_compound(settings, "docs", "backend", "DOCS_BACKEND")
maybe_set_compound(settings, "origin_cache", "backend", "ORIGIN_CACHE")
maybe_set_compound(settings, "mail", "backend", "MAIL_BACKEND")
maybe_set_compound(settings, "metrics", "backend", "METRICS_BACKEND")
maybe_set_compound(settings, "breached_passwords", "backend", "BREACHED_PASSWORDS")
maybe_set_compound(settings, "malware_check", "backend", "MALWARE_CHECK_BACKEND")
# Add the settings we use when the environment is set to development.
if settings["warehouse.env"] == Environment.development:
settings.setdefault("enforce_https", False)
settings.setdefault("pyramid.reload_assets", True)
settings.setdefault("pyramid.reload_templates", True)
settings.setdefault("pyramid.prevent_http_cache", True)
settings.setdefault("debugtoolbar.hosts", ["0.0.0.0/0"])
settings.setdefault(
"debugtoolbar.panels",
[
".".join(["pyramid_debugtoolbar.panels", panel])
for panel in [
"versions.VersionDebugPanel",
"settings.SettingsDebugPanel",
"headers.HeaderDebugPanel",
"request_vars.RequestVarsDebugPanel",
"renderings.RenderingsDebugPanel",
"logger.LoggingPanel",
"performance.PerformanceDebugPanel",
"routes.RoutesDebugPanel",
"sqla.SQLADebugPanel",
"tweens.TweensDebugPanel",
"introspection.IntrospectionDebugPanel",
]
],
)
# Actually setup our Pyramid Configurator with the values pulled in from
# the environment as well as the ones passed in to the configure function.
config = Configurator(settings=settings)
config.set_root_factory(RootFactory)
# Register support for services
config.include("pyramid_services")
# Register metrics
config.include(".metrics")
# Register our CSRF support. We do this here, immediately after we've
# created the Configurator instance so that we ensure to get our defaults
# set ASAP before anything else has a chance to set them and possibly call
# Configurator().commit()
config.include(".csrf")
# Include anything needed by the development environment.
if config.registry.settings["warehouse.env"] == Environment.development:
config.include("pyramid_debugtoolbar")
# Register our logging support
config.include(".logging")
# We'll want to use Jinja2 as our template system.
config.include("pyramid_jinja2")
# Include our filters
config.include(".filters")
# Including pyramid_mailer for sending emails through SMTP.
config.include("pyramid_mailer")
# We want to use newstyle gettext
config.add_settings({"jinja2.newstyle": True})
# Our translation strings are all in the "messages" domain
config.add_settings({"jinja2.i18n.domain": "messages"})
# We also want to use Jinja2 for .html templates as well, because we just
# assume that all templates will be using Jinja.
config.add_jinja2_renderer(".html")
# Sometimes our files are .txt files and we still want to use Jinja2 to
# render them.
config.add_jinja2_renderer(".txt")
# Anytime we want to render a .xml template, we'll also use Jinja.
config.add_jinja2_renderer(".xml")
# We need to enable our Client Side Include extension
config.get_settings().setdefault(
"jinja2.extensions", ["warehouse.utils.html.ClientSideIncludeExtension"]
)
# We'll want to configure some filters for Jinja2 as well.
filters = config.get_settings().setdefault("jinja2.filters", {})
filters.setdefault("format_classifiers", "warehouse.filters:format_classifiers")
filters.setdefault("classifier_id", "warehouse.filters:classifier_id")
filters.setdefault("format_tags", "warehouse.filters:format_tags")
filters.setdefault("json", "warehouse.filters:tojson")
filters.setdefault("camoify", "warehouse.filters:camoify")
filters.setdefault("shorten_number", "warehouse.filters:shorten_number")
filters.setdefault("urlparse", "warehouse.filters:urlparse")
filters.setdefault("contains_valid_uris", "warehouse.filters:contains_valid_uris")
filters.setdefault("format_package_type", "warehouse.filters:format_package_type")
filters.setdefault("parse_version", "warehouse.filters:parse_version")
filters.setdefault("localize_datetime", "warehouse.filters:localize_datetime")
# We also want to register some global functions for Jinja
jglobals = config.get_settings().setdefault("jinja2.globals", {})
jglobals.setdefault("is_valid_uri", "warehouse.utils.http:is_valid_uri")
jglobals.setdefault("gravatar", "warehouse.utils.gravatar:gravatar")
jglobals.setdefault("gravatar_profile", "warehouse.utils.gravatar:profile")
jglobals.setdefault("now", "warehouse.utils:now")
# And some enums to reuse in the templates
jglobals.setdefault(
"RoleInvitationStatus", "warehouse.packaging.models:RoleInvitationStatus"
)
# We'll store all of our templates in one location, warehouse/templates
# so we'll go ahead and add that to the Jinja2 search path.
config.add_jinja2_search_path("warehouse:templates", name=".html")
config.add_jinja2_search_path("warehouse:templates", name=".txt")
config.add_jinja2_search_path("warehouse:templates", name=".xml")
# We want to configure our JSON renderer to sort the keys, and also to use
# an ultra compact serialization format.
config.add_renderer("json", renderers.JSON(sort_keys=True, separators=(",", ":")))
# Configure retry support.
config.add_settings({"retry.attempts": 3})
config.include("pyramid_retry")
# Configure our transaction handling so that each request gets its own
# transaction handler and the lifetime of the transaction is tied to the
# lifetime of the request.
config.add_settings(
{
"tm.manager_hook": lambda request: transaction.TransactionManager(),
"tm.activate_hook": activate_hook,
"tm.commit_veto": commit_veto,
"tm.annotate_user": False,
}
)
config.include("pyramid_tm")
# Register our XMLRPC service
config.include(".legacy.api.xmlrpc")
# Register our XMLRPC cache
config.include(".legacy.api.xmlrpc.cache")
# Register support for XMLRPC and override its renderer to allow
# specifying custom dumps arguments.
config.include("pyramid_rpc.xmlrpc")
config.add_renderer("xmlrpc", XMLRPCRenderer(allow_none=True))
# Register support for our legacy action URLs
config.include(".legacy.action_routing")
# Register support for our domain predicates
config.include(".domain")
# Register support for template views.
config.add_directive("add_template_view", template_view, action_wrap=False)
# Register support for internationalization and localization
config.include(".i18n")
# Register the configuration for the PostgreSQL database.
config.include(".db")
# Register the support for Celery Tasks
config.include(".tasks")
# Register support for our rate limiting mechanisms
config.include(".rate_limiting")
config.include(".static")
config.include(".policy")
config.include(".search")
# Register the support for AWS and Google Cloud
config.include(".aws")
config.include(".gcloud")
# Register our session support
config.include(".sessions")
# Register our support for http and origin caching
config.include(".cache.http")
config.include(".cache.origin")
# Register support for sending emails
config.include(".email")
# Register our authentication support.
config.include(".accounts")
# Register support for Macaroon based authentication
config.include(".macaroons")
# Register support for malware checks
config.include(".malware")
# Register logged-in views
config.include(".manage")
# Allow the packaging app to register any services it has.
config.include(".packaging")
# Configure redirection support
config.include(".redirects")
# Register all our URL routes for Warehouse.
config.include(".routes")
# Include our admin application
config.include(".admin")
# Register forklift, at least until we split it out into its own project.
config.include(".forklift")
# Block non HTTPS requests for the legacy ?:action= routes when they are
# sent via POST.
config.add_tween("warehouse.config.require_https_tween_factory")
# Enable compression of our HTTP responses
config.add_tween(
"warehouse.utils.compression.compression_tween_factory",
over=[
"warehouse.cache.http.conditional_http_tween_factory",
"pyramid_debugtoolbar.toolbar_tween_factory",
EXCVIEW,
],
)
# Enable Warehouse to serve our static files
prevent_http_cache = config.get_settings().get("pyramid.prevent_http_cache", False)
config.add_static_view(
"static",
"warehouse:static/dist/",
# Don't cache at all if prevent_http_cache is true, else we'll cache
# the files for 10 years.
cache_max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.add_cache_buster(
"warehouse:static/dist/",
ManifestCacheBuster(
"warehouse:static/dist/manifest.json",
reload=config.registry.settings["pyramid.reload_assets"],
strict=not prevent_http_cache,
),
)
config.whitenoise_serve_static(
autorefresh=prevent_http_cache,
max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.whitenoise_add_files("warehouse:static/dist/", prefix="/static/")
config.whitenoise_add_manifest(
"warehouse:static/dist/manifest.json", prefix="/static/"
)
# Enable support of passing certain values like remote host, client
# address, and protocol support in from an outer proxy to the application.
config.add_wsgi_middleware(
ProxyFixer,
token=config.registry.settings["warehouse.token"],
num_proxies=config.registry.settings.get("warehouse.num_proxies", 1),
)
# Protect against cache poisoning via the X-Vhm-Root headers.
config.add_wsgi_middleware(VhmRootRemover)
# Fix our host header when getting sent upload.pypi.io as a HOST.
# TODO: Remove this, this is at the wrong layer.
config.add_wsgi_middleware(HostRewrite)
# We want Sentry to be the last things we add here so that it's the outer
# most WSGI middleware.
config.include(".sentry")
# Register Content-Security-Policy service
config.include(".csp")
# Register Referrer-Policy service
config.include(".referrer_policy")
config.add_settings({"http": {"verify": "/etc/ssl/certs/"}})
config.include(".http")
# Add our theme if one was configured
if config.get_settings().get("warehouse.theme"):
config.include(config.get_settings()["warehouse.theme"])
# Scan everything for configuration
config.scan(
ignore=["warehouse.migrations.env", "warehouse.celery", "warehouse.wsgi"]
)
# Sanity check our request and responses.
# Note: It is very important that this go last. We need everything else that might
# have added a tween to be registered prior to this.
config.include(".sanity")
# Finally, commit all of our changes
config.commit()
return config
| apache-2.0 |
Stanford-Online/edx-platform | openedx/core/djangoapps/verified_track_content/tests/test_models.py | 16 | 13466 | """
Tests for Verified Track Cohorting models
"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=no-member
import ddt
import mock
from mock import patch
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.course_groups.cohorts import (
DEFAULT_COHORT_NAME,
CourseCohort,
add_cohort,
get_cohort,
set_course_cohorted
)
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.models import CourseMode
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..models import DEFAULT_VERIFIED_COHORT_NAME, VerifiedTrackCohortedCourse
from ..tasks import sync_cohort_with_mode
class TestVerifiedTrackCohortedCourse(TestCase):
"""
Tests that the configuration works as expected.
"""
SAMPLE_COURSE = 'edX/Test_Course/Run'
def test_course_enabled(self):
course_key = CourseKey.from_string(self.SAMPLE_COURSE)
# Test when no configuration exists
self.assertFalse(VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key))
# Enable for a course
config = VerifiedTrackCohortedCourse.objects.create(course_key=course_key, enabled=True)
config.save()
self.assertTrue(VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key))
# Disable for the course
config.enabled = False
config.save()
self.assertFalse(VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key))
def test_unicode(self):
course_key = CourseKey.from_string(self.SAMPLE_COURSE)
# Enable for a course
config = VerifiedTrackCohortedCourse.objects.create(course_key=course_key, enabled=True)
config.save()
self.assertEqual(unicode(config), "Course: {}, enabled: True".format(self.SAMPLE_COURSE))
def test_verified_cohort_name(self):
cohort_name = 'verified cohort'
course_key = CourseKey.from_string(self.SAMPLE_COURSE)
config = VerifiedTrackCohortedCourse.objects.create(
course_key=course_key, enabled=True, verified_cohort_name=cohort_name
)
config.save()
self.assertEqual(VerifiedTrackCohortedCourse.verified_cohort_name_for_course(course_key), cohort_name)
def test_unset_verified_cohort_name(self):
fake_course_id = 'fake/course/key'
course_key = CourseKey.from_string(fake_course_id)
self.assertEqual(VerifiedTrackCohortedCourse.verified_cohort_name_for_course(course_key), None)
@skip_unless_lms
@ddt.ddt
class TestMoveToVerified(SharedModuleStoreTestCase):
""" Tests for the post-save listener. """
@classmethod
def setUpClass(cls):
super(TestMoveToVerified, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestMoveToVerified, self).setUp()
self.user = UserFactory()
# Spy on number of calls to celery task.
celery_task_patcher = patch.object(
sync_cohort_with_mode, 'apply_async',
mock.Mock(wraps=sync_cohort_with_mode.apply_async)
)
self.mocked_celery_task = celery_task_patcher.start()
self.addCleanup(celery_task_patcher.stop)
def _enable_cohorting(self):
""" Turn on cohorting in the course. """
set_course_cohorted(self.course.id, True)
def _create_verified_cohort(self, name=DEFAULT_VERIFIED_COHORT_NAME):
""" Create a verified cohort. """
add_cohort(self.course.id, name, CourseCohort.MANUAL)
def _create_named_random_cohort(self, name):
""" Create a random cohort with the supplied name. """
return add_cohort(self.course.id, name, CourseCohort.RANDOM)
def _enable_verified_track_cohorting(self, cohort_name=None):
""" Enable verified track cohorting for the default course. """
if cohort_name:
config = VerifiedTrackCohortedCourse.objects.create(
course_key=self.course.id, enabled=True, verified_cohort_name=cohort_name
)
else:
config = VerifiedTrackCohortedCourse.objects.create(course_key=self.course.id, enabled=True)
config.save()
def _enroll_in_course(self):
""" Enroll self.user in self.course. """
self.enrollment = CourseEnrollmentFactory(course_id=self.course.id, user=self.user)
def _upgrade_enrollment(self, mode=CourseMode.VERIFIED):
""" Upgrade the default enrollment to verified. """
self.enrollment.update_enrollment(mode=mode)
def _verify_no_automatic_cohorting(self):
""" Check that upgrading self.user to verified does not move them into a cohort. """
self._enroll_in_course()
self.assertIsNone(get_cohort(self.user, self.course.id, assign=False))
self._upgrade_enrollment()
self.assertIsNone(get_cohort(self.user, self.course.id, assign=False))
self.assertEqual(0, self.mocked_celery_task.call_count)
def _unenroll(self):
""" Unenroll self.user from self.course. """
self.enrollment.unenroll(self.user, self.course.id)
def _reenroll(self):
""" Re-enroll the learner into mode AUDIT. """
self.enrollment.activate()
self.enrollment.change_mode(CourseMode.AUDIT)
@mock.patch('openedx.core.djangoapps.verified_track_content.models.log.error')
def test_automatic_cohorting_disabled(self, error_logger):
"""
If the VerifiedTrackCohortedCourse feature is disabled for a course, enrollment mode changes do not move
learners into a cohort.
"""
# Enable cohorting and create a verified cohort.
self._enable_cohorting()
self._create_verified_cohort()
# But do not enable the verified track cohorting feature.
self.assertFalse(VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(self.course.id))
self._verify_no_automatic_cohorting()
# No logging occurs if feature is disabled for course.
self.assertFalse(error_logger.called)
@mock.patch('openedx.core.djangoapps.verified_track_content.models.log.error')
def test_cohorting_enabled_course_not_cohorted(self, error_logger):
"""
If the VerifiedTrackCohortedCourse feature is enabled for a course, but the course is not cohorted,
an error is logged and enrollment mode changes do not move learners into a cohort.
"""
# Enable verified track cohorting feature, but course has not been marked as cohorting.
self._enable_verified_track_cohorting()
self.assertTrue(VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(self.course.id))
self._verify_no_automatic_cohorting()
self.assertTrue(error_logger.called)
self.assertIn("course is not cohorted", error_logger.call_args[0][0])
@mock.patch('openedx.core.djangoapps.verified_track_content.models.log.error')
def test_cohorting_enabled_missing_verified_cohort(self, error_logger):
"""
If the VerifiedTrackCohortedCourse feature is enabled for a course and the course is cohorted,
but the course does not have a verified cohort, an error is logged and enrollment mode changes do not
move learners into a cohort.
"""
# Enable cohorting, but do not create the verified cohort.
self._enable_cohorting()
# Enable verified track cohorting feature
self._enable_verified_track_cohorting()
self.assertTrue(VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(self.course.id))
self._verify_no_automatic_cohorting()
self.assertTrue(error_logger.called)
error_message = "cohort named '%s' does not exist"
self.assertIn(error_message, error_logger.call_args[0][0])
@ddt.data(CourseMode.VERIFIED, CourseMode.CREDIT_MODE)
def test_automatic_cohorting_enabled(self, upgrade_mode):
"""
If the VerifiedTrackCohortedCourse feature is enabled for a course (with course cohorting enabled
with an existing verified cohort), enrollment in the verified track automatically moves learners
into the verified cohort.
"""
# Enable cohorting and create a verified cohort.
self._enable_cohorting()
self._create_verified_cohort()
# Enable verified track cohorting feature
self._enable_verified_track_cohorting()
self.assertTrue(VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(self.course.id))
self._enroll_in_course()
self.assertEqual(2, self.mocked_celery_task.call_count)
self.assertEqual(DEFAULT_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
self._upgrade_enrollment(upgrade_mode)
self.assertEqual(4, self.mocked_celery_task.call_count)
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
def test_cohorting_enabled_multiple_random_cohorts(self):
"""
If the VerifiedTrackCohortedCourse feature is enabled for a course, and the course is cohorted
with > 1 random cohorts, the learner is randomly assigned to one of the random
cohorts when in the audit track.
"""
# Enable cohorting, and create the verified cohort.
self._enable_cohorting()
self._create_verified_cohort()
# Create two random cohorts.
self._create_named_random_cohort("Random 1")
self._create_named_random_cohort("Random 2")
# Enable verified track cohorting feature
self._enable_verified_track_cohorting()
self._enroll_in_course()
self.assertIn(get_cohort(self.user, self.course.id, assign=False).name, ["Random 1", "Random 2"])
self._upgrade_enrollment()
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
self._unenroll()
self._reenroll()
self.assertIn(get_cohort(self.user, self.course.id, assign=False).name, ["Random 1", "Random 2"])
def test_unenrolled(self):
"""
Test that un-enrolling and re-enrolling works correctly. This is important because usually
learners maintain their previously assigned cohort on re-enrollment.
"""
# Enable verified track cohorting feature and enroll in the verified track
self._enable_cohorting()
self._create_verified_cohort()
self._enable_verified_track_cohorting()
self._enroll_in_course()
self._upgrade_enrollment()
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
# Un-enroll from the course and then re-enroll
self._unenroll()
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
self._reenroll()
self.assertEqual(DEFAULT_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
self._upgrade_enrollment()
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
def test_custom_verified_cohort_name(self):
"""
Test that enrolling in verified works correctly when the "verified cohort" has a custom name.
"""
custom_cohort_name = 'special verified cohort'
self._enable_cohorting()
self._create_verified_cohort(name=custom_cohort_name)
self._enable_verified_track_cohorting(cohort_name=custom_cohort_name)
self._enroll_in_course()
self._upgrade_enrollment()
self.assertEqual(custom_cohort_name, get_cohort(self.user, self.course.id, assign=False).name)
def test_custom_default_cohort_name(self):
"""
Test that enrolling and un-enrolling works correctly when the single cohort
of type random has a different name from "Default Group".
"""
random_cohort_name = "custom random cohort"
self._enable_cohorting()
self._create_verified_cohort()
default_cohort = self._create_named_random_cohort(random_cohort_name)
self._enable_verified_track_cohorting()
self._enroll_in_course()
self.assertEqual(random_cohort_name, get_cohort(self.user, self.course.id, assign=False).name)
self._upgrade_enrollment()
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
# Un-enroll from the course. The learner stays in the verified cohort, but is no longer active.
self._unenroll()
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
# Change the name of the "default" cohort.
modified_cohort_name = "renamed random cohort"
default_cohort.name = modified_cohort_name
default_cohort.save()
# Re-enroll in the course, which will downgrade the learner to audit.
self._reenroll()
self.assertEqual(modified_cohort_name, get_cohort(self.user, self.course.id, assign=False).name)
self._upgrade_enrollment()
self.assertEqual(DEFAULT_VERIFIED_COHORT_NAME, get_cohort(self.user, self.course.id, assign=False).name)
| agpl-3.0 |
awkspace/ansible | lib/ansible/modules/network/ios/ios_ntp.py | 24 | 8785 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: ios_ntp
extends_documentation_fragment: ios
version_added: "2.8"
short_description: Manages core NTP configuration.
description:
- Manages core NTP configuration.
author:
- Federico Olivieri (@Federico87)
options:
server:
description:
- Network address of NTP server.
source_int:
description:
- Source interface for NTP packets.
acl:
description:
- ACL for peer/server access restriction.
logging:
description:
- Enable NTP logs. Data type boolean.
type: bool
default: False
auth:
description:
- Enable NTP authentication. Data type boolean.
type: bool
default: False
auth_key:
description:
- md5 NTP authentication key of type 7.
key_id:
description:
- auth_key id. Data type string.
state:
description:
- Manage the state of the resource.
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
# Set new NTP server and source interface
- ios_ntp:
server: 10.0.255.10
source_int: Loopback0
logging: false
state: present
# Remove NTP ACL and logging
- ios_ntp:
acl: NTP_ACL
logging: true
state: absent
# Set NTP authentication
- ios_ntp:
key_id: 10
auth_key: 15435A030726242723273C21181319000A
auth: true
state: present
# Set new NTP configuration
- ios_ntp:
server: 10.0.255.10
source_int: Loopback0
acl: NTP_ACL
logging: true
key_id: 10
auth_key: 15435A030726242723273C21181319000A
auth: true
state: present
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["no ntp server 10.0.255.10", "no ntp source Loopback0"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
def parse_server(line, dest):
if dest == 'server':
match = re.search(r'(ntp server )(\d+\.\d+\.\d+\.\d+)', line, re.M)
if match:
server = match.group(2)
return server
def parse_source_int(line, dest):
if dest == 'source':
match = re.search(r'(ntp source )(\S+)', line, re.M)
if match:
source = match.group(2)
return source
def parse_acl(line, dest):
if dest == 'access-group':
match = re.search(r'ntp access-group (?:peer|serve)(?:\s+)(\S+)', line, re.M)
if match:
acl = match.group(1)
return acl
def parse_logging(line, dest):
if dest == 'logging':
logging = dest
return logging
def parse_auth_key(line, dest):
if dest == 'authentication-key':
match = re.search(r'(ntp authentication-key \d+ md5 )(\w+)', line, re.M)
if match:
auth_key = match.group(2)
return auth_key
def parse_key_id(line, dest):
if dest == 'trusted-key':
match = re.search(r'(ntp trusted-key )(\d+)', line, re.M)
if match:
auth_key = match.group(2)
return auth_key
def parse_auth(dest):
if dest == 'authenticate':
return dest
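# Illustrative parses (sample configuration lines only):
# parse_server('ntp server 10.0.255.10', 'server') -> '10.0.255.10'
# parse_source_int('ntp source Loopback0', 'source') -> 'Loopback0'
# parse_key_id('ntp trusted-key 10', 'trusted-key') -> '10'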
def map_config_to_obj(module):
obj_dict = {}
obj = []
server_list = []
config = get_config(module, flags=['| include ntp'])
for line in config.splitlines():
match = re.search(r'ntp (\S+)', line, re.M)
if match:
dest = match.group(1)
server = parse_server(line, dest)
source_int = parse_source_int(line, dest)
acl = parse_acl(line, dest)
logging = parse_logging(line, dest)
auth = parse_auth(dest)
auth_key = parse_auth_key(line, dest)
key_id = parse_key_id(line, dest)
if server:
server_list.append(server)
if source_int:
obj_dict['source_int'] = source_int
if acl:
obj_dict['acl'] = acl
if logging:
obj_dict['logging'] = True
if auth:
obj_dict['auth'] = True
if auth_key:
obj_dict['auth_key'] = auth_key
if key_id:
obj_dict['key_id'] = key_id
obj_dict['server'] = server_list
obj.append(obj_dict)
return obj
def map_params_to_obj(module):
obj = []
obj.append({
'state': module.params['state'],
'server': module.params['server'],
'source_int': module.params['source_int'],
'logging': module.params['logging'],
'acl': module.params['acl'],
'auth': module.params['auth'],
'auth_key': module.params['auth_key'],
'key_id': module.params['key_id']
})
return obj
def map_obj_to_commands(want, have, module):
commands = list()
server_have = have[0].get('server', None)
source_int_have = have[0].get('source_int', None)
acl_have = have[0].get('acl', None)
logging_have = have[0].get('logging', None)
auth_have = have[0].get('auth', None)
auth_key_have = have[0].get('auth_key', None)
key_id_have = have[0].get('key_id', None)
for w in want:
server = w['server']
source_int = w['source_int']
acl = w['acl']
logging = w['logging']
state = w['state']
auth = w['auth']
auth_key = w['auth_key']
key_id = w['key_id']
if state == 'absent':
if server_have and server in server_have:
commands.append('no ntp server {0}'.format(server))
if source_int and source_int_have:
commands.append('no ntp source {0}'.format(source_int))
if acl and acl_have:
commands.append('no ntp access-group peer {0}'.format(acl))
if logging is True and logging_have:
commands.append('no ntp logging')
if auth is True and auth_have:
commands.append('no ntp authenticate')
if key_id and key_id_have:
commands.append('no ntp trusted-key {0}'.format(key_id))
if auth_key and auth_key_have:
if key_id and key_id_have:
commands.append('no ntp authentication-key {0} md5 {1} 7'.format(key_id, auth_key))
elif state == 'present':
if server is not None and server not in server_have:
commands.append('ntp server {0}'.format(server))
if source_int is not None and source_int != source_int_have:
commands.append('ntp source {0}'.format(source_int))
if acl is not None and acl != acl_have:
commands.append('ntp access-group peer {0}'.format(acl))
if logging is not None and logging != logging_have and logging is not False:
commands.append('ntp logging')
if auth is not None and auth != auth_have and auth is not False:
commands.append('ntp authenticate')
if key_id is not None and key_id != key_id_have:
commands.append('ntp trusted-key {0}'.format(key_id))
if auth_key is not None and auth_key != auth_key_have:
if key_id is not None:
commands.append('ntp authentication-key {0} md5 {1} 7'.format(key_id, auth_key))
return commands
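# Illustrative: with state=present and server='10.0.255.10' absent from the
# device config, the returned commands include 'ntp server 10.0.255.10';
# with state=absent and that server configured, they include
# 'no ntp server 10.0.255.10'.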
def main():
argument_spec = dict(
server=dict(),
source_int=dict(),
acl=dict(),
logging=dict(type='bool', default=False),
auth=dict(type='bool', default=False),
auth_key=dict(),
key_id=dict(),
state=dict(choices=['absent', 'present'], default='present')
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
garywu/pypedream | pypedream/plot/_filt.py | 1 | 2685 | import numpy
has_matplotlib = True
try:
from matplotlib import pyplot, figure
except ImportError:
has_matplotlib = False
from dagpype._core import filters
def _make_relay_call(fn, name):
def new_fn(*args, **kwargs):
@filters
def _dagpype_internal_fn_act(target):
try:
while True:
target.send((yield))
except GeneratorExit:
fn(*args, **kwargs)
target.close()
return _dagpype_internal_fn_act
new_fn.__name__ = name
new_fn.__doc__ = """
Convenience filter utility for corresponding function in pyplot.
Example:
>>> source([1, 2, 3, 4]) | plot.xlabel('x') | plot.ylabel('y') | plot.title('xy') | (plot.plot() | plot.savefig('foo.png'))
"""
return new_fn
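# Illustrative: after the module-level loop below runs, a call such as
# plot.title('xy') returns a filter that forwards every element downstream
# unchanged and invokes pyplot.title('xy') once the upstream is exhausted.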
_try_fns = [
'annotate',
'arrow',
'autogen_docstring',
'autoscale',
'autumn',
'axes',
'axhline',
'axhspan',
'axis',
'axvline',
'axvspan',
'barbs',
'bone',
'box',
'broken_barh',
'cla',
'clabel',
'clf',
'clim',
'cm',
'cohere',
'colorbar',
'colormaps',
'colors',
'connect',
'cool',
'copper',
'csd',
'dedent',
'delaxes',
'docstring',
'draw',
'figaspect',
'figimage',
'figlegend',
'figtext',
'figure',
'fill',
'fill_between',
'fill_betweenx',
'flag',
'gca',
'gcf',
'gci',
'get',
'gray',
'grid',
'hold',
'hot',
'hsv',
'jet',
'locator_params',
'margins',
'minorticks_off',
'minorticks_on',
'normalize',
'over',
'pcolor',
'pcolormesh',
'pink',
'plotfile',
'plotting',
'polar',
'prism',
'psd',
'quiver',
'quiverkey',
'rc',
'register_cmap',
'rgrids',
'sca',
'sci',
'set_cmap',
'setp',
'silent_list',
'specgram',
'spectral',
'spring',
'spy',
'stem',
'step',
'subplot',
'subplot2grid',
'subplot_tool',
'subplots',
'subplots_adjust',
'summer',
'suptitle',
'table',
'text',
'thetagrids',
'tick_params',
'ticklabel_format',
'tight_layout',
'title',
'tricontour',
'tricontourf',
'tripcolor',
'triplot',
'twinx',
'twiny',
'winter',
'xlabel',
'xlim',
'xscale',
'xticks',
'ylabel',
'ylim',
'yscale',
'yticks']
_fns = []
if has_matplotlib:
for fn in _try_fns:
try:
exec('%s = _make_relay_call(pyplot.%s, "%s")' % (fn, fn, fn))
_fns.append(fn)
except AttributeError:
pass
| bsd-3-clause |
indictranstech/fbd_frappe | frappe/templates/pages/website_theme.py | 38 | 1670 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import re
import frappe
from frappe.website.utils import get_shade
from frappe.website.doctype.website_theme.website_theme import get_active_theme
no_sitemap = 1
base_template_path = "templates/pages/website_theme.css"
default_properties = {
"background_color": "#ffffff",
"top_bar_color": "#ffffff",
"top_bar_text_color": "#000000",
"footer_color": "#ffffff",
"footer_text_color": "#000000",
"font_size": "14px",
"text_color": "#000000",
"link_color": "#000000"
}
def get_context(context):
"""returns web style"""
website_theme = get_active_theme()
if not website_theme:
return {}
prepare(website_theme)
return { "theme": website_theme }
def prepare(theme):
for d in default_properties:
if not theme.get(d):
theme.set(d, default_properties[d])
theme.footer_border_color = get_shade(theme.footer_color, 10)
theme.border_color = get_shade(theme.background_color, 10)
webfonts = list(set(theme.get(key)
for key in ("heading_webfont", 'text_webfont') if theme.get(key)))
theme.webfont_import = "\n".join('@import url(http://fonts.googleapis.com/css?family={0}:400,300,400italic,700&subset=latin,latin-ext);'\
.format(font.replace(" ", "+")) for font in webfonts)
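# Illustrative output for webfonts == ["Open Sans"] (hypothetical theme value):
# @import url(http://fonts.googleapis.com/css?family=Open+Sans:400,300,400italic,700&subset=latin,latin-ext);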
# move @import from css field to the top of the css file
if theme.css:
if "@import url" in theme.css:
webfont_import = list(set(re.findall("@import url\([^\(\)]*\);", theme.css)))
theme.webfont_import += "\n" + "\n".join(webfont_import)
for wfimport in webfont_import:
theme.css = theme.css.replace(wfimport, "")
| mit |
nliolios24/textrank | share/doc/networkx-1.9.1/examples/graph/unix_email.py | 62 | 2683 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of MultiDiGraph to hold edge data
of arbitrary Python objects (in this case one email message per edge).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unixemail.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2005 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| mit |
hkupty/python-mode | pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/imports.py | 17 | 15940 | # Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""imports checkers for Python code"""
import sys
from logilab.common.graph import get_cycles, DotBackend
from logilab.common.modutils import get_module_part, is_standard_module
from logilab.common.ureports import VerbatimText, Paragraph
import astroid
from astroid import are_exclusive
from pylint.interfaces import IAstroidChecker
from pylint.utils import EmptyReport
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
def get_first_import(node, context, name, base, level):
"""return the node where [base.]<name> is imported or None if not found
"""
fullname = '%s.%s' % (base, name) if base else name
first = None
found = False
for first in context.body:
if first is node:
continue
if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
continue
if isinstance(first, astroid.Import):
if any(fullname == iname[0] for iname in first.names):
found = True
break
elif isinstance(first, astroid.From):
if level == first.level and any(
fullname == '%s.%s' % (first.modname, iname[0]) for iname in first.names):
found = True
break
if found and not are_exclusive(first, node):
return first
# utilities to represent import dependencies as tree and dot graph ###########
def make_tree_defs(mod_files_list):
"""get a list of 2-uple (module, list_of_files_which_import_this_module),
it will return a dictionary to represent this as a tree
"""
tree_defs = {}
for mod, files in mod_files_list:
node = (tree_defs, ())
for prefix in mod.split('.'):
node = node[0].setdefault(prefix, [{}, []])
node[1] += files
return tree_defs
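# A minimal illustration of make_tree_defs (module and file names are
# hypothetical):
#   make_tree_defs([('pkg.mod', ['a.py']), ('pkg.sub.mod', ['b.py'])])
# returns
#   {'pkg': [{'mod': [{}, ['a.py']],
#             'sub': [{'mod': [{}, ['b.py']]}, []]}, []]}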
def repr_tree_defs(data, indent_str=None):
"""return a string which represents imports as a tree"""
lines = []
nodes = data.items()
for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
if not files:
files = ''
else:
files = '(%s)' % ','.join(files)
if indent_str is None:
lines.append('%s %s' % (mod, files))
sub_indent_str = ' '
else:
lines.append(r'%s\-%s %s' % (indent_str, mod, files))
if i == len(nodes)-1:
sub_indent_str = '%s ' % indent_str
else:
sub_indent_str = '%s| ' % indent_str
if sub:
lines.append(repr_tree_defs(sub, sub_indent_str))
return '\n'.join(lines)
def dependencies_graph(filename, dep_info):
"""write dependencies as a dot (graphviz) file
"""
done = {}
printer = DotBackend(filename[:-4], rankdir='LR')
printer.emit('URL="." node[shape="box"]')
for modname, dependencies in sorted(dep_info.iteritems()):
done[modname] = 1
printer.emit_node(modname)
for modname in dependencies:
if modname not in done:
done[modname] = 1
printer.emit_node(modname)
for depmodname, dependencies in sorted(dep_info.iteritems()):
for modname in dependencies:
printer.emit_edge(modname, depmodname)
printer.generate(filename)
def make_graph(filename, dep_info, sect, gtype):
"""generate a dependencies graph and add some information about it in the
report's section
"""
dependencies_graph(filename, dep_info)
sect.append(Paragraph('%simports graph has been written to %s'
% (gtype, filename)))
# the import checker itself ###################################################
MSGS = {
'F0401': ('Unable to import %s',
'import-error',
'Used when pylint has been unable to import a module.'),
'R0401': ('Cyclic import (%s)',
'cyclic-import',
'Used when a cyclic import between two or more modules is \
detected.'),
'W0401': ('Wildcard import %s',
'wildcard-import',
'Used when `from module import *` is detected.'),
'W0402': ('Uses of a deprecated module %r',
'deprecated-module',
              'Used when a module marked as deprecated is imported.'),
'W0403': ('Relative import %r, should be %r',
'relative-import',
'Used when an import relative to the package directory is \
detected.'),
'W0404': ('Reimport %r (imported line %s)',
'reimported',
'Used when a module is reimported multiple times.'),
    'W0406': ('Module imports itself',
'import-self',
'Used when a module is importing itself.'),
'W0410': ('__future__ import is not the first non docstring statement',
'misplaced-future',
'Python 2.5 and greater require __future__ import to be the \
first non docstring statement in the module.',
{'maxversion': (3, 0)}),
}
class ImportsChecker(BaseChecker):
"""checks for
* external modules dependencies
* relative / wildcard imports
* cyclic imports
* uses of deprecated modules
"""
__implements__ = IAstroidChecker
name = 'imports'
msgs = MSGS
priority = -2
if sys.version_info < (3,):
deprecated_modules = ('regsub', 'TERMIOS', 'Bastion', 'rexec')
else:
deprecated_modules = ('stringprep', 'optparse')
options = (('deprecated-modules',
{'default' : deprecated_modules,
'type' : 'csv',
'metavar' : '<modules>',
'help' : 'Deprecated modules which should not be used, \
separated by a comma'}
),
('import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of every (i.e. internal and \
external) dependency in the given file (report RP0402 must not be disabled)'}
),
('ext-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of external dependencies in the \
given file (report RP0402 must not be disabled)'}
),
('int-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of internal dependencies in the \
given file (report RP0402 must not be disabled)'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self.import_graph = None
self.__int_dep_info = self.__ext_dep_info = None
self.reports = (('RP0401', 'External dependencies',
self.report_external_dependencies),
('RP0402', 'Modules dependencies graph',
self.report_dependencies_graph),
)
def open(self):
"""called before visiting project (i.e set of modules)"""
self.linter.add_stats(dependencies={})
self.linter.add_stats(cycles=[])
self.stats = self.linter.stats
self.import_graph = {}
def close(self):
"""called before visiting project (i.e set of modules)"""
# don't try to compute cycles if the associated message is disabled
if self.linter.is_message_enabled('cyclic-import'):
for cycle in get_cycles(self.import_graph):
self.add_message('cyclic-import', args=' -> '.join(cycle))
def visit_import(self, node):
"""triggered when an import statement is seen"""
modnode = node.root()
for name, _ in node.names:
importedmodnode = self.get_imported_module(modnode, node, name)
if importedmodnode is None:
continue
self._check_relative_import(modnode, node, importedmodnode, name)
self._add_imported_module(node, importedmodnode.name)
self._check_deprecated_module(node, name)
self._check_reimport(node, name)
# TODO This appears to be the list of all messages of the checker...
# @check_messages('W0410', 'W0401', 'W0403', 'W0402', 'W0404', 'W0406', 'F0401')
@check_messages(*(MSGS.keys()))
def visit_from(self, node):
"""triggered when a from statement is seen"""
basename = node.modname
if basename == '__future__':
# check if this is the first non-docstring statement in the module
prev = node.previous_sibling()
if prev:
# consecutive future statements are possible
if not (isinstance(prev, astroid.From)
and prev.modname == '__future__'):
self.add_message('misplaced-future', node=node)
return
for name, _ in node.names:
if name == '*':
self.add_message('wildcard-import', args=basename, node=node)
modnode = node.root()
importedmodnode = self.get_imported_module(modnode, node, basename)
if importedmodnode is None:
return
self._check_relative_import(modnode, node, importedmodnode, basename)
self._check_deprecated_module(node, basename)
for name, _ in node.names:
if name != '*':
self._add_imported_module(node, '%s.%s' % (importedmodnode.name, name))
self._check_reimport(node, name, basename, node.level)
def get_imported_module(self, modnode, importnode, modname):
try:
return importnode.do_import_module(modname)
except astroid.InferenceError, ex:
if str(ex) != modname:
args = '%r (%s)' % (modname, ex)
else:
args = repr(modname)
self.add_message("import-error", args=args, node=importnode)
def _check_relative_import(self, modnode, importnode, importedmodnode,
importedasname):
"""check relative import. node is either an Import or From node, modname
the imported module name.
"""
if not self.linter.is_message_enabled('relative-import'):
return
if importedmodnode.file is None:
return False # built-in module
if modnode is importedmodnode:
return False # module importing itself
if modnode.absolute_import_activated() or getattr(importnode, 'level', None):
return False
if importedmodnode.name != importedasname:
# this must be a relative import...
self.add_message('relative-import', args=(importedasname, importedmodnode.name),
node=importnode)
def _add_imported_module(self, node, importedmodname):
"""notify an imported module, used to analyze dependencies"""
importedmodname = get_module_part(importedmodname)
context_name = node.root().name
if context_name == importedmodname:
# module importing itself !
self.add_message('import-self', node=node)
elif not is_standard_module(importedmodname):
# handle dependencies
importedmodnames = self.stats['dependencies'].setdefault(
importedmodname, set())
if not context_name in importedmodnames:
importedmodnames.add(context_name)
# update import graph
mgraph = self.import_graph.setdefault(context_name, set())
if not importedmodname in mgraph:
mgraph.add(importedmodname)
def _check_deprecated_module(self, node, mod_path):
"""check if the module is deprecated"""
for mod_name in self.config.deprecated_modules:
if mod_path == mod_name or mod_path.startswith(mod_name + '.'):
self.add_message('deprecated-module', node=node, args=mod_path)
def _check_reimport(self, node, name, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if not self.linter.is_message_enabled('reimported'):
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for context, level in contexts:
first = get_first_import(node, context, name, basename, level)
if first is not None:
self.add_message('reimported', node=node,
args=(name, first.fromlineno))
def report_external_dependencies(self, sect, _, dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = make_tree_defs(self._external_dependencies_info().iteritems())
if not dep_info:
raise EmptyReport()
tree_str = repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def report_dependencies_graph(self, sect, _, dummy):
"""write dependencies as a dot (graphviz) file"""
dep_info = self.stats['dependencies']
if not dep_info or not (self.config.import_graph
or self.config.ext_import_graph
or self.config.int_import_graph):
raise EmptyReport()
filename = self.config.import_graph
if filename:
make_graph(filename, dep_info, sect, '')
filename = self.config.ext_import_graph
if filename:
make_graph(filename, self._external_dependencies_info(),
sect, 'external ')
filename = self.config.int_import_graph
if filename:
make_graph(filename, self._internal_dependencies_info(),
sect, 'internal ')
def _external_dependencies_info(self):
"""return cached external dependencies information or build and
cache them
"""
if self.__ext_dep_info is None:
package = self.linter.base_name
self.__ext_dep_info = result = {}
for importee, importers in self.stats['dependencies'].iteritems():
if not importee.startswith(package):
result[importee] = importers
return self.__ext_dep_info
def _internal_dependencies_info(self):
"""return cached internal dependencies information or build and
cache them
"""
if self.__int_dep_info is None:
package = self.linter.base_name
self.__int_dep_info = result = {}
for importee, importers in self.stats['dependencies'].iteritems():
if importee.startswith(package):
result[importee] = importers
return self.__int_dep_info
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ImportsChecker(linter))
| lgpl-3.0 |
morissette/devopsdays-hackathon-2016 | venv/lib/python2.7/site-packages/docutils/parsers/rst/languages/cs.py | 128 | 4857 | # $Id: cs.py 7119 2011-09-02 13:00:23Z milde $
# Author: Marek Blaha <mb@dat.cz>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Czech-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'pozor': 'attention',
    u'caution (translation required)': 'caution', # how to distinguish caution from warning?
u'code (translation required)': 'code',
u'nebezpe\u010D\u00ED': 'danger',
u'chyba': 'error',
u'rada': 'hint',
u'd\u016Fle\u017Eit\u00E9': 'important',
u'pozn\u00E1mka': 'note',
u'tip (translation required)': 'tip',
u'varov\u00E1n\u00ED': 'warning',
u'admonition (translation required)': 'admonition',
u'sidebar (translation required)': 'sidebar',
u't\u00E9ma': 'topic',
u'line-block (translation required)': 'line-block',
u'parsed-literal (translation required)': 'parsed-literal',
u'odd\u00EDl': 'rubric',
u'moto': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'math (translation required)': 'math',
u'meta (translation required)': 'meta',
#'imagemap': 'imagemap',
    u'image (translation required)': 'image', # picture
    u'figure (translation required)': 'figure', # and here?
u'include (translation required)': 'include',
u'raw (translation required)': 'raw',
u'replace (translation required)': 'replace',
u'unicode (translation required)': 'unicode',
u'datum': 'date',
u't\u0159\u00EDda': 'class',
u'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'obsah': 'contents',
u'sectnum (translation required)': 'sectnum',
u'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'target-notes (translation required)': 'target-notes',
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Czech name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'abbreviation (translation required)': 'abbreviation',
u'ab (translation required)': 'abbreviation',
u'acronym (translation required)': 'acronym',
u'ac (translation required)': 'acronym',
u'code (translation required)': 'code',
u'index (translation required)': 'index',
u'i (translation required)': 'index',
u'subscript (translation required)': 'subscript',
u'sub (translation required)': 'subscript',
u'superscript (translation required)': 'superscript',
u'sup (translation required)': 'superscript',
u'title-reference (translation required)': 'title-reference',
u'title (translation required)': 'title-reference',
u't (translation required)': 'title-reference',
u'pep-reference (translation required)': 'pep-reference',
u'pep (translation required)': 'pep-reference',
u'rfc-reference (translation required)': 'rfc-reference',
u'rfc (translation required)': 'rfc-reference',
u'emphasis (translation required)': 'emphasis',
u'strong (translation required)': 'strong',
u'literal (translation required)': 'literal',
u'math (translation required)': 'math',
u'named-reference (translation required)': 'named-reference',
u'anonymous-reference (translation required)': 'anonymous-reference',
u'footnote-reference (translation required)': 'footnote-reference',
u'citation-reference (translation required)': 'citation-reference',
u'substitution-reference (translation required)': 'substitution-reference',
u'target (translation required)': 'target',
u'uri-reference (translation required)': 'uri-reference',
u'uri (translation required)': 'uri-reference',
u'url (translation required)': 'uri-reference',
u'raw (translation required)': 'raw',}
"""Mapping of Czech role names to canonical role names for interpreted text.
"""
| gpl-3.0 |
orekyuu/intellij-community | plugins/hg4idea/testData/bin/hgext/convert/__init__.py | 92 | 15637 | # convert.py Foreign SCM converter
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''import revisions from foreign VCS repositories into Mercurial'''
import convcmd
import cvsps
import subversion
from mercurial import commands, templatekw
from mercurial.i18n import _
testedwith = 'internal'
# Commands definition was moved elsewhere to ease demandload job.
def convert(ui, src, dest=None, revmapfile=None, **opts):
"""convert a foreign SCM repository to a Mercurial one.
Accepted source formats [identifiers]:
- Mercurial [hg]
- CVS [cvs]
- Darcs [darcs]
- git [git]
- Subversion [svn]
- Monotone [mtn]
- GNU Arch [gnuarch]
- Bazaar [bzr]
- Perforce [p4]
Accepted destination formats [identifiers]:
- Mercurial [hg]
- Subversion [svn] (history on branches is not preserved)
If no revision is given, all revisions will be converted.
Otherwise, convert will only import up to the named revision
(given in a format understood by the source).
If no destination directory name is specified, it defaults to the
basename of the source with ``-hg`` appended. If the destination
repository doesn't exist, it will be created.
By default, all sources except Mercurial will use --branchsort.
Mercurial uses --sourcesort to preserve original revision numbers
order. Sort modes have the following effects:
--branchsort convert from parent to child revision when possible,
which means branches are usually converted one after
the other. It generates more compact repositories.
--datesort sort revisions by date. Converted repositories have
good-looking changelogs but are often an order of
magnitude larger than the same ones generated by
--branchsort.
--sourcesort try to preserve source revisions order, only
supported by Mercurial sources.
--closesort try to move closed revisions as close as possible
to parent branches, only supported by Mercurial
sources.
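    For illustration, two hypothetical invocations (repository URLs and
    directory names are made up)::

        hg convert --branchsort svn://server/repo repo-hg
        hg convert --sourcesort /path/to/hg-repo cleaned-hg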
If ``REVMAP`` isn't given, it will be put in a default location
(``<dest>/.hg/shamap`` by default). The ``REVMAP`` is a simple
text file that maps each source commit ID to the destination ID
for that revision, like so::
<source ID> <destination ID>
If the file doesn't exist, it's automatically created. It's
updated on each commit copied, so :hg:`convert` can be interrupted
and can be run repeatedly to copy new commits.
The authormap is a simple text file that maps each source commit
author to a destination commit author. It is handy for source SCMs
that use unix logins to identify authors (e.g.: CVS). One line per
author mapping and the line format is::
source author = destination author
Empty lines and lines starting with a ``#`` are ignored.
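    For illustration, a hypothetical authormap entry::

        jdoe = John Doe <jdoe@example.com>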
The filemap is a file that allows filtering and remapping of files
and directories. Each line can contain one of the following
directives::
include path/to/file-or-dir
exclude path/to/file-or-dir
rename path/to/source path/to/destination
Comment lines start with ``#``. A specified path matches if it
equals the full relative name of a file or one of its parent
directories. The ``include`` or ``exclude`` directive with the
longest matching path applies, so line order does not matter.
The ``include`` directive causes a file, or all files under a
directory, to be included in the destination repository, and the
exclusion of all other files and directories not explicitly
included. The ``exclude`` directive causes files or directories to
be omitted. The ``rename`` directive renames a file or directory if
it is converted. To rename from a subdirectory into the root of
the repository, use ``.`` as the path to rename to.
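    For illustration, a hypothetical filemap that keeps only ``src``,
    drops its ``vendor`` subdirectory, and hoists the rest to the root::

        include src
        exclude src/vendor
        rename src .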
The splicemap is a file that allows insertion of synthetic
history, letting you specify the parents of a revision. This is
useful if you want to e.g. give a Subversion merge two parents, or
graft two disconnected series of history together. Each entry
contains a key, followed by a space, followed by one or two
comma-separated values::
key parent1, parent2
The key is the revision ID in the source
revision control system whose parents should be modified (same
format as a key in .hg/shamap). The values are the revision IDs
(in either the source or destination revision control system) that
should be used as the new parents for that node. For example, if
you have merged "release-1.0" into "trunk", then you should
specify the revision on "trunk" as the first parent and the one on
the "release-1.0" branch as the second.
The branchmap is a file that allows you to rename a branch when it is
being brought in from whatever external repository. When used in
conjunction with a splicemap, it allows for a powerful combination
to help fix even the most badly mismanaged repositories and turn them
into nicely structured Mercurial repositories. The branchmap contains
lines of the form::
original_branch_name new_branch_name
where "original_branch_name" is the name of the branch in the
source repository, and "new_branch_name" is the name of the branch
    in the destination repository. No whitespace is allowed in the
branch names. This can be used to (for instance) move code in one
repository from "default" to a named branch.
Mercurial Source
################
The Mercurial source recognizes the following configuration
options, which you can set on the command line with ``--config``:
:convert.hg.ignoreerrors: ignore integrity errors when reading.
Use it to fix Mercurial repositories with missing revlogs, by
converting from and to Mercurial. Default is False.
:convert.hg.saverev: store original revision ID in changeset
(forces target IDs to change). It takes a boolean argument and
defaults to False.
:convert.hg.startrev: convert start revision and its descendants.
It takes a hg revision identifier and defaults to 0.
CVS Source
##########
CVS source will use a sandbox (i.e. a checked-out copy) from CVS
to indicate the starting point of what will be converted. Direct
access to the repository files is not needed, unless of course the
repository is ``:local:``. The conversion uses the top level
directory in the sandbox to find the CVS repository, and then uses
CVS rlog commands to find files to convert. This means that unless
a filemap is given, all files under the starting directory will be
converted, and that any directory reorganization in the CVS
sandbox is ignored.
The following options can be used with ``--config``:
:convert.cvsps.cache: Set to False to disable remote log caching,
for testing and debugging purposes. Default is True.
:convert.cvsps.fuzz: Specify the maximum time (in seconds) that is
allowed between commits with identical user and log message in
a single changeset. When very large files were checked in as
part of a changeset then the default may not be long enough.
The default is 60.
:convert.cvsps.mergeto: Specify a regular expression to which
commit log messages are matched. If a match occurs, then the
conversion process will insert a dummy revision merging the
branch on which this log message occurs to the branch
indicated in the regex. Default is ``{{mergetobranch
([-\\w]+)}}``
:convert.cvsps.mergefrom: Specify a regular expression to which
commit log messages are matched. If a match occurs, then the
conversion process will add the most recent revision on the
branch indicated in the regex as the second parent of the
changeset. Default is ``{{mergefrombranch ([-\\w]+)}}``
:convert.localtimezone: use local time (as determined by the TZ
environment variable) for changeset date/times. The default
is False (use UTC).
:hooks.cvslog: Specify a Python function to be called at the end of
gathering the CVS log. The function is passed a list with the
log entries, and can modify the entries in-place, or add or
delete them.
:hooks.cvschangesets: Specify a Python function to be called after
the changesets are calculated from the CVS log. The
function is passed a list with the changeset entries, and can
modify the changesets in-place, or add or delete them.
An additional "debugcvsps" Mercurial command allows the builtin
changeset merging code to be run without doing a conversion. Its
parameters and output are similar to that of cvsps 2.1. Please see
the command help for more details.
Subversion Source
#################
Subversion source detects classical trunk/branches/tags layouts.
By default, the supplied ``svn://repo/path/`` source URL is
converted as a single branch. If ``svn://repo/path/trunk`` exists
it replaces the default branch. If ``svn://repo/path/branches``
exists, its subdirectories are listed as possible branches. If
``svn://repo/path/tags`` exists, it is looked for tags referencing
converted branches. Default ``trunk``, ``branches`` and ``tags``
values can be overridden with following options. Set them to paths
relative to the source URL, or leave them blank to disable auto
detection.
The following options can be set with ``--config``:
:convert.svn.branches: specify the directory containing branches.
The default is ``branches``.
:convert.svn.tags: specify the directory containing tags. The
default is ``tags``.
:convert.svn.trunk: specify the name of the trunk branch. The
default is ``trunk``.
:convert.localtimezone: use local time (as determined by the TZ
environment variable) for changeset date/times. The default
is False (use UTC).
Source history can be retrieved starting at a specific revision,
instead of being integrally converted. Only single branch
conversions are supported.
:convert.svn.startrev: specify start Subversion revision number.
The default is 0.
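    For illustration, a hypothetical conversion overriding the trunk
    location (server and directory names are made up)::

        hg convert --config convert.svn.trunk=stable svn://server/repo repo-hg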
Perforce Source
###############
The Perforce (P4) importer can be given a p4 depot path or a
client specification as source. It will convert all files in the
source to a flat Mercurial repository, ignoring labels, branches
and integrations. Note that when a depot path is given you then
usually should specify a target directory, because otherwise the
target may be named ``...-hg``.
It is possible to limit the amount of source history to be
converted by specifying an initial Perforce revision:
:convert.p4.startrev: specify initial Perforce revision (a
Perforce changelist number).
Mercurial Destination
#####################
The following options are supported:
:convert.hg.clonebranches: dispatch source branches in separate
clones. The default is False.
:convert.hg.tagsbranch: branch name for tag revisions, defaults to
``default``.
:convert.hg.usebranchnames: preserve branch names. The default is
True.
"""
return convcmd.convert(ui, src, dest, revmapfile, **opts)
def debugsvnlog(ui, **opts):
return subversion.debugsvnlog(ui, **opts)
def debugcvsps(ui, *args, **opts):
'''create changeset information from CVS
This command is intended as a debugging tool for the CVS to
Mercurial converter, and can be used as a direct replacement for
cvsps.
    Hg debugcvsps reads the CVS rlog for the current directory (or any
named directory) in the CVS repository, and converts the log to a
series of changesets based on matching commit log entries and
dates.'''
return cvsps.debugcvsps(ui, *args, **opts)
commands.norepo += " convert debugsvnlog debugcvsps"
cmdtable = {
"convert":
(convert,
[('', 'authors', '',
_('username mapping filename (DEPRECATED, use --authormap instead)'),
_('FILE')),
('s', 'source-type', '',
_('source repository type'), _('TYPE')),
('d', 'dest-type', '',
_('destination repository type'), _('TYPE')),
('r', 'rev', '',
_('import up to target revision REV'), _('REV')),
('A', 'authormap', '',
_('remap usernames using this file'), _('FILE')),
('', 'filemap', '',
_('remap file names using contents of file'), _('FILE')),
('', 'splicemap', '',
_('splice synthesized history into place'), _('FILE')),
('', 'branchmap', '',
_('change branch names while converting'), _('FILE')),
('', 'branchsort', None, _('try to sort changesets by branches')),
('', 'datesort', None, _('try to sort changesets by date')),
('', 'sourcesort', None, _('preserve source changesets order')),
('', 'closesort', None, _('try to reorder closed revisions'))],
_('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
"debugsvnlog":
(debugsvnlog,
[],
'hg debugsvnlog'),
"debugcvsps":
(debugcvsps,
[
# Main options shared with cvsps-2.1
('b', 'branches', [], _('only return changes on specified branches')),
('p', 'prefix', '', _('prefix to remove from file names')),
('r', 'revisions', [],
_('only return changes after or between specified tags')),
('u', 'update-cache', None, _("update cvs log cache")),
('x', 'new-cache', None, _("create new cvs log cache")),
('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
('', 'root', '', _('specify cvsroot')),
# Options specific to builtin cvsps
('', 'parents', '', _('show parent changesets')),
('', 'ancestors', '',
_('show current changeset in ancestor branches')),
# Options that are ignored for compatibility with cvsps-2.1
('A', 'cvs-direct', None, _('ignored for compatibility')),
],
_('hg debugcvsps [OPTION]... [PATH]...')),
}
def kwconverted(ctx, name):
rev = ctx.extra().get('convert_revision', '')
if rev.startswith('svn:'):
if name == 'svnrev':
return str(subversion.revsplit(rev)[2])
elif name == 'svnpath':
return subversion.revsplit(rev)[1]
elif name == 'svnuuid':
return subversion.revsplit(rev)[0]
return rev
def kwsvnrev(repo, ctx, **args):
""":svnrev: String. Converted subversion revision number."""
return kwconverted(ctx, 'svnrev')
def kwsvnpath(repo, ctx, **args):
""":svnpath: String. Converted subversion revision project path."""
return kwconverted(ctx, 'svnpath')
def kwsvnuuid(repo, ctx, **args):
""":svnuuid: String. Converted subversion revision repository identifier."""
return kwconverted(ctx, 'svnuuid')
def extsetup(ui):
templatekw.keywords['svnrev'] = kwsvnrev
templatekw.keywords['svnpath'] = kwsvnpath
templatekw.keywords['svnuuid'] = kwsvnuuid
# tell hggettext to extract docstrings from these functions:
i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
| apache-2.0 |
dnevels/heekscnc | pycnc/wxPocketDlg.py | 25 | 13424 | import wx
import HeeksCNC
from Pocket import Pocket
from wxHDialog import HDialog
from wxPictureWindow import PictureWindow
from wxNiceTextCtrl import LengthCtrl
from wxNiceTextCtrl import DoubleCtrl
from wxNiceTextCtrl import GeomCtrl
from consts import *
ID_SKETCHES = 100
ID_STEP_OVER = 101
ID_MATERIAL_ALLOWANCE = 102
ID_STARTING_PLACE = 103
ID_KEEP_TOOL_DOWN = 104
ID_USE_ZIG_ZAG = 105
ID_ZIG_ANGLE = 106
ID_ABS_MODE = 107
ID_CLEARANCE_HEIGHT = 108
ID_RAPID_SAFETY_SPACE = 109
ID_START_DEPTH = 110
ID_FINAL_DEPTH = 111
ID_STEP_DOWN = 112
ID_HFEED = 113
ID_VFEED = 114
ID_SPINDLE_SPEED = 115
ID_COMMENT = 116
ID_ACTIVE = 117
ID_TITLE = 118
ID_TOOL = 119
ID_DESCENT_STRATEGY = 120
ID_PICK_SKETCHES = 121
class PocketDlg(HDialog):
def __init__(self, pocket):
HDialog.__init__(self, "Pocket Operation")
self.pocket = pocket
self.general_bitmap = None
self.step_over_bitmap = None
self.material_allowance_bitmap = None
self.starting_center_bitmap = None
self.starting_boundary_bitmap = None
self.tool_down_bitmap = None
self.not_tool_down_bitmap = None
self.use_zig_zag_bitmap = None
self.zig_angle_bitmap = None
self.clearance_height_bitmap = None
self.rapid_down_to_bitmap = None
self.start_depth_bitmap = None
self.final_depth_bitmap = None
self.step_down_bitmap = None
self.entry_move_bitmap = None
self.ignore_event_functions = True
sizerMain = wx.BoxSizer(wx.HORIZONTAL)
#add left sizer
sizerLeft = wx.BoxSizer(wx.VERTICAL)
sizerMain.Add( sizerLeft, 0, wx.ALL, self.control_border )
# add right sizer
sizerRight = wx.BoxSizer(wx.VERTICAL)
sizerMain.Add( sizerRight, 0, wx.ALL, self.control_border )
# add picture to right side
self.picture = PictureWindow(self, wx.Size(300, 200))
pictureSizer = wx.BoxSizer(wx.VERTICAL)
pictureSizer.Add(self.picture, 1, wx.GROW)
sizerRight.Add( pictureSizer, 0, wx.ALL, self.control_border )
# add some of the controls to the right side
self.cmbAbsMode = wx.ComboBox(self, ID_ABS_MODE, choices = ["absolute", "incremental"])
self.AddLabelAndControl(sizerRight, "absolute mode", self.cmbAbsMode)
self.lgthHFeed = LengthCtrl(self, ID_HFEED)
self.AddLabelAndControl(sizerRight, "horizontal feedrate", self.lgthHFeed)
self.lgthVFeed = LengthCtrl(self, ID_VFEED)
self.AddLabelAndControl(sizerRight, "vertical feedrate", self.lgthVFeed)
self.dblSpindleSpeed = DoubleCtrl(self, ID_SPINDLE_SPEED)
self.AddLabelAndControl(sizerRight, "spindle speed", self.dblSpindleSpeed)
self.txtComment = wx.TextCtrl(self, ID_COMMENT)
self.AddLabelAndControl(sizerRight, "comment", self.txtComment)
self.chkActive = wx.CheckBox( self, ID_ACTIVE, "active" )
sizerRight.Add( self.chkActive, 0, wx.ALL, self.control_border )
self.txtTitle = wx.TextCtrl(self, ID_TITLE)
self.AddLabelAndControl(sizerRight, "title", self.txtTitle)
# add OK and Cancel to right side
sizerOKCancel = self.MakeOkAndCancel(wx.HORIZONTAL)
sizerRight.Add( sizerOKCancel, 0, wx.ALL + wx.ALIGN_RIGHT + wx.ALIGN_BOTTOM, self.control_border )
# add all the controls to the left side
self.idsSketches = GeomCtrl(self, ID_SKETCHES)
self.AddLabelAndControl(sizerLeft, "sketches", self.idsSketches)
btn_pick_sketches = wx.Button(self, ID_PICK_SKETCHES)
self.AddLabelAndControl(sizerLeft, "pick sketches", btn_pick_sketches)
self.lgthStepOver = LengthCtrl(self, ID_STEP_OVER)
self.AddLabelAndControl(sizerLeft, "step over", self.lgthStepOver)
self.lgthMaterialAllowance = LengthCtrl(self, ID_MATERIAL_ALLOWANCE)
self.AddLabelAndControl(sizerLeft, "material allowance", self.lgthMaterialAllowance)
self.cmbStartingPlace = wx.ComboBox(self, ID_STARTING_PLACE, choices = ["boundary", "center"])
self.AddLabelAndControl(sizerLeft, "starting place", self.cmbStartingPlace)
        self.cmbEntryMove = wx.ComboBox(self, ID_DESCENT_STRATEGY, choices = ["Plunge", "Ramp", "Helical"])
self.AddLabelAndControl(sizerLeft, "entry move", self.cmbEntryMove)
self.tools_for_combo = HeeksCNC.program.tools.FindAllTools()
tool_choices = []
for tool in self.tools_for_combo:
tool_choices.append(tool[1])
self.cmbTool = wx.ComboBox(self, ID_TOOL, choices = tool_choices)
self.AddLabelAndControl(sizerLeft, "Tool", self.cmbTool)
self.chkUseZigZag = wx.CheckBox( self, ID_USE_ZIG_ZAG, "use zig zag" )
sizerLeft.Add( self.chkUseZigZag, 0, wx.ALL, self.control_border )
self.chkKeepToolDown = wx.CheckBox( self, ID_KEEP_TOOL_DOWN, "keep tool down" )
sizerLeft.Add( self.chkKeepToolDown, 0, wx.ALL, self.control_border )
self.dblZigAngle = DoubleCtrl(self, ID_ZIG_ANGLE)
self.AddLabelAndControl(sizerLeft, "zig zag angle", self.dblZigAngle)
self.lgthClearanceHeight = LengthCtrl(self, ID_CLEARANCE_HEIGHT)
self.AddLabelAndControl(sizerLeft, "clearance height", self.lgthClearanceHeight)
self.lgthRapidDownToHeight = LengthCtrl(self, ID_RAPID_SAFETY_SPACE)
self.AddLabelAndControl(sizerLeft, "rapid safety space", self.lgthRapidDownToHeight)
self.lgthStartDepth = LengthCtrl(self, ID_START_DEPTH)
self.AddLabelAndControl(sizerLeft, "start depth", self.lgthStartDepth)
self.lgthFinalDepth = LengthCtrl(self, ID_FINAL_DEPTH)
self.AddLabelAndControl(sizerLeft, "final depth", self.lgthFinalDepth)
self.lgthStepDown = LengthCtrl(self, ID_STEP_DOWN)
self.AddLabelAndControl(sizerLeft, "step down", self.lgthStepDown)
self.SetFromData()
self.SetSizer( sizerMain )
sizerMain.SetSizeHints(self)
sizerMain.Fit(self)
self.idsSketches.SetFocus()
self.ignore_event_functions = False
self.SetPicture()
self.Bind(wx.EVT_CHILD_FOCUS, self.OnChildFocus)
self.Bind(wx.EVT_COMBOBOX, self.OnComboStartingPlace, self.cmbStartingPlace)
self.Bind(wx.EVT_CHECKBOX, self.OnCheckKeepToolDown, self.chkKeepToolDown)
self.Bind(wx.EVT_CHECKBOX, self.OnCheckUseZigZag, self.chkUseZigZag)
self.Bind(wx.EVT_COMBOBOX, self.OnComboTool, self.cmbTool)
self.Bind(wx.EVT_BUTTON, self.OnPickSketches, btn_pick_sketches)
def OnChildFocus(self, event):
if self.ignore_event_functions: return
if event.GetWindow():
self.SetPicture()
def OnComboStartingPlace(self, event):
if self.ignore_event_functions: return
self.SetPicture()
def OnCheckKeepToolDown(self, event):
if self.ignore_event_functions: return
self.SetPicture()
def OnCheckUseZigZag(self, event):
if self.ignore_event_functions: return
self.SetPicture()
def OnComboTool(self, event):
pass
#if self.ignore_event_functions: return
#self.SetPicture()
def OnPickSketches(self, event):
if HeeksCNC.cad.hide_window_on_pick_sketches():
self.Show(False)
sketches = HeeksCNC.cad.pick_sketches()
if HeeksCNC.cad.hide_window_on_pick_sketches():
self.Show()
self.idsSketches.SetFromGeomList(sketches)
def GetData(self):
if self.ignore_event_functions: return
self.ignore_event_functions = True
self.pocket.sketches = self.idsSketches.GetGeomList()
self.pocket.step_over = self.lgthStepOver.GetValue()
self.pocket.material_allowance = self.lgthMaterialAllowance.GetValue()
self.pocket.starting_place = (self.cmbStartingPlace.GetValue() == "center")
if self.cmbEntryMove.GetValue() == "Plunge": self.pocket.entry_move = ENTRY_STYLE_PLUNGE
elif self.cmbEntryMove.GetValue() == "Ramp": self.pocket.entry_move = ENTRY_STYLE_RAMP
elif self.cmbEntryMove.GetValue() == "Helical": self.pocket.entry_move = ENTRY_STYLE_HELICAL
self.pocket.keep_tool_down_if_poss = self.chkKeepToolDown.GetValue()
self.pocket.use_zig_zag = self.chkUseZigZag.GetValue()
if self.pocket.use_zig_zag: self.pocket.zig_angle = self.dblZigAngle.GetValue()
if self.cmbAbsMode.GetValue() == "incremental": self.pocket.abs_mode = ABS_MODE_INCREMENTAL
else: self.pocket.abs_mode = ABS_MODE_ABSOLUTE
self.pocket.clearance_height = self.lgthClearanceHeight.GetValue()
self.pocket.rapid_safety_space = self.lgthRapidDownToHeight.GetValue()
self.pocket.start_depth = self.lgthStartDepth.GetValue()
self.pocket.final_depth = self.lgthFinalDepth.GetValue()
self.pocket.step_down = self.lgthStepDown.GetValue()
self.pocket.horizontal_feed_rate = self.lgthHFeed.GetValue()
self.pocket.vertical_feed_rate = self.lgthVFeed.GetValue()
self.pocket.spindle_speed = self.dblSpindleSpeed.GetValue()
self.pocket.comment = self.txtComment.GetValue()
self.pocket.active = self.chkActive.GetValue()
# get the tool number
self.pocket.tool_number = 0
if self.cmbTool.GetSelection() >= 0:
self.pocket.tool_number = self.tools_for_combo[self.cmbTool.GetSelection()][0]
self.pocket.title = self.txtTitle.GetValue()
self.ignore_event_functions = False
def SetFromData(self):
self.ignore_event_functions = True
self.idsSketches.SetFromGeomList(self.pocket.sketches)
self.lgthStepOver.SetValue(self.pocket.step_over)
self.lgthMaterialAllowance.SetValue(self.pocket.material_allowance)
self.cmbStartingPlace.SetValue("center" if self.pocket.starting_place else "boundary")
if self.pocket.entry_move == ENTRY_STYLE_PLUNGE: self.cmbEntryMove.SetValue("Plunge")
elif self.pocket.entry_move == ENTRY_STYLE_RAMP: self.cmbEntryMove.SetValue("Ramp")
elif self.pocket.entry_move == ENTRY_STYLE_HELICAL: self.cmbEntryMove.SetValue("Helical")
# set the tool combo to the correct tool
for i in range(0, len(self.tools_for_combo)):
if self.tools_for_combo[i][0] == self.pocket.tool_number:
self.cmbTool.SetSelection(i)
break
self.chkKeepToolDown.SetValue(self.pocket.keep_tool_down_if_poss)
self.chkUseZigZag.SetValue(self.pocket.use_zig_zag)
if self.pocket.use_zig_zag: self.dblZigAngle.SetValue(self.pocket.zig_angle)
if self.pocket.abs_mode == ABS_MODE_ABSOLUTE: self.cmbAbsMode.SetValue("absolute")
else: self.cmbAbsMode.SetValue("incremental")
self.lgthClearanceHeight.SetValue(self.pocket.clearance_height)
self.lgthRapidDownToHeight.SetValue(self.pocket.rapid_safety_space)
self.lgthStartDepth.SetValue(self.pocket.start_depth)
self.lgthFinalDepth.SetValue(self.pocket.final_depth)
self.lgthStepDown.SetValue(self.pocket.step_down)
self.lgthHFeed.SetValue(self.pocket.horizontal_feed_rate)
self.lgthVFeed.SetValue(self.pocket.vertical_feed_rate)
self.dblSpindleSpeed.SetValue(self.pocket.spindle_speed)
self.txtComment.SetValue(self.pocket.comment)
self.chkActive.SetValue(self.pocket.active)
self.txtTitle.SetValue(self.pocket.title)
self.ignore_event_functions = False
def SetPictureBitmap(self, bitmap, name):
self.picture.SetPictureBitmap(bitmap, HeeksCNC.heekscnc_path + "/bitmaps/pocket/" + name + ".png", wx.BITMAP_TYPE_PNG)
def SetPicture(self):
w = self.FindFocus()
if w == self.lgthStepOver: self.SetPictureBitmap(self.step_over_bitmap, "step over")
elif w == self.lgthMaterialAllowance: self.SetPictureBitmap(self.material_allowance_bitmap, "material allowance")
elif w == self.cmbStartingPlace:
if self.cmbStartingPlace.GetValue() == "boundary": self.SetPictureBitmap(self.starting_boundary_bitmap, "starting boundary")
else: self.SetPictureBitmap(self.starting_center_bitmap, "starting center")
elif w == self.chkKeepToolDown:
if self.chkKeepToolDown.IsChecked(): self.SetPictureBitmap(self.tool_down_bitmap, "tool down")
else: self.SetPictureBitmap(self.not_tool_down_bitmap, "not tool down")
elif w == self.chkUseZigZag:
if self.chkUseZigZag.IsChecked(): self.SetPictureBitmap(self.use_zig_zag_bitmap, "use zig zag")
else: self.SetPictureBitmap(self.general_bitmap, "general")
elif w == self.dblZigAngle: self.SetPictureBitmap(self.zig_angle_bitmap, "zig angle")
elif w == self.lgthClearanceHeight: self.SetPictureBitmap(self.clearance_height_bitmap, "clearance height")
elif w == self.lgthRapidDownToHeight: self.SetPictureBitmap(self.rapid_down_to_bitmap, "rapid down height")
elif w == self.lgthStartDepth: self.SetPictureBitmap(self.start_depth_bitmap, "start depth")
elif w == self.lgthFinalDepth: self.SetPictureBitmap(self.final_depth_bitmap, "final depth")
elif w == self.lgthStepDown: self.SetPictureBitmap(self.step_down_bitmap, "step down")
else: self.SetPictureBitmap(self.general_bitmap, "general")
| bsd-3-clause |
EmuxEvans/Wox | PythonHome/Lib/site-packages/requests/packages/urllib3/packages/ordered_dict.py | 1093 | 8936 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
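    # Illustrative layout (hypothetical keys): after inserting 'a' then 'b',
    # the sentinel and links form root <-> link_a <-> link_b <-> root, i.e.
    #   root   = [link_b, link_a, None]
    #   link_a = [root,   link_b, 'a']
    #   link_b = [link_a, root,   'b']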
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
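    # Example (illustrative): with od = OrderedDict([('a', 1), ('b', 2)]),
    # od.popitem() returns ('b', 2) and od.popitem(last=False) returns ('a', 1).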
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| mit |
realsaiko/odoo | addons/l10n_fr_hr_payroll/report/__init__.py | 424 | 1091 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fiche_paye
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ericleasemorgan/EEBO-TCP-Workset-Browser | bin/make-index.py | 1 | 2042 | #!/usr/bin/env python
# make-index.py - read EEBO TEI files and output word frequencies as well as a "book"
# Eric Lease Morgan <emorgan@nd.edu>
# June 8, 2015 - first investigations; based on HathiTrust work
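# Usage (paths are illustrative): pipe a TEI XML file on stdin and pass -b to
# emit the plain-text "book" or -d to emit tab-delimited word frequencies, e.g.
#   cat ./xml/A00001.xml | ./bin/make-index.py -d > ./frequencies/A00001.tsv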
# configure
STOPWORDS = './etc/stopwords-en.txt'
# require
import operator
import re
import sys
import libxml2
# sanity check
if ( len( sys.argv ) != 2 ) or ( sys.stdin.isatty() ) :
print "Usage: cat <xml> |", sys.argv[ 0 ], '<-b|-d>'
quit()
# get input; sanity check
flag = sys.argv[ 1 ]
# build a book?
if flag == '-b' : build_book = 1
elif flag == '-d' : build_book = 0
else :
print "Usage: cat <xml> |", sys.argv[ 0 ], '<-b|-d>'
quit()
# create an xpath parser with an xml file
xml = sys.stdin.read()
tei = libxml2.parseMemory( xml, len( xml ) )
context = tei.xpathNewContext()
context.xpathRegisterNs( 't', 'http://www.tei-c.org/ns/1.0' )
# parse
title = context.xpathEval( '/t:TEI/t:teiHeader/t:fileDesc/t:titleStmt/t:title/text()' )[ 0 ]
text = context.xpathEval( '/t:TEI/t:text' )[ 0 ].content
# normalize the text
text = re.sub( '\s+', ' ', text )
text = text.lower()
text = text.split()
# initialize output
words = {}
book = str( title ) + '\n'
# create a list of (English) stopwords
stopwords = {}
with open ( STOPWORDS ) as DATABASE :
for record in DATABASE : stopwords[ record.rstrip() ] = 1
# process each word in the text
for word in text :
# normalize some more; probably not 100% accurate
word = word.rstrip( '?:!.,;)' )
word = word.lstrip( '?:!.,;(' )
# filter out unwanted words
if len( word ) < 2 : continue
if re.match( '\d|\W', word ) : continue
if word in stopwords : continue
# build text file
if build_book : book = book + word + ' '
# or update the dictionary
else : words[ word ] = words.get( word, 0 ) + 1
# output book, or
if build_book : print book
# output the dictionary
else :
for tuple in sorted( words.items(), key=operator.itemgetter( 1 ), reverse=True ) :
print( tuple[ 0 ] + '\t' + str( tuple[ 1 ] ) )
# done
quit()
| gpl-2.0 |