| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
rcgee/oq-hazardlib | openquake/hazardlib/tests/geo/surface/gc2_test.py | 1 | 12206 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# The Hazard Library
# Copyright (C) 2013-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Generalized coordinate systems require an additional level of testing under
a variety of fault conditions, so we separate these out from the main
fault surface testing modules.
"""
import os
import unittest
import numpy
from openquake.hazardlib.geo.surface.multi import MultiSurface
from openquake.hazardlib.geo import Mesh, Point, Line, PlanarSurface,\
SimpleFaultSurface
from openquake.hazardlib.geo.surface.base import (downsample_mesh,
downsample_trace)
PNT1 = Point(-64.78365, -0.45236, 0.0)
PNT2 = Point(-64.80164, -0.45236, 0.0)
PNT3 = Point(-64.90498, -0.36564, 0.0)
PNT4 = Point(-65.00000, -0.16188, 0.0)
PNT5 = Point(-65.00000, 0.00000, 0.0)
AS_ARRAY = numpy.array([[pnt.longitude, pnt.latitude, pnt.depth]
for pnt in [PNT1, PNT2, PNT3, PNT4, PNT5]])
class CartesianTestingMultiSurface(MultiSurface):
"""
This test surface is used to verify the values given by Spudich & Chiou
in their report. Here, the fault is built directly from the Cartesian
points, so we override the call to the function that renders the
coordinates to Cartesian.
"""
def _get_cartesian_edge_set(self):
return None
def _get_gc2_coordinates_for_rupture(self, edge_sets):
pass
def _setup_peer_test_bending_fault_config():
"""
The GC2 tests will be based on variations of the PEER bending fault
test case:
Fault is dipping east-north-east. The bending trace runs, north to south,
through:
Point 5 (-65.00000, 0.00000, 0.0)
Point 4 (-65.00000, -0.16188, 0.0)
Point 3 (-64.90498, -0.36564, 0.0)
Point 2 (-64.80164, -0.45236, 0.0)
Point 1 (-64.78365, -0.45236, 0.0)
"""
# Build separate faults
# Get down-dip points - dipping east-north-east
strike1 = PNT1.azimuth(PNT2)
dipdir1 = (strike1 + 90.) % 360.0
strike2 = PNT2.azimuth(PNT3)
dipdir2 = (strike2 + 90.) % 360.0
strike3 = PNT3.azimuth(PNT4)
dipdir3 = (strike3 + 90.) % 360.0
strike4 = PNT4.azimuth(PNT5)
dipdir4 = (strike4 + 90.) % 360.0
global_strike = PNT1.azimuth(PNT5)
global_dipdir = (global_strike + 90.) % 360.0
# Get lower trace
usd = 0.0
lsd = 12.0
dip = 60.0
as_length = lsd / numpy.tan(numpy.radians(dip))
PNT1b = PNT1.point_at(as_length, lsd, global_dipdir)
PNT2b = PNT2.point_at(as_length, lsd, global_dipdir)
PNT3b = PNT3.point_at(as_length, lsd, global_dipdir)
PNT4b = PNT4.point_at(as_length, lsd, global_dipdir)
PNT5b = PNT5.point_at(as_length, lsd, global_dipdir)
# As simple fault dipping east
mesh_spacing = 0.5
simple_fault1 = SimpleFaultSurface.from_fault_data(
Line([PNT1, PNT2, PNT3, PNT4, PNT5]), usd, lsd, dip, mesh_spacing)
# As a set of planes describing a concordant "Stirling fault"
stirling_planes = [
PlanarSurface.from_corner_points(1.0, PNT1, PNT2, PNT2b, PNT1b),
PlanarSurface.from_corner_points(1.0, PNT2, PNT3, PNT3b, PNT2b),
PlanarSurface.from_corner_points(1.0, PNT3, PNT4, PNT4b, PNT3b),
PlanarSurface.from_corner_points(1.0, PNT4, PNT5, PNT5b, PNT4b)
]
stirling_fault1 = MultiSurface(stirling_planes)
# As a set of planes describing a concordant "Frankel Fault"
# In the Frankel fault each segment is projected to the local dip direction
dipdir2b = (dipdir2 + 180.) % 360.0
frankel_planes = [
PlanarSurface.from_corner_points(
1.0, PNT1, PNT2,
PNT2.point_at(as_length, lsd, dipdir1),
PNT1.point_at(as_length, lsd, dipdir1)
),
PlanarSurface.from_corner_points(
1.0, PNT2, PNT3,
PNT3.point_at(as_length, lsd, dipdir2),
PNT2.point_at(as_length, lsd, dipdir2)
),
PlanarSurface.from_corner_points(
1.0, PNT3, PNT4,
PNT4.point_at(as_length, lsd, dipdir3),
PNT3.point_at(as_length, lsd, dipdir3)
),
PlanarSurface.from_corner_points(
1.0, PNT4, PNT5,
PNT5.point_at(as_length, lsd, dipdir4),
PNT4.point_at(as_length, lsd, dipdir4)
)
]
frankel_fault1 = MultiSurface(frankel_planes)
# Test the case of a discordant Frankel plane
# Swapping the strike of the second segment to change the dip direction
# Also increasing the dip from 60 degrees to 75 degrees
as_length_alt = lsd / numpy.tan(numpy.radians(75.0))
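# Rough check of the geometry (illustrative): with lsd = 12 km the down-dip
# horizontal projection is as_length = 12 / tan(60 deg) ~= 6.93 km for the
# 60-degree segments and as_length_alt = 12 / tan(75 deg) ~= 3.22 km for the
# steeper discordant segment.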
frankel_discordant = [
PlanarSurface.from_corner_points(
1.0, PNT1, PNT2,
PNT2.point_at(as_length, lsd, dipdir1),
PNT1.point_at(as_length, lsd, dipdir1)
),
PlanarSurface.from_corner_points(
1.0, PNT3, PNT2,
PNT2.point_at(as_length_alt, lsd, dipdir2b),
PNT3.point_at(as_length_alt, lsd, dipdir2b)
),
PlanarSurface.from_corner_points(
1.0, PNT3, PNT4,
PNT4.point_at(as_length, lsd, dipdir3),
PNT3.point_at(as_length, lsd, dipdir3)
),
PlanarSurface.from_corner_points(
1.0, PNT4, PNT5,
PNT5.point_at(as_length, lsd, dipdir4),
PNT4.point_at(as_length, lsd, dipdir4)
)
]
frankel_fault2 = MultiSurface(frankel_discordant)
return simple_fault1, stirling_fault1, frankel_fault1, frankel_fault2
SFLT1, STIRFLT1, FRANK1, FRANK2 = _setup_peer_test_bending_fault_config()
class TraceDownSamplingTestCase(unittest.TestCase):
"""
Tests the downsampling algorithm for the Rectangular Mesh test case
"""
def test_downsample_trace(self):
# Use the simple fault case with a tolerance of 1.0 degree
downsampled_trace = downsample_trace(SFLT1.mesh, 1.0)
# Top edge of downsampled mesh should correspond to the five
# points of the simple fault
# Check longitudes
numpy.testing.assert_array_almost_equal(downsampled_trace[:, 0],
AS_ARRAY[:, 0],
5)
# Check latitude
numpy.testing.assert_array_almost_equal(downsampled_trace[:, 1],
AS_ARRAY[:, 1],
5)
# Check depths
numpy.testing.assert_array_almost_equal(downsampled_trace[:, 2],
AS_ARRAY[:, 2],
5)
class MeshDownSamplingTestCase(unittest.TestCase):
"""
Tests the downsampling algorithm for the mesh
"""
def test_downsample_mesh(self):
# Use the simple fault case with a tolerance of 1.0 degree
numpy.testing.assert_array_almost_equal(
downsample_mesh(SFLT1.mesh, 1.0).lons[0, :],
AS_ARRAY[:, 0],
5)
numpy.testing.assert_array_almost_equal(
downsample_mesh(SFLT1.mesh, 1.0).lats[0, :],
AS_ARRAY[:, 1],
5)
numpy.testing.assert_array_almost_equal(
downsample_mesh(SFLT1.mesh, 1.0).depths[0, :],
AS_ARRAY[:, 2],
5)
class GC2SetupTestCase(unittest.TestCase):
"""
Tests the basic setup of the GC2 system for a fault by verifying it
against the formulation example in the Spudich & Chiou (2015) report
"""
def setUp(self):
p1 = numpy.array([2., 2., 0.])
p2 = numpy.array([3.00, 3.732, 0.])
p3 = numpy.array([6.654, 3.328, 0.])
p4 = numpy.array([7.939, 4.860, 0.])
p5 = numpy.array([4.000, 4.165, 0.])
p6 = numpy.array([0.0, 0.0, 0.])
p7 = numpy.array([1.0, 0.0, 0.])
p8 = numpy.array([1.0, 1.0, 0.])
p9 = numpy.array([2.0, 1.0, 0.])
# Defines three traces
trace1 = numpy.vstack([p1, p2])
trace2 = numpy.vstack([p3, p4, p5])
trace3 = numpy.vstack([p6, p7, p8, p9])
self.model = CartesianTestingMultiSurface(STIRFLT1.surfaces)
self.model.cartesian_edges = [trace1, trace2, trace3]
self.model.cartesian_endpoints = [numpy.vstack([p1, p2]),
numpy.vstack([p3, p5]),
numpy.vstack([p6, p9])]
def test_spudich_chiou_calculations(self):
"""
Verify that the core unit vectors are being correctly calculated
and interpreted in the Spudich and Chiou test case - presented on
page 6 of Spudich & Chiou
"""
self.model._setup_gc2_framework()
# Test GC2 configuration params
numpy.testing.assert_array_almost_equal(
self.model.gc2_config["b_hat"], numpy.array([0.948, 0.318]), 3)
numpy.testing.assert_array_almost_equal(
self.model.gc2_config["a_hat"], numpy.array([0.894, 0.447]), 3)
numpy.testing.assert_array_almost_equal(
self.model.gc2_config["ejs"],
numpy.array([1.669, -1.999, 2.236]),
3)
self.assertAlmostEqual(self.model.gc2_config["e_tot"], 1.9059, 4)
numpy.testing.assert_array_almost_equal(self.model.p0,
numpy.array([0., 0.]))
CONCORDANT_FILE = os.path.join(os.path.dirname(__file__),
"GC2Test_Concordant.csv")
DISCORDANT_FILE = os.path.join(os.path.dirname(__file__),
"GC2Test_Discordant.csv")
class ConcordantSurfaceTestCase(unittest.TestCase):
"""
Tests the verification of the GC2 module for the Concordant Test case
"""
def setUp(self):
self.data = numpy.genfromtxt(CONCORDANT_FILE, delimiter=",")
self.mesh = Mesh(self.data[:, 0], self.data[:, 1], self.data[:, 2])
self.model = MultiSurface(FRANK1.surfaces)
def test_gc2_coords(self):
"""
Verifies the GC2U, GC2T coordinate for the concordant case
"""
expected_t = self.data[:, 3]
expected_u = self.data[:, 4]
gc2t, gc2u = self.model.get_generalised_coordinates(self.mesh.lons,
self.mesh.lats)
numpy.testing.assert_array_almost_equal(expected_t, gc2t)
numpy.testing.assert_array_almost_equal(expected_u, gc2u)
def test_gc2_rx(self):
"""
Verifies Rx for the concordant case
"""
expected_rx = self.data[:, 5]
r_x = self.model.get_rx_distance(self.mesh)
numpy.testing.assert_array_almost_equal(expected_rx, r_x)
def test_gc2_ry0(self):
"""
Verifies Ry0 for the concordant case
"""
expected_ry0 = self.data[:, 6]
ry0 = self.model.get_ry0_distance(self.mesh)
numpy.testing.assert_array_almost_equal(expected_ry0, ry0)
class DiscordantSurfaceTestCase(unittest.TestCase):
"""
Tests the verification of the GC2 module for the Discordant Test case
"""
def setUp(self):
self.data = numpy.genfromtxt(DISCORDANT_FILE, delimiter=",")
self.mesh = Mesh(self.data[:, 0], self.data[:, 1], self.data[:, 2])
self.model = MultiSurface(FRANK2.surfaces)
| agpl-3.0 |
udrg/crazyflie-clients-python | lib/cfclient/ui/__init__.py | 32 | 1342 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Create a plugin helper that is passed to all the tabs and toolboxes for easy
access to objects that are needed.
"""
__author__ = 'Bitcraze AB'
__all__ = []
from cfclient.ui.pluginhelper import PluginHelper
pluginhelper = PluginHelper()
| gpl-2.0 |
asimshankar/tensorflow | tensorflow/python/client/virtual_gpu_test.py | 6 | 9651 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multiple virtual GPU support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class VirtualGpuTestUtil(object):
def __init__(self,
dim=1000,
num_ops=100,
virtual_devices_per_gpu=None,
device_probabilities=None):
self._dim = dim
self._num_ops = num_ops
if virtual_devices_per_gpu is None:
self._virtual_devices_per_gpu = [3]
else:
self._virtual_devices_per_gpu = virtual_devices_per_gpu
self._visible_device_list = [
i for i in range(len(self._virtual_devices_per_gpu))
]
gpu_devices = [
('/gpu:' + str(i)) for i in range(sum(self._virtual_devices_per_gpu))
]
self.devices = ['/cpu:0'] + gpu_devices
self._num_devices = len(self.devices)
# Each virtual device gets 2GB memory.
self._mem_limits_mb = [
([1 << 11] * i) for i in self._virtual_devices_per_gpu
]
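# Illustrative: with the default virtual_devices_per_gpu of [3] above, this
# yields [[2048, 2048, 2048]], i.e. one physical GPU split into three 2 GB
# virtual devices.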
self.config = self._GetSessionConfig()
if device_probabilities is not None:
self._device_probabilities = list(device_probabilities) # Deep copy
for i in range(1, self._num_devices):
self._device_probabilities[i] += self._device_probabilities[i - 1]
else:
# Each device gets same probability to be assigned an operation.
step = 1.0 / self._num_devices
self._device_probabilities = [
(x + 1) * step for x in range(self._num_devices)
]
# To prevent rounding errors from causing problems.
self._device_probabilities[self._num_devices - 1] = 1.1
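# Illustrative: with 4 devices the step is 0.25 and the cumulative thresholds
# become [0.25, 0.5, 0.75, 1.1]; the final 1.1 keeps random() draws near 1.0
# from falling outside every bucket.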
logging.info('dim: %d', self._dim)
logging.info('num_ops: %d', self._num_ops)
logging.info('visible_device_list: %s', str(self._visible_device_list))
logging.info('virtual_devices_per_gpu: %s',
str(self._virtual_devices_per_gpu))
logging.info('mem_limits: %s', str(self._mem_limits_mb))
logging.info('devices: %s', str(self.devices))
logging.info('config: %s', text_format.MessageToString(self.config))
logging.info('device_probabilities: %s', str(self._device_probabilities))
# Creates virtual GPU devices
def _GetSessionConfig(self):
virtual_device_gpu_options = config_pb2.GPUOptions(
visible_device_list=','.join(str(d) for d in self._visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(virtual_devices=[
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=i) for i in self._mem_limits_mb
]))
return config_pb2.ConfigProto(gpu_options=virtual_device_gpu_options)
# Generates a list of 3-tuples, each tuple contains the source and destination
# device index for a binary operation like 'add', like:
# (src_device_1, src_device_2, dst_device)
def _GenerateOperationPlacement(self):
result = []
for unused_i in range(self._num_ops):
op_device = ()
for unused_j in range(3):
random_num = random.random()
for device_index in range(self._num_devices):
if self._device_probabilities[device_index] > random_num:
op_device += (device_index,)
break
result.append(op_device)
return result
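# Illustrative output, assuming 4 devices and num_ops=3: something like
# [(0, 2, 1), (3, 3, 0), (1, 2, 2)], where the first two indices of each tuple
# pick the source operands and the third picks the destination device.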
# Logs part of the matrix for debugging purposes.
def _LogMatrix(self, mat, dim):
logging.info('---- printing the first 10*10 submatrix ----')
for i in range(min(10, dim)):
row = ''
for j in range(min(10, dim)):
row += ' ' + str(mat[i][j])
logging.info(row)
# Runs a list of 'add' operations where each operation satisfies the device
# placement constraints in `op_placement`, and returns the result.
def _TestRandomGraphWithDevices(self,
sess,
seed,
op_placement,
devices,
debug_mode=False):
data = []
shape = (self._dim, self._dim)
feed_dict = {}
# Initialize the matrices
for i in range(len(devices)):
with ops.device(devices[i]):
var = array_ops.placeholder(dtypes.float32, shape=shape)
np.random.seed(seed + i)
feed_dict[var] = np.random.uniform(
low=0, high=0.1, size=shape).astype(np.float32)
data.append(var)
# Run the 'add' operations on those matrices
for op in op_placement:
with ops.device(devices[op[2]]):
data[op[2]] = math_ops.add(data[op[0]], data[op[1]])
with ops.device('/cpu:0'):
s = data[0]
for i in range(1, len(data)):
s = math_ops.add(s, data[i])
if debug_mode:
logging.info(ops.get_default_graph().as_graph_def())
result = sess.run(s, feed_dict=feed_dict)
self._LogMatrix(result, self._dim)
return result
# Generates a random graph with `self._num_ops` 'add' operations, each
# operation placed on a different virtual device, and tests that the result is
# identical to the result obtained by running the same graph on the CPU only.
def TestRandomGraph(self, sess, op_placement=None, random_seed=None):
debug_mode = False
if op_placement is None:
op_placement = self._GenerateOperationPlacement()
else:
debug_mode = True
if random_seed is None:
random_seed = random.randint(0, 1 << 31)
else:
debug_mode = True
logging.info('Virtual gpu functional test for random graph...')
logging.info('operation placement: %s', str(op_placement))
logging.info('random seed: %d', random_seed)
# Run with multiple virtual gpus.
result_vgd = self._TestRandomGraphWithDevices(
sess, random_seed, op_placement, self.devices, debug_mode=debug_mode)
# Run with single cpu.
result_cpu = self._TestRandomGraphWithDevices(
sess,
random_seed,
op_placement, ['/cpu:0'] * self._num_devices,
debug_mode=debug_mode)
# Test the result
for i in range(self._dim):
for j in range(self._dim):
if result_vgd[i][j] != result_cpu[i][j]:
logging.error(
'Result mismatch at row %d column %d: expected %f, actual %f', i,
j, result_cpu[i][j], result_vgd[i][j])
logging.error('Devices: %s', self.devices)
logging.error('Memory limits (in MB): %s', self._mem_limits_mb)
return False
return True
class VirtualGpuTest(test_util.TensorFlowTestCase):
def __init__(self, method_name):
super(VirtualGpuTest, self).__init__(method_name)
self._util = VirtualGpuTestUtil()
def testStatsContainAllDeviceNames(self):
with self.session(config=self._util.config) as sess:
# TODO(laigd): b/70811538. The is_gpu_available() call will invoke
# DeviceFactory::AddDevices() with a default SessionOption, which prevents
# adding virtual devices in the future, thus must be called within a
# context of a session within which virtual devices are created. Same in
# the following test case.
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
mat_shape = [10, 10]
data = []
for d in self._util.devices:
with ops.device(d):
var = variables.Variable(random_ops.random_uniform(mat_shape))
self.evaluate(var.initializer)
data.append(var)
s = data[0]
for i in range(1, len(data)):
s = math_ops.add(s, data[i])
sess.run(s, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:2' in devices)
def testLargeRandomGraph(self):
with self.session(config=self._util.config) as sess:
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
for _ in range(5):
if not self._util.TestRandomGraph(sess):
return
if __name__ == '__main__':
test.main()
| apache-2.0 |
cetic/ansible | lib/ansible/utils/module_docs_fragments/dellos10.py | 303 | 2451 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
default: 22
username:
description:
- User to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Password to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
ssh_keyfile:
description:
- Path to an ssh key used to authenticate the SSH session to the remote
device. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
timeout:
description:
- Specifies idle timeout (in seconds) for the connection. Useful if the
console freezes before continuing. For example when saving
configurations.
default: 10
"""
| gpl-3.0 |
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/site-packages/numpy/lib/twodim_base.py | 2 | 27339 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
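# Illustrative: _min_int(0, 100) returns int8 and _min_int(0, 40000) returns
# int32, since the whole [low, high] range must fit in the returned type.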
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for
Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> from matplotlib.image import NonUniformImage
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
@set_module('numpy')
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
@set_module('numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@set_module('numpy')
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
yuantw/DataCollecting | spider/SpiderQYer.py | 1 | 6919 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@author: Tengwei Yuan
1. Get all links to summary pages, starting from the initial search results page
2. From each summary page, get all links to detail product pages
3. From each detail product page, get basic information and the number of review pages
4. From each review page, get review information
5. Save all extracted information to a file
'''
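# A minimal sketch of how the helpers below map onto the steps above
# (illustrative only; see main() and the __main__ block for the actual wiring):
#
#   get_next_page_urls(start_url)            # step 1: summary-page links
#   get_url_xpath(summary_page_url)          # step 2: detail product page links
#   row = get_items(detail_page_url)         # step 3: basic product information
#   get_review_items_from_page_url(url, ...) # step 4: reviews from a review page
#   save_csv(row)                            # step 5: persist one row to CSV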
import csv
import re
import string
import time
from urllib import urlopen
import requests
from bs4 import BeautifulSoup
from lxml import html
def clean_input(input):
input = re.sub('\n+', " ", input)
input = re.sub('\[[0-9]*\]', "", input)
input = re.sub(' +', " ", input)
input = bytes(input, "UTF-8")
input = input.decode("ascii", "ignore")
cleanInput = []
input = input.split(' ')
for item in input:
item = item.strip(string.punctuation)
if len(item) > 1 or (item.lower() == 'a' or item.lower() == 'i'):
cleanInput.append(item)
return cleanInput
def clean(item):
clean_item = ' '.join(' '.join(item).split()) # ' '.join(item).lstrip(' ').lstrip('\n').rstrip(' ').rstrip('\n')
return clean_item
### get all needed urls from current search page and also url to next page
def get_urls_re(start_url):
time.sleep(1)
html = urlopen(start_url)
base_url = 'http://place.qyer.com'
bsObj = BeautifulSoup(html, 'lxml')
# get needed urls from current page
for a in bsObj.findAll('a', href=True):
if re.findall('poi', a['href']):
print("Found the URL:", a['href'])
# get next page url
for link in bsObj.findAll("a", attrs={'class': 'ui_page_item ui_page_next'}):
if 'href' in link.attrs:
next_page_link = base_url + link.attrs['href']
print(next_page_link)
get_urls_re(next_page_link)
return
# 1
next_page_urls = set()
def get_next_page_urls(start_url):
cont = requests.get(start_url).content
tree = html.fromstring(cont)
base_url = 'http://place.qyer.com'
next_page_url_xpath = '//*[@title="下一页"]/@href'  # the "下一页" button label means "next page"
next_page_url = tree.xpath(next_page_url_xpath)
if next_page_url:
next_page_url = base_url + ''.join(next_page_url)
print(next_page_url)
next_page_urls.add(next_page_url)
get_next_page_urls(next_page_url)
# 2
page_urls = set()
def get_url_xpath(start_url):
time.sleep(1)
page_url_xpath = '//*[@id="poiLists"]/li[*]/div/h3/a/@href'
cont = requests.get(start_url).content
tree = html.fromstring(cont)
for input in tree.xpath(page_url_xpath):
page_url = clean_input(input)[0]
page_urls.add(page_url)
print(page_url)
# 3 get all needed items in current page
items_list = []
def get_items(page_url):
page = requests.get(page_url).content
tree = html.fromstring(page)
title_xpath = '//*[@class="poiDet-largeTit"]/h1[@class="en"]/a/text()'
body_xpath = '//*[@class="poiDet-detail"]/text()'
rank_title_xpath = '//div[@class="infos"]/div[1]/ul/li[@class="rank"]/text()'
rank_info_xpath = '//div[@class="infos"]/div[1]/ul/li[@class="rank"]/span/text()'
tips_xpath = '//*[@class="poiDet-tips"]/li[*]/div[@class="content"]/p/text()'
review_stars_xpath = '//span[@class="summery"]//text()'
review_counts_xpath = '//span[@class="summery"]/b/text()'
title = tree.xpath(title_xpath)
body = tree.xpath(body_xpath)
rank_title = tree.xpath(rank_title_xpath)
rank_info = tree.xpath(rank_info_xpath)
tips = tree.xpath(tips_xpath)
review_stars = tree.xpath(review_stars_xpath)
review_counts = tree.xpath(review_counts_xpath)
print(review_stars)
print(review_counts)
title = clean(title)
body = clean(body)
rank_title = clean(rank_title)
rank_info = clean(rank_info)
tips = clean(tips)
row = [title, rank_title + ': ' + rank_info, body, tips]
print(row)
return row
# 4
reviews = []
def get_review_items_from_page_url(page_url, initial_page_flag,
page): # if the page has been retrieved, use it instead of requesting it again
if not initial_page_flag:
page = requests.get(page_url).content
xml_tree = html.fromstring(page)
print(xml_tree)
# review_star_xpath = '//div[@class="hiddenComment"]/div/span/text()'
review_text_xpath = '//div[@class="hiddenComment"]/div/text()|//div[@class="hiddenComment"]/div/span/text()'
# review_star = xml_tree.xpath(review_star_xpath)
review_text = xml_tree.xpath(review_text_xpath)
print(review_text)
reviews.append([review_text])
def get_review_items_from_page_source(
page_source): # if the page has been retrieved, use it instead of requesting it again
xml_tree = html.fromstring(page_source)
# save_html(page)
review_star_xpath = '//div[@class="hiddenComment"]/div/span/text()'
review_text_xpath = '//div[@class="hiddenComment"]/div/text()'
review_star = xml_tree.xpath(review_star_xpath)
review_text = xml_tree.xpath(review_text_xpath)
print([review_star, review_text])
reviews.append([review_star, review_text])
def get_review_by_post(review_url, review_params):
response = requests.post(review_url, data=review_params)
content = response.text
content = bytes(content, "UTF-8")
content = content.decode("unicode-escape")
print(content)
from selenium import webdriver
def get_review_items_selenium(page_url):
browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
try:
browser.get(page_url)
time.sleep(10)
print(browser.find_element_by_class_name("ui_page_next").text)
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(3)
browser.find_element_by_xpath('//div[@class="qui-popup-closeIcon"]').click()
time.sleep(3)
browser.find_element_by_class_name("ui_page_next").click()
time.sleep(10)
finally:
browser.close()
def save_csv(list_data):
with open('reviews_qyer.csv', "a") as csv_file:
writer = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(list_data)
csv_file.close()
def save_bytes(page):
f = open('test.html', 'wb')
f.write(page)
f.close()
def main(start_url):
next_page_urls.add(start_url)
get_next_page_urls(start_url)
print('all next page urls')
print(next_page_urls)
for next_page_url in next_page_urls:
get_url_xpath(next_page_url)
page_urls.add(start_url)
for page_url in page_urls:
get_items(page_url)
print(reviews)
if __name__ == '__main__':
start_url = 'http://place.qyer.com/vancouver/food/?page=68'
start_url = 'http://place.qyer.com/poi/V2EJalFiBz9TZg/' ## Vancouver Fisherman's Wharf - 30 reviews, 3 review pages
get_items(start_url)
| mit |
thaim/ansible | lib/ansible/modules/cloud/docker/docker_network.py | 3 | 25612 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: docker_network
version_added: "2.2"
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
type: str
required: yes
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
type: list
elements: str
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
type: str
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
type: dict
force:
description:
- With state I(absent) forces disconnecting all containers from the
network prior to deleting the network. With state I(present) will
disconnect all containers, delete the network and re-create the
network. This option is required if you have changed the IPAM or
driver options and want an existing network to be updated to use the
new options.
type: bool
default: no
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
Use C(appends) to leave existing containers connected.
type: bool
default: no
aliases:
- incremental
enable_ipv6:
description:
- Enable IPv6 networking.
type: bool
version_added: "2.8"
ipam_driver:
description:
- Specify an IPAM driver.
type: str
ipam_driver_options:
description:
- Dictionary of IPAM driver options.
type: dict
version_added: "2.8"
ipam_options:
description:
- Dictionary of IPAM options.
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(ipam_config) instead. In Docker 1.10.0, IPAM
options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
parameter.
type: dict
suboptions:
subnet:
description:
- IP subset in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
ipam_config:
description:
- List of IPAM config blocks. Consult
L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
type: list
elements: dict
suboptions:
subnet:
description:
- IP subset in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
version_added: "2.8"
state:
description:
- I(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the C(force) option to disconnect all containers
and delete the network.
- I(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
C(appends) option to leave existing containers connected. Use the C(force)
options to force re-creation of the network.
type: str
default: present
choices:
- absent
- present
internal:
description:
- Restrict external access to the network.
type: bool
version_added: "2.8"
labels:
description:
- Dictionary of labels.
type: dict
version_added: "2.8"
scope:
description:
- Specify the network's scope.
type: str
choices:
- local
- global
- swarm
version_added: "2.8"
attachable:
description:
- If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
type: bool
version_added: "2.8"
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
- "Dave Bendit (@DBendit)"
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "The docker server >= 1.10.0"
'''
EXAMPLES = '''
- name: Create a network
docker_network:
name: network_one
- name: Remove all but selected list of containers
docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with driver options
docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
- name: Create a network with custom IPAM config
docker_network:
name: network_three
ipam_config:
- subnet: 172.3.27.0/24
gateway: 172.3.27.2
iprange: 172.3.27.0/26
aux_addresses:
host1: 172.3.27.3
host2: 172.3.27.4
- name: Create a network with labels
docker_network:
name: network_four
labels:
key1: value1
key2: value2
- name: Create a network with IPv6 IPAM config
docker_network:
name: network_ipv6_one
enable_ipv6: yes
ipam_config:
- subnet: fdd1:ac8c:0557:7ce1::/64
- name: Create a network with IPv6 and custom IPv4 IPAM config
docker_network:
name: network_ipv6_two
enable_ipv6: yes
ipam_config:
- subnet: 172.4.27.0/24
- subnet: fdd1:ac8c:0557:7ce2::/64
- name: Delete a network, disconnecting all containers
docker_network:
name: network_one
state: absent
force: yes
'''
RETURN = '''
network:
description:
- Network inspection results for the affected network.
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
are also accessible directly as C(docker_network). Note that the returned fact will be removed in Ansible 2.12.
returned: success
type: dict
sample: {}
'''
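# Illustrative usage of the return value (not part of the module's documented
# examples; "result" is a placeholder variable name):
#
#   - docker_network:
#       name: network_one
#     register: result
#
#   - debug:
#       var: result.network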
import re
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
DockerBaseClass,
docker_version,
DifferenceTracker,
clean_dict_booleans_for_docker_api,
RequestException,
)
try:
from docker import utils
from docker.errors import DockerException
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
from docker.types import IPAMPool, IPAMConfig
except Exception:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.name = None
self.connected = None
self.driver = None
self.driver_options = None
self.ipam_driver = None
self.ipam_driver_options = None
self.ipam_options = None
self.ipam_config = None
self.appends = None
self.force = None
self.internal = None
self.labels = None
self.debug = None
self.enable_ipv6 = None
self.scope = None
self.attachable = None
for key, value in client.module.params.items():
setattr(self, key, value)
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-2][0-9])$')
def get_ip_version(cidr):
"""Gets the IP version of a CIDR string
:param cidr: Valid CIDR
:type cidr: str
:return: ``ipv4`` or ``ipv6``
:rtype: str
:raises ValueError: If ``cidr`` is not a valid CIDR
"""
if CIDR_IPV4.match(cidr):
return 'ipv4'
elif CIDR_IPV6.match(cidr):
return 'ipv6'
raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
def normalize_ipam_config_key(key):
"""Normalizes IPAM config keys returned by Docker API to match Ansible keys
:param key: Docker API key
:type key: str
    :return: Ansible module key
    :rtype: str
"""
special_cases = {
'AuxiliaryAddresses': 'aux_addresses'
}
return special_cases.get(key, key.lower())
class DockerNetworkManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.diff_tracker = DifferenceTracker()
self.diff_result = dict()
self.existing_network = self.get_existing_network()
if not self.parameters.connected and self.existing_network:
self.parameters.connected = container_names_in_network(self.existing_network)
if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
self.parameters.ipam_config = [self.parameters.ipam_options]
if self.parameters.driver_options:
self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
if self.diff or self.check_mode or self.parameters.debug:
if self.diff:
self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
self.results['diff'] = self.diff_result
def get_existing_network(self):
return self.client.get_network(name=self.parameters.name)
def has_different_config(self, net):
'''
Evaluates an existing network and returns a tuple containing a boolean
indicating if the configuration is different and a list of differences.
:param net: the inspection output for an existing network
:return: (bool, list)
'''
differences = DifferenceTracker()
if self.parameters.driver and self.parameters.driver != net['Driver']:
differences.add('driver',
parameter=self.parameters.driver,
active=net['Driver'])
if self.parameters.driver_options:
if not net.get('Options'):
differences.add('driver_options',
parameter=self.parameters.driver_options,
active=net.get('Options'))
else:
for key, value in self.parameters.driver_options.items():
if not (key in net['Options']) or value != net['Options'][key]:
differences.add('driver_options.%s' % key,
parameter=value,
active=net['Options'].get(key))
if self.parameters.ipam_driver:
if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
differences.add('ipam_driver',
parameter=self.parameters.ipam_driver,
active=net.get('IPAM'))
if self.parameters.ipam_driver_options is not None:
ipam_driver_options = net['IPAM'].get('Options') or {}
if ipam_driver_options != self.parameters.ipam_driver_options:
differences.add('ipam_driver_options',
parameter=self.parameters.ipam_driver_options,
active=ipam_driver_options)
if self.parameters.ipam_config is not None and self.parameters.ipam_config:
if not net.get('IPAM') or not net['IPAM']['Config']:
differences.add('ipam_config',
parameter=self.parameters.ipam_config,
active=net.get('IPAM', {}).get('Config'))
else:
for idx, ipam_config in enumerate(self.parameters.ipam_config):
net_config = dict()
try:
ip_version = get_ip_version(ipam_config['subnet'])
for net_ipam_config in net['IPAM']['Config']:
if ip_version == get_ip_version(net_ipam_config['Subnet']):
net_config = net_ipam_config
except ValueError as e:
self.client.fail(str(e))
for key, value in ipam_config.items():
if value is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
camelkey = None
for net_key in net_config:
if key == normalize_ipam_config_key(net_key):
camelkey = net_key
break
if not camelkey or net_config.get(camelkey) != value:
differences.add('ipam_config[%s].%s' % (idx, key),
parameter=value,
active=net_config.get(camelkey) if camelkey else None)
if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
differences.add('enable_ipv6',
parameter=self.parameters.enable_ipv6,
active=net.get('EnableIPv6', False))
if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
differences.add('internal',
parameter=self.parameters.internal,
active=net.get('Internal'))
if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
differences.add('scope',
parameter=self.parameters.scope,
active=net.get('Scope'))
if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
differences.add('attachable',
parameter=self.parameters.attachable,
active=net.get('Attachable'))
if self.parameters.labels:
if not net.get('Labels'):
differences.add('labels',
parameter=self.parameters.labels,
active=net.get('Labels'))
else:
for key, value in self.parameters.labels.items():
if not (key in net['Labels']) or value != net['Labels'][key]:
differences.add('labels.%s' % key,
parameter=value,
active=net['Labels'].get(key))
return not differences.empty, differences
def create_network(self):
if not self.existing_network:
params = dict(
driver=self.parameters.driver,
options=self.parameters.driver_options,
)
ipam_pools = []
if self.parameters.ipam_config:
for ipam_pool in self.parameters.ipam_config:
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
ipam_pools.append(IPAMPool(**ipam_pool))
else:
ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
# Only add ipam parameter if a driver was specified or if IPAM parameters
            # were specified. Leaving this parameter out can significantly speed up
# creation; on my machine creation with this option needs ~15 seconds,
# and without just a few seconds.
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools,
options=self.parameters.ipam_driver_options)
else:
params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools)
if self.parameters.enable_ipv6 is not None:
params['enable_ipv6'] = self.parameters.enable_ipv6
if self.parameters.internal is not None:
params['internal'] = self.parameters.internal
if self.parameters.scope is not None:
params['scope'] = self.parameters.scope
if self.parameters.attachable is not None:
params['attachable'] = self.parameters.attachable
if self.parameters.labels:
params['labels'] = self.parameters.labels
if not self.check_mode:
resp = self.client.create_network(self.parameters.name, **params)
self.client.report_warnings(resp, ['Warning'])
self.existing_network = self.client.get_network(network_id=resp['Id'])
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
self.results['changed'] = True
def remove_network(self):
if self.existing_network:
self.disconnect_all_containers()
if not self.check_mode:
self.client.remove_network(self.parameters.name)
self.results['actions'].append("Removed network %s" % (self.parameters.name,))
self.results['changed'] = True
def is_container_connected(self, container_name):
return container_name in container_names_in_network(self.existing_network)
def connect_containers(self):
for name in self.parameters.connected:
if not self.is_container_connected(name):
if not self.check_mode:
self.client.connect_container_to_network(name, self.parameters.name)
self.results['actions'].append("Connected container %s" % (name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(name),
parameter=True,
active=False)
def disconnect_missing(self):
if not self.existing_network:
return
containers = self.existing_network['Containers']
if not containers:
return
for c in containers.values():
name = c['Name']
if name not in self.parameters.connected:
self.disconnect_container(name)
def disconnect_all_containers(self):
containers = self.client.get_network(name=self.parameters.name)['Containers']
if not containers:
return
for cont in containers.values():
self.disconnect_container(cont['Name'])
def disconnect_container(self, container_name):
if not self.check_mode:
self.client.disconnect_container_from_network(container_name, self.parameters.name)
self.results['actions'].append("Disconnected container %s" % (container_name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(container_name),
parameter=False,
active=True)
def present(self):
different = False
differences = DifferenceTracker()
if self.existing_network:
different, differences = self.has_different_config(self.existing_network)
self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
if self.parameters.force or different:
self.remove_network()
self.existing_network = None
self.create_network()
self.connect_containers()
if not self.parameters.appends:
self.disconnect_missing()
if self.diff or self.check_mode or self.parameters.debug:
self.diff_result['differences'] = differences.get_legacy_docker_diffs()
self.diff_tracker.merge(differences)
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
network_facts = self.get_existing_network()
self.results['ansible_facts'] = {u'docker_network': network_facts}
self.results['network'] = network_facts
def absent(self):
self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
self.remove_network()
def main():
argument_spec = dict(
name=dict(type='str', required=True, aliases=['network_name']),
connected=dict(type='list', default=[], elements='str', aliases=['containers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='bridge'),
driver_options=dict(type='dict', default={}),
force=dict(type='bool', default=False),
appends=dict(type='bool', default=False, aliases=['incremental']),
ipam_driver=dict(type='str'),
ipam_driver_options=dict(type='dict'),
ipam_options=dict(type='dict', default={}, options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
), removed_in_version='2.12'),
ipam_config=dict(type='list', elements='dict', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
enable_ipv6=dict(type='bool'),
internal=dict(type='bool'),
labels=dict(type='dict', default={}),
debug=dict(type='bool', default=False),
scope=dict(type='str', choices=['local', 'global', 'swarm']),
attachable=dict(type='bool'),
)
mutually_exclusive = [
('ipam_config', 'ipam_options')
]
option_minimal_versions = dict(
scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
labels=dict(docker_api_version='1.23'),
ipam_driver_options=dict(docker_py_version='2.0.0'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
min_docker_version='1.10.0',
min_docker_api_version='1.22',
# "The docker server >= 1.10.0"
option_minimal_versions=option_minimal_versions,
)
try:
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
except RequestException as e:
client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| mit |
sdaftuar/bitcoin | qa/rpc-tests/invalidtxrequest.py | 108 | 2576 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
'''
In this test we connect to one node over p2p, and test tx requests.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidTxRequestTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
NetworkThread().start() # Start up network handling in another thread
test.run()
def get_tests(self):
if self.tip is None:
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.block_time = int(time.time())+1
'''
Create a new block with an anyone-can-spend coinbase
'''
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Now we need that block to mature so we can spend the coinbase.
'''
test = TestInstance(sync_every_block=False)
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
# b'\x64' is OP_NOTIF
# Transaction will be rejected with code 16 (REJECT_INVALID)
tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])
# TODO: test further transactions...
if __name__ == '__main__':
InvalidTxRequestTest().main()
| mit |
SujaySKumar/django | tests/auth_tests/test_validators.py | 229 | 7546 | from __future__ import unicode_literals
import os
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import (
CommonPasswordValidator, MinimumLengthValidator, NumericPasswordValidator,
UserAttributeSimilarityValidator, get_default_password_validators,
get_password_validators, password_changed,
password_validators_help_text_html, password_validators_help_texts,
validate_password,
)
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django.utils._os import upath
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
class PasswordValidationTest(TestCase):
def test_get_default_password_validators(self):
validators = get_default_password_validators()
self.assertEqual(len(validators), 2)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(validators[1].__class__.__name__, 'MinimumLengthValidator')
self.assertEqual(validators[1].min_length, 12)
def test_get_password_validators_custom(self):
validator_config = [{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'}]
validators = get_password_validators(validator_config)
self.assertEqual(len(validators), 1)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(get_password_validators([]), [])
def test_validate_password(self):
self.assertIsNone(validate_password('sufficiently-long'))
msg_too_short = 'This password is too short. It must contain at least 12 characters.'
with self.assertRaises(ValidationError) as cm:
validate_password('django4242')
self.assertEqual(cm.exception.messages, [msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
validate_password('password')
self.assertEqual(cm.exception.messages, ['This password is too common.', msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
self.assertIsNone(validate_password('password', password_validators=[]))
def test_password_changed(self):
self.assertIsNone(password_changed('password'))
def test_password_validators_help_texts(self):
help_texts = password_validators_help_texts()
self.assertEqual(len(help_texts), 2)
self.assertIn('12 characters', help_texts[1])
self.assertEqual(password_validators_help_texts(password_validators=[]), [])
def test_password_validators_help_text_html(self):
help_text = password_validators_help_text_html()
self.assertEqual(help_text.count('<li>'), 2)
self.assertIn('12 characters', help_text)
class MinimumLengthValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is too short. It must contain at least %d characters."
self.assertIsNone(MinimumLengthValidator().validate('12345678'))
self.assertIsNone(MinimumLengthValidator(min_length=3).validate('123'))
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator().validate('1234567')
self.assertEqual(cm.exception.messages, [expected_error % 8])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator(min_length=3).validate('12')
self.assertEqual(cm.exception.messages, [expected_error % 3])
def test_help_text(self):
self.assertEqual(
MinimumLengthValidator().get_help_text(),
"Your password must contain at least 8 characters."
)
class UserAttributeSimilarityValidatorTest(TestCase):
def test_validate(self):
user = User.objects.create(
username='testclient', first_name='Test', last_name='Client', email='testclient@example.com',
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
)
expected_error = "The password is too similar to the %s."
self.assertIsNone(UserAttributeSimilarityValidator().validate('testclient'))
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate('testclient', user=user),
self.assertEqual(cm.exception.messages, [expected_error % "username"])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_similar')
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate('example.com', user=user),
self.assertEqual(cm.exception.messages, [expected_error % "email address"])
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=['first_name'],
max_similarity=0.3,
).validate('testclient', user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
self.assertIsNone(
UserAttributeSimilarityValidator(user_attributes=['first_name']).validate('testclient', user=user)
)
def test_help_text(self):
self.assertEqual(
UserAttributeSimilarityValidator().get_help_text(),
"Your password can't be too similar to your other personal information."
)
class CommonPasswordValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is too common."
self.assertIsNone(CommonPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
CommonPasswordValidator().validate('godzilla')
self.assertEqual(cm.exception.messages, [expected_error])
def test_validate_custom_list(self):
path = os.path.join(os.path.dirname(os.path.realpath(upath(__file__))), 'common-passwords-custom.txt')
validator = CommonPasswordValidator(password_list_path=path)
expected_error = "This password is too common."
self.assertIsNone(validator.validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
validator.validate('from-my-custom-list')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
def test_help_text(self):
self.assertEqual(
CommonPasswordValidator().get_help_text(),
"Your password can't be a commonly used password."
)
class NumericPasswordValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is entirely numeric."
self.assertIsNone(NumericPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
NumericPasswordValidator().validate('42424242')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_entirely_numeric')
def test_help_text(self):
self.assertEqual(
NumericPasswordValidator().get_help_text(),
"Your password can't be entirely numeric."
)
| bsd-3-clause |
cfenoy/easybuild-easyblocks | easybuild/easyblocks/j/java.py | 12 | 1652 | ##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBlock for installing Java, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
"""
from easybuild.easyblocks.generic.packedbinary import PackedBinary
class EB_Java(PackedBinary):
"""Support for installing Java as a packed binary file (.tar.gz)
Use the PackedBinary easyblock and set some extra paths.
"""
def make_module_extra(self):
"""
Set JAVA_HOME to install dir
"""
txt = PackedBinary.make_module_extra(self)
txt += self.module_generator.set_environment('JAVA_HOME', self.installdir)
return txt
| gpl-2.0 |
defionscode/ansible | lib/ansible/modules/storage/infinidat/infini_host.py | 70 | 3806 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_host
version_added: 2.3
short_description: Create, Delete and Modify Hosts on Infinibox
description:
- This module creates, deletes or modifies hosts on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Host Name
required: true
state:
description:
- Creates/Modifies Host when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
wwns:
description:
- List of wwns of the host
required: false
volume:
description:
- Volume name to map to the host
required: false
extends_documentation_fragment:
- infinibox
'''
EXAMPLES = '''
- name: Create a new host
infini_host:
name: foo.example.com
user: admin
password: secret
system: ibox001
- name: Make sure host bar is available with wwn ports
infini_host:
name: bar.example.com
wwns:
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
system: ibox01
user: admin
password: secret
- name: Map host foo.example.com to volume bar
infini_host:
name: foo.example.com
volume: bar
system: ibox01
user: admin
password: secret
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_host(module, system):
host = None
for h in system.hosts.to_list():
if h.get_name() == module.params['name']:
host = h
break
return host
@api_wrapper
def create_host(module, system):
changed = True
if not module.check_mode:
host = system.hosts.create(name=module.params['name'])
if module.params['wwns']:
for p in module.params['wwns']:
host.add_fc_port(p)
if module.params['volume']:
host.map_volume(system.volumes.get(name=module.params['volume']))
module.exit_json(changed=changed)
@api_wrapper
def update_host(module, host):
changed = False
module.exit_json(changed=changed)
@api_wrapper
def delete_host(module, host):
changed = True
if not module.check_mode:
host.delete()
module.exit_json(changed=changed)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
wwns=dict(type='list'),
volume=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg='infinisdk is required for this module')
state = module.params['state']
system = get_system(module)
host = get_host(module, system)
if module.params['volume']:
try:
system.volumes.get(name=module.params['volume'])
except:
module.fail_json(msg='Volume {} not found'.format(module.params['volume']))
if host and state == 'present':
update_host(module, host)
elif host and state == 'absent':
delete_host(module, host)
elif host is None and state == 'absent':
module.exit_json(changed=False)
else:
create_host(module, system)
if __name__ == '__main__':
main()
| gpl-3.0 |
dynm/miasm | miasm2/analysis/disasm_cb.py | 7 | 3953 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from miasm2.expression.expression import ExprInt, ExprId, ExprMem, MatchExpr
from miasm2.expression.simplifications import expr_simp
from miasm2.core.asmbloc \
import asm_symbol_pool, asm_constraint_next, asm_constraint_to
from miasm2.core.utils import upck32
# from miasm2.core.graph import DiGraph
def get_ira(mnemo, attrib):
arch = mnemo.name, attrib
if arch == ("arm", "arm"):
from miasm2.arch.arm.ira import ir_a_arm_base as ira
elif arch == ("x86", 32):
from miasm2.arch.x86.ira import ir_a_x86_32 as ira
elif arch == ("x86", 64):
from miasm2.arch.x86.ira import ir_a_x86_64 as ira
else:
raise ValueError('unknown architecture: %s' % mnemo.name)
return ira
def arm_guess_subcall(
mnemo, attrib, pool_bin, cur_bloc, offsets_to_dis, symbol_pool):
ira = get_ira(mnemo, attrib)
sp = asm_symbol_pool()
ir_arch = ira(sp)
print '###'
print cur_bloc
ir_arch.add_bloc(cur_bloc)
ir_blocs = ir_arch.blocs.values()
# flow_graph = DiGraph()
to_add = set()
for irb in ir_blocs:
# print 'X'*40
# print irb
pc_val = None
lr_val = None
for exprs in irb.irs:
for e in exprs:
if e.dst == ir_arch.pc:
pc_val = e.src
if e.dst == mnemo.regs.LR:
lr_val = e.src
if pc_val is None or lr_val is None:
continue
if not isinstance(lr_val, ExprInt):
continue
l = cur_bloc.lines[-1]
if lr_val.arg != l.offset + l.l:
continue
# print 'IS CALL!'
l = symbol_pool.getby_offset_create(int(lr_val.arg))
c = asm_constraint_next(l)
to_add.add(c)
offsets_to_dis.add(int(lr_val.arg))
# if to_add:
# print 'R'*70
for c in to_add:
# print c
cur_bloc.addto(c)
def arm_guess_jump_table(
mnemo, attrib, pool_bin, cur_bloc, offsets_to_dis, symbol_pool):
ira = get_ira(mnemo, attrib)
jra = ExprId('jra')
jrb = ExprId('jrb')
sp = asm_symbol_pool()
ir_arch = ira(sp)
ir_arch.add_bloc(cur_bloc)
ir_blocs = ir_arch.blocs.values()
for irb in ir_blocs:
# print 'X'*40
# print irb
pc_val = None
# lr_val = None
for exprs in irb.irs:
for e in exprs:
if e.dst == ir_arch.pc:
pc_val = e.src
# if e.dst == mnemo.regs.LR:
# lr_val = e.src
if pc_val is None:
continue
if not isinstance(pc_val, ExprMem):
continue
assert(pc_val.size == 32)
print pc_val
ad = pc_val.arg
ad = expr_simp(ad)
print ad
res = MatchExpr(ad, jra + jrb, set([jra, jrb]))
if res is False:
raise NotImplementedError('not fully functional')
print res
if not isinstance(res[jrb], ExprInt):
raise NotImplementedError('not fully functional')
base_ad = int(res[jrb].arg)
print base_ad
addrs = set()
i = -1
max_table_entry = 10000
max_diff_addr = 0x100000 # heuristic
while i < max_table_entry:
i += 1
try:
ad = upck32(pool_bin.getbytes(base_ad + 4 * i, 4))
except:
break
if abs(ad - base_ad) > max_diff_addr:
break
addrs.add(ad)
print [hex(x) for x in addrs]
for ad in addrs:
offsets_to_dis.add(ad)
l = symbol_pool.getby_offset_create(ad)
c = asm_constraint_to(l)
cur_bloc.addto(c)
guess_funcs = []
def guess_multi_cb(
mnemo, attrib, pool_bin, cur_bloc, offsets_to_dis, symbol_pool):
for f in guess_funcs:
f(mnemo, attrib, pool_bin, cur_bloc, offsets_to_dis, symbol_pool)
| gpl-2.0 |
CTSRD-CHERI/u-boot | doc/sphinx/cdomain.py | 296 | 5596 | # -*- coding: utf-8; mode: python -*-
# pylint: disable=W0141,C0113,C0103,C0325
u"""
cdomain
~~~~~~~
Replacement for the sphinx c-domain.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
List of customizations:
* Moved the *duplicate C object description* warnings for function
      declarations into the nitpicky mode. See the Sphinx documentation for
the config values for ``nitpick`` and ``nitpick_ignore``.
* Add option 'name' to the "c:function:" directive. With option 'name' the
ref-name of a function can be modified. E.g.::
.. c:function:: int ioctl( int fd, int request )
:name: VIDIOC_LOG_STATUS
The func-name (e.g. ioctl) remains in the output but the ref-name changed
from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
* :c:func:`VIDIOC_LOG_STATUS` or
* :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
* Handle signatures of function-like macros well. Don't try to deduce
      argument types of function-like macros.
"""
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx import addnodes
from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
from sphinx.domains.c import CObject as Base_CObject
from sphinx.domains.c import CDomain as Base_CDomain
__version__ = '1.0'
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
def setup(app):
app.override_domain(CDomain)
return dict(
version = __version__,
parallel_read_safe = True,
parallel_write_safe = True
)
class CObject(Base_CObject):
"""
Description of a C language object.
"""
option_spec = {
"name" : directives.unchanged
}
def handle_func_like_macro(self, sig, signode):
u"""Handles signatures of function-like macros.
        If the objtype is 'function' and the signature ``sig`` is a
function-like macro, the name of the macro is returned. Otherwise
``False`` is returned. """
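        # Illustrative example (not from the original source): a signature such
        # as "DIV_ROUND_UP(n, d)" has no return type and only untyped
        # arguments, so it is treated as a function-like macro and
        # "DIV_ROUND_UP" is returned, whereas "int foo(int bar)" makes this
        # method return False.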
if not self.objtype == 'function':
return False
m = c_funcptr_sig_re.match(sig)
if m is None:
m = c_sig_re.match(sig)
if m is None:
raise ValueError('no match')
rettype, fullname, arglist, _const = m.groups()
arglist = arglist.strip()
if rettype or not arglist:
return False
arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
arglist = [a.strip() for a in arglist.split(",")]
        # does the first argument have a type?
if len(arglist[0].split(" ")) > 1:
return False
        # This is a function-like macro; its arguments are typeless!
signode += addnodes.desc_name(fullname, fullname)
paramlist = addnodes.desc_parameterlist()
signode += paramlist
for argname in arglist:
param = addnodes.desc_parameter('', '', noemph=True)
# separate by non-breaking space in the output
param += nodes.emphasis(argname, argname)
paramlist += param
return fullname
def handle_signature(self, sig, signode):
"""Transform a C signature into RST nodes."""
fullname = self.handle_func_like_macro(sig, signode)
if not fullname:
fullname = super(CObject, self).handle_signature(sig, signode)
if "name" in self.options:
if self.objtype == 'function':
fullname = self.options["name"]
else:
# FIXME: handle :name: value of other declaration types?
pass
return fullname
def add_target_and_index(self, name, sig, signode):
# for C API items we add a prefix since names are usually not qualified
# by a module name and so easily clash with e.g. section titles
targetname = 'c.' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
inv = self.env.domaindata['c']['objects']
if (name in inv and self.env.config.nitpicky):
if self.objtype == 'function':
if ('c:func', name) not in self.env.config.nitpick_ignore:
self.state_machine.reporter.warning(
'duplicate C object description of %s, ' % name +
'other instance in ' + self.env.doc2path(inv[name][0]),
line=self.lineno)
inv[name] = (self.env.docname, self.objtype)
indextext = self.get_index_text(name)
if indextext:
if major == 1 and minor < 4:
# indexnode's tuple changed in 1.4
# https://github.com/sphinx-doc/sphinx/commit/e6a5a3a92e938fcd75866b4227db9e0524d58f7c
self.indexnode['entries'].append(
('single', indextext, targetname, ''))
else:
self.indexnode['entries'].append(
('single', indextext, targetname, '', None))
class CDomain(Base_CDomain):
"""C language domain."""
name = 'c'
label = 'C'
directives = {
'function': CObject,
'member': CObject,
'macro': CObject,
'type': CObject,
'var': CObject,
}
| gpl-2.0 |
jvassev/dd-agent | tests/core/test_flare.py | 24 | 4408 | # stdlib
import os.path
import unittest
# 3p
import mock
from nose.plugins.attrib import attr
# project
from utils.flare import Flare
def get_mocked_config():
return {
'api_key': 'APIKEY',
'dd_url': 'https://app.datadoghq.com',
}
def get_mocked_version():
return '6.6.6'
def get_mocked_temp():
return os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'fixtures',
'flare'
)
def mocked_strftime(t):
return '1'
class FakeResponse(object):
def __init__(self, status_code=200):
self.status_code = status_code
self.text = '{"case_id":1337}'
def json(self):
return {'case_id': 1337}
def raise_for_status(self):
return None
class FlareTest(unittest.TestCase):
@mock.patch('utils.flare.strftime', side_effect=mocked_strftime)
@mock.patch('tempfile.gettempdir', side_effect=get_mocked_temp)
@mock.patch('config.get_version', side_effect=get_mocked_version)
@mock.patch('utils.flare.get_config', side_effect=get_mocked_config)
def test_init(self, mock_config, mock_version, mock_tempdir, mock_strftime):
f = Flare(case_id=1337)
conf = mock_config()
self.assertEqual(f._case_id, 1337)
self.assertEqual(f._api_key, conf['api_key'])
self.assertEqual(f._url, 'https://6-6-6-flare.agent.datadoghq.com/support/flare')
self.assertEqual(f._tar_path, os.path.join(get_mocked_temp(), "datadog-agent-1.tar.bz2"))
@mock.patch('utils.flare.requests.post', return_value=FakeResponse())
@mock.patch('config.get_version', side_effect=get_mocked_version)
@mock.patch('utils.flare.strftime', side_effect=mocked_strftime)
@mock.patch('tempfile.gettempdir', side_effect=get_mocked_temp)
@mock.patch('utils.flare.get_config', side_effect=get_mocked_config)
def test_upload_with_case(self, mock_config, mock_tempdir, mock_stfrtime, mock_version, mock_requests):
f = Flare(case_id=1337)
f._ask_for_email = lambda: 'test@example.com'
assert not mock_requests.called
f.upload()
assert mock_requests.called
args, kwargs = mock_requests.call_args_list[0]
self.assertEqual(
args,
('https://6-6-6-flare.agent.datadoghq.com/support/flare/1337?api_key=APIKEY',)
)
self.assertEqual(
kwargs['files']['flare_file'].name,
os.path.join(get_mocked_temp(), "datadog-agent-1.tar.bz2")
)
self.assertEqual(kwargs['data']['case_id'], 1337)
self.assertEqual(kwargs['data']['email'], 'test@example.com')
assert kwargs['data']['hostname']
@mock.patch('utils.flare.requests.post', return_value=FakeResponse())
@mock.patch('config.get_version', side_effect=get_mocked_version)
@mock.patch('utils.flare.strftime', side_effect=mocked_strftime)
@mock.patch('tempfile.gettempdir', side_effect=get_mocked_temp)
@mock.patch('utils.flare.get_config', side_effect=get_mocked_config)
def test_upload_no_case(self, mock_config, mock_tempdir, mock_stfrtime, mock_version, mock_requests):
f = Flare()
f._ask_for_email = lambda: 'test@example.com'
assert not mock_requests.called
f.upload()
assert mock_requests.called
args, kwargs = mock_requests.call_args_list[0]
self.assertEqual(
args,
('https://6-6-6-flare.agent.datadoghq.com/support/flare?api_key=APIKEY',)
)
self.assertEqual(
kwargs['files']['flare_file'].name,
os.path.join(get_mocked_temp(), "datadog-agent-1.tar.bz2")
)
self.assertEqual(kwargs['data']['case_id'], None)
self.assertEqual(kwargs['data']['email'], 'test@example.com')
assert kwargs['data']['hostname']
@attr(requires='core_integration')
@mock.patch('utils.flare.strftime', side_effect=mocked_strftime)
@mock.patch('tempfile.gettempdir', side_effect=get_mocked_temp)
@mock.patch('utils.flare.get_config', side_effect=get_mocked_config)
def test_endpoint(self, mock_config, mock_temp, mock_stfrtime):
f = Flare()
f._ask_for_email = lambda: None
try:
f.upload()
raise Exception('Should fail before')
except Exception, e:
self.assertEqual(str(e), "Your request is incorrect: Invalid inputs: 'API key unknown'")
| bsd-3-clause |
benhoff/simpleyapsy | pluginmanager/file_filters/with_info_file.py | 2 | 3952 |
import os
from pluginmanager import util
from pluginmanager.compat import ConfigParser, FILE_ERROR
from . import PLUGIN_FORBIDDEN_NAME
class WithInfoFileFilter(object):
"""
Only gets files that have configuration files ending with specific
extensions, nominally 'yapsy-plugin'
"""
def __init__(self, extensions='yapsy-plugin'):
self.extensions = util.return_list(extensions)
def set_file_extensions(self, extensions):
extensions = util.return_list(extensions)
self.extensions = extensions
def add_file_extensions(self, extensions):
extensions = util.return_list(extensions)
self.extensions.extend(extensions)
def __call__(self, filepaths):
return self.get_plugin_filepaths(filepaths)
def get_info_and_filepaths(self, filepaths):
plugin_information = self.get_plugin_infos(filepaths)
plugin_filepaths = self.get_plugin_filepaths(filepaths,
plugin_information)
return plugin_information, plugin_filepaths
def get_plugin_filepaths(self, filepaths, plugin_infos=None):
# Enforce uniqueness of filepaths in `PluginLocator`
plugin_filepaths = set()
# if we've already got the infos list, get plugin filepath from those
if plugin_infos is None:
plugin_infos = self.get_plugin_infos(filepaths)
for plugin_info in plugin_infos:
path = plugin_info['path']
plugin_filepaths.add(path)
return plugin_filepaths
def plugin_valid(self, filepath):
"""
        Checks whether the plugin filepath ends with one of the
        approved file extensions.
"""
plugin_valid = False
for extension in self.extensions:
if filepath.endswith(".{}".format(extension)):
plugin_valid = True
break
return plugin_valid
def get_plugin_infos(self, filepaths):
plugin_infos = []
config_parser = ConfigParser()
config_filepaths = config_parser.read(filepaths)
for config_filepath in config_filepaths:
if self.plugin_valid(config_filepath):
dir_path, _ = os.path.split(config_filepath)
config_dict = {}
with open(config_filepath) as f:
config_parser.read_file(f)
config_dict = self._parse_config_details(config_parser,
dir_path)
if self._valid_config(config_dict):
plugin_infos.append(config_dict)
return plugin_infos
def _parse_config_details(self, config_parser, dir_path):
# get all data out of config_parser
config_dict = {s: {k: v for k, v in config_parser.items(s)} for s in config_parser.sections()} # noqa
# now remove and parse data stored in "Core" key
core_config = config_dict.pop("Core")
# change and store the relative path in Module to absolute
relative_path = core_config.pop('module')
path = os.path.join(dir_path, relative_path)
if os.path.isfile(path + '.py'):
path += '.py'
elif (os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))):
path = os.path.join(path, '__init__.py')
else:
raise FILE_ERROR('{} not a `.py` file or a dir'.format(path))
config_dict['path'] = path
# grab and store the name, strip whitespace
config_dict['name'] = core_config["name"].strip()
return config_dict
def _valid_config(self, config):
valid_config = False
if "name" in config and "path" in config:
valid_config = True
name = config['name']
name = name.strip()
if PLUGIN_FORBIDDEN_NAME in name:
valid_config = False
return valid_config
| gpl-3.0 |
jumpojoy/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py | 17 | 4026 | # Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as logging
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
# Field name mappings (from Ryu to ovs-ofctl)
_keywords = {
'eth_src': 'dl_src',
'eth_dst': 'dl_dst',
'ipv4_src': 'nw_src',
'ipv4_dst': 'nw_dst',
'table_id': 'table',
}
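# Illustrative translation (example values are made up): a call such as
#   br.install_drop(table_id=23, priority=10, eth_src='12:34:56:78:9a:bc')
# passes table_id/priority through explicitly, while _conv_args() below
# rewrites the remaining keyword argument to dl_src=12:34:56:78:9a:bc before
# it reaches add_flow().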
class OpenFlowSwitchMixin(object):
"""Mixin to provide common convenient routines for an openflow switch."""
@staticmethod
def _conv_args(kwargs):
for our_name, ovs_ofctl_name in _keywords.items():
if our_name in kwargs:
kwargs[ovs_ofctl_name] = kwargs.pop(our_name)
return kwargs
def dump_flows(self, table_id):
return self.dump_flows_for_table(table_id)
def dump_flows_all_tables(self):
return self.dump_all_flows()
def install_goto_next(self, table_id):
self.install_goto(table_id=table_id, dest_table_id=table_id + 1)
def install_output(self, port, table_id=0, priority=0, **kwargs):
self.add_flow(table=table_id,
priority=priority,
actions="output:%s" % port,
**self._conv_args(kwargs))
def install_normal(self, table_id=0, priority=0, **kwargs):
self.add_flow(table=table_id,
priority=priority,
actions="normal",
**self._conv_args(kwargs))
def install_goto(self, dest_table_id, table_id=0, priority=0, **kwargs):
self.add_flow(table=table_id,
priority=priority,
actions="resubmit(,%s)" % dest_table_id,
**self._conv_args(kwargs))
def install_drop(self, table_id=0, priority=0, **kwargs):
self.add_flow(table=table_id,
priority=priority,
actions="drop",
**self._conv_args(kwargs))
def delete_flows(self, **kwargs):
# NOTE(yamamoto): super() points to ovs_lib.OVSBridge.
        # See ovs_bridge.py for how this class is actually used.
if kwargs:
super(OpenFlowSwitchMixin, self).delete_flows(
**self._conv_args(kwargs))
else:
super(OpenFlowSwitchMixin, self).remove_all_flows()
def _filter_flows(self, flows):
LOG.debug("Agent uuid stamp used to filter flows: %s",
self.agent_uuid_stamp)
cookie_re = re.compile('cookie=(0x[A-Fa-f0-9]*)')
table_re = re.compile('table=([0-9]*)')
for flow in flows:
fl_cookie = cookie_re.search(flow)
if not fl_cookie:
continue
fl_cookie = fl_cookie.group(1)
if int(fl_cookie, 16) != self.agent_uuid_stamp:
fl_table = table_re.search(flow)
if not fl_table:
continue
fl_table = fl_table.group(1)
yield flow, fl_cookie, fl_table
def cleanup_flows(self):
flows = self.dump_flows_all_tables()
for flow, cookie, table in self._filter_flows(flows):
# deleting a stale flow should be rare.
# it might deserve some attention
LOG.warning(_LW("Deleting flow %s"), flow)
self.delete_flows(cookie=cookie + '/-1', table=table)
| apache-2.0 |
gonboy/sl4a | python/src/PC/VS8.0/build_ssl.py | 32 | 9154 | # Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. An svn
# checkout from our svn repository is enough.
#
# In order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
fh = os.popen(perl + ' -e "use Win32;"')
fh.read()
rc = fh.close()
if rc:
continue
return perl
print("Can not find a suitable PERL:")
if perls:
print(" the following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print(" NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
return None
# Locate the best SSL directory given a few roots to look into.
def find_best_ssl_dir(sources):
candidates = []
for s in sources:
try:
# note: do not abspath s; the build will fail if any
# higher up directory name has spaces in it.
fnames = os.listdir(s)
except os.error:
fnames = []
for fname in fnames:
fqn = os.path.join(s, fname)
if os.path.isdir(fqn) and fname.startswith("openssl-"):
candidates.append(fqn)
# Now we have all the candidates, locate the best.
best_parts = []
best_name = None
for c in candidates:
parts = re.split("[.-]", os.path.basename(c))[1:]
# eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
if len(parts) >= 4:
continue
if parts > best_parts:
best_parts = parts
best_name = c
if best_name is not None:
print("Found an SSL directory at '%s'" % (best_name,))
else:
print("Could not find an SSL directory in '%s'" % (sources,))
sys.stdout.flush()
return best_name
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
def run_configure(configure, do_script):
print("perl Configure "+configure)
os.system("perl Configure "+configure)
print(do_script)
os.system(do_script)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl is None:
print("No Perl installation was found. Existing Makefiles are used.")
print("Found a working perl at '%s'" % (perl,))
sys.stdout.flush()
# Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
ssl_dir = find_best_ssl_dir(("..\\..\\..",))
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild the makefile when we roll over from the 32 bit to the 64 bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
shutil.copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
shutil.copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# Now run make.
if arch == "amd64":
rc = os.system(r"ml64 -c -Foms\uptable.obj ms\uptable.asm")
if rc:
print("ml64 assembler has failed.")
sys.exit(rc)
shutil.copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
shutil.copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
#makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
makeCommand = "nmake /nologo -f \"%s\"" % makefile
print("Executing ssl makefiles:", makeCommand)
sys.stdout.flush()
rc = os.system(makeCommand)
if rc:
print("Executing "+makefile+" failed")
print(rc)
sys.exit(rc)
finally:
os.chdir(old_cd)
sys.exit(rc)
if __name__=='__main__':
main()
| apache-2.0 |
cybojenix/SlimBot | disabled_stuff/rdio.py | 6 | 4696 | import urllib
import json
import re
import oauth2 as oauth
from util import hook
def getdata(inp, types, api_key, api_secret):
consumer = oauth.Consumer(api_key, api_secret)
client = oauth.Client(consumer)
response = client.request('http://api.rdio.com/1/', 'POST',
urllib.urlencode({'method': 'search', 'query': inp, 'types': types, 'count': '1'}))
data = json.loads(response[1])
return data
@hook.command
def rdio(inp, bot=None):
""" rdio <search term> - alternatives: .rdiot (track), .rdioar (artist), .rdioal (album) """
api_key = bot.config.get("api_keys", {}).get("rdio_key")
api_secret = bot.config.get("api_keys", {}).get("rdio_secret")
if not api_key:
return "error: no api key set"
data = getdata(inp, "Track,Album,Artist", api_key, api_secret)
try:
info = data['result']['results'][0]
except IndexError:
return "No results."
if 'name' in info:
if 'artist' in info and 'album' in info: # Track
name = info['name']
artist = info['artist']
album = info['album']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url)
elif 'artist' in info and not 'album' in info: # Album
name = info['name']
artist = info['artist']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
else: # Artist
name = info['name']
url = info['shortUrl']
return u"\x02{}\x02 - {}".format(name, url)
@hook.command
def rdiot(inp, bot=None):
""" rdiot <search term> - Search for tracks on rdio """
api_key = bot.config.get("api_keys", {}).get("rdio_key")
api_secret = bot.config.get("api_keys", {}).get("rdio_secret")
if not api_key:
return "error: no api key set"
data = getdata(inp, "Track", api_key, api_secret)
try:
info = data['result']['results'][0]
except IndexError:
return "No results."
name = info['name']
artist = info['artist']
album = info['album']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {} - {}".format(name, artist, album, url)
@hook.command
def rdioar(inp, bot=None):
""" rdioar <search term> - Search for artists on rdio """
api_key = bot.config.get("api_keys", {}).get("rdio_key")
api_secret = bot.config.get("api_keys", {}).get("rdio_secret")
if not api_key:
return "error: no api key set"
data = getdata(inp, "Artist", api_key, api_secret)
try:
info = data['result']['results'][0]
except IndexError:
return "No results."
name = info['name']
url = info['shortUrl']
return u"\x02{}\x02 - {}".format(name, url)
@hook.command
def rdioal(inp, bot=None):
""" rdioal <search term> - Search for albums on rdio """
api_key = bot.config.get("api_keys", {}).get("rdio_key")
api_secret = bot.config.get("api_keys", {}).get("rdio_secret")
if not api_key:
return "error: no api key set"
data = getdata(inp, "Album", api_key, api_secret)
try:
info = data['result']['results'][0]
except IndexError:
return "No results."
name = info['name']
artist = info['artist']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
rdio_re = (r'(.*:)//(rd.io|www.rdio.com|rdio.com)(:[0-9]+)?(.*)', re.I)
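# Illustrative matches (made-up identifiers): URLs such as
#   http://rd.io/x/QitDVTk4  or  https://www.rdio.com/artist/some_artist/
# are picked up by the regex; the handler below rebuilds the URL from the
# captured scheme, host and path and passes it to the getObjectFromUrl call.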
@hook.regex(*rdio_re)
def rdio_url(match, bot=None):
api_key = bot.config.get("api_keys", {}).get("rdio_key")
api_secret = bot.config.get("api_keys", {}).get("rdio_secret")
if not api_key:
return None
url = match.group(1) + "//" + match.group(2) + match.group(4)
consumer = oauth.Consumer(api_key, api_secret)
client = oauth.Client(consumer)
response = client.request('http://api.rdio.com/1/', 'POST',
urllib.urlencode({'method': 'getObjectFromUrl', 'url': url}))
data = json.loads(response[1])
info = data['result']
if 'name' in info:
if 'artist' in info and 'album' in info: # Track
name = info['name']
artist = info['artist']
album = info['album']
return u"Rdio track: \x02{}\x02 by \x02{}\x02 - {}".format(name, artist, album)
elif 'artist' in info and 'album' not in info: # Album
name = info['name']
artist = info['artist']
return u"Rdio album: \x02{}\x02 by \x02{}\x02".format(name, artist)
else: # Artist
name = info['name']
return u"Rdio artist: \x02{}\x02".format(name)
| gpl-3.0 |
fly19890211/edx-platform | lms/djangoapps/instructor_task/tasks_helper.py | 26 | 65045 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
import re
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import transaction, reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from track.views import task_track
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xblock.runtime import KvsFieldData
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import (
enrolled_students_features,
get_proctored_exam_results,
list_may_enroll,
list_problem_responses
)
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from verify_student.models import SoftwareSecurePhotoVerification
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'
# The name of the event emitted when an instructor report is requested.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
"""
Base task class for use with InstructorTask models.
Permits updating information about task in corresponding InstructorTask for monitoring purposes.
Assumes that the entry_id of the InstructorTask model is the first argument to the task.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
updates the entry on success and failure of the task it wraps. It sets the entry's
task_state to the value that Celery would set once the task returns to Celery:
FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to perform_module_state_update, and documented there.
"""
abstract = True
def on_success(self, task_progress, task_id, args, kwargs):
"""
Update InstructorTask object corresponding to this task with info about success.
Updates task_output and task_state. But it shouldn't actually do anything
if the task is only creating subtasks to actually do the work.
Assumes `task_progress` is a dict containing the task's result, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
"""
TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
# We should be able to find the InstructorTask object to update
# based on the task_id here, without having to dig into the
# original args to the task. On the other hand, the entry_id
# is the first value passed to all such args, so we'll use that.
# And we assume that it exists, else we would already have had a failure.
entry_id = args[0]
entry = InstructorTask.objects.get(pk=entry_id)
# Check to see if any subtasks had been defined as part of this task.
# If not, then we know that we're done. (If so, let the subtasks
# handle updating task_state themselves.)
if len(entry.subtasks) == 0:
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Update InstructorTask object corresponding to this task with info about failure.
Fetches and updates exception and traceback information on failure.
If an exception is raised internal to the task, it is caught by celery and provided here.
The information is recorded in the InstructorTask object as a JSON-serialized dict
stored in the task_output column. It contains the following keys:
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Note that there is no way to record progress made within the task (e.g. attempted,
succeeded, etc.) when such failures occur.
"""
TASK_LOG.debug(u'Task %s: failure returned', task_id)
entry_id = args[0]
try:
entry = InstructorTask.objects.get(pk=entry_id)
except InstructorTask.DoesNotExist:
# if the InstructorTask object does not exist, then there's no point
# trying to update it.
TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
else:
TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
entry.task_state = FAILURE
entry.save_now()
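# --- Editorial sketch (illustrative; not part of the upstream module) ---
# BaseInstructorTask is intended to be used as the `base` class of Celery task
# definitions that live elsewhere in the codebase. A minimal sketch of that wiring,
# with the decorator import and the task body assumed for illustration only:
#
#     from celery import task
#
#     @task(base=BaseInstructorTask)
#     def example_instructor_task(entry_id, xmodule_instance_args):
#         # run_main_task (defined below) drives the work and records progress.
#         return run_main_task(entry_id, some_task_fcn, 'examined')
#
# Here `some_task_fcn` is a placeholder for a function that follows the task_fcn
# contract documented on run_main_task below.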
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
class TaskProgress(object):
"""
Encapsulates the current task's progress by keeping track of
'attempted', 'succeeded', 'skipped', 'failed', 'total',
'action_name', and 'duration_ms' values.
"""
def __init__(self, action_name, total, start_time):
self.action_name = action_name
self.total = total
self.start_time = start_time
self.attempted = 0
self.succeeded = 0
self.skipped = 0
self.failed = 0
def update_task_state(self, extra_meta=None):
"""
Update the current celery task's state to the progress state
specified by the current object. Returns the progress
dictionary for use by `run_main_task` and
`BaseInstructorTask.on_success`.
Arguments:
extra_meta (dict): Extra metadata to pass to `update_state`
Returns:
dict: The current task's progress dict
"""
progress_dict = {
'action_name': self.action_name,
'attempted': self.attempted,
'succeeded': self.succeeded,
'skipped': self.skipped,
'failed': self.failed,
'total': self.total,
'duration_ms': int((time() - self.start_time) * 1000),
}
if extra_meta is not None:
progress_dict.update(extra_meta)
_get_current_task().update_state(state=PROGRESS, meta=progress_dict)
return progress_dict
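# --- Editorial sketch (illustrative; not part of the upstream module) ---
# Minimal example of the TaskProgress update pattern described above. The item
# handling below is a stand-in for real per-student work; only the counter
# bookkeeping and the final update_task_state() call mirror the documented contract.
def _example_task_progress_usage(action_name, item_outcomes):
    """Hypothetical helper: `item_outcomes` is assumed to be a list of booleans."""
    progress = TaskProgress(action_name, len(item_outcomes), time())
    for succeeded in item_outcomes:
        progress.attempted += 1
        if succeeded:
            progress.succeeded += 1
        else:
            progress.failed += 1
    # Returns the progress dict expected by run_main_task / on_success.
    return progress.update_task_state(extra_meta={'step': 'Example step'})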
def run_main_task(entry_id, task_fcn, action_name):
"""
Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.
Arguments passed to `task_fcn` are:
`entry_id` : the primary key for the InstructorTask entry representing the task.
`course_id` : the id for the course.
`task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
`action_name` : past-tense verb to use for constructing status messages.
If no exceptions are raised, the `task_fcn` should return a dict containing
the task's result with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages.
Should be past-tense. Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
"""
# Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
# There's no point in catching it here.
entry = InstructorTask.objects.get(pk=entry_id)
entry.task_state = PROGRESS
entry.save_now()
# Get inputs to use in this task from the entry
task_id = entry.task_id
course_id = entry.course_id
task_input = json.loads(entry.task_input)
# Construct log message
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
TASK_LOG.error(message)
raise ValueError(message)
# Now do the work
with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
task_progress = task_fcn(entry_id, course_id, task_input, action_name)
# Release any queries that the connection has been hanging onto
reset_queries()
# Log and exit, returning task_progress info as task result
TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
return task_progress
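# --- Editorial sketch (illustrative; not part of the upstream module) ---
# A minimal `task_fcn` satisfying the contract documented on run_main_task():
# it accepts (entry_id, course_id, task_input, action_name) and returns the
# progress dict built by TaskProgress.update_task_state(). The body is a
# placeholder, not a real instructor task.
def _example_task_fcn(_entry_id, _course_id, _task_input, action_name):
    progress = TaskProgress(action_name, 1, time())
    progress.attempted = progress.succeeded = 1
    return progress.update_task_state(extra_meta={'step': 'Example step'})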
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed two arguments: the module_descriptor for the module pointed to by the
module_state_key, and the particular StudentModule to update; any xmodule_instance_args
are expected to have been bound into `update_fcn` beforehand (e.g. via functools.partial).
The update function must return one of UPDATE_STATUS_SUCCEEDED, UPDATE_STATUS_FAILED,
or UPDATE_STATUS_SKIPPED to indicate the outcome for that student module.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible updates to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
start_time = time()
usage_keys = []
problem_url = task_input.get('problem_url')
entrance_exam_url = task_input.get('entrance_exam_url')
student_identifier = task_input.get('student')
problems = {}
# if problem_url is present make a usage key from it
if problem_url:
usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
usage_keys.append(usage_key)
# find the problem descriptor:
problem_descriptor = modulestore().get_item(usage_key)
problems[unicode(usage_key)] = problem_descriptor
# if entrance_exam is present grab all problems in it
if entrance_exam_url:
problems = get_problems_in_section(entrance_exam_url)
usage_keys = [UsageKey.from_string(location) for location in problems.keys()]
# find the modules in question
modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)
# give the option of updating an individual student. If not specified,
# then updates all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
task_progress.update_task_state()
for module_to_update in modules_to_update:
task_progress.attempted += 1
module_descriptor = problems[unicode(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
return task_progress.update_task_state()
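# --- Editorial sketch (illustrative; not part of the upstream module) ---
# perform_module_state_update() is shaped so it can be partially applied into the
# task_fcn signature that run_main_task() expects. A plausible wiring, with the
# binding shown here assumed for illustration (real update functions such as
# rescore_problem_module_state are defined below):
#
#     from functools import partial
#
#     update_fcn = partial(rescore_problem_module_state, xmodule_instance_args)
#     visit_fcn = partial(perform_module_state_update, update_fcn, None)
#     return run_main_task(entry_id, visit_fcn, action_name)
#
# The `None` above is the filter_fcn (no extra filtering); each update function
# must return UPDATE_STATUS_SUCCEEDED, UPDATE_STATUS_FAILED, or UPDATE_STATUS_SKIPPED.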
def _get_task_id_from_xmodule_args(xmodule_instance_args):
"""Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
"""
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
"""
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {'student': student.username, 'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args)}
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page=source_page)
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None, course=None):
"""
Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
`xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
"""
# reconstitute the problem's corresponding XModule:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function():
'''
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
'''
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None else ''
return get_module_for_descriptor_internal(
user=student,
descriptor=module_descriptor,
student_data=student_data,
course_id=course_id,
track_function=make_track_function(),
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
grade_bucket_type=grade_bucket_type,
# This module isn't being used for front-end rendering
request_token=None,
# pass in a loaded course for override enabling
course=course
)
@transaction.autocommit
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
or if the module doesn't support rescoring.
Returns UPDATE_STATUS_SUCCEEDED if the problem was successfully rescored for the given
student, and UPDATE_STATUS_FAILED if the rescoring attempt encountered an error.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
usage_key = student_module.module_state_key
with modulestore().bulk_operations(course_id):
course = get_course_by_id(course_id)
# TODO: Here is a call site where we could pass in a loaded course. I
# think we certainly need it since grading is happening here, and field
# overrides would be important in handling that correctly
instance = _get_module_instance_for_task(
course_id,
student,
module_descriptor,
xmodule_instance_args,
grade_bucket_type='rescore',
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(
loc=usage_key,
student=student
)
TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'):
# This should also not happen, since it should be already checked in the caller,
# but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
instance.save()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: unexpected response %(msg)s",
dict(
msg=result,
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
elif result['success'] not in ['correct', 'incorrect']:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
else:
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Resets problem attempts to zero for specified `student_module`.
Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
"""
update_status = UPDATE_STATUS_SKIPPED
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
old_number_of_attempts = problem_state["attempts"]
if old_number_of_attempts > 0:
problem_state["attempts"] = 0
# convert back to json and save
student_module.state = json.dumps(problem_state)
student_module.save()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
track_function('problem_reset_attempts', event_info)
update_status = UPDATE_STATUS_SUCCEEDED
return update_status
@transaction.autocommit
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Delete the StudentModule entry.
Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't raise an exception due to database error.
"""
student_module.delete()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
track_function('problem_delete_state', {})
return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
"""
Upload data as a CSV using ReportStore.
Arguments:
rows: CSV data in the following format (the first row may be a
header):
[
[row1_column1, row1_column2, ...],
...
]
csv_name: Name of the resulting CSV
course_id: ID of the course
"""
report_store = ReportStore.from_config(config_name)
report_store.store_rows(
course_id,
u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
course_prefix=course_filename_prefix_generator(course_id),
csv_name=csv_name,
timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
),
rows
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
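# --- Editorial sketch (illustrative; not part of the upstream module) ---
# Example of the `rows` shape expected by upload_csv_to_report_store(); the header
# is simply the first row, and the csv_name and column values below are hypothetical.
def _example_csv_upload(course_id):
    rows = [
        ['id', 'username', 'grade'],  # header row
        [1, 'example_user', 0.85],    # data rows follow the same column order
    ]
    upload_csv_to_report_store(rows, 'example_report', course_id, datetime.now(UTC))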
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
"""
Upload the executive summary HTML file using ReportStore.
Arguments:
data_dict: dict containing the executive report data.
report_name: Name of the resulting HTML file.
course_id: ID of the course
"""
report_store = ReportStore.from_config(config_name)
# Use the data dict and html template to generate the output buffer
output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))
report_store.store(
course_id,
u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
course_prefix=course_filename_prefix_generator(course_id),
report_name=report_name,
timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
),
output_buffer,
config={
'content_type': 'text/html',
'content_encoding': None,
}
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
"""
For a given `course_id`, generate a grades CSV file for all students that
are enrolled, and store using a `ReportStore`. Once created, the files can
be accessed by instantiating another `ReportStore` (via
`ReportStore.from_config()`) and calling `link_for()` on it. Writes are
buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
that are visible in ReportStore will be complete ones.
As we start to add more CSV downloads, it will probably be worthwhile to
make a more general CSVDoc class instead of building out the rows like we
do here.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
course = get_course_by_id(course_id)
course_is_cohorted = is_course_cohorted(course.id)
cohorts_header = ['Cohort Name'] if course_is_cohorted else []
experiment_partitions = get_split_user_partitions(course.user_partitions)
group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
# Loop over all our students and build our CSV lists in memory
header = None
rows = []
err_rows = [["id", "username", "error_msg"]]
current_step = {'step': 'Calculating Grades'}
total_enrolled_students = enrolled_students.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
task_info_string,
action_name,
current_step,
total_enrolled_students
)
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after each student is graded to get a sense
# of the task's progress
student_counter += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
if gradeset:
# We were able to successfully grade this student for this course.
task_progress.succeeded += 1
if not header:
header = [section['label'] for section in gradeset[u'section_breakdown']]
rows.append(
["id", "email", "username", "grade"] + header + cohorts_header +
group_configs_header + ['Enrollment Track', 'Verification Status'] + certificate_info_header
)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
cohorts_group_name = []
if course_is_cohorted:
group = get_cohort(student, course_id, assign=False)
cohorts_group_name.append(group.name if group else '')
group_configs_group_names = []
for partition in experiment_partitions:
group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
group_configs_group_names.append(group.name if group else '')
enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
student,
course_id,
enrollment_mode
)
certificate_info = certificate_info_for_user(
student,
course_id,
gradeset['grade'],
student.id in whitelisted_user_ids
)
# Not everybody has the same gradable items. If the item is not
# found in the user's gradeset, just assume it's a 0. The aggregated
# grades for their sections and overall course will be calculated
# without regard for the item they didn't have access to, so it's
# possible for a student to have a 0.0 show up in their row but
# still have 100% for the course.
row_percents = [percents.get(label, 0.0) for label in header]
rows.append(
[student.id, student.email, student.username, gradeset['percent']] +
row_percents + cohorts_group_name + group_configs_group_names +
[enrollment_mode] + [verification_status] + certificate_info
)
else:
# An empty gradeset means we failed to grade a student.
task_progress.failed += 1
err_rows.append([student.id, student.username, err_msg])
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
# If there are any error rows (don't count the header), write them out as well
if len(err_rows) > 1:
upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and assignment that it belongs to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = dict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
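# --- Editorial note (illustrative; not part of the upstream module) ---
# Example of the headers _order_problems() builds for a hypothetical graded problem
# "Problem A" inside the first "Homework" assignment named "Problem Set 1":
#
#     problems[problem_block_id] == [
#         u"Homework 1: Problem Set 1 - Problem A (Earned)",
#         u"Homework 1: Problem Set 1 - Problem A (Possible)",
#     ]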
def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
all student answers to a given problem, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': "Calculating students' answers to problem"}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
problem_location = task_input.get('problem_location')
student_data = list_problem_responses(course_id, problem_location)
features = ['username', 'state']
header, rows = format_dictlist(student_data, features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
problem_location = re.sub(r'[:/]', '_', problem_location)
csv_name = 'student_state_from_{}'.format(problem_location)
upload_csv_to_report_store(rows, csv_name, course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Generate a CSV containing all students' problem grades within a given
`course_id`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
# This dict maps the django User field names of the static header items (keys)
# to the display names used for them in the header row (values). It is structured
# this way so each field name stays paired with its display name.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
try:
course_structure = CourseStructure.objects.get(course_id=course_id)
blocks = course_structure.ordered_blocks
problems = _order_problems(blocks)
except CourseStructure.DoesNotExist:
return task_progress.update_task_state(
extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
)
# Just generate the static fields for now.
rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1
if 'percent' not in gradeset or 'raw_scores' not in gradeset:
# There was an error grading this student.
# Generally there will be a non-empty err_msg, but that is not always the case.
if not err_msg:
err_msg = u"Unknown error"
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue
final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
try:
problem_score = problem_scores[problem_id]
earned_possible_values.append([problem_score.earned, problem_score.possible])
except KeyError:
# The student has not been graded on this problem. For example,
# iterate_grades_for skips problems that students have never
# seen in order to speed up report generation. It could also be
# the case that the student does not have access to it (e.g. A/B
# test or cohorted courseware).
earned_possible_values.append(['N/A', 'N/A'])
rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload if any students have been successfully graded
if len(rows) > 1:
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
# If there are any error rows, write them out as well
if len(error_rows) > 1:
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating Profile Info'}
task_progress.update_task_state(extra_meta=current_step)
# compute the student features table and format it
query_features = task_input.get('features')
student_data = enrolled_students_features(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
For a given `course_id`, generate a detailed enrollment report as a CSV file
(profile, enrollment, and payment information) for all students who are or
were enrolled in the course, and store it using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
task_progress = TaskProgress(action_name, students_in_course.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
# Loop over all our students and build our CSV lists in memory
rows = []
header = None
current_step = {'step': 'Gathering Profile Information'}
enrollment_report_provider = PaidCourseEnrollmentReportProvider()
total_students = students_in_course.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
task_info_string,
action_name,
current_step,
total_students
)
for student in students_in_course:
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after certain intervals to get a hint that task is in progress
student_counter += 1
if student_counter % 100 == 0:
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
user_data = enrollment_report_provider.get_user_profile(student.id)
course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
payment_data = enrollment_report_provider.get_payment_info(student, course_id)
# display name map for the column headers
enrollment_report_headers = {
'User ID': _('User ID'),
'Username': _('Username'),
'Full Name': _('Full Name'),
'First Name': _('First Name'),
'Last Name': _('Last Name'),
'Company Name': _('Company Name'),
'Title': _('Title'),
'Language': _('Language'),
'Year of Birth': _('Year of Birth'),
'Gender': _('Gender'),
'Level of Education': _('Level of Education'),
'Mailing Address': _('Mailing Address'),
'Goals': _('Goals'),
'City': _('City'),
'Country': _('Country'),
'Enrollment Date': _('Enrollment Date'),
'Currently Enrolled': _('Currently Enrolled'),
'Enrollment Source': _('Enrollment Source'),
'Enrollment Role': _('Enrollment Role'),
'List Price': _('List Price'),
'Payment Amount': _('Payment Amount'),
'Coupon Codes Used': _('Coupon Codes Used'),
'Registration Code Used': _('Registration Code Used'),
'Payment Status': _('Payment Status'),
'Transaction Reference Number': _('Transaction Reference Number')
}
if not header:
header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
display_headers = []
for header_element in header:
# translate header into a localizable display string
display_headers.append(enrollment_report_headers.get(header_element, header_element))
rows.append(display_headers)
rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
task_progress.succeeded += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
information about students who may enroll but have not done so
yet, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about students who may enroll'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = task_input.get('features')
student_data = list_may_enroll(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def get_executive_report(course_id):
"""
Returns dict containing information about the course executive summary.
"""
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)
refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
course_id, status='refunded'
)
refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
course_id, status='refunded'
)
total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats
self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
course_id,
status='refunded'
)
bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds
top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
unused_registration_codes = 0
for registration_code in bulk_purchased_codes:
if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
unused_registration_codes += 1
self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
self_purchases_percentage = 0.0
bulk_purchases_percentage = 0.0
invoice_purchases_percentage = 0.0
avg_price_paid = 0.0
if total_seats != 0:
self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
avg_price_paid = gross_revenue / total_seats
course = get_course_by_id(course_id, depth=0)
currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
return {
'display_name': course.display_name,
'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
'total_seats': total_seats,
'currency': currency,
'gross_revenue': float(gross_revenue),
'gross_paid_revenue': float(gross_paid_revenue),
'gross_pending_revenue': gross_pending_revenue,
'total_seats_refunded': total_seats_refunded,
'total_amount_refunded': float(total_amount_refunded),
'average_paid_price': float(avg_price_paid),
'discount_codes_data': top_discounted_codes,
'total_seats_using_discount_codes': total_coupon_codes_purchases,
'total_self_purchase_seats': self_purchased_seat_count,
'total_bulk_purchase_seats': bulk_purchased_seat_count,
'total_invoiced_seats': total_invoiced_seats,
'unused_bulk_purchase_code_count': unused_registration_codes,
'self_purchases_percentage': self_purchases_percentage,
'bulk_purchases_percentage': bulk_purchases_percentage,
'invoice_purchases_percentage': invoice_purchases_percentage,
}
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=invalid-name
"""
For a given `course_id`, generate an HTML report that provides a snapshot
of how the course is doing.
"""
start_time = time()
report_generation_date = datetime.now(UTC)
status_interval = 100
enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
true_enrollment_count = 0
for user in enrolled_users:
if not user.is_staff and not CourseAccessRole.objects.filter(
user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
).exists():
true_enrollment_count += 1
task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
current_step = {'step': 'Gathering executive summary report information'}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating executive summary report',
task_info_string,
action_name,
current_step
)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# get the course executive summary report information.
data_dict = get_executive_report(course_id)
data_dict.update(
{
'total_enrollments': true_enrollment_count,
'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
}
)
# By this point, we've got the data that we need to generate html report.
current_step = {'step': 'Uploading executive summary report HTML file'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
task_progress.succeeded += 1
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_proctored_exam_results_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=invalid-name
"""
For a given `course_id`, generate a CSV file containing
information about proctored exam results, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about proctored exam results in a course'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = _task_input.get('features')
student_data = get_proctored_exam_results(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'proctored_exam_results_report', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
_xmodule_instance_args, _entry_id, course_id, task_input, action_name): # pylint: disable=unused-argument
"""
For a given `course_id`, generate certificates for all students
that are enrolled.
"""
start_time = time()
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating which students already have certificates'}
task_progress.update_task_state(extra_meta=current_step)
students_require_certs = students_require_certificate(course_id, enrolled_students)
task_progress.skipped = task_progress.total - len(students_require_certs)
current_step = {'step': 'Generating Certificates'}
task_progress.update_task_state(extra_meta=current_step)
course = modulestore().get_course(course_id, depth=0)
# Generate certificate for each student
for student in students_require_certs:
task_progress.attempted += 1
status = generate_user_certificates(
student,
course_id,
course=course
)
if status in [CertificateStatuses.generating, CertificateStatuses.downloadable]:
task_progress.succeeded += 1
else:
task_progress.failed += 1
return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
Within a given course, cohort students in bulk, then upload the results
using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
# Iterate through rows to get total assignments for task progress
with DefaultStorage().open(task_input['file_name']) as f:
total_assignments = 0
for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
total_assignments += 1
task_progress = TaskProgress(action_name, total_assignments, start_time)
current_step = {'step': 'Cohorting Students'}
task_progress.update_task_state(extra_meta=current_step)
# cohorts_status is a mapping from cohort_name to metadata about
# that cohort. The metadata will include information about users
# successfully added to the cohort, users not found, and a cached
# reference to the corresponding cohort object to prevent
# redundant cohort queries.
cohorts_status = {}
with DefaultStorage().open(task_input['file_name']) as f:
for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
# Try to use the 'email' field to identify the user. If it's not present, use 'username'.
username_or_email = row.get('email') or row.get('username')
cohort_name = row.get('cohort') or ''
task_progress.attempted += 1
if not cohorts_status.get(cohort_name):
cohorts_status[cohort_name] = {
'Cohort Name': cohort_name,
'Students Added': 0,
'Students Not Found': set()
}
try:
cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=cohort_name
)
cohorts_status[cohort_name]["Exists"] = True
except CourseUserGroup.DoesNotExist:
cohorts_status[cohort_name]["Exists"] = False
if not cohorts_status[cohort_name]['Exists']:
task_progress.failed += 1
continue
try:
with transaction.commit_on_success():
add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
cohorts_status[cohort_name]['Students Added'] += 1
task_progress.succeeded += 1
except User.DoesNotExist:
cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
task_progress.failed += 1
except ValueError:
# Raised when the user is already in the given cohort
task_progress.skipped += 1
task_progress.update_task_state(extra_meta=current_step)
current_step['step'] = 'Uploading CSV'
task_progress.update_task_state(extra_meta=current_step)
# Filter the output of `add_users_to_cohorts` in order to upload the result.
output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
output_rows = [
[
','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
else status_dict[column_name]
for column_name in output_header
]
for _cohort_name, status_dict in cohorts_status.iteritems()
]
output_rows.insert(0, output_header)
upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students):
""" Returns list of students where certificates needs to be generated.
Removing those students who have their certificate already generated
from total enrolled students for given course.
:param course_id:
:param enrolled_students:
"""
# compute those students where certificates already generated
students_already_have_certs = User.objects.filter(
~Q(generatedcertificate__status=CertificateStatuses.unavailable),
generatedcertificate__course_id=course_id)
return list(set(enrolled_students) - set(students_already_have_certs))
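# Editorial note (assumption, not from the source): the ~Q(...) filter above keeps
# users that have at least one GeneratedCertificate row for this course whose
# status is anything other than CertificateStatuses.unavailable; subtracting that
# set from enrolled_students leaves only the students still needing one, e.g.:
#
#   pending = students_require_certificate(course_id, enrolled_students)
#   for student in pending:
#       generate_certificate(student, course_id)   # hypothetical follow-up step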
| agpl-3.0 |
jonasjberg/autonameow | autonameow/vendor/unidecode/x016.py | 252 | 4140 | data = (
'kka', # 0x00
'kk', # 0x01
'nu', # 0x02
'no', # 0x03
'ne', # 0x04
'nee', # 0x05
'ni', # 0x06
'na', # 0x07
'mu', # 0x08
'mo', # 0x09
'me', # 0x0a
'mee', # 0x0b
'mi', # 0x0c
'ma', # 0x0d
'yu', # 0x0e
'yo', # 0x0f
'ye', # 0x10
'yee', # 0x11
'yi', # 0x12
'ya', # 0x13
'ju', # 0x14
'ju', # 0x15
'jo', # 0x16
'je', # 0x17
'jee', # 0x18
'ji', # 0x19
'ji', # 0x1a
'ja', # 0x1b
'jju', # 0x1c
'jjo', # 0x1d
'jje', # 0x1e
'jjee', # 0x1f
'jji', # 0x20
'jja', # 0x21
'lu', # 0x22
'lo', # 0x23
'le', # 0x24
'lee', # 0x25
'li', # 0x26
'la', # 0x27
'dlu', # 0x28
'dlo', # 0x29
'dle', # 0x2a
'dlee', # 0x2b
'dli', # 0x2c
'dla', # 0x2d
'lhu', # 0x2e
'lho', # 0x2f
'lhe', # 0x30
'lhee', # 0x31
'lhi', # 0x32
'lha', # 0x33
'tlhu', # 0x34
'tlho', # 0x35
'tlhe', # 0x36
'tlhee', # 0x37
'tlhi', # 0x38
'tlha', # 0x39
'tlu', # 0x3a
'tlo', # 0x3b
'tle', # 0x3c
'tlee', # 0x3d
'tli', # 0x3e
'tla', # 0x3f
'zu', # 0x40
'zo', # 0x41
'ze', # 0x42
'zee', # 0x43
'zi', # 0x44
'za', # 0x45
'z', # 0x46
'z', # 0x47
'dzu', # 0x48
'dzo', # 0x49
'dze', # 0x4a
'dzee', # 0x4b
'dzi', # 0x4c
'dza', # 0x4d
'su', # 0x4e
'so', # 0x4f
'se', # 0x50
'see', # 0x51
'si', # 0x52
'sa', # 0x53
'shu', # 0x54
'sho', # 0x55
'she', # 0x56
'shee', # 0x57
'shi', # 0x58
'sha', # 0x59
'sh', # 0x5a
'tsu', # 0x5b
'tso', # 0x5c
'tse', # 0x5d
'tsee', # 0x5e
'tsi', # 0x5f
'tsa', # 0x60
'chu', # 0x61
'cho', # 0x62
'che', # 0x63
'chee', # 0x64
'chi', # 0x65
'cha', # 0x66
'ttsu', # 0x67
'ttso', # 0x68
'ttse', # 0x69
'ttsee', # 0x6a
'ttsi', # 0x6b
'ttsa', # 0x6c
'X', # 0x6d
'.', # 0x6e
'qai', # 0x6f
'ngai', # 0x70
'nngi', # 0x71
'nngii', # 0x72
'nngo', # 0x73
'nngoo', # 0x74
'nnga', # 0x75
'nngaa', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
' ', # 0x80
'b', # 0x81
'l', # 0x82
'f', # 0x83
's', # 0x84
'n', # 0x85
'h', # 0x86
'd', # 0x87
't', # 0x88
'c', # 0x89
'q', # 0x8a
'm', # 0x8b
'g', # 0x8c
'ng', # 0x8d
'z', # 0x8e
'r', # 0x8f
'a', # 0x90
'o', # 0x91
'u', # 0x92
'e', # 0x93
'i', # 0x94
'ch', # 0x95
'th', # 0x96
'ph', # 0x97
'p', # 0x98
'x', # 0x99
'p', # 0x9a
'<', # 0x9b
'>', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'f', # 0xa0
'v', # 0xa1
'u', # 0xa2
'yr', # 0xa3
'y', # 0xa4
'w', # 0xa5
'th', # 0xa6
'th', # 0xa7
'a', # 0xa8
'o', # 0xa9
'ac', # 0xaa
'ae', # 0xab
'o', # 0xac
'o', # 0xad
'o', # 0xae
'oe', # 0xaf
'on', # 0xb0
'r', # 0xb1
'k', # 0xb2
'c', # 0xb3
'k', # 0xb4
'g', # 0xb5
'ng', # 0xb6
'g', # 0xb7
'g', # 0xb8
'w', # 0xb9
'h', # 0xba
'h', # 0xbb
'h', # 0xbc
'h', # 0xbd
'n', # 0xbe
'n', # 0xbf
'n', # 0xc0
'i', # 0xc1
'e', # 0xc2
'j', # 0xc3
'g', # 0xc4
'ae', # 0xc5
'a', # 0xc6
'eo', # 0xc7
'p', # 0xc8
'z', # 0xc9
's', # 0xca
's', # 0xcb
's', # 0xcc
'c', # 0xcd
'z', # 0xce
't', # 0xcf
't', # 0xd0
'd', # 0xd1
'b', # 0xd2
'b', # 0xd3
'p', # 0xd4
'p', # 0xd5
'e', # 0xd6
'm', # 0xd7
'm', # 0xd8
'm', # 0xd9
'l', # 0xda
'l', # 0xdb
'ng', # 0xdc
'ng', # 0xdd
'd', # 0xde
'o', # 0xdf
'ear', # 0xe0
'ior', # 0xe1
'qu', # 0xe2
'qu', # 0xe3
'qu', # 0xe4
's', # 0xe5
'yr', # 0xe6
'yr', # 0xe7
'yr', # 0xe8
'q', # 0xe9
'x', # 0xea
'.', # 0xeb
':', # 0xec
'+', # 0xed
'17', # 0xee
'18', # 0xef
'19', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
kaichogami/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
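# Editorial sketch (not part of the original example): each dashed one-vs-all
# boundary drawn above is the set of points where that class's score is zero,
#   coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0,
# which the nested line() helper solves for x1. For instance:
#
#   c, x0 = 0, 1.5
#   x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1]
#   # (x0, x1) now lies on the class-0 decision line:
#   # coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] == 0 (up to rounding)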
| bsd-3-clause |
jjmleiro/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/relatedapp/models.py | 121 | 1801 | from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Location(models.Model):
point = models.PointField()
objects = models.GeoManager()
def __str__(self): return self.point.wkt
@python_2_unicode_compatible
class City(models.Model):
name = models.CharField(max_length=50)
state = models.CharField(max_length=2)
location = models.ForeignKey(Location)
objects = models.GeoManager()
def __str__(self): return self.name
class AugmentedLocation(Location):
extra_text = models.TextField(blank=True)
objects = models.GeoManager()
class DirectoryEntry(models.Model):
listing_text = models.CharField(max_length=50)
location = models.ForeignKey(AugmentedLocation)
objects = models.GeoManager()
@python_2_unicode_compatible
class Parcel(models.Model):
name = models.CharField(max_length=30)
city = models.ForeignKey(City)
center1 = models.PointField()
# Throwing a curveball w/`db_column` here.
center2 = models.PointField(srid=2276, db_column='mycenter')
border1 = models.PolygonField()
border2 = models.PolygonField(srid=2276)
objects = models.GeoManager()
def __str__(self): return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(models.Model):
name = models.CharField(max_length=100)
dob = models.DateField()
objects = models.GeoManager()
class Article(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, unique=True)
objects = models.GeoManager()
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, related_name='books', null=True)
objects = models.GeoManager()
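# Editorial usage sketch (assumption, not part of the test fixtures): the
# geographic fields above accept GEOS geometries, roughly:
#
#   from django.contrib.gis.geos import Point
#   loc = Location.objects.create(point=Point(-95.36, 29.76))
#   City.objects.create(name='Houston', state='TX', location=loc)
#
# while Author/Article/Book only exercise GeoManager on non-geographic models.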
| apache-2.0 |
petemounce/ansible | test/units/module_utils/basic/test_log.py | 63 | 10557 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import sys
import json
import syslog
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from units.mock.procenv import swap_stdin_and_argv
import ansible.module_utils.basic
try:
# Python 3.4+
from importlib import reload
except ImportError:
# Python 2 has reload as a builtin
# Ignoring python3.0-3.3 (those have imp.reload if we decide we care)
pass
class TestAnsibleModuleSysLogSmokeTest(unittest.TestCase):
def setUp(self):
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
ansible.module_utils.basic._ANSIBLE_ARGS = None
self.am = ansible.module_utils.basic.AnsibleModule(
argument_spec=dict(),
)
self.am._name = 'unittest'
self.has_journal = ansible.module_utils.basic.has_journal
if self.has_journal:
# Systems with journal can still test syslog
ansible.module_utils.basic.has_journal = False
def tearDown(self):
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
ansible.module_utils.basic.has_journal = self.has_journal
def test_smoketest_syslog(self):
# These talk to the live daemons on the system. Need to do this to
# show that what we send doesn't cause an issue once it gets to the
# daemon. These are just smoketests to test that we don't fail.
self.am.log(u'Text string')
self.am.log(u'Toshio くらとみ non-ascii test')
self.am.log(b'Byte string')
self.am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
self.am.log(b'non-utf8 :\xff: test')
class TestAnsibleModuleJournaldSmokeTest(unittest.TestCase):
def setUp(self):
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
ansible.module_utils.basic._ANSIBLE_ARGS = None
self.am = ansible.module_utils.basic.AnsibleModule(
argument_spec=dict(),
)
self.am._name = 'unittest'
def tearDown(self):
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
@unittest.skipUnless(ansible.module_utils.basic.has_journal, 'python systemd bindings not installed')
def test_smoketest_journal(self):
# These talk to the live daemons on the system. Need to do this to
# show that what we send doesn't cause an issue once it gets to the
# daemon. These are just smoketests to test that we don't fail.
self.am.log(u'Text string')
self.am.log(u'Toshio くらとみ non-ascii test')
self.am.log(b'Byte string')
self.am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
self.am.log(b'non-utf8 :\xff: test')
class TestAnsibleModuleLogSyslog(unittest.TestCase):
"""Test the AnsibleModule Log Method"""
py2_output_data = {
u'Text string': b'Text string',
u'Toshio くらとみ non-ascii test': u'Toshio くらとみ non-ascii test'.encode('utf-8'),
b'Byte string': b'Byte string',
u'Toshio くらとみ non-ascii test'.encode('utf-8'): u'Toshio くらとみ non-ascii test'.encode('utf-8'),
b'non-utf8 :\xff: test': b'non-utf8 :\xff: test'.decode('utf-8', 'replace').encode('utf-8'),
}
py3_output_data = {
u'Text string': u'Text string',
u'Toshio くらとみ non-ascii test': u'Toshio くらとみ non-ascii test',
b'Byte string': u'Byte string',
u'Toshio くらとみ non-ascii test'.encode('utf-8'): u'Toshio くらとみ non-ascii test',
b'non-utf8 :\xff: test': b'non-utf8 :\xff: test'.decode('utf-8', 'replace')
}
def setUp(self):
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
ansible.module_utils.basic._ANSIBLE_ARGS = None
self.am = ansible.module_utils.basic.AnsibleModule(
argument_spec=dict(),
)
self.am._name = 'unittest'
self.has_journal = ansible.module_utils.basic.has_journal
if self.has_journal:
# Systems with journal can still test syslog
ansible.module_utils.basic.has_journal = False
def tearDown(self):
# teardown/reset
ansible.module_utils.basic.has_journal = self.has_journal
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
@patch('syslog.syslog', autospec=True)
def test_no_log(self, mock_func):
no_log = self.am.no_log
self.am.no_log = True
self.am.log('unittest no_log')
self.assertFalse(mock_func.called)
self.am.no_log = False
self.am.log('unittest no_log')
mock_func.assert_called_once_with(syslog.LOG_INFO, 'unittest no_log')
self.am.no_log = no_log
def test_output_matches(self):
if sys.version_info >= (3,):
output_data = self.py3_output_data
else:
output_data = self.py2_output_data
for msg, param in output_data.items():
with patch('syslog.syslog', autospec=True) as mock_func:
self.am.log(msg)
mock_func.assert_called_once_with(syslog.LOG_INFO, param)
class TestAnsibleModuleLogJournal(unittest.TestCase):
"""Test the AnsibleModule Log Method"""
output_data = {
u'Text string': u'Text string',
u'Toshio くらとみ non-ascii test': u'Toshio くらとみ non-ascii test',
b'Byte string': u'Byte string',
u'Toshio くらとみ non-ascii test'.encode('utf-8'): u'Toshio くらとみ non-ascii test',
b'non-utf8 :\xff: test': b'non-utf8 :\xff: test'.decode('utf-8', 'replace')
}
# overriding run lets us use context managers for setup/teardown-esque behavior
def setUp(self):
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
ansible.module_utils.basic._ANSIBLE_ARGS = None
self.am = ansible.module_utils.basic.AnsibleModule(
argument_spec=dict(),
)
self.am._name = 'unittest'
self.has_journal = ansible.module_utils.basic.has_journal
ansible.module_utils.basic.has_journal = True
self.module_patcher = None
# In case systemd-python is not installed
if not self.has_journal:
self.module_patcher = patch.dict('sys.modules', {'systemd': MagicMock(), 'systemd.journal': MagicMock()})
self.module_patcher.start()
try:
reload(ansible.module_utils.basic)
except NameError:
self._fake_out_reload(ansible.module_utils.basic)
def tearDown(self):
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
# teardown/reset
ansible.module_utils.basic.has_journal = self.has_journal
if self.module_patcher:
self.module_patcher.stop()
reload(ansible.module_utils.basic)
@patch('systemd.journal.send')
def test_no_log(self, mock_func):
no_log = self.am.no_log
self.am.no_log = True
self.am.log('unittest no_log')
self.assertFalse(mock_func.called)
self.am.no_log = False
self.am.log('unittest no_log')
self.assertEqual(mock_func.called, 1)
# Message
# call_args is a 2-tuple of (arg_list, kwarg_dict)
self.assertTrue(mock_func.call_args[0][0].endswith('unittest no_log'), msg='Message was not sent to log')
# log adds this journal field
self.assertIn('MODULE', mock_func.call_args[1])
self.assertIn('basic.py', mock_func.call_args[1]['MODULE'])
self.am.no_log = no_log
def test_output_matches(self):
for msg, param in self.output_data.items():
with patch('systemd.journal.send', autospec=True) as mock_func:
self.am.log(msg)
self.assertEqual(mock_func.call_count, 1, msg='journal.send not called exactly once')
self.assertTrue(mock_func.call_args[0][0].endswith(param))
@patch('systemd.journal.send')
def test_log_args(self, mock_func):
self.am.log('unittest log_args', log_args=dict(TEST='log unittest'))
self.assertEqual(mock_func.called, 1)
self.assertTrue(mock_func.call_args[0][0].endswith('unittest log_args'), msg='Message was not sent to log')
# log adds this journal field
self.assertIn('MODULE', mock_func.call_args[1])
self.assertIn('basic.py', mock_func.call_args[1]['MODULE'])
# We added this journal field
self.assertIn('TEST', mock_func.call_args[1])
self.assertIn('log unittest', mock_func.call_args[1]['TEST'])
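# Editorial aside (assumption): mock's call_args used above is an (args, kwargs)
# pair, so the assertions read roughly as:
#
#   args, kwargs = mock_func.call_args
#   assert args[0].endswith('unittest log_args')        # positional log message
#   assert 'MODULE' in kwargs and 'TEST' in kwargs      # journal fields as kwargs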
| gpl-3.0 |
bhaskar24/ns-3_RARED | examples/realtime/realtime-udp-echo.py | 195 | 3526 | #
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Network topology
#
# n0 n1 n2 n3
# | | | |
# =================
# LAN
#
# - UDP flows from n0 to n1 and back
# - DropTail queues
# - Tracing of queues and packet receptions to file "udp-echo.tr"
import ns.applications
import ns.core
import ns.csma
import ns.internet
import ns.network
def main(argv):
#
# Allow the user to override any of the defaults and the above Bind() at
# run-time, via command-line arguments
#
cmd = ns.core.CommandLine()
cmd.Parse(argv)
#
# But since this is a realtime script, don't allow the user to mess with
# that.
#
ns.core.GlobalValue.Bind("SimulatorImplementationType", ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
#
# Explicitly create the nodes required by the topology (shown above).
#
print "Create nodes."
n = ns.network.NodeContainer()
n.Create(4)
internet = ns.internet.InternetStackHelper()
internet.Install(n)
#
# Explicitly create the channels required by the topology (shown above).
#
print ("Create channels.")
csma = ns.csma.CsmaHelper()
csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)));
csma.SetDeviceAttribute("Mtu", ns.core.UintegerValue(1400))
d = csma.Install(n)
#
# We've got the "hardware" in place. Now we need to add IP addresses.
#
print ("Assign IP Addresses.")
ipv4 = ns.internet.Ipv4AddressHelper()
ipv4.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0"))
i = ipv4.Assign(d)
print ("Create Applications.")
#
# Create a UdpEchoServer application on node one.
#
port = 9 # well-known echo port number
server = ns.applications.UdpEchoServerHelper(port)
apps = server.Install(n.Get(1))
apps.Start(ns.core.Seconds(1.0))
apps.Stop(ns.core.Seconds(10.0))
#
# Create a UdpEchoClient application to send UDP datagrams from node zero to
# node one.
#
packetSize = 1024
maxPacketCount = 500
interPacketInterval = ns.core.Seconds(0.01)
client = ns.applications.UdpEchoClientHelper(i.GetAddress (1), port)
client.SetAttribute("MaxPackets", ns.core.UintegerValue(maxPacketCount))
client.SetAttribute("Interval", ns.core.TimeValue(interPacketInterval))
client.SetAttribute("PacketSize", ns.core.UintegerValue(packetSize))
apps = client.Install(n.Get(0))
apps.Start(ns.core.Seconds(2.0))
apps.Stop(ns.core.Seconds(10.0))
ascii = ns.network.AsciiTraceHelper()
csma.EnableAsciiAll(ascii.CreateFileStream("realtime-udp-echo.tr"))
csma.EnablePcapAll("realtime-udp-echo", False)
#
# Now, do the actual simulation.
#
print ("Run Simulation.")
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
print ("Done.")
if __name__ == '__main__':
import sys
main(sys.argv)
| gpl-2.0 |
prarthitm/edxplatform | common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py | 19 | 35039 | """
Unit tests for the Mongo modulestore
"""
# pylint: disable=protected-access
# pylint: disable=no-name-in-module
# pylint: disable=bad-continuation
from nose.tools import assert_equals, assert_raises, \
assert_not_equals, assert_false, assert_true, assert_greater, assert_is_instance, assert_is_none
# pylint: enable=E0611
from path import Path as path
import pymongo
import logging
import shutil
from tempfile import mkdtemp
from uuid import uuid4
from datetime import datetime
from pytz import UTC
import unittest
from mock import patch
from xblock.core import XBlock
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xblock.runtime import KeyValueStore
from xblock.exceptions import InvalidScopeError
from xmodule.tests import DATA_DIR
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import Location
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.mongo import MongoKeyValueStore
from xmodule.modulestore.draft import DraftModuleStore
from opaque_keys.edx.locations import SlashSeparatedCourseKey, AssetLocation
from opaque_keys.edx.locator import LibraryLocator, CourseLocator
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.xml_importer import import_course_from_xml, perform_xlint
from xmodule.contentstore.mongo import MongoContentStore
from nose.tools import assert_in
from xmodule.exceptions import NotFoundError
from git.test.lib.asserts import assert_not_none
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.mongo.base import as_draft
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.utils import LocationMixin, mock_tab_from_json
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import InheritanceMixin
log = logging.getLogger(__name__)
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
COLLECTION = 'modulestore'
ASSET_COLLECTION = 'assetstore'
FS_ROOT = DATA_DIR # TODO (vshnayder): will need a real fs_root for testing load_item
DEFAULT_CLASS = 'xmodule.raw_module.RawDescriptor'
RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': ''
class ReferenceTestXBlock(XModuleMixin):
"""
Test xblock type to test the reference field types
"""
has_children = True
reference_link = Reference(default=None, scope=Scope.content)
reference_list = ReferenceList(scope=Scope.content)
reference_dict = ReferenceValueDict(scope=Scope.settings)
class TestMongoModuleStoreBase(unittest.TestCase):
'''
Basic setup for all tests
'''
# Explicitly list the courses to load (don't want the big one)
courses = ['toy', 'simple', 'simple_with_draft', 'test_unicode']
@classmethod
def setupClass(cls):
cls.connection = pymongo.MongoClient(
host=HOST,
port=PORT,
tz_aware=True,
document_class=dict,
)
# NOTE: Creating a single db for all the tests to save time. This
# is ok only as long as none of the tests modify the db.
# If (when!) that changes, need to either reload the db, or load
# once and copy over to a tmp db for each test.
cls.content_store, cls.draft_store = cls.initdb()
@classmethod
def teardownClass(cls):
if cls.connection:
cls.connection.drop_database(DB)
cls.connection.close()
@classmethod
def add_asset_collection(cls, doc_store_config):
"""
No asset collection.
"""
pass
@classmethod
def initdb(cls):
# connect to the db
doc_store_config = {
'host': HOST,
'port': PORT,
'db': DB,
'collection': COLLECTION,
}
cls.add_asset_collection(doc_store_config)
# since MongoModuleStore and MongoContentStore are basically assumed to be together, create this class
# as well
content_store = MongoContentStore(HOST, DB, port=PORT)
#
# Also test draft store imports
#
draft_store = DraftModuleStore(
content_store,
doc_store_config, FS_ROOT, RENDER_TEMPLATE,
default_class=DEFAULT_CLASS,
branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
xblock_mixins=(EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin)
)
with patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json):
import_course_from_xml(
draft_store,
999,
DATA_DIR,
cls.courses,
static_content_store=content_store
)
# also test a course with no importing of static content
import_course_from_xml(
draft_store,
999,
DATA_DIR,
['test_import_course'],
static_content_store=content_store,
do_import_static=False,
verbose=True
)
# also import a course under a different course_id (especially ORG)
import_course_from_xml(
draft_store,
999,
DATA_DIR,
['test_import_course'],
static_content_store=content_store,
do_import_static=False,
verbose=True,
target_id=SlashSeparatedCourseKey('guestx', 'foo', 'bar')
)
return content_store, draft_store
@staticmethod
def destroy_db(connection):
# Destroy the test db.
connection.drop_database(DB)
def setUp(self):
super(TestMongoModuleStoreBase, self).setUp()
self.dummy_user = ModuleStoreEnum.UserID.test
class TestMongoModuleStore(TestMongoModuleStoreBase):
'''Module store tests'''
@classmethod
def add_asset_collection(cls, doc_store_config):
"""
No asset collection - it's not used in the tests below.
"""
pass
@classmethod
def setupClass(cls):
super(TestMongoModuleStore, cls).setupClass()
@classmethod
def teardownClass(cls):
super(TestMongoModuleStore, cls).teardownClass()
def test_init(self):
'''Make sure the db loads'''
ids = list(self.connection[DB][COLLECTION].find({}, {'_id': True}))
assert_greater(len(ids), 12)
def test_mongo_modulestore_type(self):
store = DraftModuleStore(
None,
{'host': HOST, 'db': DB, 'port': PORT, 'collection': COLLECTION},
FS_ROOT, RENDER_TEMPLATE, default_class=DEFAULT_CLASS
)
assert_equals(store.get_modulestore_type(''), ModuleStoreEnum.Type.mongo)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_courses(self, _from_json):
'''Make sure the course objects loaded properly'''
courses = self.draft_store.get_courses()
assert_equals(len(courses), 6)
course_ids = [course.id for course in courses]
for course_key in [
SlashSeparatedCourseKey(*fields)
for fields in [
['edX', 'simple', '2012_Fall'],
['edX', 'simple_with_draft', '2012_Fall'],
['edX', 'test_import_course', '2012_Fall'],
['edX', 'test_unicode', '2012_Fall'],
['edX', 'toy', '2012_Fall'],
['guestx', 'foo', 'bar'],
]
]:
assert_in(course_key, course_ids)
course = self.draft_store.get_course(course_key)
assert_not_none(course)
assert_true(self.draft_store.has_course(course_key))
mix_cased = SlashSeparatedCourseKey(
course_key.org.upper(), course_key.course.upper(), course_key.run.lower()
)
assert_false(self.draft_store.has_course(mix_cased))
assert_true(self.draft_store.has_course(mix_cased, ignore_case=True))
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_org_courses(self, _from_json):
"""
Make sure that we can query for a filtered list of courses for a given ORG
"""
courses = self.draft_store.get_courses(org='guestx')
assert_equals(len(courses), 1)
course_ids = [course.id for course in courses]
for course_key in [
SlashSeparatedCourseKey(*fields)
for fields in [
['guestx', 'foo', 'bar']
]
]:
assert_in(course_key, course_ids)
courses = self.draft_store.get_courses(org='edX')
assert_equals(len(courses), 5)
course_ids = [course.id for course in courses]
for course_key in [
SlashSeparatedCourseKey(*fields)
for fields in [
['edX', 'simple', '2012_Fall'],
['edX', 'simple_with_draft', '2012_Fall'],
['edX', 'test_import_course', '2012_Fall'],
['edX', 'test_unicode', '2012_Fall'],
['edX', 'toy', '2012_Fall'],
]
]:
assert_in(course_key, course_ids)
def test_no_such_course(self):
"""
Test get_course and has_course with ids which don't exist
"""
for course_key in [
SlashSeparatedCourseKey(*fields)
for fields in [
['edX', 'simple', 'no_such_course'], ['edX', 'no_such_course', '2012_Fall'],
['NO_SUCH_COURSE', 'Test_iMport_courSe', '2012_Fall'],
]
]:
course = self.draft_store.get_course(course_key)
assert_is_none(course)
assert_false(self.draft_store.has_course(course_key))
mix_cased = SlashSeparatedCourseKey(
course_key.org.lower(), course_key.course.upper(), course_key.run.upper()
)
assert_false(self.draft_store.has_course(mix_cased))
assert_false(self.draft_store.has_course(mix_cased, ignore_case=True))
def test_get_mongo_course_with_split_course_key(self):
"""
Test that a Mongo-stored course cannot be accessed using a split (CourseLocator) course_key.
"""
course_key = CourseKey.from_string('course-v1:edX+simple+2012_Fall')
with self.assertRaises(ItemNotFoundError):
self.draft_store.get_course(course_key)
def test_has_mongo_course_with_split_course_key(self):
"""
Test that `has_course` returns False when given a split course key.
"""
course_key = CourseKey.from_string('course-v1:edX+simple+2012_Fall')
self.assertFalse(self.draft_store.has_course(course_key))
def test_has_course_with_library(self):
"""
Test that has_course() returns False when called with a LibraryLocator.
This is required because MixedModuleStore will use has_course() to check
where a given library is stored.
"""
lib_key = LibraryLocator("TestOrg", "TestLib")
result = self.draft_store.has_course(lib_key)
assert_false(result)
def test_loads(self):
assert_not_none(
self.draft_store.get_item(Location('edX', 'toy', '2012_Fall', 'course', '2012_Fall'))
)
assert_not_none(
self.draft_store.get_item(Location('edX', 'simple', '2012_Fall', 'course', '2012_Fall')),
)
assert_not_none(
self.draft_store.get_item(Location('edX', 'toy', '2012_Fall', 'video', 'Welcome')),
)
def test_unicode_loads(self):
"""
Test that getting items from the test_unicode course works
"""
assert_not_none(
self.draft_store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'course', '2012_Fall')),
)
# All items with ascii-only filenames should load properly.
assert_not_none(
self.draft_store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'video', 'Welcome')),
)
assert_not_none(
self.draft_store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'video', 'Welcome')),
)
assert_not_none(
self.draft_store.get_item(Location('edX', 'test_unicode', '2012_Fall', 'chapter', 'Overview')),
)
def test_find_one(self):
assert_not_none(
self.draft_store._find_one(Location('edX', 'toy', '2012_Fall', 'course', '2012_Fall')),
)
assert_not_none(
self.draft_store._find_one(Location('edX', 'simple', '2012_Fall', 'course', '2012_Fall')),
)
assert_not_none(
self.draft_store._find_one(Location('edX', 'toy', '2012_Fall', 'video', 'Welcome')),
)
def test_xlinter(self):
'''
Run the xlinter. We know the 'toy' course has violations, but the
number will continue to grow over time, so just check that it is > 0.
'''
assert_not_equals(perform_xlint(DATA_DIR, ['toy']), 0)
def test_get_courses_has_no_templates(self):
courses = self.draft_store.get_courses()
for course in courses:
assert_false(
course.location.org == 'edx' and course.location.course == 'templates',
'{0} is a template course'.format(course)
)
def test_contentstore_attrs(self):
"""
Test getting, setting, and defaulting the locked attr and arbitrary attrs.
"""
location = Location('edX', 'toy', '2012_Fall', 'course', '2012_Fall')
course_content, __ = self.content_store.get_all_content_for_course(location.course_key)
assert_true(len(course_content) > 0)
filter_params = _build_requested_filter('Images')
filtered_course_content, __ = self.content_store.get_all_content_for_course(
location.course_key, filter_params=filter_params)
assert_true(len(filtered_course_content) < len(course_content))
# a bit overkill, could just do for content[0]
for content in course_content:
assert not content.get('locked', False)
asset_key = AssetLocation._from_deprecated_son(content.get('content_son', content['_id']), location.run)
assert not self.content_store.get_attr(asset_key, 'locked', False)
attrs = self.content_store.get_attrs(asset_key)
assert_in('uploadDate', attrs)
assert not attrs.get('locked', False)
self.content_store.set_attr(asset_key, 'locked', True)
assert self.content_store.get_attr(asset_key, 'locked', False)
attrs = self.content_store.get_attrs(asset_key)
assert_in('locked', attrs)
assert attrs['locked'] is True
self.content_store.set_attrs(asset_key, {'miscel': 99})
assert_equals(self.content_store.get_attr(asset_key, 'miscel'), 99)
asset_key = AssetLocation._from_deprecated_son(
course_content[0].get('content_son', course_content[0]['_id']),
location.run
)
assert_raises(
AttributeError, self.content_store.set_attr, asset_key,
'md5', 'ff1532598830e3feac91c2449eaa60d6'
)
assert_raises(
AttributeError, self.content_store.set_attrs, asset_key,
{'foo': 9, 'md5': 'ff1532598830e3feac91c2449eaa60d6'}
)
assert_raises(
NotFoundError, self.content_store.get_attr,
Location('bogus', 'bogus', 'bogus', 'asset', 'bogus'),
'displayname'
)
assert_raises(
NotFoundError, self.content_store.set_attr,
Location('bogus', 'bogus', 'bogus', 'asset', 'bogus'),
'displayname', 'hello'
)
assert_raises(
NotFoundError, self.content_store.get_attrs,
Location('bogus', 'bogus', 'bogus', 'asset', 'bogus')
)
assert_raises(
NotFoundError, self.content_store.set_attrs,
Location('bogus', 'bogus', 'bogus', 'asset', 'bogus'),
{'displayname': 'hello'}
)
assert_raises(
NotFoundError, self.content_store.set_attrs,
Location('bogus', 'bogus', 'bogus', 'asset', None),
{'displayname': 'hello'}
)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_courses_for_wiki(self, _from_json):
"""
Test the get_courses_for_wiki method
"""
for course_number in self.courses:
course_locations = self.draft_store.get_courses_for_wiki(course_number)
assert_equals(len(course_locations), 1)
assert_equals(SlashSeparatedCourseKey('edX', course_number, '2012_Fall'), course_locations[0])
course_locations = self.draft_store.get_courses_for_wiki('no_such_wiki')
assert_equals(len(course_locations), 0)
# set toy course to share the wiki with simple course
toy_course = self.draft_store.get_course(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))
toy_course.wiki_slug = 'simple'
self.draft_store.update_item(toy_course, ModuleStoreEnum.UserID.test)
# now toy_course should not be retrievable with old wiki_slug
course_locations = self.draft_store.get_courses_for_wiki('toy')
assert_equals(len(course_locations), 0)
# but there should be two courses with wiki_slug 'simple'
course_locations = self.draft_store.get_courses_for_wiki('simple')
assert_equals(len(course_locations), 2)
for course_number in ['toy', 'simple']:
assert_in(SlashSeparatedCourseKey('edX', course_number, '2012_Fall'), course_locations)
# configure simple course to use unique wiki_slug.
simple_course = self.draft_store.get_course(SlashSeparatedCourseKey('edX', 'simple', '2012_Fall'))
simple_course.wiki_slug = 'edX.simple.2012_Fall'
self.draft_store.update_item(simple_course, ModuleStoreEnum.UserID.test)
# it should be retrievable with its new wiki_slug
course_locations = self.draft_store.get_courses_for_wiki('edX.simple.2012_Fall')
assert_equals(len(course_locations), 1)
assert_in(SlashSeparatedCourseKey('edX', 'simple', '2012_Fall'), course_locations)
@XBlock.register_temp_plugin(ReferenceTestXBlock, 'ref_test')
def test_reference_converters(self):
"""
Test that references types get deserialized correctly
"""
course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def setup_test():
course = self.draft_store.get_course(course_key)
# can't use item factory as it depends on django settings
p1ele = self.draft_store.create_item(
99,
course_key,
'problem',
block_id='p1',
runtime=course.runtime
)
p2ele = self.draft_store.create_item(
99,
course_key,
'problem',
block_id='p2',
runtime=course.runtime
)
self.refloc = course.id.make_usage_key('ref_test', 'ref_test')
self.draft_store.create_item(
99,
self.refloc.course_key,
self.refloc.block_type,
block_id=self.refloc.block_id,
runtime=course.runtime,
fields={
'reference_link': p1ele.location,
'reference_list': [p1ele.location, p2ele.location],
'reference_dict': {'p1': p1ele.location, 'p2': p2ele.location},
'children': [p1ele.location, p2ele.location],
}
)
def check_xblock_fields():
def check_children(xblock):
for child in xblock.children:
assert_is_instance(child, UsageKey)
course = self.draft_store.get_course(course_key)
check_children(course)
refele = self.draft_store.get_item(self.refloc)
check_children(refele)
assert_is_instance(refele.reference_link, UsageKey)
assert_greater(len(refele.reference_list), 0)
for ref in refele.reference_list:
assert_is_instance(ref, UsageKey)
assert_greater(len(refele.reference_dict), 0)
for ref in refele.reference_dict.itervalues():
assert_is_instance(ref, UsageKey)
def check_mongo_fields():
def get_item(location):
return self.draft_store._find_one(as_draft(location))
def check_children(payload):
for child in payload['definition']['children']:
assert_is_instance(child, basestring)
refele = get_item(self.refloc)
check_children(refele)
assert_is_instance(refele['definition']['data']['reference_link'], basestring)
assert_greater(len(refele['definition']['data']['reference_list']), 0)
for ref in refele['definition']['data']['reference_list']:
assert_is_instance(ref, basestring)
assert_greater(len(refele['metadata']['reference_dict']), 0)
for ref in refele['metadata']['reference_dict'].itervalues():
assert_is_instance(ref, basestring)
setup_test()
check_xblock_fields()
check_mongo_fields()
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_export_course_image(self, _from_json):
"""
Test to make sure that we have a course image in the contentstore,
then export it to ensure it gets copied to both file locations.
"""
course_key = SlashSeparatedCourseKey('edX', 'simple', '2012_Fall')
location = course_key.make_asset_key('asset', 'images_course_image.jpg')
# This will raise if the course image is missing
self.content_store.find(location)
root_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, root_dir)
export_course_to_xml(self.draft_store, self.content_store, course_key, root_dir, 'test_export')
self.assertTrue(path(root_dir / 'test_export/static/images/course_image.jpg').isfile())
self.assertTrue(path(root_dir / 'test_export/static/images_course_image.jpg').isfile())
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_export_course_image_nondefault(self, _from_json):
"""
Make sure that if a non-default image path is specified that we
don't export it to the static default location
"""
course = self.draft_store.get_course(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))
assert_true(course.course_image, 'just_a_test.jpg')
root_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, root_dir)
export_course_to_xml(self.draft_store, self.content_store, course.id, root_dir, 'test_export')
self.assertTrue(path(root_dir / 'test_export/static/just_a_test.jpg').isfile())
self.assertFalse(path(root_dir / 'test_export/static/images/course_image.jpg').isfile())
def test_course_without_image(self):
"""
Make sure the export gracefully skips the course image when there isn't a
static image
"""
course = self.draft_store.get_course(SlashSeparatedCourseKey('edX', 'simple_with_draft', '2012_Fall'))
root_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, root_dir)
export_course_to_xml(self.draft_store, self.content_store, course.id, root_dir, 'test_export')
self.assertFalse(path(root_dir / 'test_export/static/images/course_image.jpg').isfile())
self.assertFalse(path(root_dir / 'test_export/static/images_course_image.jpg').isfile())
def _create_test_tree(self, name, user_id=None):
"""
Creates and returns a tree with the following structure:
Grandparent
Parent Sibling
Parent
Child
Child Sibling
"""
if user_id is None:
user_id = self.dummy_user
org = 'edX'
course = 'tree{}'.format(name)
run = name
if not self.draft_store.has_course(SlashSeparatedCourseKey(org, course, run)):
self.draft_store.create_course(org, course, run, user_id)
locations = {
'grandparent': Location(org, course, run, 'chapter', 'grandparent'),
'parent_sibling': Location(org, course, run, 'sequential', 'parent_sibling'),
'parent': Location(org, course, run, 'sequential', 'parent'),
'child_sibling': Location(org, course, run, 'vertical', 'child_sibling'),
'child': Location(org, course, run, 'vertical', 'child'),
}
for key in locations:
self.draft_store.create_item(
user_id,
locations[key].course_key,
locations[key].block_type,
block_id=locations[key].block_id
)
grandparent = self.draft_store.get_item(locations['grandparent'])
grandparent.children += [locations['parent_sibling'], locations['parent']]
self.draft_store.update_item(grandparent, user_id=user_id)
parent = self.draft_store.get_item(locations['parent'])
parent.children += [locations['child_sibling'], locations['child']]
self.draft_store.update_item(parent, user_id=user_id)
self.draft_store.publish(locations['parent'], user_id)
self.draft_store.publish(locations['parent_sibling'], user_id)
return locations
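# Editorial usage sketch (assumption): a test built on this helper can look the
# created blocks back up via the returned locations, e.g.:
#
#   locs = self._create_test_tree('example_run')
#   parent = self.draft_store.get_item(locs['parent'])
#   self.assertIn(locs['child'], parent.children)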
def test_migrate_published_info(self):
"""
Tests that blocks that were storing published_date and published_by through CMSBlockMixin are loaded correctly
"""
# Insert the test block directly into the module store
location = Location('edX', 'migration', '2012_Fall', 'html', 'test_html')
published_date = datetime(1970, 1, 1, tzinfo=UTC)
published_by = 123
self.draft_store._update_single_item(
as_draft(location),
{
'definition.data': {},
'metadata': {
# published_date was previously stored as a list of time components, not a datetime
'published_date': list(published_date.timetuple()),
'published_by': published_by,
},
},
allow_not_found=True,
)
# Retrieve the block and verify its fields
component = self.draft_store.get_item(location)
self.assertEqual(component.published_on, published_date)
self.assertEqual(component.published_by, published_by)
def test_draft_modulestore_create_child_with_position(self):
"""
This test is designed to hit a specific set of use cases having to do with
the child positioning logic found in mongo/base.py:create_child()
"""
# Set up the draft module store
course = self.draft_store.create_course("TestX", "ChildTest", "1234_A1", 1)
first_child = self.draft_store.create_child(
self.dummy_user,
course.location,
"chapter",
block_id=course.location.block_id
)
second_child = self.draft_store.create_child(
self.dummy_user,
course.location,
"chapter",
block_id=course.location.block_id,
position=0
)
# First child should have been moved to second position, and the second child takes the lead
course = self.draft_store.get_course(course.id)
self.assertEqual(unicode(course.children[1]), unicode(first_child.location))
self.assertEqual(unicode(course.children[0]), unicode(second_child.location))
# Clean up the data so we don't break other tests which apparently expect a particular state
self.draft_store.delete_course(course.id, self.dummy_user)
def test_make_course_usage_key(self):
"""Test that we get back the appropriate usage key for the root of a course key."""
course_key = CourseLocator(org="edX", course="101", run="2015")
root_block_key = self.draft_store.make_course_usage_key(course_key)
self.assertEqual(root_block_key.block_type, "course")
self.assertEqual(root_block_key.name, "2015")
class TestMongoModuleStoreWithNoAssetCollection(TestMongoModuleStore):
'''
Tests a situation where no asset_collection is specified.
'''
@classmethod
def add_asset_collection(cls, doc_store_config):
"""
No asset collection.
"""
pass
@classmethod
def setupClass(cls):
super(TestMongoModuleStoreWithNoAssetCollection, cls).setupClass()
@classmethod
def teardownClass(cls):
super(TestMongoModuleStoreWithNoAssetCollection, cls).teardownClass()
def test_no_asset_collection(self):
courses = self.draft_store.get_courses()
course = courses[0]
# Confirm that no specified asset collection name means empty asset metadata.
self.assertEquals(self.draft_store.get_all_asset_metadata(course.id, 'asset'), [])
def test_no_asset_invalid_key(self):
course_key = CourseLocator(org="edx3", course="test_course", run=None, deprecated=True)
# Confirm that invalid course key raises ItemNotFoundError
self.assertRaises(ItemNotFoundError, lambda: self.draft_store.get_all_asset_metadata(course_key, 'asset')[:1])
class TestMongoKeyValueStore(unittest.TestCase):
"""
Tests for MongoKeyValueStore.
"""
def setUp(self):
super(TestMongoKeyValueStore, self).setUp()
self.data = {'foo': 'foo_value'}
self.course_id = SlashSeparatedCourseKey('org', 'course', 'run')
self.parent = self.course_id.make_usage_key('parent', 'p')
self.children = [self.course_id.make_usage_key('child', 'a'), self.course_id.make_usage_key('child', 'b')]
self.metadata = {'meta': 'meta_val'}
self.kvs = MongoKeyValueStore(self.data, self.parent, self.children, self.metadata)
def test_read(self):
assert_equals(self.data['foo'], self.kvs.get(KeyValueStore.Key(Scope.content, None, None, 'foo')))
assert_equals(self.parent, self.kvs.get(KeyValueStore.Key(Scope.parent, None, None, 'parent')))
assert_equals(self.children, self.kvs.get(KeyValueStore.Key(Scope.children, None, None, 'children')))
assert_equals(self.metadata['meta'], self.kvs.get(KeyValueStore.Key(Scope.settings, None, None, 'meta')))
def test_read_invalid_scope(self):
for scope in (Scope.preferences, Scope.user_info, Scope.user_state):
key = KeyValueStore.Key(scope, None, None, 'foo')
with assert_raises(InvalidScopeError):
self.kvs.get(key)
assert_false(self.kvs.has(key))
def test_read_non_dict_data(self):
self.kvs = MongoKeyValueStore('xml_data', self.parent, self.children, self.metadata)
assert_equals('xml_data', self.kvs.get(KeyValueStore.Key(Scope.content, None, None, 'data')))
def _check_write(self, key, value):
self.kvs.set(key, value)
assert_equals(value, self.kvs.get(key))
def test_write(self):
yield (self._check_write, KeyValueStore.Key(Scope.content, None, None, 'foo'), 'new_data')
yield (self._check_write, KeyValueStore.Key(Scope.children, None, None, 'children'), [])
yield (self._check_write, KeyValueStore.Key(Scope.children, None, None, 'parent'), None)
yield (self._check_write, KeyValueStore.Key(Scope.settings, None, None, 'meta'), 'new_settings')
def test_write_non_dict_data(self):
self.kvs = MongoKeyValueStore('xml_data', self.parent, self.children, self.metadata)
self._check_write(KeyValueStore.Key(Scope.content, None, None, 'data'), 'new_data')
def test_write_invalid_scope(self):
for scope in (Scope.preferences, Scope.user_info, Scope.user_state):
with assert_raises(InvalidScopeError):
self.kvs.set(KeyValueStore.Key(scope, None, None, 'foo'), 'new_value')
def _check_delete_default(self, key, default_value):
self.kvs.delete(key)
assert_equals(default_value, self.kvs.get(key))
assert self.kvs.has(key)
def _check_delete_key_error(self, key):
self.kvs.delete(key)
with assert_raises(KeyError):
self.kvs.get(key)
assert_false(self.kvs.has(key))
def test_delete(self):
yield (self._check_delete_key_error, KeyValueStore.Key(Scope.content, None, None, 'foo'))
yield (self._check_delete_default, KeyValueStore.Key(Scope.children, None, None, 'children'), [])
yield (self._check_delete_key_error, KeyValueStore.Key(Scope.settings, None, None, 'meta'))
def test_delete_invalid_scope(self):
for scope in (Scope.preferences, Scope.user_info, Scope.user_state, Scope.parent):
with assert_raises(InvalidScopeError):
self.kvs.delete(KeyValueStore.Key(scope, None, None, 'foo'))
def _build_requested_filter(requested_filter):
"""
Returns requested filter_params string.
"""
# Files and Uploads type filter values
all_filters = {
"Images": ['image/png', 'image/jpeg', 'image/jpg', 'image/gif', 'image/tiff', 'image/tif', 'image/x-icon'],
"Documents": [
'application/pdf',
'text/plain',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'application/vnd.openxmlformats-officedocument.presentationml.template',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
'application/msword',
'application/vnd.ms-excel',
'application/vnd.ms-powerpoint',
],
}
requested_file_types = all_filters.get(requested_filter, None)
where = ["JSON.stringify(this.contentType).toUpperCase() == JSON.stringify('{}').toUpperCase()".format(
req_filter) for req_filter in requested_file_types]
filter_params = {
"$where": ' || '.join(where),
}
return filter_params
| agpl-3.0 |
kernsuite-debian/obit | python/testObit.py | 1 | 1061 | import OErr, OSystem, Image, InfoList, FArray
err=OErr.OErr()
ObitSys=OSystem.OSystem ("Python", 1, 103, 1, ["None"], 1, ["../PythonData/"], 1, 0, err)
OErr.printErrMsg(err, "Error with Obit startup")
# for debugging
#OErr.Bomb()
list = InfoList.InfoList();
dim = [1,0,0,0,0]
InfoList.PPutLong (list, "longItem", dim, [321], err)
x=InfoList.PGet (list, "longItem")
x=FArray.FArray("MyFArray",[30,50])
FArray.PSetVal(x,[25,25],3.1415926)
pos=[-1,-1]
result = FArray.PMax(x,pos)
print "FArray max is",result, "should be 3.1415926"
result = FArray.PMin(x,pos)
print "FArray min is",result, "should be 0"
file = "testPcube.fits"
disk = 1
image = Image.newPFImage("myImage", file, disk, True, err);
OErr.printErr(err)
Image.POpen(image,1,err)
OErr.printErr(err)
Image.PRead(image,err)
OErr.printErr(err)
Image.PClose(image,err)
data = Image.PGetFArray(image);
pos=[-1,-1]
result = FArray.PMax(data,pos)
print "Max in",file,"is",result
result = FArray.PRMS(data)
print "RMS in",file,"is",result
OErr.printErr(err)
del err
del ObitSys
#OSystem.Shutdown(ObitSys)
| gpl-2.0 |
cfg2015/EPT-2015-2 | doc/_themes/odoodoc/html_domain.py | 129 | 4109 | # -*- coding: utf-8 -*-
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives, docutils
from docutils.parsers.rst.directives.body import LineBlock
import sphinx.roles
from sphinx.domains import Domain
def setup(app):
app.add_domain(HtmlDomain)
app.add_node(div, html=(
lambda self, node: self.body.append(self.starttag(node, 'div')),
lambda self, node: self.body.append('</div>\n')))
app.add_node(address, html=(
lambda self, node: self.body.append(self.starttag(node, 'address')),
lambda self, node: self.body.append('</address>\n')
))
app.add_node(cite, html=(visit_cite, depart_cite))
for name, node in [('mark', mark), ('ins', insert), ('del', delete),
('s', strikethrough), ('u', underline), ('small', small),
('kbd', kbd), ('var', var), ('samp', samp)]:
addnode(app, node, name)
class div(nodes.General, nodes.Element): pass
class Div(Directive):
optional_arguments = 1
final_argument_whitespace = 1
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = div(text)
node['classes'].extend(classes)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class address(nodes.General, nodes.Element): pass
class Address(LineBlock):
def run(self):
[node] = super(Address, self).run()
ad = address(node.rawsource, *node.children)
return [ad]
class mark(nodes.Inline, nodes.TextElement): pass
class insert(nodes.Inline, nodes.TextElement): pass
class delete(nodes.Inline, nodes.TextElement): pass
class strikethrough(nodes.Inline, nodes.TextElement): pass
class underline(nodes.Inline, nodes.TextElement): pass
class small(nodes.Inline, nodes.TextElement): pass
class kbd(nodes.Inline, nodes.FixedTextElement): pass
class var(nodes.Inline, nodes.FixedTextElement): pass
class samp(nodes.Inline, nodes.FixedTextElement): pass
def makerole(node):
return lambda name, rawtext, text, lineno, inliner, options=None, content=None:\
([node(rawtext.strip(), text.strip())], [])
def addnode(app, node, nodename):
app.add_node(node, html=(
lambda self, n: self.body.append(self.starttag(n, nodename)),
lambda self, n: self.body.append('</%s>' % nodename)
))
def initialism(*args, **kwargs):
nodes, _ = sphinx.roles.abbr_role(*args, **kwargs)
[abbr] = nodes
abbr.attributes.setdefault('classes', []).append('initialism')
return [abbr], []
def cite_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
text = utils.unescape(text)
m = sphinx.roles._abbr_re.search(text)
if m is None:
return [cite(text, text, **(options or {}))], []
content = text[:m.start()].strip()
source = m.group(1)
return [cite(content, content, source=source)], []
class cite(nodes.Inline, nodes.TextElement): pass
def visit_cite(self, node):
attrs = {}
if node.hasattr('source'):
attrs['title'] = node['source']
self.body.append(self.starttag(node, 'cite', '', **attrs))
def depart_cite(self, node):
self.body.append('</abbr>')
class HtmlDomain(Domain):
name = 'h'
label = 'HTML'
directives = {
'div': Div,
'address': Address,
}
roles = {
'mark': makerole(mark),
'ins': makerole(insert),
'del': makerole(delete),
's': makerole(strikethrough),
'u': makerole(underline),
'small': makerole(small),
'initialism': initialism,
'cite': cite_role,
'kbd': makerole(kbd),
'var': makerole(var),
'samp': makerole(samp),
}
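# Editorial usage sketch (assumption): once setup() registers this domain, reST
# sources can use the directives and roles above, e.g.:
#
#   .. h:div:: my-class
#
#      Content wrapped in a <div class="my-class">.
#
#   Inline :h:mark:`highlighted text` or :h:kbd:`Ctrl-C`.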
| agpl-3.0 |
gomowjames/angular-barrettj.co | node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&amp;").replace("<", "&lt;")
data = data.replace("\"", "&quot;").replace(">", "&gt;")
if is_attrib:
data = data.replace(
"\r", "&#xD;").replace(
"\n", "&#xA;").replace(
"\t", "&#x9;")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
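# Editorial usage sketch (assumption): the patch stays in effect for the lifetime
# of an XmlFix instance, roughly:
#
#   fix = XmlFix()
#   doc = xml.dom.minidom.Document()
#   # ... build the document; CR/LF/TAB in attribute values now survive writexml
#   xml_text = doc.toxml()
#   fix.Cleanup()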
| mit |
aimejeux/enigma2 | lib/python/Components/RecordingConfig.py | 23 | 1313 | from config import ConfigSelectionNumber, ConfigYesNo, ConfigSubsection, ConfigSelection, config
def InitRecordingConfig():
config.recording = ConfigSubsection()
# Actually this means "recordings always have priority". "Yes" means: don't ask. The RecordTimer will ask when the value is 0.
config.recording.asktozap = ConfigYesNo(default=True)
config.recording.margin_before = ConfigSelectionNumber(min = 0, max = 120, stepwidth = 1, default = 3, wraparound = True)
config.recording.margin_after = ConfigSelectionNumber(min = 0, max = 120, stepwidth = 1, default = 5, wraparound = True)
config.recording.ascii_filenames = ConfigYesNo(default = False)
config.recording.keep_timers = ConfigSelectionNumber(min = 1, max = 120, stepwidth = 1, default = 7, wraparound = True)
config.recording.filename_composition = ConfigSelection(default = "standard", choices = [
("standard", _("standard")),
("short", _("Short filenames")),
("long", _("Long filenames")) ] )
config.recording.offline_decode_delay = ConfigSelectionNumber(min = 1, max = 10000, stepwidth = 10, default = 1000, wraparound = True)
config.recording.ecm_data = ConfigSelection(choices = [("normal", _("normal")), ("descrambled+ecm", _("descramble and record ecm")), ("scrambled+ecm", _("don't descramble, record ecm"))], default = "normal")
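# Editorial usage sketch (assumption): after InitRecordingConfig() has run, other
# components read these settings through their .value attribute, e.g.:
#
#   InitRecordingConfig()
#   pre_margin = config.recording.margin_before.value   # margin before the event
#   if config.recording.asktozap.value:
#       pass  # recordings always have priority; don't ask before zapping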
| gpl-2.0 |
deepsrd/android_kernel_nx507j | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno  # needed for errno.EEXIST in check_build()
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'msmkrypton*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
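# Added illustration (comment only, not in the original): scan_configs() maps
# the short target name to its defconfig path; a tree containing
# arch/arm/configs/msm8974_defconfig would yield
#   {'msm8974': 'arch/arm/configs/msm8974_defconfig'}
# because the trailing '_defconfig' (10 characters) is stripped by [:-10].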
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
zlsun/XX-Net | code/default/smart_router/local/pipe_socks.py | 1 | 7876 | import threading
import select
import time
import utils
import global_var as g
from xlog import getLogger
xlog = getLogger("smart_router")
class PipeSocks(object):
def __init__(self, buf_size=16*1024):
self.buf_size = buf_size
self.sock_dict = {}
self.read_set = []
self.write_set = []
self.error_set = []
self.running = True
def __str__(self):
outs = ["Pipe Sockets:"]
outs.append("buf_size=%d" % self.buf_size)
outs.append("running=%d" % self.running)
outs.append("")
outs.append("socket dict:")
for s in self.sock_dict:
outs.append(" %s =%s" % (s, self.sock_dict[s]))
outs.append("read dict:")
for s in self.read_set:
outs.append(" %s" % s)
outs.append("write dict:")
for s in self.write_set:
outs.append(" %s" % s)
outs.append("error dict:")
for s in self.error_set:
outs.append(" %s" % s)
return "\n".join(outs)
def run(self):
self.down_th = threading.Thread(target=self.pipe)
self.down_th.start()
def stop(self):
self.running = False
def add_socks(self, s1, s2):
s1.setblocking(0)
s2.setblocking(0)
self.read_set.append(s1)
self.read_set.append(s2)
self.error_set.append(s1)
self.error_set.append(s2)
self.sock_dict[s1] = s2
self.sock_dict[s2] = s1
def try_remove(self, l, s):
try:
l.remove(s)
except:
pass
def close(self, s1, e):
if s1 not in self.sock_dict:
# xlog.warn("sock not in dict")
return
s2 = self.sock_dict[s1]
if utils.is_private_ip(s1.ip):
local_sock = s1
remote_sock = s2
else:
local_sock = s2
remote_sock = s1
create_time = time.time() - remote_sock.create_time
xlog.debug("pipe close %s->%s run_time:%d upload:%d,%d download:%d,%d, by remote:%d, left:%d e:%r",
local_sock, remote_sock, create_time,
local_sock.recved_data, local_sock.recved_times,
remote_sock.recved_data, remote_sock.recved_times,
s1==remote_sock, s1.buf_size, e)
if local_sock.recved_data > 0 and local_sock.recved_times == 1 and remote_sock.port == 443 and \
((s1 == local_sock and create_time > 30) or (s1 == remote_sock)):
host = remote_sock.host
xlog.debug("SNI:%s fail.", host)
#g.domain_cache.update_rule(host, 443, "gae")
del self.sock_dict[s1]
self.try_remove(self.read_set, s1)
self.try_remove(self.write_set, s1)
self.try_remove(self.error_set, s1)
s1.close()
if s2.buf_size:
xlog.debug("pipe close %s e:%s, but s2:%s have data(%d) to send",
s1, e, s2, s2.buf_size)
s2.add_dat("")
return
if s2 in self.sock_dict:
del self.sock_dict[s2]
self.try_remove(self.read_set, s2)
self.try_remove(self.write_set, s2)
self.try_remove(self.error_set, s2)
s2.close()
def pipe(self):
def flush_send_s(s2, d1):
s2.setblocking(1)
s2.settimeout(1)
s2.sendall(d1)
s2.setblocking(0)
while self.running:
if not self.error_set:
time.sleep(1)
continue
try:
r, w, e = select.select(self.read_set, self.write_set, self.error_set, 0.1)
for s1 in list(r):
if s1 not in self.read_set:
continue
try:
d = s1.recv(65535)
except Exception as e:
self.close(s1, "r")
continue
if not d:
# socket closed by peer.
self.close(s1, "r")
continue
s1.recved_data += len(d)
s1.recved_times += 1
s2 = self.sock_dict[s1]
if s2.is_closed():
continue
if g.config.direct_split_SNI and\
s1.recved_times == 1 and \
s2.port == 443 and \
d[0] == '\x16' and \
g.gfwlist.check(s2.host):
p1 = d.find(s2.host)
if p1 > 1:
if "google" in s2.host:
p2 = d.find("google") + 3
else:
p2 = p1 + len(s2.host) - 6
d1 = d[:p2]
d2 = d[p2:]
try:
flush_send_s(s2, d1)
except Exception as e:
xlog.warn("send split SNI:%s fail:%r", s2.host, e)
self.close(s2, "w")
continue
s2.add_dat(d2)
d = ""
xlog.debug("pipe send split SNI:%s", s2.host)
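# (added explanatory note) 0x16 marks a TLS ClientHello; splitting the record
# around the SNI hostname keeps the full server name out of a single TCP
# segment. The first slice is flushed synchronously via flush_send_s() and the
# remainder is queued on s2's buffer for the normal write loop to send.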
if s2.buf_size == 0:
try:
sended = s2.send(d)
# xlog.debug("direct send %d to %s from:%s", sended, s2, s1)
except Exception as e:
self.close(s2, "w")
continue
if sended == len(d):
continue
else:
d_view = memoryview(d)
d = d_view[sended:]
if d:
if not isinstance(d, memoryview):
d = memoryview(d)
s2.add_dat(d)
if s2 not in self.write_set:
self.write_set.append(s2)
if s2.buf_size > self.buf_size:
self.try_remove(self.read_set, s1)
for s1 in list(w):
if s1 not in self.write_set:
continue
if s1.buf_num == 0:
self.try_remove(self.write_set, s1)
continue
while True:
dat = s1.get_dat()
if not dat:
self.close(s1, "n")
break
try:
sended = s1.send(dat)
except Exception as e:
self.close(s1, "w")
break
if len(dat) - sended > 0:
s1.restore_dat(dat[sended:])
break
if s1.buf_size == 0:
self.try_remove(self.write_set, s1)
if s1.buf_size < self.buf_size:
if s1 not in self.sock_dict:
continue
s2 = self.sock_dict[s1]
if s2 not in self.read_set and s2 in self.sock_dict:
self.read_set.append(s2)
for s1 in list(e):
self.close(s1, "e")
except Exception as e:
xlog.exception("pipe except:%r", e)
for s in list(self.error_set):
self.close(s, "stop")
xlog.info("pipe stopped.")
| bsd-2-clause |
vipmike007/avocado-vt | virttest/libvirt_xml/devices/input.py | 24 | 1517 | """
input device support class(es)
http://libvirt.org/formatdomain.html#elementsInput
"""
from virttest.libvirt_xml import accessors
from virttest.libvirt_xml.devices import base, librarian
class Input(base.TypedDeviceBase):
__slots__ = ('input_bus', 'address')
def __init__(self, type_name, virsh_instance=base.base.virsh):
super(Input, self).__init__(device_tag='input',
type_name=type_name,
virsh_instance=virsh_instance)
accessors.XMLAttribute(property_name="input_bus",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='input',
attribute='bus')
accessors.XMLElementNest('address', self, parent_xpath='/',
tag_name='address', subclass=self.Address,
subclass_dargs={'type_name': 'usb',
'virsh_instance': virsh_instance})
# For convenience
Address = librarian.get('address')
def new_input_address(self, type_name='usb', **dargs):
"""
Return a new input Address instance and set properties from dargs
"""
new_one = self.Address(type_name=type_name, virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
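# Added usage sketch (not part of the original module; the attribute names
# passed to new_input_address below are illustrative assumptions only):
#
#   inp = Input('tablet')
#   inp.input_bus = 'usb'
#   inp.address = inp.new_input_address(type_name='usb', bus='0', port='1')
#
# new_input_address() simply setattr()s each keyword onto a fresh Address
# instance, so any accessor defined by the Address class can be supplied.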
| gpl-2.0 |
likelyzhao/mxnet | example/image-classification/benchmark.py | 16 | 9452 | from __future__ import print_function
import logging
import argparse
import os
import time
import sys
import shutil
import csv
import re
import subprocess, threading
import pygal
import importlib
import collections
import threading
import copy
'''
Setup Logger and LogLevel
'''
def setup_logging(log_loc):
if os.path.exists(log_loc):
shutil.move(log_loc, log_loc + "_" + str(int(os.path.getctime(log_loc))))
os.makedirs(log_loc)
log_file = '{}/benchmark.log'.format(log_loc)
LOGGER = logging.getLogger('benchmark')
LOGGER.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s:%(name)s %(message)s')
file_handler = logging.FileHandler(log_file)
console_handler = logging.StreamHandler()
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
LOGGER.addHandler(file_handler)
LOGGER.addHandler(console_handler)
return LOGGER
'''
Runs the command given in cmd_args, terminating it if it is still running
after the specified timeout period
'''
class RunCmd(threading.Thread):
def __init__(self, cmd_args, logfile):
threading.Thread.__init__(self)
self.cmd_args = cmd_args
self.logfile = logfile
self.process = None
def run(self):
LOGGER = logging.getLogger('benchmark')
LOGGER.info('started running %s', ' '.join(self.cmd_args))
log_fd = open(self.logfile, 'w')
self.process = subprocess.Popen(self.cmd_args, stdout=log_fd, stderr=subprocess.STDOUT, universal_newlines=True)
for line in self.process.communicate():
LOGGER.debug(line)
log_fd.close()
LOGGER.info('finished running %s', ' '.join(self.cmd_args))
def startCmd(self, timeout):
LOGGER.debug('Attempting to start Thread to run %s', ' '.join(self.cmd_args))
self.start()
self.join(timeout)
if self.is_alive():
LOGGER.debug('Terminating process running %s', ' '.join(self.cmd_args))
self.process.terminate()
self.join()
time.sleep(1)
return
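# Added usage sketch (illustration only; the command and log path are
# assumptions): RunCmd tees a subprocess's output into a log file and kills
# the process when the timeout expires, e.g.
#   RunCmd(['make', '-j4'], './benchmark/make.log').startCmd(timeout=60 * 10)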
log_loc = './benchmark'
LOGGER = setup_logging(log_loc)
class Network(object):
def __init__(self, name, img_size, batch_size):
self.name = name
self.img_size = img_size
self.batch_size = batch_size
self.gpu_speedup = collections.OrderedDict()
def parse_args():
class NetworkArgumentAction(argparse.Action):
def validate(self, attrs):
args = attrs.split(':')
if len(args) != 3 or isinstance(args[0], str) == False:
print('expected network attributes in format network_name:batch_size:image_size \
\nThe network_name is a valid model defined as network_name.py in the image-classification/symbol folder.')
sys.exit(1)
try:
#check if the network exists
importlib.import_module('symbols.'+ args[0])
batch_size = int(args[1])
img_size = int(args[2])
return Network(name=args[0], batch_size=batch_size, img_size=img_size)
except Exception as e:
print('expected network attributes in format network_name:batch_size:image_size \
\nThe network_name is a valid model defined as network_name.py in the image-classification/symbol folder.')
print(e)
sys.exit(1)
def __init__(self, *args, **kw):
kw['nargs'] = '+'
argparse.Action.__init__(self, *args, **kw)
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, list) == True:
setattr(namespace, self.dest, map(self.validate, values))
else:
setattr(namespace, self.dest, self.validate(values))
parser = argparse.ArgumentParser(description='Run Benchmark on various imagenet networks using train_imagenent.py')
parser.add_argument('--networks', dest='networks', nargs= '+', type=str, help= 'one or more networks in the format network_name:batch_size:image_size \
\nThe network_name is a valid model defined as network_name.py in the image-classification/symbol folder.',action=NetworkArgumentAction)
parser.add_argument('--worker_file', type=str, help='file that contains a list of worker hostnames or list of worker ip addresses that can be sshed without a password.',required=True)
parser.add_argument('--worker_count', type=int, help='number of workers to run benchmark on.', required=True)
parser.add_argument('--gpu_count', type=int, help='number of gpus on each worker to use.', required=True)
args = parser.parse_args()
return args
def series(max_count):
i=1
s=[]
while i <= max_count:
s.append(i)
i=i*2
if s[-1] < max_count:
s.append(max_count)
return s
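# Added illustration (not in the original): series() yields powers of two up
# to max_count and always ends with max_count itself, e.g.
#   series(8) -> [1, 2, 4, 8]
#   series(6) -> [1, 2, 4, 6]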
'''
Choose the middle iteration to get the images processed per sec
'''
def images_processed(log_loc):
f=open(log_loc)
img_per_sec = re.findall("(?:Batch\s+\[30\]\\\\tSpeed:\s+)(\d+\.\d+)(?:\s+)", str(f.readlines()))
f.close()
img_per_sec = map(float, img_per_sec)
total_img_per_sec = sum(img_per_sec)
return total_img_per_sec
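# Added illustration (assumed log format, based on the regex above): because
# the whole file is stringified with str(f.readlines()), a line such as
#   INFO:root:Epoch[0] Batch [30]\tSpeed: 1024.78 samples/sec
# appears with a literal backslash-t, which is what the pattern matches; the
# captured per-process speeds at batch 30 are then summed.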
def generate_hosts_file(num_nodes, workers_file, args_workers_file):
f = open(workers_file, 'w')
output = subprocess.check_output(['head', '-n', str(num_nodes), args_workers_file])
f.write(output)
f.close()
return
def stop_old_processes(hosts_file):
stop_args = ['python', '../../tools/kill-mxnet.py', hosts_file]
stop_args_str = ' '.join(stop_args)
LOGGER.info('killing old remote processes\n %s', stop_args_str)
stop = subprocess.check_output(stop_args, stderr=subprocess.STDOUT)
LOGGER.debug(stop)
time.sleep(1)
def run_imagenet(kv_store, data_shape, batch_size, num_gpus, num_nodes, network, args_workers_file):
imagenet_args=['python', 'train_imagenet.py', '--gpus', ','.join(str(i) for i in range(num_gpus)), \
'--network', network, '--batch-size', str(batch_size * num_gpus), \
'--image-shape', '3,' + str(data_shape) + ',' + str(data_shape), '--num-epochs', '1' ,'--kv-store', kv_store, '--benchmark', '1', '--disp-batches', '10']
log = log_loc + '/' + network + '_' + str(num_nodes*num_gpus) + '_log'
hosts = log_loc + '/' + network + '_' + str(num_nodes*num_gpus) + '_workers'
generate_hosts_file(num_nodes, hosts, args_workers_file)
stop_old_processes(hosts)
launch_args = ['../../tools/launch.py', '-n', str(num_nodes), '-s', str(num_nodes*2), '-H', hosts, ' '.join(imagenet_args) ]
#use train_imagenet when running on a single node
if kv_store == 'device':
imagenet = RunCmd(imagenet_args, log)
imagenet.startCmd(timeout = 60 * 10)
else:
launch = RunCmd(launch_args, log)
launch.startCmd(timeout = 60 * 10)
stop_old_processes(hosts)
img_per_sec = images_processed(log)
LOGGER.info('network: %s, num_gpus: %d, image/sec: %f', network, num_gpus*num_nodes, img_per_sec)
return img_per_sec
def plot_graph(args):
speedup_chart = pygal.Line(x_title ='gpus',y_title ='speedup', logarithmic=True)
speedup_chart.x_labels = map(str, series(args.worker_count * args.gpu_count))
speedup_chart.add('ideal speedup', series(args.worker_count * args.gpu_count))
for net in args.networks:
image_single_gpu = net.gpu_speedup[1] if 1 in net.gpu_speedup and net.gpu_speedup[1] else 1
y_values = [ each/image_single_gpu for each in net.gpu_speedup.values() ]
LOGGER.info('%s: image_single_gpu:%.2f' %(net.name, image_single_gpu))
LOGGER.debug('network:%s, y_values: %s' % (net.name, ' '.join(map(str, y_values))))
speedup_chart.add(net.name , y_values \
, formatter= lambda y_val, img = copy.deepcopy(image_single_gpu), batch_size = copy.deepcopy(net.batch_size): 'speedup:%.2f, img/sec:%.2f, batch/gpu:%d' % \
(0 if y_val is None else y_val, 0 if y_val is None else y_val * img, batch_size))
speedup_chart.render_to_file(log_loc + '/speedup.svg')
def write_csv(log_loc, args):
for net in args.networks:
with open(log_loc + '/' + net.name + '.csv', 'wb') as f:
w = csv.writer(f)
w.writerow(['num_gpus', 'img_processed_per_sec'])
w.writerows(net.gpu_speedup.items())
def main():
args = parse_args()
for net in args.networks:
#use kv_store='device' when running on 1 node
for num_gpus in series(args.gpu_count):
imgs_per_sec = run_imagenet(kv_store='device', data_shape=net.img_size, batch_size=net.batch_size, \
num_gpus=num_gpus, num_nodes=1, network=net.name, args_workers_file=args.worker_file)
net.gpu_speedup[num_gpus] = imgs_per_sec
for num_nodes in series(args.worker_count)[1::]:
imgs_per_sec = run_imagenet(kv_store='dist_sync_device', data_shape=net.img_size, batch_size=net.batch_size, \
num_gpus=args.gpu_count, num_nodes=num_nodes, network=net.name, args_workers_file=args.worker_file)
net.gpu_speedup[num_nodes * args.gpu_count] = imgs_per_sec
LOGGER.info('Network: %s (num_gpus, images_processed): %s', net.name, ','.join(map(str, net.gpu_speedup.items())))
write_csv(log_loc, args)
plot_graph(args)
if __name__ == '__main__':
main()
| apache-2.0 |
kobolabs/calibre | src/calibre/gui2/store/search/results_view.py | 9 | 1392 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from functools import partial
from PyQt4.Qt import (pyqtSignal, QMenu, QTreeView)
from calibre.gui2.metadata.single_download import RichTextDelegate
from calibre.gui2.store.search.models import Matches
class ResultsView(QTreeView):
download_requested = pyqtSignal(object)
open_requested = pyqtSignal(object)
def __init__(self, *args):
QTreeView.__init__(self,*args)
self._model = Matches()
self.setModel(self._model)
self.rt_delegate = RichTextDelegate(self)
for i in self._model.HTML_COLS:
self.setItemDelegateForColumn(i, self.rt_delegate)
def contextMenuEvent(self, event):
index = self.indexAt(event.pos())
if not index.isValid():
return
result = self.model().get_result(index)
menu = QMenu()
da = menu.addAction(_('Download...'), partial(self.download_requested.emit, result))
if not result.downloads:
da.setEnabled(False)
menu.addSeparator()
menu.addAction(_('Goto in store...'), partial(self.open_requested.emit, result))
menu.exec_(event.globalPos())
| gpl-3.0 |
atosorigin/ansible | lib/ansible/plugins/inventory/__init__.py | 2 | 20585 | # (c) 2017, Red Hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import hashlib
import os
import string
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.inventory.group import to_safe_group_name as original_safe
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import AnsiblePlugin
from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars, load_extra_vars
display = Display()
# Helper methods
def to_safe_group_name(name):
# placeholder for backwards compat
return original_safe(name, force=True, silent=True)
def detect_range(line=None):
'''
A helper function that checks a given host line to see if it contains
a range pattern described in the docstring above.
Returns True if the given line contains a pattern, else False.
'''
return '[' in line
def expand_hostname_range(line=None):
'''
A helper function that expands a given line that contains a pattern
specified in top docstring, and returns a list that consists of the
expanded version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#hosts-and-groups
'''
all_hosts = []
if line:
# A hostname such as db[1:6]-node is considered to consist of
# three parts:
# head: 'db'
# nrange: [1:6]; range() is a built-in. Can't use the name
# tail: '-node'
# Add support for multiple ranges in a host so:
# db[01:10:3]node-[01:10]
# - to do this we split off at the first [...] set, getting the list
# of hosts and then repeat until none left.
# - also add an optional third parameter which contains the step. (Default: 1)
# so range can be [01:10:2] -> 01 03 05 07 09
(head, nrange, tail) = line.replace('[', '|', 1).replace(']', '|', 1).split('|')
bounds = nrange.split(":")
if len(bounds) != 2 and len(bounds) != 3:
raise AnsibleError("host range must be begin:end or begin:end:step")
beg = bounds[0]
end = bounds[1]
if len(bounds) == 2:
step = 1
else:
step = bounds[2]
if not beg:
beg = "0"
if not end:
raise AnsibleError("host range must specify end value")
if beg[0] == '0' and len(beg) > 1:
rlen = len(beg) # range length formatting hint
if rlen != len(end):
raise AnsibleError("host range must specify equal-length begin and end formats")
def fill(x):
return str(x).zfill(rlen) # range sequence
else:
fill = str
try:
i_beg = string.ascii_letters.index(beg)
i_end = string.ascii_letters.index(end)
if i_beg > i_end:
raise AnsibleError("host range must have begin <= end")
seq = list(string.ascii_letters[i_beg:i_end + 1:int(step)])
except ValueError: # not an alpha range
seq = range(int(beg), int(end) + 1, int(step))
for rseq in seq:
hname = ''.join((head, fill(rseq), tail))
if detect_range(hname):
all_hosts.extend(expand_hostname_range(hname))
else:
all_hosts.append(hname)
return all_hosts
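# Added illustration (not part of the original module): applying the rules
# documented above,
#   expand_hostname_range('db[1:3]-node')  -> ['db1-node', 'db2-node', 'db3-node']
#   expand_hostname_range('host[01:05:2]') -> ['host01', 'host03', 'host05']
#   expand_hostname_range('node[a:c]')     -> ['nodea', 'nodeb', 'nodec']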
def get_cache_plugin(plugin_name, **kwargs):
try:
cache = CacheObject(plugin_name, **kwargs)
except AnsibleError as e:
if 'fact_caching_connection' in to_native(e):
raise AnsibleError("error, '%s' inventory cache plugin requires one of the following to be set "
"to a writeable directory path:\nansible.cfg:\n[default]: fact_caching_connection,\n"
"[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
"ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
else:
raise e
if plugin_name != 'memory' and kwargs and not getattr(cache._plugin, '_options', None):
raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
'from ansible.constants.'.format(plugin_name))
return cache
class BaseInventoryPlugin(AnsiblePlugin):
""" Parses an Inventory Source"""
TYPE = 'generator'
_sanitize_group_name = staticmethod(to_safe_group_name)
def __init__(self):
super(BaseInventoryPlugin, self).__init__()
self._options = {}
self.inventory = None
self.display = display
self._vars = {}
def parse(self, inventory, loader, path, cache=True):
''' Populates inventory from the given data. Raises an error on any parse failure
:arg inventory: a copy of the previously accumulated inventory data,
to be updated with any new data this plugin provides.
The inventory can be empty if no other source/plugin ran successfully.
:arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
it also has Vault support to automatically decrypt files.
:arg path: the string that represents the 'inventory source',
normally a path to a configuration file for this inventory,
but it can also be a raw string for this plugin to consume
:arg cache: a boolean that indicates if the plugin should use the cache or not
you can ignore if this plugin does not implement caching.
'''
self.loader = loader
self.inventory = inventory
self.templar = Templar(loader=loader)
self._vars = load_extra_vars(loader)
def verify_file(self, path):
''' Verify if file is usable by this plugin, base does minimal accessibility check
:arg path: a string that was passed as an inventory source,
it normally is a path to a config file, but this is not a requirement,
it can also be parsed itself as the inventory data to process.
So only call this base class if you expect it to be a file.
'''
valid = False
b_path = to_bytes(path, errors='surrogate_or_strict')
if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
valid = True
else:
self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
return valid
def _populate_host_vars(self, hosts, variables, group=None, port=None):
if not isinstance(variables, Mapping):
raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
for host in hosts:
self.inventory.add_host(host, group=group, port=port)
for k in variables:
self.inventory.set_variable(host, k, variables[k])
def _read_config_data(self, path):
''' validate config and set options as appropriate
:arg path: path to common yaml format config file for this plugin
'''
config = {}
try:
# avoid loader cache so meta: refresh_inventory can pick up config changes
# if we read more than once, fs cache should be good enough
config = self.loader.load_from_file(path, cache=False)
except Exception as e:
raise AnsibleParserError(to_native(e))
# a plugin can be loaded via many different names with redirection- if so, we want to accept any of those names
valid_names = getattr(self, '_redirected_names') or [self.NAME]
if not config:
# no data
raise AnsibleParserError("%s is empty" % (to_native(path)))
elif config.get('plugin') not in valid_names:
# this is not my config file
raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
elif not isinstance(config, Mapping):
# configs are dictionaries
raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
self.set_options(direct=config, var_options=self._vars)
if 'cache' in self._options and self.get_option('cache'):
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
return config
def _consume_options(self, data):
''' update existing options from alternate configuration sources not normally used by Ansible.
Many API libraries already have their own configuration sources; this allows the plugin author to leverage them.
:arg data: key/value pairs that correspond to configuration options for this plugin
'''
for k in self._options:
if k in data:
self._options[k] = data.pop(k)
def _expand_hostpattern(self, hostpattern):
'''
Takes a single host pattern and returns a list of hostnames and an
optional port number that applies to all of them.
'''
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except Exception:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
hostnames = expand_hostname_range(pattern)
else:
hostnames = [pattern]
return (hostnames, port)
class BaseFileInventoryPlugin(BaseInventoryPlugin):
""" Parses a File based Inventory Source"""
TYPE = 'storage'
def __init__(self):
super(BaseFileInventoryPlugin, self).__init__()
class DeprecatedCache(object):
def __init__(self, real_cacheable):
self.real_cacheable = real_cacheable
def get(self, key):
display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
'When expecting a KeyError, use self._cache[key] instead of using self.cache.get(key). '
'self._cache is a dictionary and will return a default value instead of raising a KeyError '
'when the key does not exist', version='2.12', collection_name='ansible.builtin')
return self.real_cacheable._cache[key]
def set(self, key, value):
display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
'To set the self._cache dictionary, use self._cache[key] = value instead of self.cache.set(key, value). '
'To force update the underlying cache plugin with the contents of self._cache before parse() is complete, '
'call self.set_cache_plugin and it will use the self._cache dictionary to update the cache plugin',
version='2.12', collection_name='ansible.builtin')
self.real_cacheable._cache[key] = value
self.real_cacheable.set_cache_plugin()
def __getattr__(self, name):
display.deprecated('InventoryModule should utilize self._cache instead of self.cache',
version='2.12', collection_name='ansible.builtin')
return self.real_cacheable._cache.__getattribute__(name)
class Cacheable(object):
_cache = CacheObject()
@property
def cache(self):
return DeprecatedCache(self)
def load_cache_plugin(self):
plugin_name = self.get_option('cache_plugin')
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(plugin_name, **cache_options)
def get_cache_key(self, path):
return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))
def _get_cache_prefix(self, path):
''' create predictable unique prefix for plugin/inventory '''
m = hashlib.sha1()
m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
d1 = m.hexdigest()
n = hashlib.sha1()
n.update(to_bytes(path, errors='surrogate_or_strict'))
d2 = n.hexdigest()
return 's_'.join([d1[:5], d2[:5]])
def clear_cache(self):
self._cache.flush()
def update_cache_if_changed(self):
self._cache.update_cache_if_changed()
def set_cache_plugin(self):
self._cache.set_cache()
class Constructable(object):
def _compose(self, template, variables):
''' helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars'''
t = self.templar
try:
use_extra = self.get_option('use_extra_vars')
except Exception:
use_extra = False
if use_extra:
t.available_variables = combine_vars(variables, self._vars)
else:
t.available_variables = variables
return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string), disable_lookups=True)
def _set_composite_vars(self, compose, variables, host, strict=False):
''' loops over compose entries to create vars for hosts '''
if compose and isinstance(compose, dict):
for varname in compose:
try:
composite = self._compose(compose[varname], variables)
except Exception as e:
if strict:
raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
continue
self.inventory.set_variable(host, varname, composite)
def _add_host_to_composed_groups(self, groups, variables, host, strict=False):
''' helper to create complex groups for plugins based on jinja2 conditionals, hosts that meet the conditional are added to group'''
# process each 'group entry'
if groups and isinstance(groups, dict):
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
self.templar.available_variables = variables
for group_name in groups:
conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
group_name = self._sanitize_group_name(group_name)
try:
result = boolean(self.templar.template(conditional))
except Exception as e:
if strict:
raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
continue
if result:
# ensure group exists, use sanitized name
group_name = self.inventory.add_group(group_name)
# add host to group
self.inventory.add_child(group_name, host)
def _add_host_to_keyed_groups(self, keys, variables, host, strict=False):
''' helper to create groups for plugins based on variable values and add the corresponding hosts to it'''
if keys and isinstance(keys, list):
for keyed in keys:
if keyed and isinstance(keyed, dict):
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
try:
key = self._compose(keyed.get('key'), variables)
except Exception as e:
if strict:
raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
continue
if key:
prefix = keyed.get('prefix', '')
sep = keyed.get('separator', '_')
raw_parent_name = keyed.get('parent_group', None)
if raw_parent_name:
try:
raw_parent_name = self.templar.template(raw_parent_name)
except AnsibleError as e:
if strict:
raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
continue
new_raw_group_names = []
if isinstance(key, string_types):
new_raw_group_names.append(key)
elif isinstance(key, list):
for name in key:
new_raw_group_names.append(name)
elif isinstance(key, Mapping):
for (gname, gval) in key.items():
name = '%s%s%s' % (gname, sep, gval)
new_raw_group_names.append(name)
else:
raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
for bare_name in new_raw_group_names:
if prefix == '' and self.get_option('leading_separator') is False:
sep = ''
gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
result_gname = self.inventory.add_group(gname)
self.inventory.add_host(host, result_gname)
if raw_parent_name:
parent_name = self._sanitize_group_name(raw_parent_name)
self.inventory.add_group(parent_name)
self.inventory.add_child(parent_name, result_gname)
else:
# exclude case of empty list and dictionary, because these are valid constructions
# simply no groups need to be constructed, but are still falsy
if strict and key not in ([], {}):
raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
else:
raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
| gpl-3.0 |
SEL-Columbia/commcare-hq | corehq/apps/hqwebapp/templatetags/hq_shared_tags.py | 1 | 6328 | from datetime import datetime, timedelta
import json
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import json_handler
import corehq.apps.style.utils as style_utils
register = template.Library()
@register.filter
def JSON(obj):
return mark_safe(json.dumps(obj, default=json_handler))
@register.filter
def to_javascript_string(obj):
# seriously: http://stackoverflow.com/a/1068548/8207
return mark_safe(JSON(obj).replace('</script>', '<" + "/script>'))
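# Added illustration: the replace above is assumed to exist so that a literal
# "</script>" inside serialized data cannot terminate an inline <script> tag:
#   to_javascript_string({'html': '</script>'})
#   -> '{"html": "<" + "/script>"}'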
@register.filter
def BOOL(obj):
try:
obj = obj.to_json()
except AttributeError:
pass
return 'true' if obj else 'false'
@register.filter
def dict_lookup(dict, key):
'''Get an item from a dictionary.'''
return dict.get(key)
@register.filter
def array_lookup(array, index):
'''Get an item from an array.'''
if index < len(array):
return array[index]
@register.simple_tag
def dict_as_query_string(dict, prefix=""):
'''Convert a dictionary to a query string, minus the initial ?'''
return "&".join(["%s%s=%s" % (prefix, key, value) for key, value in dict.items()])
@register.filter
def add_days(date, days=1):
'''Return a date with some days added'''
span = timedelta(days=days)
try:
return date + span
except:
return datetime.strptime(date,'%m/%d/%Y').date() + span
@register.filter
def concat(str1, str2):
"""Concatenate two strings"""
return "%s%s" % (str1, str2)
try:
from resource_versions import resource_versions
except (ImportError, SyntaxError):
resource_versions = {}
@register.simple_tag
def static(url):
resource_url = url
version = resource_versions.get(resource_url)
url = settings.STATIC_URL + url
is_less = url.endswith('.less')
if version and not is_less:
url += "?version=%s" % version
return url
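# Added illustration (values are assumptions): with STATIC_URL = '/static/'
# and resource_versions = {'hqwebapp/js/main.js': 'abc123'},
#   {% static 'hqwebapp/js/main.js' %} -> /static/hqwebapp/js/main.js?version=abc123
# .less files are returned without the version query string, since they are
# handled by the compressor pipeline instead.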
@register.simple_tag
def cachebuster(url):
return resource_versions.get(url, "")
@register.simple_tag
def new_static(url, **kwargs):
"""Caching must explicitly be defined on tags with any of the extensions
that could be compressed by django compressor. The static tag above will
eventually turn into this tag.
:param url:
:param kwargs:
:return:
"""
can_be_compressed = url.endswith(('.less', '.css', '.js'))
use_cache = kwargs.pop('cache', False)
use_versions = not can_be_compressed or use_cache
resource_url = url
url = settings.STATIC_URL + url
if use_versions:
version = resource_versions.get(resource_url)
if version:
url += "?version=%s" % version
return url
@register.simple_tag
def domains_for_user(request, selected_domain=None):
"""
Generate pulldown menu for domains.
Cache the entire string alongside the couch_user's doc_id so it can be invalidated when
the user doc updates via save.
"""
domain_list = []
if selected_domain != 'public':
cached_domains = cache_core.get_cached_prop(request.couch_user.get_id, 'domain_list')
if cached_domains:
domain_list = [Domain.wrap(x) for x in cached_domains]
else:
try:
domain_list = Domain.active_for_user(request.couch_user)
cache_core.cache_doc_prop(request.couch_user.get_id, 'domain_list', [x.to_json() for x in domain_list])
except Exception:
if settings.DEBUG:
raise
else:
domain_list = Domain.active_for_user(request.user)
notify_exception(request)
domain_list = [dict(
url=reverse('domain_homepage', args=[d.name]),
name=d.long_display_name()
) for d in domain_list]
context = {
'is_public': selected_domain == 'public',
'domain_list': domain_list,
'current_domain': selected_domain,
}
template = {
style_utils.BOOTSTRAP_2: 'hqwebapp/partials/domain_list_dropdown.html',
style_utils.BOOTSTRAP_3: 'style/includes/domain_list_dropdown.html',
}[style_utils.bootstrap_version(request)]
return mark_safe(render_to_string(template, context))
@register.simple_tag
def list_my_orgs(request):
org_list = request.couch_user.get_organizations()
lst = list()
lst.append('<ul class="nav nav-pills nav-stacked">')
for org in org_list:
default_url = reverse("orgs_landing", args=[org.name])
lst.append('<li><a href="%s">%s</a></li>' % (default_url, org.title))
lst.append('</ul>')
return "".join(lst)
@register.simple_tag
def commcare_user():
return _(settings.COMMCARE_USER_TERM)
@register.simple_tag
def hq_web_user():
return _(settings.WEB_USER_TERM)
@register.filter
def mod(value, arg):
return value % arg
# This is taken from https://code.djangoproject.com/ticket/15583
@register.filter(name='sort')
def listsort(value):
if isinstance(value, dict):
new_dict = SortedDict()
key_list = value.keys()
key_list.sort()
for key in key_list:
new_dict[key] = value[key]
return new_dict
elif isinstance(value, list):
new_list = list(value)
new_list.sort()
return new_list
else:
return value
listsort.is_safe = True
@register.filter(name='getattr')
def get_attribute(obj, arg):
""" Get attribute from obj
Usage: {{ couch_user|getattr:"full_name" }}
"""
return getattr(obj, arg, None)
@register.filter
def pretty_doc_info(doc_info):
return render_to_string('hqwebapp/pretty_doc_info.html', {
'doc_info': doc_info,
})
@register.filter
def toggle_enabled(request, toggle_name):
import corehq.toggles
toggle = getattr(corehq.toggles, toggle_name)
return (
(hasattr(request, 'user') and toggle.enabled(request.user.username)) or
(hasattr(request, 'domain') and toggle.enabled(request.domain))
)
| bsd-3-clause |
oxtopus/nupic | nupic/data/CategoryFilter.py | 17 | 2279 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
'''
A category filter can be applied to any categorical field
The basic operation is assumed to be: OR
In the final version users may input Boolean algebra to define this
behaviour
If your field is 'animals'
and your values are
1 - dogs
2 - cat
3 - mouse
4 - giraffe
5 - hippo
A category filter for dog,giraffe
would return records 1 and 4
Note that we're using a substring search so that dogs ~= dog
We can't know all the categories beforehand, so we present the user with a
freeform input box.
'''
class CategoryFilter(object):
def __init__(self, filterDict):
"""
TODO describe filterDict schema
"""
self.filterDict = filterDict
def match(self, record):
'''
Returns True if the record matches any of the provided filters
'''
for field, meta in self.filterDict.iteritems():
index = meta['index']
categories = meta['categories']
for category in categories:
# Record might be blank, handle this
if not record:
continue
if record[index].find(category) != -1:
'''
This field contains the string we're searching for
so we'll keep the records
'''
return True
# None of the categories were found in this record
return False
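# Added usage sketch (illustration only, not part of the original module):
# following the docstring's animal example, with the category field at index 0:
#
#   f = CategoryFilter({'animals': {'index': 0, 'categories': ['dog', 'giraffe']}})
#   f.match(['dogs', 'x'])     # True  (substring match: 'dog' is found in 'dogs')
#   f.match(['giraffe', 'y'])  # True
#   f.match(['cat', 'z'])      # False (none of the filter categories match)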
| gpl-3.0 |
abhattad4/Digi-Menu | build/lib.linux-x86_64-2.7/django/contrib/gis/db/backends/base/operations.py | 100 | 3924 | class BaseSpatialOperations(object):
"""
This module holds the base `BaseSpatialBackend` object, which is
instantiated by each spatial database backend with the features
it has.
"""
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
disallowed_aggregates = ()
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box, srid):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box, srid):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value, compiler):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a get_geom_placeholder() method')
def check_expression_support(self, expression):
if isinstance(expression, self.disallowed_aggregates):
raise NotImplementedError(
"%s spatial aggregation is not supported by this database backend." % expression.name
)
super(BaseSpatialOperations, self).check_expression_support(expression)
def spatial_aggregate_name(self, agg_name):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geometry_columns() method')
def spatial_ref_sys(self):
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_ref_sys() method')
| bsd-3-clause |
Qalthos/ansible | docs/bin/dump_config.py | 52 | 2360 | #!/usr/bin/env python
import optparse
import os
import sys
import yaml
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils._text import to_bytes
from ansible.utils._build_helpers import update_file_if_different
DEFAULT_TEMPLATE_FILE = 'config.rst.j2'
def generate_parser():
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options]',
description='Generate module documentation from metadata',
)
p.add_option("-t", "--template-file", action="store", dest="template_file", default=DEFAULT_TEMPLATE_FILE, help="directory containing Jinja2 templates")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files")
p.add_option("-d", "--docs-source", action="store", dest="docs", default=None, help="Source for attribute docs")
(options, args) = p.parse_args()
return p
def fix_description(config_options):
'''some descriptions are strings, some are lists. workaround it...'''
for config_key in config_options:
description = config_options[config_key].get('description', [])
if isinstance(description, list):
desc_list = description
else:
desc_list = [description]
config_options[config_key]['description'] = desc_list
return config_options
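# Added illustration (not in the original): fix_description() normalises
#   {'opt': {'description': 'one line'}} -> {'opt': {'description': ['one line']}}
# so the Jinja2 template can always iterate over a list of description lines.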
def main(args):
parser = generate_parser()
(options, args) = parser.parse_args()
output_dir = os.path.abspath(options.output_dir)
template_file_full_path = os.path.abspath(options.template_file)
template_file = os.path.basename(template_file_full_path)
template_dir = os.path.dirname(os.path.abspath(template_file_full_path))
if options.docs:
with open(options.docs) as f:
docs = yaml.safe_load(f)
else:
docs = {}
config_options = docs
config_options = fix_description(config_options)
env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True,)
template = env.get_template(template_file)
output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
temp_vars = {'config_options': config_options}
data = to_bytes(template.render(temp_vars))
update_file_if_different(output_name, data)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[:]))
| gpl-3.0 |
ASlave2Audio/Restaurant-App | mingw/bin/lib/warnings.py | 265 | 14044 | """Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
import types
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def warnpy3k(message, category=None, stacklevel=1):
"""Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option.
"""
if sys.py3kwarning:
if category is None:
category = DeprecationWarning
warn(message, category, stacklevel+1)
def _show_warning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
try:
file.write(formatwarning(message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
# Keep a working version around in case the deprecation of the old API is
# triggered.
showwarning = _show_warning
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
s += " %s\n" % line
return s
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=0):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, basestring), "message must be a string"
assert isinstance(category, (type, types.ClassType)), \
"category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, basestring), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
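# Added illustration (comment only, not in the stdlib source): a typical call
#   filterwarnings("ignore", message="deprecated", category=DeprecationWarning,
#                  module="mymodule")
# prepends (action, re.compile(message, re.I), category, re.compile(module), 0)
# to `filters`, so it takes precedence over later, more general entries.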
def simplefilter(action, category=Warning, lineno=0, append=0):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError, msg:
print >>sys.stderr, "Invalid -W option ignored:", msg
# Helper for _processoptions()
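# A -W option string has the form "action:message:category:module:lineno", with
# trailing fields optional. For example (illustrative): "error::DeprecationWarning"
# turns DeprecationWarnings into errors, and "ignore:::mymodule" silences all
# warnings issued from a module named "mymodule".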
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# Get context information
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals)
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
# Print message and context
showwarning(message, category, filename, lineno)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, match anything.
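# For example (illustrative), simplefilter("ignore", DeprecationWarning) inserts
#   ("ignore", None, DeprecationWarning, None, 0)
# at the front of this list.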
_warnings_defaults = False
try:
from _warnings import (filters, default_action, once_registry,
warn, warn_explicit)
defaultaction = default_action
onceregistry = once_registry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
# Don't silence DeprecationWarning if -3 or -Q was used.
if not sys.py3kwarning and not sys.flags.division_warning:
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
del _warnings_defaults
| mit |
onedata/helpers | test/integration/key_value_canonical_test_base.py | 1 | 10786 | """This module contains tests for KeyValue helpers with canonical paths
and blockSize set to 0."""
__author__ = "Bartek Kryza"
__copyright__ = """(C) 2019 ACK CYFRONET AGH,
This software is released under the MIT license cited in 'LICENSE.txt'."""
from test_common import *
from common_test_base import *
from posix_test_types import *
import stat
import pytest
THREAD_NUMBER = 8
BLOCK_SIZE = 1024
def to_python_list(listobjects_result):
r = [e for e in listobjects_result]
r.sort(key=lambda x: x[0])
return r
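# e.g. (illustrative; stat_a/stat_b are placeholder attribute values):
#   to_python_list([('/b', stat_b), ('/a', stat_a)]) == [('/a', stat_a), ('/b', stat_b)]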
def test_mknod_should_create_empty_file(helper, file_id, server):
data = ''
helper.mknod(file_id, 0664, maskToFlags(stat.S_IFREG))
helper.access(file_id)
assert helper.getattr(file_id).st_size == 0
def test_mknod_should_throw_eexist_error(helper, file_id, server):
flags = maskToFlags(stat.S_IFREG)
helper.mknod(file_id, 0664, flags)
with pytest.raises(RuntimeError) as excinfo:
helper.mknod(file_id, 0664, flags)
assert 'File exists' in str(excinfo.value)
def test_write_should_write_multiple_blocks(helper, file_id, server):
block_num = 20
seed = random_str(BLOCK_SIZE)
data = seed * block_num
assert helper.write(file_id, data, 0) == len(data)
assert helper.read(file_id, 0, len(data)) == data
def test_unlink_should_delete_data(helper, file_id, server):
data = random_str()
offset = random_int()
file_id2 = random_str()
assert helper.write(file_id, data, offset) == len(data)
helper.unlink(file_id, offset+len(data))
assert helper.write(file_id2, data, offset) == len(data)
helper.unlink('/'+file_id2, offset+len(data))
def test_unlink_should_delete_empty_file(helper, file_id, server):
data = random_str()
offset = random_int()
file_id2 = random_str()
helper.mknod(file_id, 0664, maskToFlags(stat.S_IFREG))
helper.unlink(file_id, 0)
with pytest.raises(RuntimeError) as excinfo:
helper.getattr(file_id)
assert 'Object not found' in str(excinfo.value)
def test_truncate_should_truncate_to_size(helper, file_id, server):
blocks_num = 10
size = blocks_num * BLOCK_SIZE
helper.mknod(file_id, 0654, maskToFlags(stat.S_IFREG))
helper.truncate(file_id, size, 0)
assert len(helper.read(file_id, 0, size + 1)) == len('\0' * size)
assert helper.read(file_id, 0, size + 1) == '\0' * size
def test_truncate_should_pad_block(helper, file_id, server):
data = random_str()
assert helper.write(file_id, data, BLOCK_SIZE) == len(data)
helper.truncate(file_id, BLOCK_SIZE, len(data)+BLOCK_SIZE)
assert helper.read(file_id, 0, BLOCK_SIZE + 1) == '\0' * BLOCK_SIZE
assert helper.write(file_id, data, BLOCK_SIZE) == len(data)
def test_truncate_should_truncate_to_zero(helper, file_id, server):
blocks_num = 10
data = random_str(blocks_num * BLOCK_SIZE)
assert helper.write(file_id, data, 0) == len(data)
helper.truncate(file_id, 0, len(data))
assert helper.getattr(file_id).st_size == 0
def test_write_should_overwrite_multiple_blocks_part(helper, file_id):
block_num = 10
updates_num = 100
seed = random_str(BLOCK_SIZE)
data = seed * block_num
assert helper.write(file_id, data, 0) == len(data)
for _ in range(updates_num):
offset = random_int(lower_bound=0, upper_bound=len(data))
block = random_str(BLOCK_SIZE)
data = data[:offset] + block + data[offset + len(block):]
        assert helper.write(file_id, block, offset) == len(block)
assert helper.read(file_id, 0, len(data)) == data
def test_read_should_read_multi_block_data_with_holes(helper, file_id):
data = random_str(10)
empty_block = '\0' * BLOCK_SIZE
block_num = 10
assert helper.write(file_id, data, 0) == len(data)
assert helper.write(file_id, data, block_num * BLOCK_SIZE) == len(data)
data = data + empty_block[len(data):] + (block_num - 1) * empty_block + data
assert helper.read(file_id, 0, len(data)) == data
def test_read_should_read_large_file_from_offset(helper, file_id):
data = 'A'*1000 + 'B'*1000 + 'C'*1000
assert helper.write(file_id, data, 0) == len(data)
assert helper.read(file_id, 0, 1000) == 'A'*1000
assert helper.read(file_id, 1000, 1000) == 'B'*1000
assert helper.read(file_id, 2000, 1000) == 'C'*1000
def test_write_should_modify_inner_object_on_canonical_storage(helper):
# originalObject: [----------------------------------------]
# buf : [----------]
file_id = random_str()
original_object = 'A'*10 + 'B'*10 + 'C'*10
helper.write(file_id, original_object, 0)
helper.write(file_id, 'X'*10, 10)
new_object = 'A'*10 + 'X'*10 + 'C'*10
assert helper.read(file_id, 0, 1000) == new_object
def test_write_should_modify_non_overlapping_object_on_canonical_storage(helper):
# originalObject: [-------]
# buf : [------------------]
file_id = random_str()
original_object = 'A'*10
helper.write(file_id, original_object, 0)
helper.write(file_id, 'X'*10, 20)
new_object = 'A'*10 + '\0'*10 + 'X'*10
assert helper.read(file_id, 0, 1000) == new_object
def test_write_should_modify_overlapping_object_on_canonical_storage(helper):
# originalObject: [-------------------------]
# buf : [------------------]
file_id = random_str()
original_object = 'A'*10 + 'B'*10 + 'C'*10
helper.write(file_id, original_object, 0)
helper.write(file_id, 'X'*10, 25)
new_object = 'A'*10 + 'B'*10 + 'C'*5 + 'X'*10
assert helper.read(file_id, 0, 1000) == new_object
def test_write_should_fill_new_files_with_non_zero_offset(helper):
# originalObject: []
# buf : [------------------]
file_id = random_str()
original_object = 'A'*10
helper.write(file_id, original_object, 10)
new_object = '\0'*10 + 'A'*10
assert helper.read(file_id, 0, 1000) == new_object
def test_getattr_should_return_default_permissions(helper):
dir_id = random_str()
data = random_str()
default_dir_mode = 0o775
default_file_mode = 0o664
file_id = random_str()
file_id2 = random_str()
try:
helper.write(dir_id+'/'+file_id, data, 0)
helper.write('/'+dir_id+'/'+file_id2, data, 0)
except:
pytest.fail("Couldn't create directory: %s"%(dir_id))
assert oct(helper.getattr('').st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr('/').st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr(dir_id+'/'+file_id).st_mode & 0o777) == oct(default_file_mode)
assert oct(helper.getattr('/'+dir_id+'/'+file_id).st_mode & 0o777) == oct(default_file_mode)
assert oct(helper.getattr(dir_id).st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr(dir_id+'/').st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr('/'+dir_id).st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr('/'+dir_id+'/').st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr(dir_id+'/'+file_id2).st_mode & 0o777) == oct(default_file_mode)
assert oct(helper.getattr('/'+dir_id+'/'+file_id2).st_mode & 0o777) == oct(default_file_mode)
assert oct(helper.getattr(dir_id).st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr(dir_id+'/').st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr('/'+dir_id).st_mode & 0o777) == oct(default_dir_mode)
assert oct(helper.getattr('/'+dir_id+'/').st_mode & 0o777) == oct(default_dir_mode)
def test_listobjects_should_handle_subdirectories(helper):
dir1 = 'dir1'
dir2 = 'dir2'
dir3 = 'dir3'
files = ['file{}.txt'.format(i,) for i in range(1, 6)]
result = []
    # Listing a prefix that contains no objects yet should return an empty result
objects = to_python_list(helper.listobjects('dir1', '', 100))
assert objects == []
for file in files:
helper.write('/'+dir1+'/'+dir2+'/'+file, random_str(), 0)
result.append('/'+dir1+'/'+dir2+'/'+file)
helper.write('/'+dir1+'/'+dir3+'/'+file, random_str(), 0)
result.append('/'+dir1+'/'+dir3+'/'+file)
helper.write('/'+dir1+'/'+file, random_str(), 0)
result.append('/'+dir1+'/'+file)
# List objects from root should include leading '/'
# objects is a list of tuples (path, stat)
objects = to_python_list(helper.listobjects('', '', 100))
assert len(objects) > 0
for o in objects:
assert o[0] == '/'
objects = to_python_list(helper.listobjects('/', '', 100))
for o in objects:
assert o[0] == '/'
# List only objects starting with 'dir1/dir2' prefix
objects = to_python_list(helper.listobjects('dir1/dir2', '', 100))
assert objects == ['/dir1/dir2/file{}.txt'.format(i,) for i in range(1, 6)]
# Check that the same results are returned for paths with and without
# forward slash
assert to_python_list(helper.listobjects('/dir1/dir2', '', 100)) \
== to_python_list(helper.listobjects('dir1/dir2', '', 100))
# Make sure that all results are returned for single query
objects = to_python_list(helper.listobjects('dir1', '', 100))
assert set(objects) == set(result)
# Make sure that all results are returned in chunks
objects = []
marker = ""
chunk_size = 3
while True:
chunk = to_python_list(helper.listobjects('dir1', marker, chunk_size))
if len(chunk) < chunk_size:
break
marker = chunk[-1]
objects.extend(chunk)
def test_listobjects_should_handle_multiple_subdirs_with_offset(helper):
test_dir = random_str()
contents = []
dirs = ['/'+test_dir+'/'+'dir{}'.format(i,) for i in range(100)]
files = ['/'+test_dir+'/'+'file{}.txt'.format(i,) for i in range(100)]
step = 7
for d in dirs:
helper.write(d+'/file.txt', random_str(), 0)
contents.append(d+'/file.txt')
for f in files:
helper.write(f, random_str(), 0)
contents.append(f)
res = []
i = 0
while i < len(contents):
marker = ''
if res:
marker = res[-1]
res.extend(to_python_list(helper.listobjects(test_dir, marker, step)))
i += step
assert len(contents) == len(res)
assert set(contents) == set(res)
def test_listobjects_should_not_return_root_dir(helper):
test_dir = random_str()
contents = []
helper.write('/{}/dir1/file.txt'.format(test_dir), random_str(), 0)
helper.write('/{}/dir2/file.txt'.format(test_dir), random_str(), 0)
helper.write('/{}/file.txt'.format(test_dir), random_str(), 0)
res = to_python_list(helper.listobjects('/{}'.format(test_dir), "", 100))
assert len(res) == 3
| mit |
willemneal/Docky | lib/unidecode/x028.py | 253 | 5069 | data = (
' ', # 0x00
'a', # 0x01
'1', # 0x02
'b', # 0x03
'\'', # 0x04
'k', # 0x05
'2', # 0x06
'l', # 0x07
'@', # 0x08
'c', # 0x09
'i', # 0x0a
'f', # 0x0b
'/', # 0x0c
'm', # 0x0d
's', # 0x0e
'p', # 0x0f
'"', # 0x10
'e', # 0x11
'3', # 0x12
'h', # 0x13
'9', # 0x14
'o', # 0x15
'6', # 0x16
'r', # 0x17
'^', # 0x18
'd', # 0x19
'j', # 0x1a
'g', # 0x1b
'>', # 0x1c
'n', # 0x1d
't', # 0x1e
'q', # 0x1f
',', # 0x20
'*', # 0x21
'5', # 0x22
'<', # 0x23
'-', # 0x24
'u', # 0x25
'8', # 0x26
'v', # 0x27
'.', # 0x28
'%', # 0x29
'[', # 0x2a
'$', # 0x2b
'+', # 0x2c
'x', # 0x2d
'!', # 0x2e
'&', # 0x2f
';', # 0x30
':', # 0x31
'4', # 0x32
'\\', # 0x33
'0', # 0x34
'z', # 0x35
'7', # 0x36
'(', # 0x37
'_', # 0x38
'?', # 0x39
'w', # 0x3a
']', # 0x3b
'#', # 0x3c
'y', # 0x3d
')', # 0x3e
'=', # 0x3f
'[d7]', # 0x40
'[d17]', # 0x41
'[d27]', # 0x42
'[d127]', # 0x43
'[d37]', # 0x44
'[d137]', # 0x45
'[d237]', # 0x46
'[d1237]', # 0x47
'[d47]', # 0x48
'[d147]', # 0x49
'[d247]', # 0x4a
'[d1247]', # 0x4b
'[d347]', # 0x4c
'[d1347]', # 0x4d
'[d2347]', # 0x4e
'[d12347]', # 0x4f
'[d57]', # 0x50
'[d157]', # 0x51
'[d257]', # 0x52
'[d1257]', # 0x53
'[d357]', # 0x54
'[d1357]', # 0x55
'[d2357]', # 0x56
'[d12357]', # 0x57
'[d457]', # 0x58
'[d1457]', # 0x59
'[d2457]', # 0x5a
'[d12457]', # 0x5b
'[d3457]', # 0x5c
'[d13457]', # 0x5d
'[d23457]', # 0x5e
'[d123457]', # 0x5f
'[d67]', # 0x60
'[d167]', # 0x61
'[d267]', # 0x62
'[d1267]', # 0x63
'[d367]', # 0x64
'[d1367]', # 0x65
'[d2367]', # 0x66
'[d12367]', # 0x67
'[d467]', # 0x68
'[d1467]', # 0x69
'[d2467]', # 0x6a
'[d12467]', # 0x6b
'[d3467]', # 0x6c
'[d13467]', # 0x6d
'[d23467]', # 0x6e
'[d123467]', # 0x6f
'[d567]', # 0x70
'[d1567]', # 0x71
'[d2567]', # 0x72
'[d12567]', # 0x73
'[d3567]', # 0x74
'[d13567]', # 0x75
'[d23567]', # 0x76
'[d123567]', # 0x77
'[d4567]', # 0x78
'[d14567]', # 0x79
'[d24567]', # 0x7a
'[d124567]', # 0x7b
'[d34567]', # 0x7c
'[d134567]', # 0x7d
'[d234567]', # 0x7e
'[d1234567]', # 0x7f
'[d8]', # 0x80
'[d18]', # 0x81
'[d28]', # 0x82
'[d128]', # 0x83
'[d38]', # 0x84
'[d138]', # 0x85
'[d238]', # 0x86
'[d1238]', # 0x87
'[d48]', # 0x88
'[d148]', # 0x89
'[d248]', # 0x8a
'[d1248]', # 0x8b
'[d348]', # 0x8c
'[d1348]', # 0x8d
'[d2348]', # 0x8e
'[d12348]', # 0x8f
'[d58]', # 0x90
'[d158]', # 0x91
'[d258]', # 0x92
'[d1258]', # 0x93
'[d358]', # 0x94
'[d1358]', # 0x95
'[d2358]', # 0x96
'[d12358]', # 0x97
'[d458]', # 0x98
'[d1458]', # 0x99
'[d2458]', # 0x9a
'[d12458]', # 0x9b
'[d3458]', # 0x9c
'[d13458]', # 0x9d
'[d23458]', # 0x9e
'[d123458]', # 0x9f
'[d68]', # 0xa0
'[d168]', # 0xa1
'[d268]', # 0xa2
'[d1268]', # 0xa3
'[d368]', # 0xa4
'[d1368]', # 0xa5
'[d2368]', # 0xa6
'[d12368]', # 0xa7
'[d468]', # 0xa8
'[d1468]', # 0xa9
'[d2468]', # 0xaa
'[d12468]', # 0xab
'[d3468]', # 0xac
'[d13468]', # 0xad
'[d23468]', # 0xae
'[d123468]', # 0xaf
'[d568]', # 0xb0
'[d1568]', # 0xb1
'[d2568]', # 0xb2
'[d12568]', # 0xb3
'[d3568]', # 0xb4
'[d13568]', # 0xb5
'[d23568]', # 0xb6
'[d123568]', # 0xb7
'[d4568]', # 0xb8
'[d14568]', # 0xb9
'[d24568]', # 0xba
'[d124568]', # 0xbb
'[d34568]', # 0xbc
'[d134568]', # 0xbd
'[d234568]', # 0xbe
'[d1234568]', # 0xbf
'[d78]', # 0xc0
'[d178]', # 0xc1
'[d278]', # 0xc2
'[d1278]', # 0xc3
'[d378]', # 0xc4
'[d1378]', # 0xc5
'[d2378]', # 0xc6
'[d12378]', # 0xc7
'[d478]', # 0xc8
'[d1478]', # 0xc9
'[d2478]', # 0xca
'[d12478]', # 0xcb
'[d3478]', # 0xcc
'[d13478]', # 0xcd
'[d23478]', # 0xce
'[d123478]', # 0xcf
'[d578]', # 0xd0
'[d1578]', # 0xd1
'[d2578]', # 0xd2
'[d12578]', # 0xd3
'[d3578]', # 0xd4
'[d13578]', # 0xd5
'[d23578]', # 0xd6
'[d123578]', # 0xd7
'[d4578]', # 0xd8
'[d14578]', # 0xd9
'[d24578]', # 0xda
'[d124578]', # 0xdb
'[d34578]', # 0xdc
'[d134578]', # 0xdd
'[d234578]', # 0xde
'[d1234578]', # 0xdf
'[d678]', # 0xe0
'[d1678]', # 0xe1
'[d2678]', # 0xe2
'[d12678]', # 0xe3
'[d3678]', # 0xe4
'[d13678]', # 0xe5
'[d23678]', # 0xe6
'[d123678]', # 0xe7
'[d4678]', # 0xe8
'[d14678]', # 0xe9
'[d24678]', # 0xea
'[d124678]', # 0xeb
'[d34678]', # 0xec
'[d134678]', # 0xed
'[d234678]', # 0xee
'[d1234678]', # 0xef
'[d5678]', # 0xf0
'[d15678]', # 0xf1
'[d25678]', # 0xf2
'[d125678]', # 0xf3
'[d35678]', # 0xf4
'[d135678]', # 0xf5
'[d235678]', # 0xf6
'[d1235678]', # 0xf7
'[d45678]', # 0xf8
'[d145678]', # 0xf9
'[d245678]', # 0xfa
'[d1245678]', # 0xfb
'[d345678]', # 0xfc
'[d1345678]', # 0xfd
'[d2345678]', # 0xfe
'[d12345678]', # 0xff
)
| mit |
victorbriz/rethinkdb | external/v8_3.30.33.16/testing/gtest/test/gtest_output_test.py | 496 | 12051 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a list with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a list with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS and
not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| agpl-3.0 |
UnicronNL/vyos-kernel-clearfog | scripts/gdb/linux/utils.py | 630 | 4267 | #
# gdb helper commands and functions for Linux kernel debugging
#
# common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
class CachedType:
def __init__(self, name):
self._type = None
self._name = name
def _new_objfile_handler(self, event):
self._type = None
gdb.events.new_objfile.disconnect(self._new_objfile_handler)
def get_type(self):
if self._type is None:
self._type = gdb.lookup_type(self._name)
if self._type is None:
raise gdb.GdbError(
"cannot resolve type '{0}'".format(self._name))
if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.connect(self._new_objfile_handler)
return self._type
long_type = CachedType("long")
def get_long_type():
global long_type
return long_type.get_type()
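# offset_of() below casts the integer 0 to the given pointer type (a null
# pointer) and takes the address of the requested field; parsing that address
# yields the field's byte offset within the type.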
def offset_of(typeobj, field):
element = gdb.Value(0).cast(typeobj)
return int(str(element[field].address).split()[0], 16)
def container_of(ptr, typeobj, member):
return (ptr.cast(get_long_type()) -
offset_of(typeobj, member)).cast(typeobj)
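# Illustrative use (type and member names are assumptions, not from this file):
#   task_ptr_type = gdb.lookup_type("struct task_struct").pointer()
#   task = container_of(list_node_ptr, task_ptr_type, "tasks")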
class ContainerOf(gdb.Function):
"""Return pointer to containing data structure.
$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
data structure of the type TYPE in which PTR is the address of ELEMENT.
Note that TYPE and ELEMENT have to be quoted as strings."""
def __init__(self):
super(ContainerOf, self).__init__("container_of")
def invoke(self, ptr, typename, elementname):
return container_of(ptr, gdb.lookup_type(typename.string()).pointer(),
elementname.string())
ContainerOf()
BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
target_endianness = None
def get_target_endianness():
global target_endianness
if target_endianness is None:
endian = gdb.execute("show endian", to_string=True)
if "little endian" in endian:
target_endianness = LITTLE_ENDIAN
elif "big endian" in endian:
target_endianness = BIG_ENDIAN
else:
raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
return target_endianness
def read_u16(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return ord(buffer[0]) + (ord(buffer[1]) << 8)
else:
return ord(buffer[1]) + (ord(buffer[0]) << 8)
def read_u32(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
else:
return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
def read_u64(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
else:
return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
target_arch = None
def is_target_arch(arch):
if hasattr(gdb.Frame, 'architecture'):
return arch in gdb.newest_frame().architecture().name()
else:
global target_arch
if target_arch is None:
target_arch = gdb.execute("show architecture", to_string=True)
return arch in target_arch
GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
gdbserver_type = None
def get_gdbserver_type():
def exit_handler(event):
global gdbserver_type
gdbserver_type = None
gdb.events.exited.disconnect(exit_handler)
def probe_qemu():
try:
return gdb.execute("monitor info version", to_string=True) != ""
except:
return False
def probe_kgdb():
try:
thread_info = gdb.execute("info thread 2", to_string=True)
return "shadowCPU0" in thread_info
except:
return False
global gdbserver_type
if gdbserver_type is None:
if probe_qemu():
gdbserver_type = GDBSERVER_QEMU
elif probe_kgdb():
gdbserver_type = GDBSERVER_KGDB
if gdbserver_type is not None and hasattr(gdb, 'events'):
gdb.events.exited.connect(exit_handler)
return gdbserver_type
| gpl-2.0 |
Moriadry/tensorflow | tensorflow/python/framework/errors_test.py | 83 | 3683 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
class ErrorsTest(test.TestCase):
def testUniqueClassForEachErrorCode(self):
for error_code, exc_type in [
(errors.CANCELLED, errors_impl.CancelledError),
(errors.UNKNOWN, errors_impl.UnknownError),
(errors.INVALID_ARGUMENT, errors_impl.InvalidArgumentError),
(errors.DEADLINE_EXCEEDED, errors_impl.DeadlineExceededError),
(errors.NOT_FOUND, errors_impl.NotFoundError),
(errors.ALREADY_EXISTS, errors_impl.AlreadyExistsError),
(errors.PERMISSION_DENIED, errors_impl.PermissionDeniedError),
(errors.UNAUTHENTICATED, errors_impl.UnauthenticatedError),
(errors.RESOURCE_EXHAUSTED, errors_impl.ResourceExhaustedError),
(errors.FAILED_PRECONDITION, errors_impl.FailedPreconditionError),
(errors.ABORTED, errors_impl.AbortedError),
(errors.OUT_OF_RANGE, errors_impl.OutOfRangeError),
(errors.UNIMPLEMENTED, errors_impl.UnimplementedError),
(errors.INTERNAL, errors_impl.InternalError),
(errors.UNAVAILABLE, errors_impl.UnavailableError),
(errors.DATA_LOSS, errors_impl.DataLossError),
]:
# pylint: disable=protected-access
self.assertTrue(
isinstance(
errors_impl._make_specific_exception(None, None, None,
error_code), exc_type))
# pylint: enable=protected-access
def testKnownErrorClassForEachErrorCodeInProto(self):
for error_code in error_codes_pb2.Code.values():
# pylint: disable=line-too-long
if error_code in (
error_codes_pb2.OK, error_codes_pb2.
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
):
continue
# pylint: enable=line-too-long
with warnings.catch_warnings(record=True) as w:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, error_code)
# pylint: enable=protected-access
self.assertEqual(0, len(w)) # No warning is raised.
self.assertTrue(isinstance(exc, errors_impl.OpError))
self.assertTrue(errors_impl.OpError in exc.__class__.__bases__)
def testUnknownErrorCodeCausesWarning(self):
with warnings.catch_warnings(record=True) as w:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, 37)
# pylint: enable=protected-access
self.assertEqual(1, len(w))
self.assertTrue("Unknown error code: 37" in str(w[0].message))
self.assertTrue(isinstance(exc, errors_impl.OpError))
if __name__ == "__main__":
test.main()
| apache-2.0 |
adrianholovaty/django | django/contrib/gis/db/backends/spatialite/introspection.py | 401 | 2112 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
"""
    Subclass that updates the `base_data_types_reverse` dict
for geometry field types.
"""
base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
base_data_types_reverse.update(
{'point' : 'GeometryField',
'linestring' : 'GeometryField',
'polygon' : 'GeometryField',
'multipoint' : 'GeometryField',
'multilinestring' : 'GeometryField',
'multipolygon' : 'GeometryField',
'geometrycollection' : 'GeometryField',
})
class SpatiaLiteIntrospection(DatabaseIntrospection):
data_types_reverse = GeoFlexibleFieldLookupDict()
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying the `geometry_columns` table to get additional metadata.
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if isinstance(dim, basestring) and 'Z' in dim:
field_params['dim'] = 3
finally:
cursor.close()
return field_type, field_params
| bsd-3-clause |
tgsd96/gargnotes | venv/lib/python2.7/site-packages/setuptools/tests/test_test.py | 148 | 2329 | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
import site
import pytest
from setuptools.command.test import test
from setuptools.dist import Distribution
from .textwrap import DALS
from . import contexts
SETUP_PY = DALS("""
from setuptools import setup
setup(name='foo',
packages=['name', 'name.space', 'name.space.tests'],
namespace_packages=['name'],
test_suite='name.space.tests.test_suite',
)
""")
NS_INIT = DALS("""
# -*- coding: Latin-1 -*-
# Söme Arbiträry Ünicode to test Distribute Issüé 310
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
""")
TEST_PY = DALS("""
import unittest
class TestTest(unittest.TestCase):
def test_test(self):
print "Foo" # Should fail under Python 3 unless 2to3 is used
test_suite = unittest.makeSuite(TestTest)
""")
@pytest.fixture
def sample_test(tmpdir_cwd):
os.makedirs('name/space/tests')
# setup.py
with open('setup.py', 'wt') as f:
f.write(SETUP_PY)
# name/__init__.py
with open('name/__init__.py', 'wb') as f:
f.write(NS_INIT.encode('Latin-1'))
# name/space/__init__.py
with open('name/space/__init__.py', 'wt') as f:
f.write('#empty\n')
# name/space/tests/__init__.py
with open('name/space/tests/__init__.py', 'wt') as f:
f.write(TEST_PY)
@pytest.mark.skipif('hasattr(sys, "real_prefix")')
@pytest.mark.usefixtures('user_override')
@pytest.mark.usefixtures('sample_test')
class TestTestTest:
def test_test(self):
params = dict(
name='foo',
packages=['name', 'name.space', 'name.space.tests'],
namespace_packages=['name'],
test_suite='name.space.tests.test_suite',
use_2to3=True,
)
dist = Distribution(params)
dist.script_name = 'setup.py'
cmd = test(dist)
cmd.user = 1
cmd.ensure_finalized()
cmd.install_dir = site.USER_SITE
cmd.user = 1
with contexts.quiet():
# The test runner calls sys.exit
with contexts.suppress_exceptions(SystemExit):
cmd.run()
| mit |
versae/DH2304 | data/arts1.py | 1 | 1038 | import numpy as np
import pandas as pd
arts = pd.DataFrame()
# Clean the dates so you only see numbers.
def clean_years(value):
result = value
chars_to_replace = ["c.", "©", ", CARCC", "no date", "n.d.", " SODRAC", ", CA", " CARCC", ""]
chars_to_split = ["-", "/"]
if isinstance(result, str):
for char in chars_to_split:
if char in result:
result = result.split(char)[1].strip()
for char in chars_to_replace:
result = result.replace(char, "")
if result == "":
return np.nan
else:
return int(result)
else:
return result
arts['execution_date'] = arts['execution_date'].apply(clean_years)
arts.head()
# If a year is lower than 100, it refers to the 1900s. For example, 78 is actually 1978, and that needs to be fixed too.
def clean_year_99(value):
if value < 100:
return value + 1900
else:
return value
arts["execution_date"] = arts["execution_date"].apply(clean_year_99)
arts.head()
| mit |
ItsAGeekThing/namebench | namebench.py | 171 | 2390 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""namebench: DNS service benchmarking tool."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os
import platform
import sys
# Check before we start importing internal dependencies
if sys.version < '2.4':
your_version = sys.version.split(' ')[0]
print '* Your Python version (%s) is too old! Please upgrade to 2.6+!' % your_version
sys.exit(1)
elif sys.version >= '3.0':
print '* namebench is currently incompatible with Python 3.0 - trying anyways'
from libnamebench import cli
from libnamebench import config
if __name__ == '__main__':
options = config.GetMergedConfiguration()
use_tk = False
if len(sys.argv) == 1:
if os.getenv('DISPLAY', None):
use_tk = True
# Macs get a special Cocoa binary
if os.getenv('I_LOVE_TK', None):
use_tk = True
elif platform.mac_ver()[0]:
use_tk = False
elif platform.system() == 'Windows':
use_tk = True
if use_tk:
try:
# Workaround for unicode path errors.
# See http://code.google.com/p/namebench/issues/detail?id=41
if hasattr(sys, 'winver') and hasattr(sys, 'frozen'):
os.environ['TCL_LIBRARY'] = os.path.join(os.path.dirname(sys.executable), 'tcl', 'tcl8.5')
os.environ['TK_LIBRARY'] = os.path.join(os.path.dirname(sys.executable), 'tcl', 'tk8.5')
import Tkinter
except ImportError:
if len(sys.argv) == 1:
print "- The python-tk (tkinter) library is missing, using the command-line interface.\n"
use_tk = False
if use_tk:
print 'Starting graphical interface for namebench (use -x to force command-line usage)'
from libnamebench import tk
interface = tk.NameBenchGui
else:
interface = cli.NameBenchCli
namebench = interface(options)
namebench.Execute()
| apache-2.0 |
openstates/openstates | openstates/nh/bills.py | 1 | 15203 | import re
import datetime as dt
from collections import defaultdict
import pytz
from pupa.scrape import Scraper, Bill, VoteEvent as Vote
from openstates.nh.legacyBills import NHLegacyBillScraper
body_code = {"lower": "H", "upper": "S"}
bill_type_map = {
"B": "bill",
"R": "resolution",
"CR": "concurrent resolution",
"JR": "joint resolution",
"CO": "concurrent order",
# really "bill of address";
# see https://github.com/opencivicdata/python-opencivicdata/issues/115
"A": "bill",
# special session senate/house bill
"SSSB": "bill",
"SSHB": "bill",
}
action_classifiers = [
("Minority Committee Report", None), # avoid calling these passage
("Ought to Pass", ["passage"]),
("Passed by Third Reading", ["reading-3", "passage"]),
(".*Ought to Pass", ["committee-passage-favorable"]),
(".*Introduced(.*) and (R|r)eferred", ["introduction", "referral-committee"]),
("Proposed(.*) Amendment", "amendment-introduction"),
("Amendment .* Adopted", "amendment-passage"),
("Amendment .* Failed", "amendment-failure"),
("Signed", "executive-signature"),
("Vetoed", "executive-veto"),
]
VERSION_URL = "http://www.gencourt.state.nh.us/legislation/%s/%s.html"
AMENDMENT_URL = "http://www.gencourt.state.nh.us/legislation/amendments/%s.html"
def classify_action(action):
for regex, classification in action_classifiers:
if re.match(regex, action):
return classification
return None
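# e.g. (illustrative): classify_action("Ought to Pass") -> ["passage"],
# classify_action("Vetoed by Governor") -> "executive-veto"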
def extract_amendment_id(action):
piece = re.findall(r"Amendment #(\d{4}-\d+[hs])", action)
if piece:
return piece[0]
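# e.g. (illustrative): extract_amendment_id("Amendment #2017-0464h Adopted")
# returns "2017-0464h"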
class NHBillScraper(Scraper):
cachebreaker = dt.datetime.now().strftime("%Y%d%d%H%I%s")
def scrape(self, chamber=None, session=None):
if not session:
session = self.latest_session()
self.info("no session specified, using %s", session)
chambers = [chamber] if chamber else ["upper", "lower"]
for chamber in chambers:
yield from self.scrape_chamber(chamber, session)
def scrape_chamber(self, chamber, session):
if int(session) < 2017:
legacy = NHLegacyBillScraper(self.metadata, self.datadir)
yield from legacy.scrape(chamber, session)
# This throws an error because object_count isn't being properly incremented,
# even though it saves fine. So fake the output_names
self.output_names = ["1"]
return
# bill basics
self.bills = {} # LSR->Bill
self.bills_by_id = {} # need a second table to attach votes
self.versions_by_lsr = {} # mapping of bill ID to lsr
self.amendments_by_lsr = {}
# pre load the mapping table of LSR -> version id
self.scrape_version_ids()
self.scrape_amendments()
last_line = []
for line in (
self.get("http://gencourt.state.nh.us/dynamicdatafiles/LSRs.txt")
.content.decode("utf-8")
.split("\n")
):
line = line.split("|")
if len(line) < 1:
continue
if len(line) < 36:
if len(last_line + line[1:]) == 36:
# combine two lines for processing
# (skip an empty entry at beginning of second line)
line = last_line + line
self.warning("used bad line")
else:
# skip this line, maybe we'll use it later
self.warning("bad line: %s" % "|".join(line))
last_line = line
continue
session_yr = line[0]
lsr = line[1]
title = line[2]
body = line[3]
# type_num = line[4]
expanded_bill_id = line[9]
bill_id = line[10]
if body == body_code[chamber] and session_yr == session:
if expanded_bill_id.startswith("CACR"):
bill_type = "constitutional amendment"
elif expanded_bill_id.startswith("PET"):
bill_type = "petition"
elif expanded_bill_id.startswith("AR") and bill_id.startswith("CACR"):
bill_type = "constitutional amendment"
elif expanded_bill_id.startswith("SSSB") or expanded_bill_id.startswith(
"SSHB"
):
# special session house/senate bills
bill_type = "bill"
else:
bill_type = bill_type_map[expanded_bill_id.split(" ")[0][1:]]
if title.startswith("("):
title = title.split(")", 1)[1].strip()
self.bills[lsr] = Bill(
legislative_session=session,
chamber=chamber,
identifier=bill_id,
title=title,
classification=bill_type,
)
# http://www.gencourt.state.nh.us/bill_status/billText.aspx?sy=2017&id=95&txtFormat=html
if lsr in self.versions_by_lsr:
version_id = self.versions_by_lsr[lsr]
version_url = (
"http://www.gencourt.state.nh.us/bill_status/"
"billText.aspx?sy={}&id={}&txtFormat=html".format(
session, version_id
)
)
self.bills[lsr].add_version_link(
note="latest version", url=version_url, media_type="text/html"
)
# http://gencourt.state.nh.us/bill_status/billtext.aspx?sy=2017&txtFormat=amend&id=2017-0464S
if lsr in self.amendments_by_lsr:
amendment_id = self.amendments_by_lsr[lsr]
amendment_url = (
"http://www.gencourt.state.nh.us/bill_status/"
"billText.aspx?sy={}&id={}&txtFormat=amend".format(
session, amendment_id
)
)
amendment_name = "Amendment #{}".format(amendment_id)
self.bills[lsr].add_version_link(
note=amendment_name,
url=amendment_url,
media_type="application/pdf",
)
self.bills_by_id[bill_id] = self.bills[lsr]
# load legislators
self.legislators = {}
for line in (
self.get(
"http://gencourt.state.nh.us/dynamicdatafiles/legislators.txt?x={}".format(
self.cachebreaker
)
)
.content.decode("utf-8")
.split("\n")
):
if len(line) < 2:
continue
line = line.split("|")
employee_num = line[0]
# first, last, middle
if len(line) > 2:
name = "%s %s %s" % (line[2], line[3], line[1])
else:
name = "%s %s" % (line[2], line[1])
self.legislators[employee_num] = {"name": name, "seat": line[5]}
# body = line[4]
# sponsors
for line in (
self.get("http://gencourt.state.nh.us/dynamicdatafiles/LsrSponsors.txt")
.content.decode("utf-8")
.split("\n")
):
if len(line) < 1:
continue
session_yr, lsr, _seq, employee, primary = line.strip().split("|")
if session_yr == session and lsr in self.bills:
sp_type = "primary" if primary == "1" else "cosponsor"
try:
# Removes extra spaces in names
sponsor_name = self.legislators[employee]["name"].strip()
sponsor_name = " ".join(sponsor_name.split())
self.bills[lsr].add_sponsorship(
classification=sp_type,
name=sponsor_name,
entity_type="person",
primary=True if sp_type == "primary" else False,
)
self.bills[lsr].extras = {
"_code": self.legislators[employee]["seat"]
}
except KeyError:
self.warning("Error, can't find person %s" % employee)
# actions
for line in (
self.get("http://gencourt.state.nh.us/dynamicdatafiles/Docket.txt")
.content.decode("utf-8")
.split("\n")
):
if len(line) < 1:
continue
# a few blank/irregular lines, irritating
if "|" not in line:
continue
(session_yr, lsr, timestamp, bill_id, body, action, _) = line.split("|")
if session_yr == session and lsr in self.bills:
actor = "lower" if body == "H" else "upper"
time = dt.datetime.strptime(timestamp, "%m/%d/%Y %H:%M:%S %p")
action = action.strip()
atype = classify_action(action)
self.bills[lsr].add_action(
chamber=actor,
description=action,
date=time.strftime("%Y-%m-%d"),
classification=atype,
)
amendment_id = extract_amendment_id(action)
if amendment_id:
self.bills[lsr].add_document_link(
note="amendment %s" % amendment_id,
url=AMENDMENT_URL % amendment_id,
)
yield from self.scrape_votes(session)
# save all bills
for bill in self.bills:
# bill.add_source(zip_url)
self.add_source(self.bills[bill], bill, session)
yield self.bills[bill]
def add_source(self, bill, lsr, session):
bill_url = (
"http://www.gencourt.state.nh.us/bill_Status/bill_status.aspx?"
+ "lsr={}&sy={}&sortoption=&txtsessionyear={}".format(lsr, session, session)
)
bill.add_source(bill_url)
def scrape_version_ids(self):
for line in (
self.get("http://gencourt.state.nh.us/dynamicdatafiles/LsrsOnly.txt")
.content.decode("utf-8")
.split("\n")
):
if len(line) < 1:
continue
# a few blank/irregular lines, irritating
if "|" not in line:
continue
line = line.split("|")
file_id = line[2]
lsr = line[0].split("-")
lsr = lsr[1]
self.versions_by_lsr[lsr] = file_id
def scrape_amendments(self):
for line in (
self.get("http://gencourt.state.nh.us/dynamicdatafiles/Docket.txt")
.content.decode("utf-8")
.split("\n")
):
if len(line) < 1:
continue
# a few blank/irregular lines, irritating
if "|" not in line:
continue
line = line.split("|")
lsr = line[1]
amendment_regex = re.compile(r"Amendment # (\d{4}-\d+\w)", re.IGNORECASE)
for match in amendment_regex.finditer(line[5]):
self.amendments_by_lsr[lsr] = match.group(1)
def scrape_votes(self, session):
votes = {}
other_counts = defaultdict(int)
last_line = []
vote_url = "http://gencourt.state.nh.us/dynamicdatafiles/RollCallSummary.txt"
lines = self.get(vote_url).content.decode("utf-8").splitlines()
for line in lines:
if len(line) < 2:
continue
if line.strip() == "":
continue
line = line.split("|")
if len(line) < 14:
if len(last_line + line[1:]) == 14:
line = last_line
self.warning("used bad vote line")
else:
last_line = line
self.warning("bad vote line %s" % "|".join(line))
session_yr = line[0].replace("\xef\xbb\xbf", "")
body = line[1]
vote_num = line[2]
timestamp = line[3]
bill_id = line[4].strip()
yeas = int(line[5])
nays = int(line[6])
# present = int(line[7])
# absent = int(line[8])
motion = line[11].strip() or "[not available]"
if session_yr == session and bill_id in self.bills_by_id:
actor = "lower" if body == "H" else "upper"
time = dt.datetime.strptime(timestamp, "%m/%d/%Y %I:%M:%S %p")
time = pytz.timezone("America/New_York").localize(time).isoformat()
# TODO: stop faking passed somehow
passed = yeas > nays
vote = Vote(
chamber=actor,
start_date=time,
motion_text=motion,
result="pass" if passed else "fail",
classification="passage",
bill=self.bills_by_id[bill_id],
)
vote.set_count("yes", yeas)
vote.set_count("no", nays)
vote.add_source(vote_url)
vote.pupa_id = session_yr + body + vote_num # unique ID for vote
votes[body + vote_num] = vote
for line in (
self.get("http://gencourt.state.nh.us/dynamicdatafiles/RollCallHistory.txt")
.content.decode("utf-8")
.splitlines()
):
if len(line) < 2:
continue
# 2016|H|2|330795||Yea|
# 2012 | H | 2 | 330795 | 964 | HB309 | Yea | 1/4/2012 8:27:03 PM
session_yr, body, v_num, _, employee, bill_id, vote, date = line.split("|")
if not bill_id:
continue
if session_yr == session and bill_id.strip() in self.bills_by_id:
try:
leg = " ".join(self.legislators[employee]["name"].split())
except KeyError:
self.warning("Error, can't find person %s" % employee)
continue
vote = vote.strip()
if body + v_num not in votes:
self.warning("Skipping processing this vote:")
self.warning("Bad ID: %s" % (body + v_num))
continue
# code = self.legislators[employee]['seat']
if vote == "Yea":
votes[body + v_num].yes(leg)
elif vote == "Nay":
votes[body + v_num].no(leg)
else:
votes[body + v_num].vote("other", leg)
# hack-ish, but will keep the vote count sync'd
other_counts[body + v_num] += 1
votes[body + v_num].set_count("other", other_counts[body + v_num])
for vote in votes.values():
yield vote
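# Illustrative sketch (not part of the scraper): how one RollCallSummary.txt
# record maps onto the fields consumed in scrape_votes above. The sample line
# and the helper name are hypothetical; the field positions mirror the
# indexing used there (0=session, 1=body, 2=vote number, 3=timestamp,
# 4=bill id, 5=yeas, 6=nays, 11=motion).
def _parse_roll_call_summary(line):
    parts = line.split("|")
    return {
        "session": parts[0],
        "body": parts[1],
        "vote_num": parts[2],
        "timestamp": parts[3],
        "bill_id": parts[4].strip(),
        "yeas": int(parts[5]),
        "nays": int(parts[6]),
        "motion": parts[11].strip() or "[not available]",
    }
# _parse_roll_call_summary("2016|H|12|1/6/2016 11:30:00 AM|HB309|210|132|0|0|||Ought to Pass||")
# returns {'session': '2016', 'body': 'H', ..., 'motion': 'Ought to Pass'}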
| gpl-3.0 |
paul99/clank | tools/grit/grit/node/io.py | 4 | 3001 | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The <output> and <file> elements.
'''
import os
import re
import grit.format.rc_header
from grit.node import base
from grit import exception
from grit import util
from grit import xtb_reader
class FileNode(base.Node):
'''A <file> element.'''
def __init__(self):
super(type(self), self).__init__()
self.re = None
self.should_load_ = True
def IsTranslation(self):
return True
def GetLang(self):
return self.attrs['lang']
def DisableLoading(self):
self.should_load_ = False
def MandatoryAttributes(self):
return ['path', 'lang']
def RunGatherers(self, recursive=False, debug=False):
if not self.should_load_ or not self.SatisfiesOutputCondition():
return
root = self.GetRoot()
defs = {}
if hasattr(root, 'defines'):
defs = root.defines
xtb_file = file(self.GetFilePath())
try:
lang = xtb_reader.Parse(xtb_file,
self.UberClique().GenerateXtbParserCallback(
self.attrs['lang'], debug=debug),
defs=defs)
except:
print "Exception during parsing of %s" % self.GetFilePath()
raise
# We special case 'he' and 'iw' because the translation console uses 'iw'
# and we use 'he'.
assert (lang == self.attrs['lang'] or
(lang == 'iw' and self.attrs['lang'] == 'he')), ('The XTB file you '
'reference must contain messages in the language specified\n'
'by the \'lang\' attribute.')
def GetFilePath(self):
return self.ToRealPath(os.path.expandvars(self.attrs['path']))
class OutputNode(base.Node):
'''An <output> element.'''
def MandatoryAttributes(self):
return ['filename', 'type']
def DefaultAttributes(self):
return { 'lang' : '', # empty lang indicates all languages
'language_section' : 'neutral' # defines a language neutral section
}
def GetType(self):
return self.attrs['type']
def GetLanguage(self):
'''Returns the language ID, default 'en'.'''
return self.attrs['lang']
def GetFilename(self):
return self.attrs['filename']
def GetOutputFilename(self):
if hasattr(self, 'output_filename'):
return self.output_filename
else:
return self.attrs['filename']
def _IsValidChild(self, child):
return isinstance(child, EmitNode)
class EmitNode(base.ContentNode):
''' An <emit> element.'''
def DefaultAttributes(self):
return { 'emit_type' : 'prepend'}
def GetEmitType(self):
'''Returns the emit_type for this node. Default is 'prepend'.'''
return self.attrs['emit_type']
def ItemFormatter(self, t):
if t == 'rc_header':
return grit.format.rc_header.EmitAppender()
else:
return super(type(self), self).ItemFormatter(t)
| bsd-3-clause |
john5223/airflow | airflow/hooks/hive_hooks.py | 17 | 14064 | from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import csv
import logging
import subprocess
from tempfile import NamedTemporaryFile
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
import pyhs2
from airflow.utils import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils import TemporaryDirectory
class HiveCliHook(BaseHook):
"""
Simple wrapper around the hive CLI.
It also supports ``beeline``, a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default"):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.conn = conn
def run_cli(self, hql, schema=None):
"""
Run an hql statement using the hive cli
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql)
f.flush()
fname = f.name
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = (
"jdbc:hive2://"
"{0}:{1}/{2}"
";auth=noSasl"
).format(conn.host, conn.port, conn.schema)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_cmd = [hive_bin, '-f', fname] + cmd_extra
if self.hive_cli_params:
hive_params_list = self.hive_cli_params.split()
hive_cmd.extend(hive_params_list)
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, ''):
stdout += line
logging.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.kill()
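# Illustrative usage sketch (not part of airflow): staging a local CSV into a
# Hive table with HiveCliHook.load_file. Assumes a configured
# ``hive_cli_default`` connection; the path, table and columns below are
# hypothetical examples (use collections.OrderedDict for field_dict if the
# column order matters).
def _example_load_csv_into_hive():
    hook = HiveCliHook()
    hook.load_file(
        filepath='/tmp/baby_names.csv',
        table='airflow.static_babynames',
        field_dict={'state': 'STRING', 'year': 'INT', 'name': 'STRING'},
        delimiter=',',
        partition={'ds': '2015-01-01'},
        recreate=True)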
class HiveMetastoreHook(BaseHook):
'''
Wrapper to interact with the Hive Metastore
'''
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
'''
Returns a Hive thrift client.
'''
ms = self.metastore_conn
transport = TSocket.TSocket(ms.host, ms.port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
'''
Checks whether a partition exists
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
'''
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
return bool(partitions)
def get_table(self, table_name, db='default'):
'''
Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
'''
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
'''
Get metastore table objects for the given database, filtered by pattern
'''
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
'''
Get the names of metastore databases matching the pattern
'''
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
'''
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (the Java short max value).
For subpartitioned tables, the number can easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
'''
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
'''
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
'''
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
class HiveServer2Hook(BaseHook):
'''
Wrapper around the pyhs2 library
Note that the default authMechanism is NOSASL, to override it you
can specify it in the ``extra`` of your connection in the UI as in
``{"authMechanism": "PLAIN"}``. Refer to the pyhs2 for more details.
'''
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self):
db = self.get_connection(self.hiveserver2_conn_id)
return pyhs2.connect(
host=db.host,
port=db.port,
authMechanism=db.extra_dejson.get('authMechanism', 'NOSASL'),
user=db.login,
database=db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
with self.get_conn() as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
for statement in hql:
with conn.cursor() as cur:
cur.execute(statement)
records = cur.fetchall()
if records:
results = {
'data': records,
'header': cur.getSchema(),
}
return results
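# Illustrative sketch (not part of airflow): shape of the value returned by
# get_results. Column metadata comes from pyhs2's cursor.getSchema(); the
# rows shown are hypothetical.
#
#     {'data': [('CA', 1990, 'Jessica', 'F', 1000), ...],
#      'header': [{'columnName': 'state', ...}, ...]}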
def to_csv(self, hql, csv_filepath, schema='default'):
schema = schema or 'default'
with self.get_conn() as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.getSchema()
with open(csv_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow([c['columnName'] for c in cur.getSchema()])
i = 0
while cur.hasMoreRows:
rows = [row for row in cur.fetchmany() if row]
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
'''
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
'''
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
'''
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
'''
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c['columnName'] for c in res['header']]
return df
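# Illustrative usage sketch (not part of airflow): dumping a query to CSV
# with HiveServer2Hook.to_csv. Assumes a configured ``hiveserver2_default``
# connection; the query and output path are hypothetical examples.
def _example_dump_query_to_csv():
    hook = HiveServer2Hook()
    hook.to_csv(
        hql="SELECT * FROM airflow.static_babynames LIMIT 100",
        csv_filepath="/tmp/babynames.csv")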
| apache-2.0 |
regisb/babel | scripts/dump_data.py | 55 | 1500 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
from optparse import OptionParser
from pprint import pprint
from babel.localedata import load, LocaleDataDict
def main():
parser = OptionParser(usage='%prog [options] locale [path]')
parser.add_option('--noinherit', action='store_false', dest='inherit',
help='do not merge inherited data into locale data')
parser.add_option('--resolve', action='store_true', dest='resolve',
help='resolve aliases in locale data')
parser.set_defaults(inherit=True, resolve=False)
options, args = parser.parse_args()
if len(args) not in (1, 2):
parser.error('incorrect number of arguments')
data = load(args[0], merge_inherited=options.inherit)
if options.resolve:
data = LocaleDataDict(data)
if len(args) > 1:
for key in args[1].split('.'):
data = data[key]
if isinstance(data, dict):
data = dict(data.items())
pprint(data)
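# Illustrative sketch (not part of the script): the same lookup done
# programmatically. load() and LocaleDataDict come from babel.localedata
# (imported above); the locale and key path are hypothetical examples.
def example_dump(locale='en_US', keys=('dates', 'formats')):
    data = LocaleDataDict(load(locale, merge_inherited=True))
    for key in keys:
        data = data[key]
    if isinstance(data, dict):
        data = dict(data.items())
    pprint(data)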
if __name__ == '__main__':
main()
| bsd-3-clause |
EmanueleCannizzaro/scons | src/engine/SCons/Tool/RCS.py | 3 | 2283 | """SCons.Tool.RCS.py
Tool-specific initialization for RCS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/RCS.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
RCS to an Environment."""
def RCSFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The RCS() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR')
return SCons.Builder.Builder(action = act, env = env)
env.RCS = RCSFactory
env['RCS'] = 'rcs'
env['RCS_CO'] = 'co'
env['RCS_COFLAGS'] = SCons.Util.CLVar('')
env['RCS_COCOM'] = '$RCS_CO $RCS_COFLAGS $TARGET'
def exists(env):
return env.Detect('rcs')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
sysadminmatmoz/odoo-clearcorp | account_move_no_filter/__init__.py | 21 | 1030 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
gautam1858/tensorflow | third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py | 3 | 6689 | #!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
supported_cuda_compute_capabilities = [ "3.0" ]
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
"""Extract the list of values for option from options.
Args:
option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
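# Hypothetical demo (not part of the original wrapper): GetOptionValue
# collects the values of repeated /I options and returns everything else
# as leftovers. The sample arguments are made up.
def _example_get_option_value():
    values, leftover = GetOptionValue(
        ['/I', 'third_party', '/DFOO', '/I', 'includes'], 'I')
    # values == ['third_party', 'includes'], leftover == ['/DFOO']
    return values, leftover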
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
1. The string that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return value of calling os.system('nvcc ' + args)
"""
src_files = [
f for f in argv if re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)
]
if len(src_files) == 0:
raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
# The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
jrjang/mbed | workspace_tools/export/__init__.py | 1 | 7502 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, tempfile
from os.path import join, exists, basename
from shutil import copytree, rmtree, copy
from workspace_tools.utils import mkdir
from workspace_tools.export import uvision4, codered, gccarm, ds5_5, iar, emblocks, coide, kds, zip, simplicityv3, atmelstudio, sw4stm32
from workspace_tools.export.exporters import zip_working_directory_and_clean_up, OldLibrariesException
from workspace_tools.targets import TARGET_NAMES, EXPORT_MAP, TARGET_MAP
from project_generator_definitions.definitions import ProGenDef
EXPORTERS = {
'uvision': uvision4.Uvision4,
'lpcxpresso': codered.CodeRed,
'gcc_arm': gccarm.GccArm,
'ds5_5': ds5_5.DS5_5,
'iar': iar.IAREmbeddedWorkbench,
'emblocks' : emblocks.IntermediateFile,
'coide' : coide.CoIDE,
'kds' : kds.KDS,
'simplicityv3' : simplicityv3.SimplicityV3,
'atmelstudio' : atmelstudio.AtmelStudio,
'sw4stm32' : sw4stm32.Sw4STM32,
}
ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN = """
Sorry, the target %s is not currently supported on the %s toolchain.
Please refer to <a href='/handbook/Exporting-to-offline-toolchains' target='_blank'>Exporting to offline toolchains</a> for more information.
"""
ERROR_MESSAGE_NOT_EXPORT_LIBS = """
To export this project please <a href='http://mbed.org/compiler/?import=http://mbed.org/users/mbed_official/code/mbed-export/k&mode=lib' target='_blank'>import the export version of the mbed library</a>.
"""
def online_build_url_resolver(url):
# TODO: Retrieve the path and name of an online library build URL
return {'path':'', 'name':''}
def export(project_path, project_name, ide, target, destination='/tmp/',
tempdir=None, clean=True, extra_symbols=None, build_url_resolver=online_build_url_resolver):
# Convention: we are using capitals for toolchain and target names
if target is not None:
target = target.upper()
if tempdir is None:
tempdir = tempfile.mkdtemp()
report = {'success': False, 'errormsg':''}
if ide is None or ide == "zip":
# Simple ZIP exporter
try:
ide = "zip"
exporter = zip.ZIP(target, tempdir, project_name, build_url_resolver, extra_symbols=extra_symbols)
exporter.scan_and_copy_resources(project_path, tempdir)
exporter.generate()
report['success'] = True
except OldLibrariesException, e:
report['errormsg'] = ERROR_MESSAGE_NOT_EXPORT_LIBS
else:
if ide not in EXPORTERS:
report['errormsg'] = ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN % (target, ide)
else:
Exporter = EXPORTERS[ide]
target = EXPORT_MAP.get(target, target)
# use progen targets or mbed exporters targets, check progen attribute
use_progen = False
supported = True
try:
if Exporter.PROGEN_ACTIVE:
use_progen = True
except AttributeError:
pass
if use_progen:
if not ProGenDef(ide).is_supported(TARGET_MAP[target].progen['target']):
supported = False
else:
if target not in Exporter.TARGETS:
supported = False
if supported:
# target checked, export
try:
exporter = Exporter(target, tempdir, project_name, build_url_resolver, extra_symbols=extra_symbols)
exporter.scan_and_copy_resources(project_path, tempdir)
exporter.generate()
report['success'] = True
except OldLibrariesException, e:
report['errormsg'] = ERROR_MESSAGE_NOT_EXPORT_LIBS
else:
report['errormsg'] = ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN % (target, ide)
zip_path = None
if report['success']:
# add readme file to every offline export.
open(os.path.join(tempdir, 'GettingStarted.htm'),'w').write('<meta http-equiv="refresh" content="0; url=http://mbed.org/handbook/Getting-Started-mbed-Exporters#%s"/>'% (ide))
# copy .hgignore file to exported directory as well.
copy(os.path.join(exporter.TEMPLATE_DIR,'.hgignore'),tempdir)
zip_path = zip_working_directory_and_clean_up(tempdir, destination, project_name, clean)
return zip_path, report
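# Illustrative usage sketch (not part of the tools): exporting a local
# project to an offline IDE via export(). The paths, project name, target
# and IDE below are hypothetical examples.
def _example_export_project():
    zip_path, report = export(
        project_path='examples/blinky',
        project_name='blinky',
        ide='uvision',
        target='LPC1768',
        destination='/tmp/')
    if not report['success']:
        print report['errormsg']
    return zip_path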
###############################################################################
# Generate project folders following the online conventions
###############################################################################
def copy_tree(src, dst, clean=True):
if exists(dst):
if clean:
rmtree(dst)
else:
return
copytree(src, dst)
def setup_user_prj(user_dir, prj_path, lib_paths=None):
"""
Setup a project with the same directory structure of the mbed online IDE
"""
mkdir(user_dir)
# Project Path
copy_tree(prj_path, join(user_dir, "src"))
# Project Libraries
user_lib = join(user_dir, "lib")
mkdir(user_lib)
if lib_paths is not None:
for lib_path in lib_paths:
copy_tree(lib_path, join(user_lib, basename(lib_path)))
def mcu_ide_matrix(verbose_html=False, platform_filter=None):
""" Shows target map using prettytable """
supported_ides = []
for key in EXPORTERS.iterkeys():
supported_ides.append(key)
supported_ides.sort()
from prettytable import PrettyTable, ALL # Only use it in this function so building works without extra modules
# All tests status table print
columns = ["Platform"] + supported_ides
pt = PrettyTable(columns)
# Align table
for col in columns:
pt.align[col] = "c"
pt.align["Platform"] = "l"
perm_counter = 0
target_counter = 0
for target in sorted(TARGET_NAMES):
target_counter += 1
row = [target] # First column is platform name
for ide in supported_ides:
text = "-"
if target in EXPORTERS[ide].TARGETS:
if verbose_html:
text = "✓"
else:
text = "x"
perm_counter += 1
row.append(text)
pt.add_row(row)
pt.border = True
pt.vrules = ALL
pt.hrules = ALL
# creates a html page suitable for a browser
# result = pt.get_html_string(format=True) if verbose_html else pt.get_string()
# creates a html page in a shorter format suitable for readme.md
result = pt.get_html_string() if verbose_html else pt.get_string()
result += "\n"
result += "Total IDEs: %d\n"% (len(supported_ides))
if verbose_html: result += "<br>"
result += "Total platforms: %d\n"% (target_counter)
if verbose_html: result += "<br>"
result += "Total permutations: %d"% (perm_counter)
if verbose_html: result = result.replace("&", "&")
return result
| apache-2.0 |
brutkin/commons | tests/python/twitter/common/zookeeper/group/test_group.py | 13 | 5257 | # ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import time
import threading
import unittest
from twitter.common.zookeeper.client import ZooKeeper, ZooDefs
from twitter.common.zookeeper.group.group import ActiveGroup, Group
from twitter.common.zookeeper.group.test_base import GroupTestBase
import zookeeper
class AlternateGroup(Group):
MEMBER_PREFIX = 'herpderp_'
class TestGroup(GroupTestBase, unittest.TestCase):
GroupImpl = Group
AlternateGroupImpl = AlternateGroup
ACLS = dict(
OPEN_ACL_UNSAFE=ZooDefs.Acls.OPEN_ACL_UNSAFE,
CREATOR_ALL_ACL=ZooDefs.Acls.CREATOR_ALL_ACL,
READ_ACL_UNSAFE=ZooDefs.Acls.READ_ACL_UNSAFE,
EVERYONE_READ_CREATOR_ALL=ZooDefs.Acls.EVERYONE_READ_CREATOR_ALL,
)
@classmethod
def make_zk(cls, ensemble, **kw):
return ZooKeeper(ensemble,
timeout_secs=cls.CONNECT_TIMEOUT_SECS,
max_reconnects=cls.CONNECT_RETRIES,
**kw)
@classmethod
def session_id(cls, zk):
return zk.session_id
def test_children_filtering(self):
zk = self.make_zk(self._server.ensemble)
zk.create('/test', '', self.ACLS['OPEN_ACL_UNSAFE'])
zk.create('/test/alt_member_', '', self.ACLS['OPEN_ACL_UNSAFE'],
zookeeper.SEQUENCE | zookeeper.EPHEMERAL)
zk.create('/test/candidate_', '', self.ACLS['OPEN_ACL_UNSAFE'],
zookeeper.SEQUENCE | zookeeper.EPHEMERAL)
zkg = self.GroupImpl(self._zk, '/test')
assert list(zkg) == []
assert zkg.monitor(membership=set(['frank', 'larry'])) == set()
def test_monitor_through_expiration(self):
session_expired = threading.Event()
def on_watch(_, event, state, path):
if event == zookeeper.SESSION_EVENT and state == zookeeper.EXPIRED_SESSION_STATE:
session_expired.set()
zk1 = self.make_zk(self._server.ensemble, watch=on_watch)
zkg1 = self.GroupImpl(self._zk, '/test')
session_id1 = self.session_id(zk1)
zk2 = self.make_zk(self._server.ensemble)
zkg2 = self.GroupImpl(zk2, '/test')
member1 = zkg2.join('hello 1')
new_members = zkg1.monitor([]) # wait until the first group acknowledges the join
assert new_members == set([member1])
membership_event = threading.Event()
membership = []
def on_membership(new_members):
membership[:] = new_members
membership_event.set()
zkg1.monitor([member1], on_membership)
self._server.expire(session_id1)
session_expired.wait(self.MAX_EVENT_WAIT_SECS)
assert not membership_event.is_set()
member2 = zkg2.join('hello 2')
membership_event.wait()
assert membership_event.is_set()
assert membership == [member1, member2]
for member in membership:
assert zkg1.info(member) is not None
assert zkg1.info(member).startswith('hello')
class TestActiveGroup(TestGroup):
GroupImpl = ActiveGroup
# These tests do use time.sleep but mostly to simulate real eventual consistency
# in the behavior of iter and getitem. Because we don't have more intimate control
# over the underlying store (Zookeeper), this will have to do.
def test_container_idioms(self):
zkg1 = self.GroupImpl(self._zk, '/test')
zkg2 = self.GroupImpl(self._zk, '/test')
def devnull(*args, **kw):
pass
def cancel_by_value(group, cancel_group, value):
for member in group:
if group[member] == value:
cancel_group.cancel(member, callback=devnull)
break
def assert_iter_equals(membership, max_wait=self.MAX_EVENT_WAIT_SECS):
total_wait = 0.0
while total_wait < max_wait:
members = list(zkg1)
if len(members) == len(membership):
break
time.sleep(0.1)
total_wait += 0.1
for member in members:
assert zkg1[member] in membership
zkg2.join('hello 1', callback=devnull)
zkg2.join('hello 2', callback=devnull)
zkg2.join('hello 3', callback=devnull)
assert_iter_equals(['hello 1', 'hello 2', 'hello 3'])
cancel_by_value(zkg1, zkg2, 'hello 2')
assert_iter_equals(['hello 1', 'hello 3'])
# cancel on same group
cancel_by_value(zkg1, zkg1, 'hello 3')
assert_iter_equals(['hello 1'])
# join on same group
zkg1.join('hello 4', callback=devnull)
assert_iter_equals(['hello 1', 'hello 4'])
# join on same group
zkg2.join('hello 5', callback=devnull)
assert_iter_equals(['hello 1', 'hello 4', 'hello 5'])
| apache-2.0 |
nicholedwight/nichole-theme | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/perldoc.py | 364 | 2175 | # -*- coding: utf-8 -*-
"""
pygments.styles.perldoc
~~~~~~~~~~~~~~~~~~~~~~~
Style similar to the style used in the `perldoc`_ code blocks.
.. _perldoc: http://perldoc.perl.org/
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class PerldocStyle(Style):
"""
Style similar to the style used in the perldoc code blocks.
"""
background_color = '#eeeedd'
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: '#228B22',
Comment.Preproc: '#1e889b',
Comment.Special: '#8B008B bold',
String: '#CD5555',
String.Heredoc: '#1c7e71 italic',
String.Regex: '#B452CD',
String.Other: '#cb6c20',
String.Regex: '#1c7e71',
Number: '#B452CD',
Operator.Word: '#8B008B',
Keyword: '#8B008B bold',
Keyword.Type: '#a7a7a7',
Name.Class: '#008b45 bold',
Name.Exception: '#008b45 bold',
Name.Function: '#008b45',
Name.Namespace: '#008b45 underline',
Name.Variable: '#00688B',
Name.Constant: '#00688B',
Name.Decorator: '#707a7c',
Name.Tag: '#8B008B bold',
Name.Attribute: '#658b00',
Name.Builtin: '#658b00',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
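# Illustrative usage sketch (not part of the style module): rendering Perl
# source with this style through the standard pygments API. The snippet of
# Perl below is a hypothetical example.
def _example_highlight():
    from pygments import highlight
    from pygments.lexers import PerlLexer
    from pygments.formatters import HtmlFormatter
    code = 'my $x = 42;\nprint "$x\\n";\n'
    return highlight(code, PerlLexer(), HtmlFormatter(style=PerldocStyle))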
| mit |
pombreda/androguard | apkviewer.py | 38 | 3150 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, os
from optparse import OptionParser
from androguard.core.bytecodes import apk, dvm
from androguard.core.data import data
from androguard.core.analysis import analysis, ganalysis
from androguard.core import androconf
option_0 = { 'name' : ('-i', '--input'), 'help' : 'filename input (dex, apk)', 'nargs' : 1 }
option_1 = { 'name' : ('-o', '--output'), 'help' : 'directory output', 'nargs' : 1 }
options = [option_0, option_1]
def create_directory( class_name, output ) :
output_name = output
if output_name[-1] != "/" :
output_name = output_name + "/"
try :
os.makedirs( output_name + class_name )
except OSError :
pass
def create_directories( vm, output ) :
for class_name in vm.get_classes_names() :
z = os.path.split( class_name )[0]
create_directory( z[1:], output )
def main(options, arguments) :
if options.input != None and options.output != None :
ret_type = androconf.is_android( options.input )
vm = None
a = None
if ret_type == "APK" :
a = apk.APK( options.input )
if a.is_valid_APK() :
vm = dvm.DalvikVMFormat( a.get_dex() )
else :
print "INVALID APK"
elif ret_type == "DEX" :
try :
vm = dvm.DalvikVMFormat( open(options.input, "rb").read() )
except Exception, e :
print "INVALID DEX", e
vmx = analysis.VMAnalysis( vm )
gvmx = ganalysis.GVMAnalysis( vmx, a )
create_directories( vm, options.output )
# dv.export_to_gml( options.output )
dd = data.Data(vm, vmx, gvmx, a)
buff = dd.export_apk_to_gml()
androconf.save_to_disk( buff, options.output + "/" + "apk.graphml" )
buff = dd.export_methodcalls_to_gml()
androconf.save_to_disk( buff, options.output + "/" + "methodcalls.graphml" )
buff = dd.export_dex_to_gml()
for i in buff :
androconf.save_to_disk( buff[i], options.output + "/" + i + ".graphml" )
if __name__ == "__main__" :
parser = OptionParser()
for option in options :
param = option['name']
del option['name']
parser.add_option(*param, **option)
options, arguments = parser.parse_args()
sys.argv[:] = arguments
main(options, arguments)
| apache-2.0 |
makinacorpus/odoo | addons/account/project/report/inverted_analytic_balance.py | 358 | 5760 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_inverted_analytic_balance(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_inverted_analytic_balance, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'lines_g': self._lines_g,
'lines_a': self._lines_a,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'sum_balance': self._sum_balance,
'sum_quantity': self._sum_quantity,
})
def _lines_g(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT aa.name AS name, aa.code AS code, "
"sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, aa.id AS id \
FROM account_analytic_line AS aal, account_account AS aa \
WHERE (aal.general_account_id=aa.id) "
"AND (aal.account_id IN %s) "
"AND (date>=%s) AND (date<=%s) AND aa.active \
GROUP BY aal.general_account_id, aa.name, aa.code, aal.code, aa.id "
"ORDER BY aal.code",
(tuple(ids), date1, date2))
res = self.cr.dictfetchall()
for r in res:
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _lines_a(self, accounts, general_account_id, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, "
"aaa.code AS code, aaa.name AS name, account_id \
FROM account_analytic_line AS aal, "
"account_analytic_account AS aaa \
WHERE aal.account_id=aaa.id AND aal.account_id IN %s "
"AND aal.general_account_id=%s AND aal.date>=%s "
"AND aal.date<=%s \
GROUP BY aal.account_id, general_account_id, aaa.code, aaa.name "
"ORDER BY aal.account_id",
(tuple(ids), general_account_id, date1, date2))
res = self.cr.dictfetchall()
aaa_obj = self.pool.get('account.analytic.account')
res2 = aaa_obj.read(self.cr, self.uid, ids, ['complete_name'])
complete_name = {}
for r in res2:
complete_name[r['id']] = r['complete_name']
for r in res:
r['complete_name'] = complete_name[r['account_id']]
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _sum_debit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT -sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_debit(accounts, date1, date2)
credit = self._sum_credit(accounts, date1, date2)
return (debit-credit)
def _sum_quantity(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
class report_invertedanalyticbalance(osv.AbstractModel):
_name = 'report.account.report_invertedanalyticbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_invertedanalyticbalance'
_wrapped_report_class = account_inverted_analytic_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andymckay/zamboni | lib/es/management/commands/fixup_mkt_index.py | 21 | 1143 | """
A Marketplace only command that finds apps missing from the search index and
adds them.
"""
import sys
import elasticsearch
from django.core.management.base import BaseCommand
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
class Command(BaseCommand):
help = 'Fix up Marketplace index.'
def handle(self, *args, **kwargs):
index = WebappIndexer.get_index()
doctype = WebappIndexer.get_mapping_type_name()
es = WebappIndexer.get_es()
app_ids = Webapp.objects.values_list('id', flat=True)
missing_ids = []
for app_id in app_ids:
try:
es.get(index, app_id, doctype, fields='id')
except elasticsearch.NotFoundError:
# App doesn't exist in our index, add it to `missing_ids`.
missing_ids.append(app_id)
if missing_ids:
sys.stdout.write('Adding %s doc(s) to the index.'
% len(missing_ids))
WebappIndexer().run_indexing(missing_ids, es)
else:
sys.stdout.write('No docs missing from index.')
| bsd-3-clause |
ammaradil/fibonacci | Lib/site-packages/django/conf/__init__.py | 59 | 6888 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import importlib
import os
import time
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return sorted(
s for s in list(self.__dict__) + dir(self.default_settings)
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return (deleted or set_locally or set_on_default)
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
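# Illustrative sketch (not part of django.conf): manual configuration for a
# standalone script, as described in LazySettings.configure above. The
# setting values are hypothetical examples.
def _example_standalone_configure():
    if not settings.configured:
        settings.configure(DEBUG=True, TIME_ZONE='UTC')
    return settings.DEBUG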
| mit |
jaimeMF/youtube-dl | youtube_dl/extractor/brightcove.py | 9 | 22826 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_parse_qs,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
js_to_json,
int_or_none,
parse_iso8601,
sanitized_Request,
unescapeHTML,
unsmuggle_url,
)
class BrightcoveLegacyIE(InfoExtractor):
IE_NAME = 'brightcove:legacy'
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3ABC2996102916001&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '2996102916001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'Red Bull TV',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
},
},
{
# playlist test
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
]
@classmethod
def _build_brighcove_url(cls, object_str):
"""
Build a Brightcove url from a xml string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
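        # e.g. (illustrative) '<param name="playerID" value="123">' becomes
        # '<param name="playerID" value="123"/>' so the XML parser below accepts it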
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return None
params = {}
playerID = find_param('playerID')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# The three fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID')
if videoPlayer is not None:
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
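        # Illustrative call this regex is meant to match (hypothetical, shortened values):
        #   customBC.createVideo(640, 360, "1234567890", "AQ~~,AAAA...", "9876543210", "rand");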
m = re.search(
            r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
data = compat_urllib_parse.urlencode(params)
return cls._FEDERATED_URL_TEMPLATE % data
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'<meta\s+property=[\'"]og:video[\'"]\s+content=[\'"](https?://(?:secure|c)\.brightcove.com/[^\'"]+)[\'"]',
webpage)
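        # Matches markup such as (illustrative values):
        #   <meta property="og:video" content="http://c.brightcove.com/services/viewer/...playerKey=AQ~~...">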
if url_m:
url = unescapeHTML(url_m.group(1))
# Some sites don't add it, we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
        # Change 'videoId' and the other id fields to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
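        # e.g. (illustrative) '...?bctid=123&bckey=AQ~~,AAA...' has now been rewritten
        # to '...?%40videoPlayer=123&playerKey=AQ~~,AAA...' before the query is parsed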
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query_str, query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query_str, query, referer=None):
request_url = self._FEDERATED_URL_TEMPLATE % query_str
req = sanitized_Request(request_url)
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
req.add_header('Referer', referer)
webpage = self._download_webpage(req, video_id)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' not in json_data:
raise ExtractorError('Empty playlist')
playlist_info = json_data['videoList']
videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_info['mediaCollectionDTO']['displayName'])
def _extract_video_info(self, video_info):
info = {
'id': compat_str(video_info['id']),
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
}
renditions = video_info.get('renditions')
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(url, info['id'], 'mp4'))
continue
elif 'akamaihd.net' in url_comp.netloc:
                        # These renditions are served through akamaihd.net,
                        # but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
size = rend.get('size')
formats.append({
'url': url,
'ext': ext,
'height': rend.get('frameHeight'),
'width': rend.get('frameWidth'),
'filesize': size if size != 0 else None,
})
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % info['id'])
return info
class BrightcoveNewIE(InfoExtractor):
IE_NAME = 'brightcove:new'
_VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>(?:ref:)?\d+)'
_TESTS = [{
'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
'md5': 'c8100925723840d4b0d243f7025703be',
'info_dict': {
'id': '4463358922001',
'ext': 'mp4',
'title': 'Meet the man behind Popcorn Time',
'description': 'md5:eac376a4fe366edc70279bfb681aea16',
'duration': 165.768,
'timestamp': 1441391203,
'upload_date': '20150904',
'uploader_id': '929656772001',
'formats': 'mincount:22',
},
}, {
# with rtmp streams
'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001',
'info_dict': {
'id': '4279049078001',
'ext': 'mp4',
'title': 'Titansgrave: Chapter 0',
'description': 'Titansgrave: Chapter 0',
'duration': 1242.058,
'timestamp': 1433556729,
'upload_date': '20150606',
'uploader_id': '4036320279001',
'formats': 'mincount:41',
},
'params': {
'skip_download': True,
}
}, {
# ref: prefixed video id
'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
urls = BrightcoveNewIE._extract_urls(webpage)
return urls[0] if urls else None
@staticmethod
def _extract_urls(webpage):
# Reference:
# 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
# 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
# 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html
# 4. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
entries = []
# Look for iframe embeds [1]
for _, url in re.findall(
r'<iframe[^>]+src=(["\'])((?:https?:)//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
entries.append(url)
# Look for embed_in_page embeds [2]
for video_id, account_id, player_id, embed in re.findall(
# According to examples from [3] it's unclear whether video id
# may be optional and what to do when it is
# According to [4] data-video-id may be prefixed with ref:
r'''(?sx)
<video[^>]+
data-video-id=["\']((?:ref:)?\d+)["\'][^>]*>.*?
</video>.*?
<script[^>]+
src=["\'](?:https?:)?//players\.brightcove\.net/
(\d+)/([\da-f-]+)_([^/]+)/index\.min\.js
''', webpage):
entries.append(
'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
% (account_id, player_id, embed, video_id))
return entries
def _real_extract(self, url):
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(
'http://players.brightcove.net/%s/%s_%s/index.min.js'
% (account_id, player_id, embed), video_id)
policy_key = None
catalog = self._search_regex(
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
if catalog:
catalog = self._parse_json(
js_to_json(catalog), video_id, fatal=False)
if catalog:
policy_key = catalog.get('policyKey')
if not policy_key:
policy_key = self._search_regex(
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
webpage, 'policy key', group='pk')
req = sanitized_Request(
'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s'
% (account_id, video_id),
headers={'Accept': 'application/json;pk=%s' % policy_key})
json_data = self._download_json(req, video_id)
title = json_data['name']
formats = []
for source in json_data.get('sources', []):
source_type = source.get('type')
src = source.get('src')
if source_type == 'application/x-mpegURL':
if not src:
continue
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
streaming_src = source.get('streaming_src')
stream_name, app_name = source.get('stream_name'), source.get('app_name')
if not src and not streaming_src and (not stream_name or not app_name):
continue
tbr = float_or_none(source.get('avg_bitrate'), 1000)
height = int_or_none(source.get('height'))
f = {
'tbr': tbr,
'width': int_or_none(source.get('width')),
'height': height,
'filesize': int_or_none(source.get('size')),
'container': source.get('container'),
'vcodec': source.get('codec'),
'ext': source.get('container').lower(),
}
def build_format_id(kind):
format_id = kind
if tbr:
format_id += '-%dk' % int(tbr)
if height:
format_id += '-%dp' % height
return format_id
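            # e.g. (illustrative) an HTTP source with tbr 1500 and height 720
            # yields the format_id 'http-1500k-720p'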
if src or streaming_src:
f.update({
'url': src or streaming_src,
'format_id': build_format_id('http' if src else 'http-streaming'),
'preference': 2 if src else 1,
})
else:
f.update({
'url': app_name,
'play_path': stream_name,
'format_id': build_format_id('rtmp'),
})
formats.append(f)
self._sort_formats(formats)
description = json_data.get('description')
thumbnail = json_data.get('thumbnail')
timestamp = parse_iso8601(json_data.get('published_at'))
duration = float_or_none(json_data.get('duration'), 1000)
tags = json_data.get('tags', [])
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'uploader_id': account_id,
'formats': formats,
'tags': tags,
}
| unlicense |
thinkopensolutions/l10n-brazil | financial/wizards/report_xlsx_financial_defaults_wizard.py | 4 | 1360 | # -*- coding: utf-8 -*-
#
# Copyright 2017 KMEE INFORMATICA LTDA
# Luiz Felipe do Divino <luiz.divino@kmee.com.br>
# License AGPL-3 or later (http://www.gnu.org/licenses/agpl)
#
from __future__ import division, print_function, unicode_literals
from datetime import datetime
from openerp import fields, models, api, _
class ReportXlsxFinancialDefaultsWizard(models.TransientModel):
_name = b'report.xlsx.financial.defaults.wizard'
_description = 'Report Xlsx Financial Defaults Wizard'
company_id = fields.Many2one(
comodel_name='res.company',
default=lambda self: self.env.user.company_id,
string='Company'
)
selected_partners = fields.Many2many(
comodel_name="res.partner",
relation="financial_move_partner_defaults",
column1="wizard_id",
column2="partner_id",
required=True,
)
date_from = fields.Date(
string=_("Date From"),
required=True,
default=datetime.now().strftime('%Y-%m-01'),
)
date_to = fields.Date(
string=_("Date To"),
required=True,
default=datetime.now().strftime('%Y-12-31'),
)
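    # Note: generate_report() below delegates to the 'report' model; the xlsx
    # parser registered as 'report_xlsx_financial_defaults' is assumed to read
    # the company, partners and date range defined above.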
@api.multi
def generate_report(self):
self.ensure_one()
return self.env['report'].get_action(
self,
report_name='report_xlsx_financial_defaults'
)
| agpl-3.0 |
tinkerthaler/odoo | addons/stock_landed_costs/stock_landed_costs.py | 56 | 18361 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
import product
class stock_landed_cost(osv.osv):
_name = 'stock.landed.cost'
_description = 'Stock Landed Cost'
_inherit = 'mail.thread'
_track = {
'state': {
'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
},
}
def _total_amount(self, cr, uid, ids, name, args, context=None):
result = {}
for cost in self.browse(cr, uid, ids, context=context):
total = 0.0
for line in cost.cost_lines:
total += line.price_unit
result[cost.id] = total
return result
def _get_cost_line(self, cr, uid, ids, context=None):
cost_to_recompute = []
for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context):
cost_to_recompute.append(line.cost_id.id)
return cost_to_recompute
def get_valuation_lines(self, cr, uid, ids, picking_ids=None, context=None):
picking_obj = self.pool.get('stock.picking')
lines = []
if not picking_ids:
return lines
for picking in picking_obj.browse(cr, uid, picking_ids):
for move in picking.move_lines:
#it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost
if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real':
continue
total_cost = 0.0
total_qty = move.product_qty
weight = move.product_id and move.product_id.weight * move.product_qty
volume = move.product_id and move.product_id.volume * move.product_qty
for quant in move.quant_ids:
total_cost += quant.cost
vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_uom_qty, former_cost=total_cost * total_qty, weight=weight, volume=volume)
lines.append(vals)
if not lines:
raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. Please make sure it is the case, or you selected the correct picking'))
return lines
_columns = {
'name': fields.char('Name', track_visibility='always', readonly=True, copy=False),
'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False),
'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False),
'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True),
'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}),
'description': fields.text('Item Description', states={'done': [('readonly', True)]}),
'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'),
store={
'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20),
'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20),
}, track_visibility='always'
),
'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False),
'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'),
'state': 'draft',
'date': fields.date.context_today,
}
def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None):
product_obj = self.pool.get('product.template')
cost_product = line.cost_line_id and line.cost_line_id.product_id
if not cost_product:
return False
accounts = product_obj.get_product_accounts(cr, uid, line.product_id.product_tmpl_id.id, context=context)
debit_account_id = accounts['property_stock_valuation_account_id']
already_out_account_id = accounts['stock_account_output']
credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id
if not credit_account_id:
raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name))
return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context)
def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None):
"""
Generate the account.move.line values to track the landed cost.
Afterwards, for the goods that are already out of stock, we should create the out moves
"""
aml_obj = self.pool.get('account.move.line')
aml_obj.create(cr, uid, {
'name': line.name,
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': line.quantity,
'debit': line.additional_landed_cost,
'account_id': debit_account_id
}, context=context)
aml_obj.create(cr, uid, {
'name': line.name,
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': line.quantity,
'credit': line.additional_landed_cost,
'account_id': credit_account_id
}, context=context)
#Create account move lines for quants already out of stock
if qty_out > 0:
aml_obj.create(cr, uid, {
'name': line.name + ": " + str(qty_out) + _(' already out'),
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': qty_out,
'credit': line.additional_landed_cost * qty_out / line.quantity,
'account_id': debit_account_id
}, context=context)
aml_obj.create(cr, uid, {
'name': line.name + ": " + str(qty_out) + _(' already out'),
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': qty_out,
'debit': line.additional_landed_cost * qty_out / line.quantity,
'account_id': already_out_account_id
}, context=context)
return True
def _create_account_move(self, cr, uid, cost, context=None):
vals = {
'journal_id': cost.account_journal_id.id,
'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0],
'date': cost.date,
'ref': cost.name
}
return self.pool.get('account.move').create(cr, uid, vals, context=context)
def _check_sum(self, cr, uid, landed_cost, context=None):
"""
        Check that the valuation lines of each cost line sum to that cost line's
        amount, and that the overall total amount is correct as well
"""
costcor = {}
tot = 0
for valuation_line in landed_cost.valuation_adjustment_lines:
if costcor.get(valuation_line.cost_line_id):
costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost
else:
costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost
tot += valuation_line.additional_landed_cost
res = (tot == landed_cost.amount_total)
for costl in costcor.keys():
if costcor[costl] != costl.price_unit:
res = False
return res
def button_validate(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for cost in self.browse(cr, uid, ids, context=context):
if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context):
raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.'))
move_id = self._create_account_move(cr, uid, cost, context=context)
quant_dict = {}
for line in cost.valuation_adjustment_lines:
if not line.move_id:
continue
per_unit = line.final_cost / line.quantity
diff = per_unit - line.former_cost_per_unit
quants = [quant for quant in line.move_id.quant_ids]
for quant in quants:
if quant.id not in quant_dict:
quant_dict[quant.id] = quant.cost + diff
else:
quant_dict[quant.id] += diff
for key, value in quant_dict.items():
quant_obj.write(cr, uid, key, {'cost': value}, context=context)
qty_out = 0
for quant in line.move_id.quant_ids:
if quant.location_id.usage != 'internal':
qty_out += quant.qty
self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context)
self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
def compute_landed_cost(self, cr, uid, ids, context=None):
line_obj = self.pool.get('stock.valuation.adjustment.lines')
unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context)
line_obj.unlink(cr, uid, unlink_ids, context=context)
towrite_dict = {}
for cost in self.browse(cr, uid, ids, context=None):
if not cost.picking_ids:
continue
picking_ids = [p.id for p in cost.picking_ids]
total_qty = 0.0
total_cost = 0.0
total_weight = 0.0
total_volume = 0.0
total_line = 0.0
vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context)
for v in vals:
for line in cost.cost_lines:
v.update({'cost_id': cost.id, 'cost_line_id': line.id})
self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context)
total_qty += v.get('quantity', 0.0)
total_cost += v.get('former_cost', 0.0)
total_weight += v.get('weight', 0.0)
total_volume += v.get('volume', 0.0)
total_line += 1
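            # Illustrative example (hypothetical numbers): a 100.0 cost line split
            # 'by_quantity' over two valuation lines of 2 and 8 units gives
            # per_unit = 100.0 / 10 = 10.0, so the lines receive 20.0 and 80.0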
for line in cost.cost_lines:
for valuation in cost.valuation_adjustment_lines:
value = 0.0
if valuation.cost_line_id and valuation.cost_line_id.id == line.id:
if line.split_method == 'by_quantity' and total_qty:
per_unit = (line.price_unit / total_qty)
value = valuation.quantity * per_unit
elif line.split_method == 'by_weight' and total_weight:
per_unit = (line.price_unit / total_weight)
value = valuation.weight * per_unit
elif line.split_method == 'by_volume' and total_volume:
per_unit = (line.price_unit / total_volume)
value = valuation.volume * per_unit
elif line.split_method == 'equal':
value = (line.price_unit / total_line)
elif line.split_method == 'by_current_cost_price' and total_cost:
per_unit = (line.price_unit / total_cost)
value = valuation.former_cost * per_unit
else:
value = (line.price_unit / total_line)
if valuation.id not in towrite_dict:
towrite_dict[valuation.id] = value
else:
towrite_dict[valuation.id] += value
if towrite_dict:
for key, value in towrite_dict.items():
line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context)
return True
class stock_landed_cost_lines(osv.osv):
_name = 'stock.landed.cost.lines'
_description = 'Stock Landed Cost Lines'
def onchange_product_id(self, cr, uid, ids, product_id=False, context=None):
result = {}
if not product_id:
return {'value': {'quantity': 0.0, 'price_unit': 0.0}}
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
result['name'] = product.name
result['split_method'] = product.split_method
result['price_unit'] = product.standard_price
result['account_id'] = product.property_account_expense and product.property_account_expense.id or product.categ_id.property_account_expense_categ.id
return {'value': result}
_columns = {
'name': fields.char('Description'),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')),
'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True),
'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]),
}
class stock_valuation_adjustment_lines(osv.osv):
_name = 'stock.valuation.adjustment.lines'
_description = 'Stock Valuation Adjustment Lines'
def _amount_final(self, cr, uid, ids, name, args, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
result[line.id] = {
'former_cost_per_unit': 0.0,
'final_cost': 0.0,
}
result[line.id]['former_cost_per_unit'] = (line.former_cost / line.quantity if line.quantity else 1.0)
result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost)
return result
def _get_name(self, cr, uid, ids, name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.product_id.code or line.product_id.name or ''
if line.cost_line_id:
res[line.id] += ' - ' + line.cost_line_id.name
return res
_columns = {
'name': fields.function(_get_name, type='char', string='Description', store=True),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True),
'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')),
'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')),
'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')),
'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', digits_compute=dp.get_precision('Account'), store=True),
'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')),
'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', digits_compute=dp.get_precision('Account'), store=True),
}
_defaults = {
'quantity': 1.0,
'weight': 1.0,
'volume': 1.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
projectcalico/calico-neutron | neutron/plugins/oneconvergence/plugin.py | 3 | 17259 | # Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of OneConvergence Neutron Plugin."""
from oslo.config import cfg
from oslo import messaging
from oslo.utils import excutils
from oslo.utils import importutils
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.rpc.handlers import metadata_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as q_const
from neutron.common import exceptions as nexception
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_base
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as svc_constants
import neutron.plugins.oneconvergence.lib.config # noqa
import neutron.plugins.oneconvergence.lib.exception as nvsdexception
import neutron.plugins.oneconvergence.lib.nvsd_db as nvsd_db
from neutron.plugins.oneconvergence.lib import nvsdlib as nvsd_lib
LOG = logging.getLogger(__name__)
IPv6 = 6
class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
@staticmethod
def get_port_from_device(device):
port = nvsd_db.get_port_from_device(device)
if port:
port['device'] = device
return port
class NVSDPluginV2AgentNotifierApi(sg_rpc.SecurityGroupAgentRpcApiMixin):
def __init__(self, topic):
self.topic = topic
self.topic_port_update = topics.get_topic_name(topic, topics.PORT,
topics.UPDATE)
target = messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def port_update(self, context, port):
cctxt = self.client.prepare(topic=self.topic_port_update, fanout=True)
cctxt.cast(context, 'port_update', port=port)
class OneConvergencePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
extraroute_db.ExtraRoute_db_mixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
external_net_db.External_net_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
portbindings_base.PortBindingBaseMixin,
SecurityGroupServerRpcMixin):
"""L2 Virtual Network Plugin.
OneConvergencePluginV2 is a Neutron plugin that provides L2 Virtual Network
functionality.
"""
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
_supported_extension_aliases = ['agent',
'binding',
'dhcp_agent_scheduler',
'ext-gw-mode',
'external-net',
'extraroute',
'l3_agent_scheduler',
'quotas',
'router',
'security-group'
]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(OneConvergencePluginV2, self).__init__()
self.oneconvergence_init()
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
portbindings.VIF_DETAILS: {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
portbindings_base.register_port_dict_function()
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver)
def oneconvergence_init(self):
"""Initialize the connections and set the log levels for the plugin."""
self.nvsdlib = nvsd_lib.NVSDApi()
self.nvsdlib.set_connection()
def setup_rpc(self):
# RPC support
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
self.conn = n_rpc.create_connection(new=True)
self.notifier = NVSDPluginV2AgentNotifierApi(topics.AGENT)
self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotifyAPI()
)
self.endpoints = [securitygroups_rpc.SecurityGroupServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
l3_rpc.L3RpcCallback(),
agents_db.AgentExtRpcCallback(),
metadata_rpc.MetadataRpcCallback()]
for svc_topic in self.service_topics.values():
self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
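        # At this point one consumer is registered per service topic (core plugin
        # and L3 plugin), all sharing the endpoint list built above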
def create_network(self, context, network):
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
net = self.nvsdlib.create_network(network['network'])
network['network']['id'] = net['id']
with context.session.begin(subtransactions=True):
try:
neutron_net = super(OneConvergencePluginV2,
self).create_network(context, network)
                # The following call checks whether the network is external and,
                # if it is, adds it to the externalnetworks table of the neutron db
self._process_l3_create(context, neutron_net,
network['network'])
except nvsdexception.NVSDAPIException:
with excutils.save_and_reraise_exception():
self.nvsdlib.delete_network(net)
return neutron_net
def update_network(self, context, net_id, network):
with context.session.begin(subtransactions=True):
neutron_net = super(OneConvergencePluginV2,
self).update_network(context, net_id, network)
self.nvsdlib.update_network(neutron_net, network['network'])
# updates neutron database e.g. externalnetworks table.
self._process_l3_update(context, neutron_net, network['network'])
return neutron_net
def delete_network(self, context, net_id):
with context.session.begin(subtransactions=True):
network = self._get_network(context, net_id)
#get all the subnets under the network to delete them
subnets = self._get_subnets_by_network(context, net_id)
self._process_l3_delete(context, net_id)
super(OneConvergencePluginV2, self).delete_network(context,
net_id)
self.nvsdlib.delete_network(network, subnets)
def create_subnet(self, context, subnet):
if subnet['subnet']['ip_version'] == IPv6:
raise nexception.InvalidInput(
error_message="NVSDPlugin doesn't support IPv6.")
neutron_subnet = super(OneConvergencePluginV2,
self).create_subnet(context, subnet)
try:
self.nvsdlib.create_subnet(neutron_subnet)
except nvsdexception.NVSDAPIException:
with excutils.save_and_reraise_exception():
#Log the message and delete the subnet from the neutron
super(OneConvergencePluginV2,
self).delete_subnet(context, neutron_subnet['id'])
LOG.error(_LE("Failed to create subnet, "
"deleting it from neutron"))
return neutron_subnet
def delete_subnet(self, context, subnet_id):
neutron_subnet = self._get_subnet(context, subnet_id)
with context.session.begin(subtransactions=True):
super(OneConvergencePluginV2, self).delete_subnet(context,
subnet_id)
self.nvsdlib.delete_subnet(neutron_subnet)
def update_subnet(self, context, subnet_id, subnet):
with context.session.begin(subtransactions=True):
neutron_subnet = super(OneConvergencePluginV2,
self).update_subnet(context, subnet_id,
subnet)
self.nvsdlib.update_subnet(neutron_subnet, subnet)
return neutron_subnet
def create_port(self, context, port):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
network = {}
network_id = port['port']['network_id']
with context.session.begin(subtransactions=True):
# Invoke the Neutron API for creating port
neutron_port = super(OneConvergencePluginV2,
self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port['port'],
neutron_port)
self._process_port_create_security_group(context, neutron_port,
sgids)
if port['port']['device_owner'] in ('network:router_gateway',
'network:floatingip'):
# for l3 requests, tenant_id will be None/''
network = self._get_network(context, network_id)
tenant_id = network['tenant_id']
else:
tenant_id = port['port']['tenant_id']
port_id = neutron_port['id']
try:
self.nvsdlib.create_port(tenant_id, neutron_port)
except nvsdexception.NVSDAPIException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deleting newly created "
"neutron port %s"), port_id)
super(OneConvergencePluginV2, self).delete_port(context,
port_id)
self.notify_security_groups_member_updated(context, neutron_port)
return neutron_port
def update_port(self, context, port_id, port):
with context.session.begin(subtransactions=True):
old_port = super(OneConvergencePluginV2, self).get_port(context,
port_id)
neutron_port = super(OneConvergencePluginV2,
self).update_port(context, port_id, port)
if neutron_port['tenant_id'] == '':
network = self._get_network(context,
neutron_port['network_id'])
tenant_id = network['tenant_id']
else:
tenant_id = neutron_port['tenant_id']
self.nvsdlib.update_port(tenant_id, neutron_port, port['port'])
self._process_portbindings_create_and_update(context,
port['port'],
neutron_port)
need_port_update_notify = self.update_security_group_on_port(
context, port_id, port, old_port, neutron_port)
if need_port_update_notify:
self.notifier.port_update(context, neutron_port)
return neutron_port
def delete_port(self, context, port_id, l3_port_check=True):
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
neutron_port = super(OneConvergencePluginV2,
self).get_port(context, port_id)
router_ids = self.disassociate_floatingips(
context, port_id, do_notify=False)
super(OneConvergencePluginV2, self).delete_port(context, port_id)
network = self._get_network(context, neutron_port['network_id'])
neutron_port['tenant_id'] = network['tenant_id']
self.nvsdlib.delete_port(port_id, neutron_port)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
self.notify_security_groups_member_updated(context, neutron_port)
def create_floatingip(self, context, floatingip):
neutron_floatingip = super(OneConvergencePluginV2,
self).create_floatingip(context,
floatingip)
try:
self.nvsdlib.create_floatingip(neutron_floatingip)
except nvsdexception.NVSDAPIException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to create floatingip"))
super(OneConvergencePluginV2,
self).delete_floatingip(context,
neutron_floatingip['id'])
return neutron_floatingip
def update_floatingip(self, context, fip_id, floatingip):
with context.session.begin(subtransactions=True):
neutron_floatingip = super(OneConvergencePluginV2,
self).update_floatingip(context,
fip_id,
floatingip)
self.nvsdlib.update_floatingip(neutron_floatingip, floatingip)
return neutron_floatingip
def delete_floatingip(self, context, floating_ip_id):
with context.session.begin(subtransactions=True):
floating_ip = self._get_floatingip(context, floating_ip_id)
super(OneConvergencePluginV2,
self).delete_floatingip(context, floating_ip_id)
self.nvsdlib.delete_floatingip(floating_ip)
def create_router(self, context, router):
neutron_router = super(OneConvergencePluginV2,
self).create_router(context, router)
try:
self.nvsdlib.create_router(neutron_router)
except nvsdexception.NVSDAPIException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to create router"))
super(OneConvergencePluginV2,
self).delete_router(context, neutron_router['id'])
return neutron_router
def update_router(self, context, router_id, router):
with context.session.begin(subtransactions=True):
neutron_router = super(OneConvergencePluginV2,
self).update_router(context, router_id,
router)
self.nvsdlib.update_router(neutron_router)
return neutron_router
def delete_router(self, context, router_id):
tenant_id = self._get_router(context, router_id)['tenant_id']
with context.session.begin(subtransactions=True):
super(OneConvergencePluginV2, self).delete_router(context,
router_id)
self.nvsdlib.delete_router(tenant_id, router_id)
| apache-2.0 |
OpenUpgrade-dev/OpenUpgrade | addons/portal_project/tests/__init__.py | 170 | 1124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
checks = [
test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
oldstylejoe/pychess-timed | lib/pychess/System/ping.py | 22 | 2950 | from __future__ import print_function
# -*- coding: UTF-8 -*-
from gi.repository import GObject
from pychess.System.Log import log
from pychess.System.SubProcess import SubProcess, searchPath
import re
class Pinger (GObject.GObject):
""" The recieved signal contains the time it took to get response from the
server in millisecconds. -1 means that some error occurred """
__gsignals__ = {
"recieved": (GObject.SignalFlags.RUN_FIRST, None, (float,)),
"error": (GObject.SignalFlags.RUN_FIRST, None, (str,))
}
def __init__ (self, host):
GObject.GObject.__init__(self)
self.host = host
self.subproc = None
self.expression = re.compile("time=([\d\.]+) (m?s)")
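        # Matches lines such as (typical ping output):
        #   64 bytes from <host>: icmp_seq=1 ttl=57 time=23.4 ms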
# We need untranslated error messages in regexp search
# below, so have to use deferred translation here
def _(msg): return msg
error = _("Destination Host Unreachable")
self.errorExprs = (
re.compile("(%s)" % error),
)
del _
self.restartsOnDead = 3
self.deadCount = 0
def start (self):
assert not self.subproc
self.subproc = SubProcess(searchPath("ping"),
["-i10", self.host], env={"LANG":"en"})
self.conid1 = self.subproc.connect("line", self.__handleLines)
self.conid2 = self.subproc.connect("died", self.__handleDead)
def __handleLines (self, subprocess, lines):
for line in lines:
self.__handleLine(line)
def __handleLine (self, line):
match = self.expression.search(line)
if match:
time, unit = match.groups()
time = float(time)
if unit == "s":
time *= 1000
self.emit("recieved", time)
else:
for expr in self.errorExprs:
match = expr.search(line)
if match:
msg = match.groups()[0]
self.emit("error", _(msg))
def __handleDead (self, subprocess):
if self.deadCount < self.restartsOnDead:
log.warning("Pinger died and restarted (%d/%d)" %
(self.deadCount+1, self.restartsOnDead),
extra={"task": self.subproc.defname})
self.stop()
self.start()
self.deadCount += 1
else:
self.emit("error", _("Died"))
self.stop()
def stop (self):
if not self.subproc:
return
exitCode = self.subproc.gentleKill()
self.subproc.disconnect(self.conid1)
self.subproc.disconnect(self.conid2)
self.subproc = None
if __name__ == "__main__":
pinger = Pinger("google.com")
def callback(pinger, time):
print(time)
pinger.connect("recieved", callback)
pinger.start()
import time
time.sleep(5)
pinger.stop()
time.sleep(3)
| gpl-3.0 |
dafrito/trac-mirror | trac/mimeview/rst.py | 1 | 11299 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Oliver Rutherfurd
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin
# Oliver Rutherfurd (initial implementation)
# Nuutti Kotivuori (role support)
#
# Trac support for reStructured Text, including a custom 'trac' directive
#
# 'trac' directive code by Oliver Rutherfurd, overhauled by cboos.
#
# Inserts `reference` nodes for TracLinks into the document tree.
__docformat__ = 'reStructuredText'
from distutils.version import StrictVersion
try:
from docutils import nodes
from docutils.core import publish_parts
from docutils.parsers import rst
from docutils.readers import standalone
from docutils import __version__
has_docutils = True
except ImportError:
has_docutils = False
from genshi.core import escape
from trac.core import *
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, content_to_unicode
from trac.util.html import Element, Fragment, Markup, find_element
from trac.util.translation import _
from trac.wiki.api import WikiSystem
from trac.wiki.formatter import WikiProcessor, Formatter, extract_link
if has_docutils and StrictVersion(__version__) < StrictVersion('0.6'):
# Monkey-patch "raw" role handler in docutils to add a missing check
# See docutils bug #2845002 on SourceForge
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
if not inliner.document.settings.raw_enabled:
msg = inliner.reporter.warning('raw (and derived) roles disabled')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
return _raw_role(role, rawtext, text, lineno, inliner, options,
content)
from docutils.parsers.rst import roles
raw_role.options = roles.raw_role.options
_raw_role = roles.raw_role
roles.raw_role = raw_role
roles.register_canonical_role('raw', raw_role)
if has_docutils:
# Register "trac" role handler and directive
def trac_get_reference(env, context, rawtext, target, text):
fulltext = target + ' ' + text if text else target
link = extract_link(env, context, fulltext)
uri = None
missing = False
if isinstance(link, (Element, Fragment)):
linktext = Markup(link).striptags()
# the following is a bit hackish, but it takes into account:
# - an eventual trailing '?' for missing wiki pages
# - space eventually introduced due to split_page_names option
if linktext.rstrip('?').replace(' ', '') != target:
text = linktext
elt = find_element(link, 'href', 'missing')
if elt is not None:
uri = elt.attrib.get('href', '')
missing = 'missing' in elt.attrib.get('class', '').split()
else:
uri = context.href.wiki(target)
missing = not WikiSystem(env).has_page(target)
if uri or missing:
reference = nodes.reference(rawtext, text or target)
reference['refuri'] = uri
if missing:
reference['classes'].append('missing')
return reference
def trac_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Inserts a `reference` node into the document for a given
`TracLink`_, based on the content of the arguments.
Usage::
.. trac:: target [text]
``target`` may be any `TracLink`_, provided it doesn't
embed a space character (e.g. wiki:"..." notation won't work).
``[text]`` is optional. If not given, ``target`` is
used as the reference text.
.. _TracLink: http://trac.edgewall.org/wiki/TracLinks
"""
if hasattr(state.inliner, 'trac'):
env, context = state.inliner.trac
link = arguments[0]
if len(arguments) == 2:
text = arguments[1]
else:
text = None
reference = trac_get_reference(env, context, block_text, link, text)
if reference:
if isinstance(state, rst.states.SubstitutionDef):
return [reference]
p = nodes.paragraph()
p += reference
return [p]
# didn't find a match (invalid TracLink)
msg = _("%(link)s is not a valid TracLink", link=arguments[0])
            # this is a user-facing message, hence localized
else:
msg = "No trac context active while rendering"
# this is more an internal error, not translated.
# report a warning
warning = state_machine.reporter.warning(
msg, nodes.literal_block(block_text, block_text), line=lineno)
return [warning]
def trac_role(name, rawtext, text, lineno, inliner, options={},
content=[]):
if hasattr(inliner, 'trac'):
env, context = inliner.trac
args = text.split(" ", 1)
link = args[0]
if len(args) == 2:
text = args[1]
else:
text = None
reference = trac_get_reference(env, context, rawtext, link, text)
if reference:
return [reference], []
msg = _("%(link)s is not a valid TracLink", link=rawtext)
else:
msg = "No trac context active while rendering"
return nodes.warning(None, nodes.literal_block(text, msg)), []
# 1 required arg, 1 optional arg, spaces allowed in last arg
trac_directive.arguments = (1, 1, 1)
trac_directive.options = None
trac_directive.content = None
rst.directives.register_directive('trac', trac_directive)
rst.roles.register_canonical_role('trac', trac_role)
# Register "code-block" role handler and directive
# (code derived from the leo plugin rst2)
def code_formatter(env, context, language, text):
processor = WikiProcessor(Formatter(env, context), language)
html = processor.process(text)
raw = nodes.raw('', html, format='html')
return raw
def code_block_role(name, rawtext, text, lineno, inliner, options={},
content=[]):
if not hasattr(inliner, 'trac'):
return [], []
env, context = inliner.trac
language = options.get('language')
if not language:
args = text.split(':', 1)
language = args[0]
if len(args) == 2:
text = args[1]
else:
text = ''
return [code_formatter(env, context, language, text)], []
def code_block_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""
Create a code-block directive for docutils.
Usage: .. code-block:: language
If the language can be syntax highlighted it will be.
"""
if not hasattr(state.inliner, 'trac'):
return []
env, context = state.inliner.trac
language = arguments[0]
text = '\n'.join(content)
return [code_formatter(env, context, language, text)]
# These are documented
# at http://docutils.sourceforge.net/spec/howto/rst-directives.html.
code_block_directive.arguments = (
1, # Number of required arguments.
0, # Number of optional arguments.
0) # True if final argument may contain whitespace.
# A mapping from option name to conversion function.
code_block_role.options = code_block_directive.options = {
'language' :
rst.directives.unchanged # Return the text argument, unchanged
}
code_block_directive.content = 1 # True if content is allowed.
# Register the directive with docutils.
rst.directives.register_directive('code-block', code_block_directive)
rst.roles.register_local_role('code-block', code_block_role)
class ReStructuredTextRenderer(Component):
"""HTML renderer for plain text in reStructuredText format."""
implements(ISystemInfoProvider, IHTMLPreviewRenderer)
can_render = False
def __init__(self):
if has_docutils:
if StrictVersion(__version__) < StrictVersion('0.3.9'):
self.log.warning('Docutils version >= %s required, '
'%s found' % ('0.3.9', __version__))
else:
self.can_render = True
# ISystemInfoProvider methods
def get_system_info(self):
if has_docutils:
yield 'Docutils', __version__
# IHTMLPreviewRenderer methods
def get_quality_ratio(self, mimetype):
if self.can_render and mimetype in ('text/x-rst',
'text/prs.fallenstein.rst'):
return 8
return 0
def render(self, context, mimetype, content, filename=None, rev=None):
# Minimize visual impact of errors
from docutils.writers import html4css1
class TracHTMLTranslator(html4css1.HTMLTranslator):
"""Specialized translator with unobtrusive error reporting"""
def visit_system_message(self, node):
paragraph = node.children.pop(0)
message = escape(paragraph.astext()) if paragraph else ''
backrefs = node['backrefs']
if backrefs:
span = ('<span class="system-message">%s</span>' %
(''.join('<a href="#%s" title="%s">?</a>' %
(backref, message)
for backref in backrefs)))
else:
span = ('<span class="system-message" title="%s">?</span>' %
message)
self.body.append(span)
def depart_system_message(self, node):
pass
writer = html4css1.Writer()
writer.translator_class = TracHTMLTranslator
inliner = rst.states.Inliner()
inliner.trac = (self.env, context)
parser = rst.Parser(inliner=inliner)
content = content_to_unicode(self.env, content, mimetype)
# The default Reader is explicitly passed as a workaround for #11248
parts = publish_parts(content, writer=writer, parser=parser,
reader=standalone.Reader(parser),
settings_overrides={'halt_level': 6,
'file_insertion_enabled': 0,
'raw_enabled': 0,
'warning_stream': False})
return parts['html_body']
| bsd-3-clause |
balloob/home-assistant | script/hassfest/dependencies.py | 15 | 8331 | """Validate dependencies."""
import ast
from pathlib import Path
from typing import Dict, Set
from homeassistant.requirements import DISCOVERY_INTEGRATIONS
from .model import Integration
class ImportCollector(ast.NodeVisitor):
"""Collect all integrations referenced."""
def __init__(self, integration: Integration):
"""Initialize the import collector."""
self.integration = integration
self.referenced: Dict[Path, Set[str]] = {}
# Current file or dir we're inspecting
self._cur_fil_dir = None
def collect(self) -> None:
"""Collect imports from a source file."""
for fil in self.integration.path.glob("**/*.py"):
if not fil.is_file():
continue
self._cur_fil_dir = fil.relative_to(self.integration.path)
self.referenced[self._cur_fil_dir] = set()
self.visit(ast.parse(fil.read_text()))
self._cur_fil_dir = None
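        # After collect() the mapping is e.g. (illustrative)
        # {Path('light.py'): {'hue'}} for a file importing homeassistant.components.hue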
def _add_reference(self, reference_domain: str):
"""Add a reference."""
self.referenced[self._cur_fil_dir].add(reference_domain)
def visit_ImportFrom(self, node):
"""Visit ImportFrom node."""
if node.module is None:
return
if node.module.startswith("homeassistant.components."):
# from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
# from homeassistant.components.logbook import bla
self._add_reference(node.module.split(".")[2])
elif node.module == "homeassistant.components":
# from homeassistant.components import sun
for name_node in node.names:
self._add_reference(name_node.name)
def visit_Import(self, node):
"""Visit Import node."""
# import homeassistant.components.hue as hue
for name_node in node.names:
if name_node.name.startswith("homeassistant.components."):
self._add_reference(name_node.name.split(".")[2])
def visit_Attribute(self, node):
"""Visit Attribute node."""
# hass.components.hue.async_create()
# Name(id=hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
# self.hass.components.hue.async_create()
# Name(id=self)
# .Attribute(attr=hass) or .Attribute(attr=_hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
if (
isinstance(node.value, ast.Attribute)
and node.value.attr == "components"
and (
(
isinstance(node.value.value, ast.Name)
and node.value.value.id == "hass"
)
or (
isinstance(node.value.value, ast.Attribute)
and node.value.value.attr in ("hass", "_hass")
)
)
):
self._add_reference(node.attr)
else:
# Have it visit other kids
self.generic_visit(node)
ALLOWED_USED_COMPONENTS = {
# Internal integrations
"alert",
"automation",
"conversation",
"device_automation",
"frontend",
"group",
"hassio",
"homeassistant",
"input_boolean",
"input_datetime",
"input_number",
"input_select",
"input_text",
"onboarding",
"persistent_notification",
"person",
"script",
"shopping_list",
"sun",
"system_health",
"system_log",
"timer",
"webhook",
"websocket_api",
"zone",
# Entity integrations with platforms
"alarm_control_panel",
"binary_sensor",
"climate",
"cover",
"device_tracker",
"fan",
"humidifier",
"image_processing",
"light",
"lock",
"media_player",
"scene",
"sensor",
"switch",
"vacuum",
"water_heater",
# Other
"mjpeg", # base class, has no reqs or component to load.
"stream", # Stream cannot install on all systems, can be imported without reqs.
}
IGNORE_VIOLATIONS = {
# Has same requirement, gets defaults.
("sql", "recorder"),
# Sharing a base class
("openalpr_cloud", "openalpr_local"),
("lutron_caseta", "lutron"),
("ffmpeg_noise", "ffmpeg_motion"),
# Demo
("demo", "manual"),
("demo", "openalpr_local"),
# This should become a helper method that integrations can submit data to
("websocket_api", "lovelace"),
("websocket_api", "shopping_list"),
"logbook",
}
def calc_allowed_references(integration: Integration) -> Set[str]:
"""Return a set of allowed references."""
allowed_references = (
ALLOWED_USED_COMPONENTS
| set(integration.manifest.get("dependencies", []))
| set(integration.manifest.get("after_dependencies", []))
)
# Discovery requirements are ok if referenced in manifest
for check_domain, to_check in DISCOVERY_INTEGRATIONS.items():
if any(check in integration.manifest for check in to_check):
allowed_references.add(check_domain)
return allowed_references
def find_non_referenced_integrations(
integrations: Dict[str, Integration],
integration: Integration,
references: Dict[Path, Set[str]],
):
"""Find intergrations that are not allowed to be referenced."""
allowed_references = calc_allowed_references(integration)
referenced = set()
for path, refs in references.items():
if len(path.parts) == 1:
# climate.py is stored as climate
cur_fil_dir = path.stem
else:
# climate/__init__.py is stored as climate
cur_fil_dir = path.parts[0]
is_platform_other_integration = cur_fil_dir in integrations
for ref in refs:
# We are always allowed to import from ourselves
if ref == integration.domain:
continue
# These references are approved based on the manifest
if ref in allowed_references:
continue
# Some violations are whitelisted
if (integration.domain, ref) in IGNORE_VIOLATIONS:
continue
# If it's a platform for another integration, the other integration is ok
if is_platform_other_integration and cur_fil_dir == ref:
continue
# These have a platform specified in this integration
if not is_platform_other_integration and (
(integration.path / f"{ref}.py").is_file()
# Platform dir
or (integration.path / ref).is_dir()
):
continue
referenced.add(ref)
return referenced
def validate_dependencies(
integrations: Dict[str, Integration], integration: Integration
):
"""Validate all dependencies."""
# Some integrations are allowed to have violations.
if integration.domain in IGNORE_VIOLATIONS:
return
# Find usage of hass.components
collector = ImportCollector(integration)
collector.collect()
for domain in sorted(
find_non_referenced_integrations(
integrations, integration, collector.referenced
)
):
integration.add_error(
"dependencies",
f"Using component {domain} but it's not in 'dependencies' "
"or 'after_dependencies'",
)
def validate(integrations: Dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check for non-existing dependencies
for integration in integrations.values():
if not integration.manifest:
continue
validate_dependencies(integrations, integration)
if config.specific_integrations:
continue
# check that all referenced dependencies exist
after_deps = integration.manifest.get("after_dependencies", [])
for dep in integration.manifest.get("dependencies", []):
if dep in after_deps:
integration.add_error(
"dependencies",
f"Dependency {dep} is both in dependencies and after_dependencies",
)
if dep not in integrations:
integration.add_error(
"dependencies", f"Dependency {dep} does not exist"
)
| apache-2.0 |
saurabh6790/medsynaptic-app | startup/event_handlers.py | 15 | 2631 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt"
from __future__ import unicode_literals
import webnotes
import home
def on_login_post_session(login_manager):
"""
called after login
update login_from and delete parallel sessions
"""
# Clear previous sessions i.e. logout previous log-in attempts
allow_multiple_sessions = ['demo@erpnext.com', 'Administrator', 'Guest']
if webnotes.session['user'] not in allow_multiple_sessions:
from webnotes.sessions import clear_sessions
clear_sessions(webnotes.session.user, keep_current=True)
# check if account is expired
check_if_expired()
if webnotes.session['user'] not in ('Guest', 'demo@erpnext.com'):
# create feed
from webnotes.utils import nowtime
from webnotes.profile import get_user_fullname
webnotes.conn.begin()
home.make_feed('Login', 'Profile', login_manager.user, login_manager.user,
'%s logged in at %s' % (get_user_fullname(login_manager.user), nowtime()),
login_manager.user=='Administrator' and '#8CA2B3' or '#1B750D')
webnotes.conn.commit()
if webnotes.conn.get_value("Profile", webnotes.session.user, "user_type") == "Website User":
from selling.utils.cart import set_cart_count
set_cart_count()
def on_logout(login_manager):
webnotes._response.set_cookie("cart_count", "")
def check_if_expired():
"""check if account is expired. If expired, do not allow login"""
from webnotes import conf
# check if expires_on is specified
if not 'expires_on' in conf: return
# check if expired
from datetime import datetime, date
expires_on = datetime.strptime(conf.expires_on, '%Y-%m-%d').date()
if date.today() <= expires_on: return
# if expired, stop user from logging in
from webnotes.utils import formatdate
msg = """Oops! Your subscription expired on <b>%s</b>.<br>""" % formatdate(conf.expires_on)
if 'System Manager' in webnotes.user.get_roles():
msg += """Just drop in a mail at <b>support@erpnext.com</b> and
we will guide you to get your account re-activated."""
else:
msg += """Just ask your System Manager to drop in a mail at <b>support@erpnext.com</b> and
we will guide him to get your account re-activated."""
webnotes.msgprint(msg)
webnotes.response['message'] = 'Account Expired'
raise webnotes.AuthenticationError
def on_build():
from home.page.latest_updates import latest_updates
latest_updates.make()
def comment_added(doc):
"""add comment to feed"""
home.make_feed('Comment', doc.comment_doctype, doc.comment_docname, doc.comment_by,
'<i>"' + doc.comment + '"</i>', '#6B24B3')
| agpl-3.0 |
deepakselvaraj/federated-horizon | openstack_dashboard/dashboards/project/containers/tables.py | 3 | 8834 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse # noqa
from django.template.defaultfilters import filesizeformat # noqa
from django.utils import http
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import swift
LOG = logging.getLogger(__name__)
def wrap_delimiter(name):
if name and not name.endswith(swift.FOLDER_DELIMITER):
return name + swift.FOLDER_DELIMITER
return name
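# Hedged usage note (assumes swift.FOLDER_DELIMITER is "/", as exposed by the
# swift API wrapper imported above):
#   wrap_delimiter("photos")  -> "photos/"
#   wrap_delimiter("photos/") -> "photos/"
#   wrap_delimiter("")        -> ""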
class ViewContainer(tables.LinkAction):
name = "view"
verbose_name = _("View Details")
url = "horizon:project:containers:container_detail"
classes = ("ajax-modal", "btn-view")
class DeleteContainer(tables.DeleteAction):
data_type_singular = _("Container")
data_type_plural = _("Containers")
success_url = "horizon:project:containers:index"
def delete(self, request, obj_id):
api.swift.swift_delete_container(request, obj_id)
def get_success_url(self, request=None):
"""
Returns the URL to redirect to after a successful action.
"""
current_container = self.table.kwargs.get("container_name", None)
# If the current_container is deleted, then redirect to the default
# completion url
if current_container in self.success_ids:
return self.success_url
return request.get_full_path()
class CreateContainer(tables.LinkAction):
name = "create"
verbose_name = _("Create Container")
url = "horizon:project:containers:create"
classes = ("ajax-modal", "btn-create")
class ListObjects(tables.LinkAction):
name = "list_objects"
verbose_name = _("View Container")
url = "horizon:project:containers:index"
classes = ("btn-list",)
def get_link_url(self, datum=None):
container_name = http.urlquote(datum.name)
args = (wrap_delimiter(container_name),)
return reverse(self.url, args=args)
class UploadObject(tables.LinkAction):
name = "upload"
verbose_name = _("Upload Object")
url = "horizon:project:containers:object_upload"
classes = ("ajax-modal", "btn-upload")
def get_link_url(self, datum=None):
# Usable for both the container and object tables
if getattr(datum, 'container', datum):
# This is a container
container_name = http.urlquote(datum.name)
else:
# This is a table action, and we already have the container name
container_name = self.table.kwargs['container_name']
subfolders = self.table.kwargs.get('subfolder_path', '')
args = (http.urlquote(bit) for bit in
(container_name, subfolders) if bit)
return reverse(self.url, args=args)
def allowed(self, request, datum=None):
if self.table.kwargs.get('container_name', None):
return True
return False
def update(self, request, obj):
# This will only be called for the row, so we can remove the button
# styles meant for the table action version.
self.attrs = {'class': 'ajax-modal'}
def get_size_used(container):
return filesizeformat(container.bytes)
def get_container_link(container):
return reverse("horizon:project:containers:index",
args=(http.urlquote(wrap_delimiter(container.name)),))
class ContainersTable(tables.DataTable):
name = tables.Column("name",
link=get_container_link,
verbose_name=_("Container Name"))
def get_object_id(self, container):
return container.name
class Meta:
name = "containers"
verbose_name = _("Containers")
table_actions = (CreateContainer,)
row_actions = (ViewContainer, DeleteContainer,)
browser_table = "navigation"
footer = False
class ViewObject(tables.LinkAction):
name = "view"
verbose_name = _("View Details")
url = "horizon:project:containers:object_detail"
classes = ("ajax-modal", "btn-view")
def get_link_url(self, obj):
container_name = self.table.kwargs['container_name']
return reverse(self.url, args=(http.urlquote(container_name),
http.urlquote(obj.name)))
class DeleteObject(tables.DeleteAction):
name = "delete_object"
data_type_singular = _("Object")
data_type_plural = _("Objects")
allowed_data_types = ("objects",)
def delete(self, request, obj_id):
obj = self.table.get_object_by_id(obj_id)
container_name = obj.container_name
api.swift.swift_delete_object(request, container_name, obj_id)
class DeleteMultipleObjects(DeleteObject):
name = "delete_multiple_objects"
data_type_singular = _("Object")
data_type_plural = _("Objects")
allowed_data_types = ("objects",)
class CopyObject(tables.LinkAction):
name = "copy"
verbose_name = _("Copy")
url = "horizon:project:containers:object_copy"
classes = ("ajax-modal", "btn-copy")
allowed_data_types = ("objects",)
def get_link_url(self, obj):
container_name = self.table.kwargs['container_name']
return reverse(self.url, args=(http.urlquote(container_name),
http.urlquote(obj.name)))
class DownloadObject(tables.LinkAction):
name = "download"
verbose_name = _("Download")
url = "horizon:project:containers:object_download"
classes = ("btn-download",)
allowed_data_types = ("objects",)
def get_link_url(self, obj):
container_name = self.table.kwargs['container_name']
return reverse(self.url, args=(http.urlquote(container_name),
http.urlquote(obj.name)))
class ObjectFilterAction(tables.FilterAction):
def _filtered_data(self, table, filter_string):
request = table.request
container = self.table.kwargs['container_name']
subfolder = self.table.kwargs['subfolder_path']
prefix = wrap_delimiter(subfolder) if subfolder else ''
self.filtered_data = api.swift.swift_filter_objects(request,
filter_string,
container,
prefix=prefix)
return self.filtered_data
def filter_subfolders_data(self, table, objects, filter_string):
data = self._filtered_data(table, filter_string)
return [datum for datum in data if
datum.content_type == "application/pseudo-folder"]
def filter_objects_data(self, table, objects, filter_string):
data = self._filtered_data(table, filter_string)
return [datum for datum in data if
datum.content_type != "application/pseudo-folder"]
def allowed(self, request, datum=None):
if self.table.kwargs.get('container_name', None):
return True
return False
def sanitize_name(name):
return name.split(swift.FOLDER_DELIMITER)[-1]
def get_size(obj):
if obj.bytes:
return filesizeformat(obj.bytes)
def get_link_subfolder(subfolder):
container_name = subfolder.container_name
return reverse("horizon:project:containers:index",
args=(http.urlquote(wrap_delimiter(container_name)),
http.urlquote(wrap_delimiter(subfolder.name))))
class ObjectsTable(tables.DataTable):
name = tables.Column("name",
link=get_link_subfolder,
allowed_data_types=("subfolders",),
verbose_name=_("Object Name"),
filters=(sanitize_name,))
size = tables.Column(get_size, verbose_name=_('Size'))
class Meta:
name = "objects"
verbose_name = _("Objects")
table_actions = (ObjectFilterAction, UploadObject,
DeleteMultipleObjects)
row_actions = (DownloadObject, CopyObject, ViewObject, DeleteObject)
data_types = ("subfolders", "objects")
browser_table = "content"
footer = False
| apache-2.0 |
arne-cl/alt-mulig | elasticsearch/generate_json_documents.py | 1 | 21510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# original author: Marko Drotschmann
# changes: Arne Neumann
import json
import hashlib
'''
# an example document for a 'celebrity'
{
# Alter
"age": ,
# ausgedachtes Rating von 1.0 bis 5.0
"rating": 0.0,
# Geburtstag, Format yyyy-mm-dd
"birthday": "",
# Vorname
"firstname": "",
# Nachname
"lastname": "",
# voller Name, inklusive Zwischennamen und eventueller Adels- oder
Doktortitel
"fullname": "",
# die Kategorien, die bei der Wikipedia unten dem Prominenten
zugeordnet sind (z.B. Mann, Frau, Komiker, Mitglied des Bundestages, etc.)
"categories": [
],
# eine Stadt, die mit dem Prominenten verbunden ist, z.B.
Geburtsstadt, derzeitiger Wohnort, etc.
"city": "",
# Koordinaten
"coordinates": {
"lat": ,
"lon":
},
# der erste Absatz mit einführenden Worten zu dem Prominenten
"teaser": "",
# entweder der gesamte Artikel oder nur der Lebenslaufteil aus der
#Wikipedia kopiert
"vita": ""
}
'''
steve_martin_data = {
# Alter
"age": 69,
# ausgedachtes Rating von 1.0 bis 5.0
"rating": 4.2,
# Geburtstag, Format yyyy-mm-dd
"birthday": "1945-08-14",
# Vorname
"firstname": "Steve",
# Nachname
"lastname": "Martin",
# voller Name, inklusive Zwischennamen und eventueller Adels- oder
#Doktortitel
"fullname": "Stephen Glenn Martin",
# die Kategorien, die bei der Wikipedia unten dem Prominenten
#zugeordnet sind (z.B. Mann, Frau, Komiker, Mitglied des Bundestages, etc.)
"categories": [
"Schauspieler",
"Filmproduzent",
"Komiker",
"Autor",
"Literatur (20. Jahrhundert)",
"Literatur (Englisch)",
"US-amerikanischer Musiker",
"Emmy-Preisträger",
"Grammy-Preisträger",
"Oscarpreisträger",
"Mitglied der American Academy of Arts and Sciences",
"US-Amerikaner",
"Geboren 1945",
"Mann",
],
# eine Stadt, die mit dem Prominenten verbunden ist, z.B.
#Geburtsstadt, derzeitiger Wohnort, etc.
"city": "Waco",
# Koordinaten
"coordinates": {
"lat": 31.551388888889,
"lon": 97.155833333333
},
# der erste Absatz mit einführenden Worten zu dem Prominenten
"teaser":
"""Steve Martin (eigentlich: Stephen Glenn Martin; * 14. August 1945 in Waco,
Texas) ist ein US-amerikanischer Komiker, Schriftsteller, Musiker, Produzent
und Schauspieler. 2013 wurde ihm der Ehrenoscar verliehen.""",
# entweder der gesamte Artikel oder nur der Lebenslaufteil aus der
#Wikipedia kopiert
"vita":
"""
Steve Martin (eigentlich: Stephen Glenn Martin; * 14. August 1945 in Waco,
Texas) ist ein US-amerikanischer Komiker, Schriftsteller, Musiker, Produzent
und Schauspieler. 2013 wurde ihm der Ehrenoscar verliehen.
Leben
Nach dem Abschluss der Garden Grove High School im Jahr 1963 studierte Martin
an der California State University einige Semester Philosophie und
Theaterwissenschaften. Nebenbei arbeitete er im Magic Shop von Disneyland, wo
er seine Fähigkeiten im Jonglieren, Zaubern, Banjospielen und Ballontiereformen
entwickelte. Ende der 1960er schrieb er sein erstes Bühnenprogramm, mit dem er
durch zahlreiche kleinere Clubs in Los Angeles tourte.
Martin machte sich in der Branche schnell einen Namen. Bereits 1969 gewann er
als Autor der Smothers Brothers Comedy Hour einen Emmy. 1971 war er festes
Ensemblemitglied der Sonny & Cher-Show. Mit zahlreichen Auftritten in der
Tonight Show mit Johnny Carson wurde er einem breiten Publikum bekannt. Dass er
in dem Bruce-Lee-Film Todesgrüße aus Shanghai sein Leinwanddebüt gegeben haben
soll, ist eine verbreitete Legende. Tatsächlich sieht ihm ein Polizist in
diesem Film nur sehr ähnlich.
1976 moderierte er erstmals die legendäre Comedy-Sendung Saturday Night Live,
für die er in den folgenden zehn Jahren u.a. mit Dan Aykroyd, John Belushi,
Chevy Chase, Eddie Murphy, Bill Murray und Martin Short vor der Kamera stand.
In dem Film Reichtum ist keine Schande, zu dem er auch das Drehbuch schrieb,
spielte Martin 1979 seine erste Hauptrolle. Für den Kurzfilm The Absent-Minded
Waiter erhielt er im selben Jahr eine Oscar-Nominierung. In den 1980er Jahren
war Martin u.a. in der Film-noir-Parodie Tote tragen keine Karos, den jeweils
Golden-Globe-nominierten Komödien Solo für zwei und Roxanne sowie dem
Oscar-nominierten Horror-Musical Der kleine Horrorladen zu sehen. Mit diesen
Filmen wurde er nun auch in Europa bekannt.
1991 wirkte er in L.A. Story und Vater der Braut, einem Remake des
Spencer-Tracy-Klassikers, mit. Das Drama Grand Canyon – Im Herzen der Stadt, in
dem er neben Kevin Kline und Danny Glover die Hauptrolle spielte, gewann auf
der Berlinale den Goldenen Bären. Viel Beachtung fand seine Darstellung in dem
Thriller Die unsichtbare Falle, in dem er in einer seiner wenigen ernsthaften
Rollen zu sehen ist. Für Housesitter und Schlaflos in New York stand Martin
1992 und 1999 gemeinsam mit Goldie Hawn vor der Kamera.
2001 und 2003 moderierte er die Oscarverleihung und wurde dafür mehrfach für
den Emmy nominiert. Auch als Autor ist Martin nach wie vor aktiv. Das
Theaterstück Picasso at the Lapin Agile wurde 1993 in Chicago uraufgeführt und
wird derzeit verfilmt. Seine Bücher, die Kurzgeschichtensammlung Pure Drivel
und der Roman Shopgirl (2005 verfilmt), waren Ende der 90er ebenfalls sehr
erfolgreich. 2003 spielte er die Rolle als Vater von zwölf Kindern in „Im
Dutzend billiger“. 2005 übernahm Steve Martin in einer Neuverfilmung von Der
rosarote Panther die Rolle des Inspektor Clouseau und spielte erneut den Vater
in „Im Dutzend billiger 2“.
2009 war er mit Der rosarote Panther 2 erneut in dieser Rolle zu sehen, zudem
an der Seite von Meryl Streep und Alec Baldwin in der Liebeskomödie It's
Complicated von Nancy Meyers. Im selben Jahr erhielt Martin für seinen
Gastauftritt als Gavin Volure in der gleichnamigen Episode der Serie 30 Rock
seine fünfte Emmy-Nominierung.
Von 1986 bis 1994 war er mit der Schauspielerin Victoria Tennant verheiratet.
Am 28. Juli 2007 heiratete er die Journalistin Anne Stringfield, mit der er
seit Dezember 2012 ein Kind hat.
Martin wurde 2007 mit dem Kennedy-Preis ausgezeichnet.
2010 erhielt er den Grammy in der Kategorie Bestes Bluegrass-Album für The Crow
/ New Songs for the Five-String Banjo. Im selben Jahr wurde er in die American
Academy of Arts and Sciences gewählt.
Am 7. März 2010 führte Martin zusammen mit Alec Baldwin im Kodak Theatre noch
einmal durch die Oscarverleihung. 2014 erhielt er den Ehrenoscar für sein
Lebenswerk.
Filmografie (Auswahl)
• 1970: The Ray Stevens Show
• 1971: The Sonny & Cher Comedy Hour
• 1975: The Smothers Brothers Show
• 1976: Johnny Cash And Friends
• 1976: Saturday Night Live
• 1977: The Absent-Minded Waiter (Kurzfilm)
• 1977: The Muppet Show
• 1978: Sgt. Pepper’s Lonely Hearts Club Band
• 1979: Reichtum ist keine Schande (The Jerk)
• 1981: Tanz in den Wolken (Pennies from Heaven)
• 1982: Tote tragen keine Karos (Dead Men Don’t Wear Plaid)
• 1983: Der Mann mit zwei Gehirnen (The Man with Two Brains)
• 1984: Solo für 2 (All of Me)
• 1984: Ein Single kommt selten allein (The Lonely Guy)
• 1986: Der kleine Horrorladen (Little Shop of Horrors)
• 1986: Drei Amigos! (¡Three Amigos!)
• 1987: Die Tracey Ullman Show
• 1987: Roxanne
• 1987: Ein Ticket für Zwei (Planes, Trains & Automobiles)
• 1988: Zwei hinreißend verdorbene Schurken (Dirty Rotten Scoundrels)
• 1989: Eine Wahnsinnsfamilie (Parenthood)
• 1990: Das Schlitzohr von der Mafia (My Blue Heaven)
• 1991: Grand Canyon – Im Herzen der Stadt
• 1991: Vater der Braut (Father of the Bride)
• 1991: L.A. Story
• 1992: Housesitter – Lügen haben schöne Beine (House Sitter)
• 1992: Der Schein-Heilige (Leap of Faith)
• 1993: … und das Leben geht weiter (And the Band Played On)
• 1994: Der Zufalls-Dad (A Simple Twist of Fate)
• 1994: Lifesavers – Die Lebensretter (Mixed Nuts)
• 1995: Ein Geschenk des Himmels – Vater der Braut 2 (Father of the Bride
Part II)
• 1996: Immer Ärger mit Sergeant Bilko (Sgt. Bilko)
• 1997: Die unsichtbare Falle (The Spanish Prisoner)
• 1998: Der Prinz von Ägypten (The Prince of Egypt) – Stimme
• 1999: Bowfingers große Nummer (Bowfinger)
• 1999: Schlaflos in New York (The Out-of-Towners)
• 2001: Fantasia 2000 – Sprecher
• 2001: Novocaine – Zahn um Zahn (Novocaine)
• 2003: Haus über Kopf (Bringing Down the House)
• 2003: Looney Tunes: Back in Action
• 2003: Im Dutzend billiger (Cheaper by the Dozen)
• 2005: Shopgirl
• 2005: Im Dutzend billiger 2 – Zwei Väter drehen durch (Cheaper by the Dozen
2)
• 2006: Der rosarote Panther (The Pink Panther)
• 2008: Baby Mama
• 2009: Der rosarote Panther 2 (The Pink Panther deux)
• 2009: Wenn Liebe so einfach wäre (It’s Complicated)
• 2011: Ein Jahr vogelfrei! (The Big Year)
Diskografie
• Let’s Get Small (1977)
• A Wild and Crazy Guy, (1978)
• Comedy Is Not Pretty! (1979)
• The Steve Martin Brothers (1981)
• Born Standing Up (2007)
• The Crow: New Songs for the 5-String Banjo (2009), Grammy
• Rare Bird Alert (2011), zusammen mit den Steep Canyon Rangers
• Love Has Come for You (2013), mit Edie Brickell, Grammy (für den Titelsong)
Literarische Veröffentlichungen
• Cruel shoes. G. P. Putnam’s Sons, New York 1979, ISBN 978-0-399-12304-7
(Kurzgeschichten)
• Picasso at the lapin agile and other plays. Grove Press, New York 1997,
ISBN 978-0-8021-3523-0 (Theaterstücke)
• Sehr erfreut, meine Bekanntschaft zu machen. Manhattan 2004, ISBN
3-442-54574-9 (mit Detlev Ullrich)
• Shopgirl. Hyperion, New York 2001, ISBN 978-0-7868-8568-8 (Erzählung)
• Blanker Unsinn. Goldmann, München 2002, ISBN 978-3-442-45152-4
(Kurzgeschichten)
Sonstiges
Die deutsche Synchronstimme von Steve Martin sprechen unter anderem Norbert
Gescher und Eckart Dux.
"""
}
steve_martin = ('comedian', steve_martin_data)
bill_callahan_data = {
# Alter
"age": 49,
# ausgedachtes Rating von 1.0 bis 5.0
"rating": 5.0,
# Geburtstag, Format yyyy-mm-dd
"birthday": "1966-01-01",
# Vorname
"firstname": "Bill",
# Nachname
"lastname": "Callahan",
# voller Name, inklusive Zwischennamen und eventueller Adels- oder
#Doktortitel
"fullname": "Bill Callahan",
# die Kategorien, die bei der Wikipedia unten dem Prominenten
#zugeordnet sind (z.B. Mann, Frau, Komiker, Mitglied des Bundestages, etc.)
"categories": [
"Alternative-Country-Musiker",
"US-amerikanischer Musiker",
"Geboren 1966",
"Mann"
],
# eine Stadt, die mit dem Prominenten verbunden ist, z.B.
#Geburtsstadt, derzeitiger Wohnort, etc.
"city": "Silver Spring",
# Koordinaten
"coordinates": {
"lat": 39.004166666667,
"lon": 77.018888888889
},
# der erste Absatz mit einführenden Worten zu dem Prominenten
"teaser":
"""
Bill Callahan (* 1966 in Silver Spring, Maryland, USA) ist ein
US-amerikanischer Sänger und Songwriter, der mit seinen anfänglich mit
einfachster Produktion auf Vier-Spur-Rekordern aufgenommenen Songs als einer
der Vorreiter des Lo-Fi gilt. Seit 1991 veröffentlicht er beim Label Drag City.
Er trat zunächst unter den Namen "Smog" bzw. "(Smog)", seit 2007 aber unter
seinem bürgerlichen Namen in Erscheinung. Callahan lebt derzeit in Austin,
Texas.
""",
# entweder der gesamte Artikel oder nur der Lebenslaufteil aus der
#Wikipedia kopiert
"vita":
"""
Bill Callahan (* 1966 in Silver Spring, Maryland, USA) ist ein
US-amerikanischer Sänger und Songwriter, der mit seinen anfänglich mit
einfachster Produktion auf Vier-Spur-Rekordern aufgenommenen Songs als einer
der Vorreiter des Lo-Fi gilt. Seit 1991 veröffentlicht er beim Label Drag City.
Er trat zunächst unter den Namen "Smog" bzw. "(Smog)", seit 2007 aber unter
seinem bürgerlichen Namen in Erscheinung. Callahan lebt derzeit in Austin,
Texas.
Karriere
Obwohl Callahan in Maryland geboren wurde, verbrachte seine Familie acht Jahre
in Knaresborough, North Yorkshire, und kehrte bloß für die Jahre 1969-1973 nach
Maryland zurück.
Callahans erste Veröffentlichungen erschienen unter dem Bandnamen "Smog" auf
Audiocassetten. Diese waren von kargen Melodien und dissonanten Arrangements
geprägt. Damit entsprachen sie in etwa Callahans damaligen instrumentalen und
produktionstechnischen Möglichkeiten. Sein Debütalbum "Sewn to the Sky" erregte
durch verstimmte Gitarren und repetitive Strukturen erstes Aufsehen und
erinnerte an Arbeiten von Jandek oder Daniel Johnston. Mit Beginn der Arbeit
bei Drag City erweiterte sich das musikalische Potential seiner Musik, die z.
B. von John McEntire und Jim O'Rourke produziert wurde. Dabei entwickelte
Callahan nicht nur seine lyrischen, oft schwarzhumorigen Fähigkeiten, auch
seine Arrangements wurden zunächst reichhaltiger. Zwischen 2001 und 2003 nannte
er die Band "(Smog)" und kehrte zu einfacheren Produktionen zurück, ohne aber
die textliche Raffinesse, für die er mittlerweile stand, aufzugeben.
2006 entschloss er sich zur Nutzung seines bürgerlichen Namens und ließ das
Pseudonym "Smog" fallen. So erschien 2007 mit "Woke on a Whaleheart" das erste
Studioalbum als Bill Callahan, 2009 folgte Sometimes I Wish We Were an Eagle.
Im Jahr 2011 erschien das Album "Apocalypse" und 2013 "Dreamriver".
Veröffentlichungen
Alben
als Smog bzw. (Smog)
• "Forgotten Foundation", 1992/1996
• "Burning Kingdom", 1994
• "Julius Caesar", 1994
• "Wild Love", 1995
• "Sewn to the Sky", 1995 (Wiederveröffentlichung)
• "The Doctor Came at Dawn", 1996
• "Red Apple Falls", 1997
• "Knock Knock", 1999
• "Dongs of Sevotion", 2000
• "Rain on Lens", 2001
• "Accumulation: None", 2002
• "Supper", 2003
• "A River Ain't Too Much to Love", 2005
als Bill Callahan
• Woke on a Whaleheart, 2007
• Sometimes I Wish We Were an Eagle, 2009
• Apocalypse, 2011
• Dream River, 2013
• Have Fun with God, 2014
Andere Formate
als Smog bzw. (Smog)
• "Macrame Gunplay" (Cassette), 1988
• "Cow" (Cassette), 1989
• "A Table Setting" (Cassette), 1990
• "Tired Tape Machine" (Cassette), 1990
• "Sewn to the Sky" (Cassette), 1990
• "Floating" (EP), 1991
• "A Hit" (Single), 1994
• "Kicking a Couple Around" (EP), 1994
• "Came Blue" (Single), 1997
• "Ex-con" (Single), 1997
• "Cold-Blooded Old Times" (EP), 1999
• "Look Now" (Single), 1999
• "Strayed" (Single), 2000
• "'Neath the Puke Tree" (EP), 2000
• "The Manta Rays of Time" (EP), 2000
• "Rock Bottom Riser", 2006
als Bill Callahan
• "Diamond Dancer" (Single), 2007
• "Rough Travel for a Rare Thing", (Live-Album, nur Vinyl oder Download),
2010
Bücher
Callahan veröffentlichte im Jahr 2004 drei Bücher mit Zeichnungen: Ballerina
Scratchpad, The Death's Head Drawings und Women. Im Juli 2010 veröffentlichte
Drag City auch seinen Briefroman Letters to Emma Bowlcut.
"""
}
bill_callahan = ('musician', bill_callahan_data)
jon_gnarr_data = {
# Alter
"age": 48,
# ausgedachtes Rating von 1.0 bis 5.0
"rating": 1.4,
# Geburtstag, Format yyyy-mm-dd
"birthday": "1967-01-02",
# Vorname
"firstname": "Jón",
# Nachname
"lastname": "Gnarr",
# voller Name, inklusive Zwischennamen und eventueller Adels- oder
#Doktortitel
"fullname": "Jón Gunnar Kristinsson",
# die Kategorien, die bei der Wikipedia unten dem Prominenten
#zugeordnet sind (z.B. Mann, Frau, Komiker, Mitglied des Bundestages, etc.)
"categories": [
"Komiker",
"Autor",
"Schriftsteller (Reykjavík)",
"Bürgermeister (Reykjavík)",
"Mitglied von Björt framtíð",
"Isländer",
"Geboren 1967",
"Mann",
],
# eine Stadt, die mit dem Prominenten verbunden ist, z.B.
#Geburtsstadt, derzeitiger Wohnort, etc.
"city": "Reykjavík",
# Koordinaten
"coordinates": {
"lat": 64.15,
"lon": 21.933333333333
},
# der erste Absatz mit einführenden Worten zu dem Prominenten
"teaser":
"""
Jón Gnarr (* 2. Januar 1967 in Reykjavík als Jón Gunnar Kristinsson) ist ein
isländischer Komiker, Musiker, Schriftsteller und Politiker. Von Juni 2010 bis
zum 17. Juni 2014 war er Bürgermeister Reykjavíks, der Hauptstadt Islands.
""",
# entweder der gesamte Artikel oder nur der Lebenslaufteil aus der
#Wikipedia kopiert
"vita":
"""
Jón Gnarr (* 2. Januar 1967 in Reykjavík als Jón Gunnar Kristinsson) ist ein
isländischer Komiker, Musiker, Schriftsteller und Politiker. Von Juni 2010 bis
zum 17. Juni 2014 war er Bürgermeister Reykjavíks, der Hauptstadt Islands.
Leben
Jón Gnarr − jüngster ehelicher Sohn eines Polizeibeamten (* 1917) und einer
Arbeiterin (* 1922; † 25. Dezember 2010) − verließ im Alter von vierzehn Jahren
ohne Abschluss eine Reykjavíker Schule, die kein herkömmliches Klassensystem
und keine Noten kannte. Seit dieser Zeit nennt er sich Jón Gnarr.^[1] Er
besuchte dann für zwei Jahre ein Internat für schwer erziehbare Jugendliche.
Nach diversen Gelegenheitsjobs arbeitete er zunächst als Pfleger in einem Heim
für geistig und körperlich Behinderte. Mit neunzehn Jahren schrieb er den Roman
Miðnætursólborgin („Die Stadt der Mitternachtssonne“).
Jón ist mit der Masseurin Jóhanna Jóhannsdóttir (Jóga), Tochter eines
Seemannes, verheiratet. Das Ehepaar hat fünf Kinder: das älteste Kind wurde
1985 und das jüngste 2005 geboren.^[2] Im Jahr 2006 schrieb Jón Gnarr eine
fiktive Autobiografie mit dem Titel Indjáninn („Der Indianer“), die er 2012 mit
Sjóræninginn („Der Seeräuber“) fortsetzte.
Seine Karriere als Komiker begann Jón Gnarr Anfang der 1990er Jahre beim
öffentlich-rechtlichen Rundfunk Ríkisútvarpið mit der Radio-Sitcom Hotel
Volkswagen. Später arbeitete er für einen privaten Sender, für den er
morgendliche „Interviews“ führte, und trat in Hörfunk, Fernsehen und in
Spielfilmen auf. Neben der Sketch-Show Fóstbræður (1997–2001) wurde er vor
allem durch seine Rolle als Georg Bjarnfreðarson in den drei Serien Næturvaktin
(2007), Dagvaktin (2008) und Fangavaktin (2009) bekannt. Jón Gnarr war laut
Munzinger-Archiv Bassist der Punkrockband Nefrennsli („laufende Nasen“)^[3],
obwohl er laut Henryk M. Broder „kein Instrument spielen und Musik nicht
leiden“ kann^[4] und wirkte in mehreren Filmen, Sitcoms und Talkshows mit.
Ferner veröffentlicht er Prosa und Lyrik.
Im Jahr 2009 − während der isländischen Finanzkrise − arbeitete Jón Gnarr in
einer Werbeagentur als Creative Director. Nachdem er sich von dieser Agentur
getrennt hatte, konzentrierte er seine Arbeit auf die Politik.^[5] Bei der
Kommunalwahl in Reykjavík am 29. Mai 2010 erzielte Jóns Partei Besti flokkurinn
mit 34,7 % die meisten Stimmen.^[6] Nach dem Wahlsieg übernahm Jón am 15. Juni
2010 von der bisherigen Bürgermeisterin Hanna Birna Kristjánsdóttir das Amt des
Bürgermeisters.^[7] Auf der Liste der Partei kandidierten Musiker,
Schauspieler, Comic-Zeichner und weitere Prominente.^[8] Zum Wahlprogramm
gehörten folgende Punkte:^[9]
1. Offene statt heimliche Korruption.
2. Kostenlose Handtücher für alle Schwimmbäder.
3. Ein Eisbär für Reykjavíks Zoo.
Auf nationaler Ebene ist Jón Gnarr Mitglied im Vorstand der 2012 gegründeten
Partei Björt framtíð,^[10] die in enger Verbindung mit Besti flokkurinn steht.
Im Oktober 2013 gab Jón Gnarr bekannt, dass er keine zweite Amtszeit als
Bürgermeister von Reykjavík anstreben wird.^[11] Seine Amtszeit endete am 17.
Juni 2014. Jóns Nachfolger wurde sein bisheriger Koalitionspartner Dagur B.
Eggertsson von der sozialdemokratischen Allianz (Samfylkingin). Dagur äußerte
anlässlich der Amtsübergabe, „die ganze Gesellschaft“ habe von Jón Gnarr
gelernt.^[12]
Während der Amtszeit von Jón Gnarr wurde die Online-Plattform Betri Reykjavik
(Better Reykjavik) als eine Möglichkeit direkter Demokratie etabliert.^[13]
Jón Gnarr konvertierte vom evangelischen zum römisch-katholischen Glauben,^[4]
bezeichnet sich aber als Atheisten ^[14] und Anarchisten.^[15]
Veröffentlichungen
Isländische Sprache
• Miðnætursólborgin. Smekkleysa, Reykjavík 1989.
• Plebbabókin. Mál og menning, Reykjavík 2002, ISBN 9979-3-2373-6.
• Þankagangur. Skálholtsútgáfan, Reykjavík 2005, ISBN 9979-792-06-X.
• Indjáninn. Skálduð ævisaga. Mál og menning, Reykjavík 2006, ISBN
9979-3-2792-8.
• Sjóræninginn. Skálduð ævisaga. Mál og menning, Reykjavík 2012, ISBN
978-9979-3-3320-3.
Deutsche Sprache
• Hören Sie gut zu und wiederholen Sie!!! Wie ich einmal Bürgermeister wurde
und die Welt veränderte. Mitarbeit: Jóhann Ævar Grímsson. Aus dem
Isländischen von Betty Wahl. Klett-Cotta, Stuttgart 2014, ISBN
978-3-608-50322-7.
Film
• Jón Gnarr - Mein Reykjavik. Dokumentarfilm, Österreich, 2014, 51:20 Min.,
Buch und Regie: Günter Schilhan, Produktion: ORF, 3sat, Reihe: Meine Stadt,
Erstsendung: 15. Dezember 2014 bei 3sat, Inhaltsangabe von 3sat.
"""
}
jon_gnarr = ('politician', jon_gnarr_data)
with open("output.jsonl", "w") as output:
for star_type, star_data in (steve_martin, bill_callahan, jon_gnarr):
# generate a doc_id dependent on the star's fullname
doc_id = hashlib.sha1()
doc_id.update(star_data["fullname"])
# this is the index directive for the bulk API we will be using
output.write('{"index":{"_index":"people","_type":"%s","_id":"%s"}}\n'
% (star_type, doc_id.hexdigest()))
output.write(json.dumps(star_data))
output.write("\n")
| gpl-3.0 |
unreal666/outwiker | plugins/source/source/pygments/lexers/forth.py | 4 | 7179 | # -*- coding: utf-8 -*-
"""
pygments.lexers.forth
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Forth language.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Error, Punctuation, Literal, Token, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
__all__ = ['ForthLexer']
class ForthLexer(RegexLexer):
"""
Lexer for Forth files.
.. versionadded:: 2.2
"""
name = 'Forth'
aliases = ['forth']
filenames = ['*.frt', '*.fs']
mimetypes = ['application/x-forth']
delimiter = r'\s'
delimiter_end = r'(?=[%s])' % delimiter
valid_name_chars = r'[^%s]' % delimiter
valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'\s+', Text),
# All comment types
(r'\\.*?\n', Comment.Single),
(r'\([\s].*?\)', Comment.Single),
# defining words. The next word is a new command name
(r'(:|variable|constant|value|buffer:)(\s+)',
bygroups(Keyword.Namespace, Text), 'worddef'),
# strings are rather simple
(r'([.sc]")(\s+?)', bygroups(String, Text), 'stringdef'),
# keywords from the various wordsets
# *** Wordset BLOCK
(r'(blk|block|buffer|evaluate|flush|load|save-buffers|update|'
# *** Wordset BLOCK-EXT
r'empty-buffers|list|refill|scr|thru|'
# *** Wordset CORE
r'\#s|\*\/mod|\+loop|\/mod|0<|0=|1\+|1-|2!|'
r'2\*|2\/|2@|2drop|2dup|2over|2swap|>body|'
r'>in|>number|>r|\?dup|abort|abort\"|abs|'
r'accept|align|aligned|allot|and|base|begin|'
r'bl|c!|c,|c@|cell\+|cells|char|char\+|'
r'chars|constant|count|cr|create|decimal|'
r'depth|do|does>|drop|dup|else|emit|environment\?|'
r'evaluate|execute|exit|fill|find|fm\/mod|'
r'here|hold|i|if|immediate|invert|j|key|'
r'leave|literal|loop|lshift|m\*|max|min|'
r'mod|move|negate|or|over|postpone|quit|'
r'r>|r@|recurse|repeat|rot|rshift|s\"|s>d|'
r'sign|sm\/rem|source|space|spaces|state|swap|'
r'then|type|u\.|u\<|um\*|um\/mod|unloop|until|'
r'variable|while|word|xor|\[char\]|\[\'\]|'
r'@|!|\#|<\#|\#>|:|;|\+|-|\*|\/|,|<|>|\|1\+|1-|\.|'
# *** Wordset CORE-EXT
r'\.r|0<>|'
r'0>|2>r|2r>|2r@|:noname|\?do|again|c\"|'
r'case|compile,|endcase|endof|erase|false|'
r'hex|marker|nip|of|pad|parse|pick|refill|'
r'restore-input|roll|save-input|source-id|to|'
r'true|tuck|u\.r|u>|unused|value|within|'
r'\[compile\]|'
# *** Wordset CORE-EXT-obsolescent
r'\#tib|convert|expect|query|span|'
r'tib|'
# *** Wordset DOUBLE
r'2constant|2literal|2variable|d\+|d-|'
r'd\.|d\.r|d0<|d0=|d2\*|d2\/|d<|d=|d>s|'
r'dabs|dmax|dmin|dnegate|m\*\/|m\+|'
# *** Wordset DOUBLE-EXT
r'2rot|du<|'
# *** Wordset EXCEPTION
r'catch|throw|'
# *** Wordset EXCEPTION-EXT
r'abort|abort\"|'
# *** Wordset FACILITY
r'at-xy|key\?|page|'
# *** Wordset FACILITY-EXT
r'ekey|ekey>char|ekey\?|emit\?|ms|time&date|'
# *** Wordset FILE
r'BIN|CLOSE-FILE|CREATE-FILE|DELETE-FILE|FILE-POSITION|'
r'FILE-SIZE|INCLUDE-FILE|INCLUDED|OPEN-FILE|R\/O|'
r'R\/W|READ-FILE|READ-LINE|REPOSITION-FILE|RESIZE-FILE|'
r'S\"|SOURCE-ID|W/O|WRITE-FILE|WRITE-LINE|'
# *** Wordset FILE-EXT
r'FILE-STATUS|FLUSH-FILE|REFILL|RENAME-FILE|'
# *** Wordset FLOAT
r'>float|d>f|'
r'f!|f\*|f\+|f-|f\/|f0<|f0=|f<|f>d|f@|'
r'falign|faligned|fconstant|fdepth|fdrop|fdup|'
r'fliteral|float\+|floats|floor|fmax|fmin|'
r'fnegate|fover|frot|fround|fswap|fvariable|'
r'represent|'
# *** Wordset FLOAT-EXT
r'df!|df@|dfalign|dfaligned|dfloat\+|'
r'dfloats|f\*\*|f\.|fabs|facos|facosh|falog|'
r'fasin|fasinh|fatan|fatan2|fatanh|fcos|fcosh|'
r'fe\.|fexp|fexpm1|fln|flnp1|flog|fs\.|fsin|'
r'fsincos|fsinh|fsqrt|ftan|ftanh|f~|precision|'
r'set-precision|sf!|sf@|sfalign|sfaligned|sfloat\+|'
r'sfloats|'
# *** Wordset LOCAL
r'\(local\)|to|'
# *** Wordset LOCAL-EXT
r'locals\||'
# *** Wordset MEMORY
r'allocate|free|resize|'
# *** Wordset SEARCH
r'definitions|find|forth-wordlist|get-current|'
r'get-order|search-wordlist|set-current|set-order|'
r'wordlist|'
# *** Wordset SEARCH-EXT
r'also|forth|only|order|previous|'
# *** Wordset STRING
r'-trailing|\/string|blank|cmove|cmove>|compare|'
r'search|sliteral|'
# *** Wordset TOOLS
r'.s|dump|see|words|'
# *** Wordset TOOLS-EXT
r';code|'
r'ahead|assembler|bye|code|cs-pick|cs-roll|'
r'editor|state|\[else\]|\[if\]|\[then\]|'
# *** Wordset TOOLS-EXT-obsolescent
r'forget|'
# Forth 2012
r'defer|defer@|defer!|action-of|begin-structure|field:|buffer:|'
r'parse-name|buffer:|traverse-wordlist|n>r|nr>|2value|fvalue|'
r'name>interpret|name>compile|name>string|'
r'cfield:|end-structure)'+delimiter, Keyword),
# Numbers
(r'(\$[0-9A-F]+)', Number.Hex),
(r'(\#|%|&|\-|\+)?[0-9]+', Number.Integer),
(r'(\#|%|&|\-|\+)?[0-9.]+', Keyword.Type),
# amforth specific
(r'(@i|!i|@e|!e|pause|noop|turnkey|sleep|'
r'itype|icompare|sp@|sp!|rp@|rp!|up@|up!|'
r'>a|a>|a@|a!|a@+|a@-|>b|b>|b@|b!|b@+|b@-|'
r'find-name|1ms|'
r'sp0|rp0|\(evaluate\)|int-trap|int!)' + delimiter,
Name.Constant),
# a proposal
(r'(do-recognizer|r:fail|recognizer:|get-recognizers|'
r'set-recognizers|r:float|r>comp|r>int|r>post|'
r'r:name|r:word|r:dnum|r:num|recognizer|forth-recognizer|'
r'rec:num|rec:float|rec:word)' + delimiter, Name.Decorator),
# defining words. The next word is a new command name
(r'(Evalue|Rvalue|Uvalue|Edefer|Rdefer|Udefer)(\s+)',
bygroups(Keyword.Namespace, Text), 'worddef'),
(valid_name, Name.Function), # Anything else is executed
],
'worddef': [
(r'\S+', Name.Class, '#pop'),
],
'stringdef': [
(r'[^"]+', String, '#pop'),
],
}
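# Hedged usage sketch: a quick way to exercise the lexer with Pygments'
# standard highlight() helper (the formatter choice here is arbitrary).
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample = ": squared ( n -- n*n ) dup * ;\n25 squared . \\ prints 625\n"
    print(highlight(sample, ForthLexer(), TerminalFormatter()))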
| gpl-3.0 |
mikalstill/nova | nova/tests/unit/compute/test_compute_xen.py | 3 | 3052 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for expectations of behaviour from the Xen driver."""
import mock
from nova.compute import manager
from nova.compute import power_state
from nova import context
from nova import objects
from nova.objects import instance as instance_obj
from nova.tests.unit.compute import eventlet_utils
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(ComputeXenTestCase, self).setUp()
self.flags(compute_driver='xenapi.XenAPIDriver')
self.flags(connection_url='http://localhost',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self, stubs.FakeSessionForVMTests)
self.compute = manager.ComputeManager()
# execute power syncing synchronously for testing:
self.compute._sync_power_pool = eventlet_utils.SyncPool()
def test_sync_power_states_instance_not_found(self):
db_instance = fake_instance.fake_db_instance()
ctxt = context.get_admin_context()
instance_list = instance_obj._make_instance_list(ctxt,
objects.InstanceList(), [db_instance], None)
instance = instance_list[0]
@mock.patch.object(vm_utils, 'lookup')
@mock.patch.object(objects.InstanceList, 'get_by_host')
@mock.patch.object(self.compute.driver, 'get_num_instances')
@mock.patch.object(self.compute, '_sync_instance_power_state')
def do_test(mock_compute_sync_powerstate,
mock_compute_get_num_instances,
mock_instance_list_get_by_host,
mock_vm_utils_lookup):
mock_instance_list_get_by_host.return_value = instance_list
mock_compute_get_num_instances.return_value = 1
mock_vm_utils_lookup.return_value = None
self.compute._sync_power_states(ctxt)
mock_instance_list_get_by_host.assert_called_once_with(
ctxt, self.compute.host, expected_attrs=[], use_slave=True)
mock_compute_get_num_instances.assert_called_once_with()
mock_compute_sync_powerstate.assert_called_once_with(
ctxt, instance, power_state.NOSTATE, use_slave=True)
mock_vm_utils_lookup.assert_called_once_with(
self.compute.driver._session, instance['name'],
False)
do_test()
| apache-2.0 |
hootnot/postcode-api-wrapper | postcodepy/postcodepy.py | 1 | 8659 | """Postcode API module."""
import json
import requests
class EndpointsMixin(object):
"""EndpointsMixin - API endpoints for the API class.
each endpoint of the API has a representative method in EndpointsMixin
Parameters that apply to the API url just need to be passed
as a keyword argument.
"""
def get_postcodedata(self, postcode, nr, addition="", **params):
"""get_postcodedata - fetch information for 'postcode'.
Parameters
----------
postcode : string
The full (dutch) postcode
nr : int
The housenumber
addition : string (optional)
the extension to a housenumber
params : dict (optional)
a list of parameters to send with the request.
returns :
a response dictionary
"""
endpoint = 'rest/addresses/%s/%s' % (postcode, nr)
if addition:
endpoint += '/' + addition
retValue = self._API__request(endpoint, params=params)
# then it should match the houseNumberAdditions
if addition and addition.upper() not in \
[a.upper() for a in retValue['houseNumberAdditions']]:
raise PostcodeError(
"ERRHouseNumberAdditionInvalid",
{"exceptionId": "ERRHouseNumberAdditionInvalid",
"exception": "Invalid housenumber addition: '%s'" %
retValue['houseNumberAddition'],
"validHouseNumberAdditions":
retValue['houseNumberAdditions']})
return retValue
def get_signalcheck(self, sar, **params):
"""get_signalcheck - perform a signal check.
Parameters
----------
sar : dict
signal-api-request specified as a dictionary of parameters.
All of these parameters are optional. For details
check https://api.postcode.nl/documentation/signal-api-example.
returns :
a response dictionary
"""
params = sar
endpoint = 'rest/signal/check'
# The 'sar'-request dictionary should be sent as valid JSON data, so
# we need to convert it to JSON
# when we construct the request in API.request
retValue = self._API__request(endpoint, 'POST',
params=params, convJSON=True)
return retValue
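# Hedged usage note: get_signalcheck() posts the supplied dictionary verbatim
# as the JSON request body, so the accepted fields are whatever the signal-api
# documentation linked in the docstring defines; none are hard-coded here.
#
#     api = API(environment='live', access_key=KEY, access_secret=SECRET)
#     report = api.get_signalcheck({...})  # fill the dict per the API docs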
class API(EndpointsMixin, object):
"""API - postcode API class."""
def __init__(self, environment="practice", access_key=None,
access_secret=None, headers=None):
"""Instantiate API wrapper.
Parameters
----------
environment : str
the environment to use. Currently only 'live'.
access_key : str
the access key provided by postcode.nl . If not provided
an ERRauthAccessUnknownKey exception is raised
access_secret : str
the access secret provided by postcode.nl . If not provided
an ERRauthAccessUnknownSecret exception is raised
headers : dict
optional headers to set
returns :
a response dictionary
"""
if environment == 'practice':
raise PostcodeError("ERRnoPractice")
elif environment == 'live':
self.api_url = 'https://api.postcode.nl'
self.client = requests.Session()
if not access_key:
raise PostcodeError("ERRauthAccessUnknownKey")
if not access_secret:
raise PostcodeError("ERRauthAccessUnknownSecret")
if headers:
self.client.headers.update(headers)
# Enable basic authentication
self.client.auth = (access_key, access_secret)
def __request(self, endpoint, method='GET', params=None, convJSON=False):
"""request - Returns dict of response from postcode.nl API.
This method is called only by the EndpointMixin methods.
"""
url = '%s/%s' % (self.api_url, endpoint)
method = method.lower()
params = params or {}
if convJSON:
params = json.dumps(params)
func = getattr(self.client, method)
request_args = {}
if method == 'get':
request_args['params'] = params
else:
request_args['data'] = params
try:
# Normally some valid HTTP-response will be the case
# if not some exception regarding the request / connection has
# occurred
# this will be one of the exceptions of the request module
# if so, we will raise a PostcodeError exception and pass the request
# exception message
response = func(url, **request_args)
except requests.RequestException as e:
raise PostcodeError("ERRrequest", {"exception": e.__doc__})
content = response.content.decode('utf-8')
content = json.loads(content)
if response.status_code == 200:
return content
# Errors, otherwise we did not get here ...
if 'exceptionId' in content:
raise PostcodeError(content['exceptionId'], content)
raise PostcodeError("UnknownExceptionFromPostcodeNl")
class PostcodeError(Exception):
"""PostcodeError - Generic error class, catches response errors."""
__eid = [
# Request exceptions
'ERRrequest',
# Module exceptions
'ERRnoPractice',
'ERRauthAccessUnknownKey',
'ERRauthAccessUnknownSecret',
# API exceptions
'PostcodeNl_Controller_Plugin_HttpBasicAuthentication_Exception',
'PostcodeNl_Controller_Plugin_HttpBasicAuthentication_NotAuthorizedException',
'PostcodeNl_Api_RestClient_AuthenticationException',
'PostcodeNl_Controller_Plugin_HttpBasicAuthentication_PasswordNotCorrectException',
'React_Controller_Action_InvalidParameterException',
'PostcodeNl_Controller_Address_InvalidPostcodeException',
'PostcodeNl_Controller_Address_InvalidHouseNumberException',
'PostcodeNl_Controller_Address_NoPostcodeSpecifiedException',
'PostcodeNl_Controller_Address_NoHouseNumberSpecifiedException',
'PostcodeNl_Controller_Address_PostcodeTooLongException',
'React_Model_Property_Validation_Number_ValueTooHighException',
'PostcodeNl_Service_PostcodeAddress_AddressNotFoundException',
#
'ERRHouseNumberAdditionInvalid',
# NEEDS TO BE LAST !
'ERRUnknownExceptionFromPostcodeNl',
]
def __init__(self, exceptionId, response_data=None):
"""instantiate PostcodeError instance.
Parameters
----------
exceptionId : str
the id of the exception. It should match one of the known exception
ids. If it does not match, it is set to:
ERRUnknownExceptionFromPostcodeNl
response_data : data
the data received at the moment the exception occurred
"""
if exceptionId in self.__eid:
self.exceptionId = exceptionId
else:
self.exceptionId = self.__eid[-1]
self.response_data = response_data
self.msg = ""
# add additional data if we have it
if response_data and "exception" in response_data:
self.msg += response_data['exception']
super(PostcodeError, self).__init__(self.msg)
# ----------------------------------------------------------------------
if __name__ == "__main__": # pragma: no cover
import sys
import os
# Some of these lookups succeed; the ones that do not raise a PostcodeError,
# which is caught below and written to stderr
api = API(environment='live', access_key=os.getenv("ACCESS_KEY"),
access_secret=os.getenv("ACCESS_SECRET"))
for pc in [('1071XX', 1),
('1077XX', 1),
('7514BP', 129),
('7514BP', 129, 'A'),
('7514BP', 129, 'a'),
('7514BP', 129, 'b'),
]:
try:
retValue = api.get_postcodedata(*pc)
print("\nresults for: {}".format(str(pc)))
print(json.dumps(retValue, sort_keys=True, indent=2))
except PostcodeError as e:
sys.stderr.write("{}, {}, {}".format(
str(pc), e.exceptionId,
json.dumps(e.response_data,
sort_keys=True,
indent=2)))
| mit |
rstebbing/powerlifting-meet-manager | table.py | 1 | 24349 | ##########################################
# File: table.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
from PyQt4 import QtCore, QtGui
from collections import namedtuple
from lifter import Lifter, LifterCollection
import wilks
import pickle_
from string import Template
# Setup logger
from log import getLogger
logger = getLogger('qt')
# Section
Section = namedtuple('Section', 'attribute heading format conversion is_lift')
# Globals
HTML_TEMPLATE = Template(
'''<style type="text/css">
table.results
{
font-family:sans-serif;
border-collapse:collapse;
}
table.results td, th
{
font-size:1.0em;
border:1px solid black;
padding:3px 7px 2px 7px;
}
table.results th
{
font-size:1.0em;
font-weight:bold;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#A9BBFF;
color:black;
}
table.results tr.alt td
{
background-color:#E0E0E0;
}
h1.results
{
font-family:sans-serif;
}
</style>
<html>
<body>
${title}
<div>
<p style="font-family:sans-serif; font-weight:bold;font-size:1.2em;">Overall</p>
<p style="font-family:sans-serif;">
<span style="font-weight:bold;">Best team: </span>${best_team}<br />
<span style="font-weight:bold;">Best lifter: </span>${best_lifter}</p>
</div>
<div>
<p style="font-family:sans-serif; font-weight:bold;font-size:1.2em;">Summary</p>
<table class="results", style="table-layout:auto;">
<tbody>
${tsum}
</tbody>
</table>
</div>
<div>
<p style="font-family:sans-serif; font-weight:bold;font-size:1.2em;">Results</p>
<table class="results", style="width:100%;">
<tbody>
${tbody}
</tbody>
</table>
</div>
''')
# TableModel
class TableModel(QtCore.QAbstractTableModel):
TRANSLATE_SECTION = [
Section('gender', 'M/F', '%s', None, False),
Section('flight', 'Flight', '%d', 'toInt', False),
Section('team', 'Team', '%s', None, False),
Section('name', 'Name', '%s', None, False),
Section('weight', 'Weight', '%.1f', None, False),
Section('rack_height', 'Rack Height', '%d', 'toInt', False),
Section('squat_0', 'Squat 1', '%.1f', 'toDouble', True),
Section('squat_1', 'Squat 2', '%.1f', 'toDouble', True),
Section('squat_2', 'Squat 3', '%.1f', 'toDouble', True),
Section('bench_0', 'Bench 1', '%.1f', 'toDouble', True),
Section('bench_1', 'Bench 2', '%.1f', 'toDouble', True),
Section('bench_2', 'Bench 3', '%.1f', 'toDouble', True),
Section('deadlift_0', 'Deadlift 1', '%.1f', 'toDouble', True),
Section('deadlift_1', 'Deadlift 2', '%.1f', 'toDouble', True),
Section('deadlift_2', 'Deadlift 3', '%.1f', 'toDouble', True),
Section('total', 'Total', '%.1f', None, False),
Section('points', 'Points', '%.2f', None, False)
]
model_changed = QtCore.pyqtSignal()
def __init__(self, top=3, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.flight_filter = None
self.last_clicked = None
self.next_sort = QtCore.Qt.AscendingOrder
self.lifters_map = LifterCollection(top=top)
self.sorted_by('lifter_id')
self.reset()
# Required Qt methods
def headerData(self, section, orient, role):
if role == QtCore.Qt.DisplayRole and orient == QtCore.Qt.Horizontal:
section_info = self.TRANSLATE_SECTION[section]
if section_info.attribute == 'flight':
if self.flight_filter is not None:
return section_info.heading + ' [%d]' % self.flight_filter
return section_info.heading
return QtCore.QVariant()
def index_to_lifter(self, index):
if not index.isValid():
return None
lifter_index, section = index.row(), index.column()
lifter = self.lifters[lifter_index]
section_info = self.TRANSLATE_SECTION[section]
return lifter, section_info
def data(self, index, role):
if not index.isValid():
return QtCore.QVariant()
# Get active lifter and section info
lifter, section_info = self.index_to_lifter(index)
if role == QtCore.Qt.DisplayRole:
# Get section info
value = getattr(lifter, section_info.attribute)
return section_info.format % value
# Handle lift representation here
elif role == QtCore.Qt.ForegroundRole:
pass
elif role == QtCore.Qt.BackgroundRole:
pass
elif role == QtCore.Qt.FontRole:
if section_info.is_lift:
# Translate attribute string into lift and attempt
lift, attempt_str = section_info.attribute.split('_')
attempt = int(attempt_str)
# Get record
record = lifter.get_lift(lift, attempt)[0]
# Set font accordingly
font = QtGui.QFont()
if record == Lifter.GOOD_LIFT:
font.setBold(True)
elif record == Lifter.FAIL_LIFT:
font.setStrikeOut(True)
elif record == Lifter.PASS_LIFT:
font.setStrikeOut(True)
font.setItalic(True)
elif record == Lifter.SET_LIFT:
font.setItalic(True)
return font
return QtCore.QVariant()
def setData(self, index, value, role):
if not index.isValid():
return False
if role == QtCore.Qt.EditRole:
# Resolve lifter and section
lifter, section_info = self.index_to_lifter(index)
# None conversion means it isn't editable
if section_info.conversion is None:
return False
# Convert value
value, ok = getattr(value, section_info.conversion)()
if not ok:
return False
# Catch entry error
try:
setattr(lifter, section_info.attribute, value)
except ValueError, ex:
logger.error('Previous attempt not completed.\n%s', ex.message)
return False
# Emit change over the specified index
top_left = self.index(index.row(), 0)
bottom_right = self.index(index.row(), self.columnCount(None)-1)
self.dataChanged.emit(top_left, bottom_right)
# Emit change of the model
self.model_changed.emit()
return True
return False
def validate_lift(self, index, valid):
if not index.isValid():
return
# Get lifter and section if valid
lifter, section_info = self.index_to_lifter(index)
# If section is not a lift then don't do anything
if not section_info.is_lift:
return
# Translate attribute string into lift and attempt
lift, attempt_str = section_info.attribute.split('_')
attempt = int(attempt_str)
# Validate the lift
lifter.validate_lift(lift, attempt, valid)
# Emit signals
self.model_changed.emit()
self.dataChanged.emit(index, index)
def flags(self, index):
if not index.isValid():
return QtCore.Qt.NoItemFlags
# Default flags
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
# Resolve index and check if also editable
lifter, section_info = self.index_to_lifter(index)
if section_info.conversion is not None:
flags |= QtCore.Qt.ItemIsEditable
return flags
def rowCount(self, parent):
return len(self.lifters)
def columnCount(self, parent):
return len(self.TRANSLATE_SECTION)
def section_clicked(self, section):
section_info = self.TRANSLATE_SECTION[section]
# If flight
if section_info.attribute == 'flight':
# Get flights
flights = self.lifters_map.flights()
# Get next flight filter
reset_filter = True
try:
index = flights.index(self.flight_filter) + 1
except ValueError:
pass
else:
reset_filter = False
if index >= len(flights):
self.flight_filter = None
else:
self.flight_filter = flights[index]
if reset_filter:
self.flight_filter = flights[0]
# If NOT flight, sort by attribute, then weight and lifter_id
else:
next_sort = QtCore.Qt.AscendingOrder
if self.last_clicked is not None and \
self.last_clicked == section_info.attribute:
if self.next_sort == QtCore.Qt.AscendingOrder:
next_sort = QtCore.Qt.DescendingOrder
self.next_sort = next_sort
self.last_clicked = section_info.attribute
self.sorted_by(section_info.attribute, 'weight', 'lifter_id')
# All paths lead to large change
self.reset()
def reset(self):
# Apply flight filter and reset it if required
reset_all = True
if self.flight_filter is not None:
self.lifters = [l for l in self.lifters_ if l.flight == \
self.flight_filter]
if len(self.lifters) > 0:
reset_all = False
if reset_all:
self.lifters = self.lifters_[:]
self.flight_filter = None
# reset
QtCore.QAbstractTableModel.reset(self)
# Sort method
def sorted_by(self, *args):
if len(args) > 0:
# Save args
self.sort_args = args
# Sort based on args
sort_args = list(self.sort_args)
# Only reverse first attribute as others are used to tie-break
if self.next_sort == QtCore.Qt.DescendingOrder:
sort_args[0] = 'REV_' + sort_args[0]
self.lifters_ = self.lifters_map.sorted_by(*sort_args)
# Add / remove methods
def add(self, lifter):
self.lifters_map.add(lifter)
self.sorted_by()
self.reset()
# Emit change of model
self.model_changed.emit()
def remove(self, index):
if not index.isValid():
return
lifter, section_info = self.index_to_lifter(index)
self.lifters_map.remove(lifter)
self.sorted_by()
self.reset()
# Emit change of model
self.model_changed.emit()
# Save / load / export
def save(self, file_):
pickle_.dump(file_, self.lifters_map)
def load(self, file_):
self.lifters_map = pickle_.load(file_)
self.sorted_by()
self.reset()
def export(self, file_):
# Results summary
# Get overall info
best_lifter, best_total, team_info = self.lifters_map.overall_info()
# Overall
best_team = '%s [%.2f]' % best_total
best_lifter = '%s [%.2f]' % \
(best_lifter.name, best_lifter.points)
# Team summary
tsum = ''
# Headers
tsum += '<tr>'
for heading in ['Team / lifter', 'Points']:
tsum += '<th>%s</th>' % heading
tsum += '</tr>\n'
# Summary
row = 0
for team, info in team_info.iteritems():
# Prepare data to output
data = [(team, info[0])]
for lifter in info[1]:
data.append( (' ' * 4 + lifter.name, lifter.points) )
# Output the data
for perf, points in data:
# Alternate colours of rows
if row % 2 == 1:
row_str = '<tr class="alt">'
else:
row_str = '<tr>'
# Manually increment row
row += 1
row_str += '<td>%s</td><td>%.2f</td></tr>\n' % (perf, points)
tsum += row_str
# Main results
tbody = ''
# Headers
tbody += '<tr>'
for section_info in self.TRANSLATE_SECTION:
tbody += '<th>%s</th>' % section_info.heading
tbody += '</tr>\n'
# Get lifters sorted by points, then weight, then id
lifters = self.lifters_map.sorted_by(
'REV_points', 'weight', 'lifter_id'
)
# Results table
for row, lifter in enumerate(lifters):
# Alternate colours of rows
if row % 2 == 1:
row_str = '<tr class="alt">'
else:
row_str = '<tr>'
# Add data
for section_info in self.TRANSLATE_SECTION:
# Get data as string
value = getattr(lifter, section_info.attribute)
data = section_info.format % value
# If a lift, set up style string
style_str = '"'
if section_info.is_lift:
# Translate attribute string into lift and attempt
lift, attempt_str = section_info.attribute.split('_')
attempt = int(attempt_str)
# Get record
record = lifter.get_lift(lift, attempt)[0]
# Set font accordingly
if record == Lifter.GOOD_LIFT:
style_str += 'font-weight:bold;'
elif record == Lifter.FAIL_LIFT:
style_str += 'text-decoration:line-through;'
elif record == Lifter.PASS_LIFT:
style_str += 'text-decoration:line-through;' \
'font-style:italic;'
elif record == Lifter.SET_LIFT:
style_str += 'font-style:italic;'
style_str += '"'
# If style str is added
if len(style_str) > 2:
row_str += '<td style=%s>%s</td>' % (style_str, data)
else:
row_str += '<td>%s</td>' % data
row_str += '</tr>\n'
tbody += row_str
# XXX Set title
title = ''
# Save full table
html_table = HTML_TEMPLATE.substitute(
title=title,
best_team=best_team,
best_lifter=best_lifter,
tsum=tsum,
tbody=tbody)
with open(file_, 'w') as fp:
fp.write(html_table)
# TableView
class TableView(QtGui.QTableView):
PERFORMANCE_TEXT = '&Performance'
SUMMARY_TEXT = '&Summary'
GOOD_LIFT = '&Good lift'
FAIL_LIFT = '&Fail lift'
PASS_LIFT = '&Pass lift'
def __init__(self, model, parent=None):
QtGui.QTableView.__init__(self, parent)
self.setModel(model)
self.setup_menus()
self.setup_ui()
def setup_menus(self):
menu = QtGui.QMenu()
menu.addAction(self.PERFORMANCE_TEXT)
menu.addAction(self.SUMMARY_TEXT)
self.general_menu = menu
menu = QtGui.QMenu()
menu.addAction(self.GOOD_LIFT)
menu.addAction(self.FAIL_LIFT)
menu.addAction(self.PASS_LIFT)
menu.addSeparator()
menu.addAction(self.PERFORMANCE_TEXT)
menu.addAction(self.SUMMARY_TEXT)
self.lift_menu = menu
def setup_ui(self):
self.verticalHeader().setVisible(False)
# Default header column behaviour is to stretch
self.horizontalHeader().setResizeMode(
QtGui.QHeaderView.Stretch
)
# Otherwise resize to contents
headings = [i.heading for i in TableModel.TRANSLATE_SECTION]
for heading in ['M/F', 'Flight', 'Weight', 'Team', 'Name']:
i = headings.index(heading)
self.horizontalHeader().setResizeMode(i,
QtGui.QHeaderView.ResizeToContents
)
# Set general size policy
self.setSizePolicy(
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding
)
# Set click connections
# self.setSortingEnabled(True)
self.horizontalHeader().setClickable(True)
self.horizontalHeader().sectionClicked.connect(
self.model().section_clicked
)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
def contextMenuEvent(self, event):
# Check from mouse
if event.reason() != QtGui.QContextMenuEvent.Mouse:
return
# Check index
index = self.indexAt(event.pos())
if not index.isValid():
return
# From the index get the lifter and section info
lifter, section_info = self.model().index_to_lifter(index)
# Execute the menu
if section_info.is_lift:
menu = self.lift_menu
else:
menu = self.general_menu
action = menu.exec_(self.mapToGlobal(event.pos()))
# Interpret the result
if action is None:
return
if action.text() == self.PERFORMANCE_TEXT:
# Performance dialog
dialog = PerformanceDialog(self)
dialog.set_lifter(lifter)
dialog.exec_()
return
elif action.text() == self.SUMMARY_TEXT:
# Summary dialog
dialog = SummaryDialog(self)
dialog.set_lifter(lifter)
dialog.exec_()
return
if section_info.is_lift:
# Determine if the lift is good, fail, or pass
if action.text() == self.PASS_LIFT:
valid = None
elif action.text() == self.GOOD_LIFT:
valid = True
else:
valid = False
# Validate the lift
self.model().validate_lift(index, valid)
# PerformanceDialog
class PerformanceDialog(QtGui.QDialog):
def __init__(self, parent=None, flags=QtCore.Qt.Dialog):
QtGui.QDialog.__init__(self, parent, flags)
self.setup_ui()
def setup_ui(self):
self.lifter_info = QtGui.QLabel('')
left_layout = QtGui.QGridLayout()
attributes = Lifter.LIFTS + ['total', 'points']
for i, attr in enumerate(attributes):
title = attr[0].upper() + attr[1:]
label_0 = QtGui.QLabel('%s:' % title)
label = QtGui.QLabel('')
left_layout.addWidget(label_0, i, 0)
left_layout.addWidget(label, i, 1)
setattr(self, '%s_label' % attr, label)
offset = len(attributes)
attributes = ['team_total', 'best_total', 'best_team', 'difference',
'projected_points', 'projected_total']
titles = ['Team total', 'Best total', 'Best team', 'Difference',
'Projected points', 'Projected total']
for i, (attr, title) in enumerate(zip(attributes, titles)):
label_0 = QtGui.QLabel('%s:' % title)
if i < 4:
post = 'label'
label = QtGui.QLabel('')
else:
post = 'edit'
label = QtGui.QLineEdit('')
left_layout.addWidget(label_0, i + offset, 0)
left_layout.addWidget(label, i + offset, 1)
setattr(self, '%s_%s' % (attr, post), label)
# Connections
self.projected_points_edit.textEdited.connect(
self.slot_projected_points
)
self.projected_total_edit.textEdited.connect(
self.slot_projected_total
)
# Main layout
main_layout = QtGui.QVBoxLayout()
main_layout.addWidget(self.lifter_info)
main_layout.addLayout(left_layout)
self.setLayout(main_layout)
def set_lifter(self, lifter):
self.lifter = lifter
self.setWindowTitle('Performance: %s' % lifter.name)
# Set lifter information
self.lifter_info.setText(
'%s, %.1f, %s' % (lifter.name, lifter.weight, lifter.team)
)
# Set best lifts
for lift in Lifter.LIFTS:
value = lifter.best_lift(lift)
label = getattr(self, '%s_label' % lift)
label.setText('%.1f' % value)
# Set total and points
for attr in ['total', 'points']:
label = getattr(self, '%s_label' % attr)
label.setText('%.2f' % getattr(lifter, attr))
# Get team info
_, best_total, team_info = lifter.overall_info()
team_total, _ = team_info[lifter.team]
# Get difference to best_total
difference = best_total[1] - team_total
# Set team points
self.team_total_label.setText('%.2f' % team_total)
self.best_team_label.setText('%s' % best_total[0])
self.best_total_label.setText('%.2f' % best_total[1])
self.difference_label.setText('%.2f' % difference)
self.projected_points_edit.setText('%.2f' % (difference + lifter.points))
# Call slot manually (not called with programmatic change to text)
self.slot_projected_points(self.projected_points_edit.text())
# Slots
def slot_projected_points(self, text):
points, ok = text.toDouble()
if not ok:
return
total = wilks.required_total(self.lifter.gender, self.lifter.weight,
points)
self.projected_total_edit.setText('%.2f' % total)
def slot_projected_total(self, text):
total, ok = text.toDouble()
if not ok:
return
points = wilks.points(self.lifter.gender, self.lifter.weight,
total)
self.projected_points_edit.setText('%.2f' % points)
# SummaryDialog
class SummaryDialog(QtGui.QDialog):
def __init__(self, parent=None, flags=QtCore.Qt.Dialog):
QtGui.QDialog.__init__(self, parent, flags)
self.setup_ui()
def setup_ui(self):
self.tree = QtGui.QTreeWidget(self)
self.tree.setColumnCount(2)
header_item = QtGui.QTreeWidgetItem(
None,
['Team / lifter', 'Points']
)
self.tree.setHeaderItem(header_item)
header = self.tree.header()
header.setResizeMode(QtGui.QHeaderView.ResizeToContents)
attributes = ['best_team', 'best_lifter']
titles = ['Best team', 'Best lifter']
label_layout = QtGui.QVBoxLayout()
for attr, title in zip(attributes, titles):
label_0 = QtGui.QLabel('%s:' % title)
label = QtGui.QLabel('')
line_layout = QtGui.QHBoxLayout()
line_layout.addWidget(label_0)
line_layout.addWidget(label)
line_layout.addStretch(1)
label_layout.addLayout(line_layout)
setattr(self, '%s_label' % attr, label)
main_layout = QtGui.QVBoxLayout()
main_layout.addWidget(self.tree)
main_layout.addLayout(label_layout)
self.setLayout(main_layout)
def set_lifter(self, lifter):
self.lifter = lifter
self.setWindowTitle('Summary: %s' % lifter.name)
# Get team info as sorted by team name
best_lifter, best_total, team_info = lifter.overall_info()
team_info = sorted(team_info.iteritems(), key=lambda x: x[0].lower())
# Add to the tree widget and get best team total
best_total = (None, 0.)
for i, (team, info) in enumerate(team_info):
if info[0] > best_total[1]:
best_total = (team, info[0])
# Construct team item
team_item = QtGui.QTreeWidgetItem(
None,
[team, '%.2f' % info[0]]
)
# Construct member items
for lifter in info[1]:
item = QtGui.QTreeWidgetItem(
None,
[lifter.name, '%.2f' % lifter.points]
)
team_item.addChild(item)
# Save top level item
self.tree.insertTopLevelItem(i, team_item)
# Set best team
self.best_team_label.setText('%s [%.2f]' % best_total)
# Set the best lifter
self.best_lifter_label.setText('%s [%.2f]' % \
(best_lifter.name, best_lifter.points))
| mit |
ilya-epifanov/ansible-modules-core | inventory/add_host.py | 154 | 2000 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: add_host
short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory
description:
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
Takes variables so you can define the new hosts more fully.
version_added: "0.9"
options:
name:
aliases: [ 'hostname', 'host' ]
description:
- The hostname/ip of the host to add to the inventory, can include a colon and a port number.
required: true
groups:
aliases: [ 'groupname', 'group' ]
description:
- The groups to add the hostname to, comma separated.
required: false
notes:
- This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it
to iterate use a with\_ directive.
author:
- "Ansible Core Team"
- "Seth Vidal"
'''
EXAMPLES = '''
# add host to group 'just_created' with variable foo=42
- add_host: name={{ ip_from_ec2 }} groups=just_created foo=42
# add a host with a non-standard port local to your machines
- add_host: name={{ new_ip }}:{{ new_port }}
# add a host alias that we reach through a tunnel
- add_host: hostname={{ new_ip }}
ansible_ssh_host={{ inventory_hostname }}
ansible_ssh_port={{ new_port }}
'''
| gpl-3.0 |
gnufede/sqjobs | sqjobs/brokers/base.py | 1 | 1413 | from abc import ABCMeta, abstractmethod
from six import add_metaclass
from uuid import uuid4
@add_metaclass(ABCMeta)
class Broker(object):
@abstractmethod
def add_job(self, job_class, *args, **kwargs):
"""
Add a job to the broker
:param job_class: python class of the payload job
:param args: arguments to execute the job
:param kwargs: keyword arguments to execute the job
"""
raise NotImplementedError
def gen_job_id(self):
"""
Generate a new unique job ID
"""
return str(uuid4())
def serialize_job(self, job_class, job_id, args, kwargs):
"""
Serialize a job into a string to be sent to the broker
:param job_class: python class of the payload job
:param job_id: the ID of the job
:param args: arguments of the job
:param kwargs: keyword arguments of the job
"""
return self.connector.serialize_job(job_class, job_id, args, kwargs)
def unserialize_job(self, job_class, queue_name, payload):
"""
Build a job given a payload returned from the broker
:param job_class: python class of the payload job
:param queue_name: queue where the job was located
:param payload: python dict with the job arguments
"""
return self.connector.unserialize_job(job_class, queue_name, payload)
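# Illustrative usage sketch (editor's addition, not part of the original sqjobs code).
# `SqsBroker`, `my_connector` and `SendEmail` are assumed names used only to show how
# the abstract interface above fits together; the real connector API lives elsewhere.
#
#   broker = SqsBroker(connector=my_connector)
#   job_id = broker.gen_job_id()          # a uuid4 string
#   payload = broker.serialize_job(SendEmail, job_id, ('bob@example.com',), {})
#   broker.unserialize_job(SendEmail, 'emails', payload)   # rebuild a job from a payload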
| bsd-3-clause |
kawamon/hue | desktop/libs/librdbms/java/query.py | 2 | 1577 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range
from py4j.java_gateway import JavaGateway
gateway = JavaGateway()
jdbc_driver = 'com.mysql.jdbc.Driver'
db_url = 'jdbc:mysql://localhost/hue'
username = 'root'
password = 'root'
conn = gateway.jvm.java.sql.DriverManager.getConnection(db_url, username, password)
try:
stmt = conn.createStatement()
try:
rs = stmt.executeQuery('select username,email from auth_user')
try:
md = rs.getMetaData()
for i in range(md.getColumnCount()):
print(md.getColumnTypeName(i + 1))
while rs.next():
username = rs.getString("username")
email = rs.getString("email")
print(username, email)
finally:
rs.close()
finally:
stmt.close()
finally:
conn.close()
| apache-2.0 |
fparrel/vigicrues_tools | chcantabrico_scrap.py | 1 | 1262 | #!/usr/bin/env python
import json
import requests
import datetime
from serialize import saveValues, loadStations
from chcantabrico_get_stations import getFlow4Level
def parseLine(line):
d, v = line.split(';')
return datetime.datetime.strptime(d,"%d/%m/%Y %H:%M:%S"),float(v)
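# Editor's note - example of the semicolon-separated line format parseLine expects
# (the values are made up):
#   '21/03/2016 10:30:00;1.42'  ->  (datetime.datetime(2016, 3, 21, 10, 30), 1.42)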
def main():
flow4level_updated = False
stations = loadStations('chcantabrico')
for station in stations:
url = 'https://www.chcantabrico.es/evolucion-de-niveles/-/descarga/csv/nivel/%s' % station['id']
print(url)
r = requests.get(url)
csv = r.text.encode(r.encoding)
lines = csv.split('\n')[2:]
values = map(parseLine,filter(lambda line: line.strip()!='',lines))
if len(values)>0:
saveValues('chcantabrico','nivel_%s'%station['id'],values)
flow4level = getFlow4Level(station['url'])
len_bf = len(station['flow4level'])
station['flow4level'].update(flow4level) #update `station` dict in place with new value(s)
if len(station['flow4level'])>len_bf:
flow4level_updated = True
if flow4level_updated:
print('New value got for flow4level')
json.dump(stations,open('stations_chcantabrico.json','w'))
if __name__=='__main__':
main()
| gpl-3.0 |
mariosky/evo-drawings | venv/lib/python2.7/site-packages/django/views/decorators/cache.py | 129 | 2286 | from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
sites.get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
if len(args) != 1 or callable(args[0]):
raise TypeError("cache_page has a single mandatory positional argument: timeout")
cache_timeout = args[0]
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
if kwargs:
raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix)
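# Illustrative usage (editor's note, not part of Django itself); the view name,
# timeout and key prefix are made-up examples:
#
#   @cache_page(60 * 15, cache='default', key_prefix='site1')
#   def article_list(request):
#       ...
#
# caches the rendered response for 15 minutes in the 'default' cache backend.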
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
| agpl-3.0 |
FabianWe/foodle | foodle/foodle_polls/filters.py | 1 | 1306 | # The MIT License (MIT)
#
# Copyright (c) 2016 Fabian Wenzelmann <fabianwenzelmann@posteo.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django.template.defaulttags import register
import pytz
import json
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
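# Illustrative template usage (editor's note; the variable names are made up):
#
#   {{ votes_by_user|get_item:user.id }}
#
# which returns votes_by_user.get(user.id) (None when the key is absent).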
| mit |
seanli9jan/tensorflow | tensorflow/contrib/receptive_field/python/util/graph_compute_order.py | 47 | 7500 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to compute order of computations in a graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.platform import tf_logging as logging
def parse_graph_nodes(graph_def):
"""Helper function to parse GraphDef's nodes.
It returns a dict mapping from node name to NodeDef.
Args:
graph_def: A GraphDef object.
Returns:
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
"""
name_to_node = {}
for node_def in graph_def.node:
name_to_node[node_def.name] = node_def
return name_to_node
# Named tuple used to collect information from each node in a computation graph.
_node_info = collections.namedtuple(
'NodeInfo', field_names=['order', 'node', 'input_size', 'output_size'])
def _compute_output_resolution(input_spatial_resolution, kernel_size, stride,
total_padding):
"""Computes output resolution, given input resolution and layer parameters.
Note that this computation is done only over one dimension (eg, x or y).
If any of the inputs is None, returns None.
Args:
input_spatial_resolution: Input spatial resolution (int).
kernel_size: Kernel size (int).
stride: Stride (int).
total_padding: Total padding to be applied (int).
Returns:
output_resolution: Output dimension (int) or None.
"""
if (input_spatial_resolution is None) or (kernel_size is None) or (
stride is None) or (total_padding is None):
return None
return int(
math.ceil((
input_spatial_resolution + total_padding - kernel_size + 1) / stride))
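# Worked example (editor's note): for an assumed 224-pixel input with a 7-pixel
# kernel, stride 2 and total padding 6 along one dimension, the formula above gives
# ceil((224 + 6 - 7 + 1) / 2) = ceil(224 / 2) = 112 output pixels.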
def _get_computed_nodes(name_to_node,
current,
node_info,
input_node_name='',
input_node_size=None):
"""Traverses the graph recursively to compute its topological order.
Optionally, the function may also compute the input and output feature map
resolutions at each node. In this case, input_node_name and input_node_size
must be set. Note that if a node's op type is unknown, the input and output
resolutions are ignored and set to None.
Args:
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
current: Current node name.
node_info: Map of nodes we've already traversed, containing their _node_info
information.
input_node_name: Name of node with fixed input resolution (optional).
input_node_size: Fixed input resolution to use (optional).
Returns:
order: Order in topological sort for 'current'.
input_size: Tensor spatial resolution at input of current node.
output_size: Tensor spatial resolution at output of current node.
"""
if current in node_info:
return (node_info[current].order, node_info[current].input_size,
node_info[current].output_size)
node_def = name_to_node[current]
if current == input_node_name:
order = 0
input_size = None
output_size = input_node_size
node_info[current] = _node_info(order, node_def, input_size, output_size)
return (order, input_size, output_size)
input_size = None
output_size = None
order = 0
number_inputs = 0
for each in node_def.input:
# Parses name of input node.
if each.startswith('^'):
# The character '^' denotes a control dependency, so this input node can
# be safely ignored.
continue
each = each.split(':')[0]
# Recursively computes ordering.
(parent_order, _, parent_output_size) = _get_computed_nodes(
name_to_node, each, node_info, input_node_name, input_node_size)
order = max(order, parent_order + 1)
if number_inputs == 0:
# For all the types of nodes we consider, the first input corresponds to
# the feature map.
input_size = parent_output_size
number_inputs += 1
# Figure out output size for this layer.
logging.vlog(3, 'input_size = %s', input_size)
if input_size is None:
output_size = None
else:
(kernel_size_x, kernel_size_y, stride_x, stride_y, _, _, total_padding_x,
total_padding_y) = (
parse_layer_parameters.get_layer_params(
node_def, name_to_node, input_size, force=True))
logging.vlog(3, 'kernel_size_x = %s, kernel_size_y = %s, '
'stride_x = %s, stride_y = %s, '
'total_padding_x = %s, total_padding_y = %s' %
(kernel_size_x, kernel_size_y, stride_x, stride_y,
total_padding_x, total_padding_y))
output_size = [None] * 2
output_size[0] = _compute_output_resolution(input_size[0], kernel_size_x,
stride_x, total_padding_x)
output_size[1] = _compute_output_resolution(input_size[1], kernel_size_y,
stride_y, total_padding_y)
logging.vlog(3, 'output_size = %s', output_size)
node_info[current] = _node_info(order, node_def, input_size, output_size)
return order, input_size, output_size
def get_compute_order(graph_def, input_node_name='', input_node_size=None):
"""Computes order of computation for a given CNN graph.
Optionally, the function may also compute the input and output feature map
resolutions at each node. In this case, input_node_name and input_node_size
must be set. Note that if a node's op type is unknown, the input and output
resolutions are ignored and set to None.
Args:
graph_def: GraphDef object.
input_node_name: Name of node with fixed input resolution (optional). This
is usually the node name for the input image in a CNN.
input_node_size: 2D list of integers, fixed input resolution to use
(optional). This is usually the input resolution used for the input image
in a CNN (common examples are: [224, 224], [299, 299], [321, 321]).
Returns:
node_info: Default dict keyed by node name, mapping to a named tuple with
the following fields:
- order: Integer denoting topological order;
- node: NodeDef for the given node;
- input_size: 2D list of integers, denoting the input spatial resolution
to the node;
- output_size: 2D list of integers, denoting the output spatial resolution
of the node.
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
"""
name_to_node = parse_graph_nodes(graph_def)
node_info = collections.defaultdict(_node_info)
for each in graph_def.node:
_get_computed_nodes(name_to_node, each.name, node_info, input_node_name,
input_node_size)
return node_info, name_to_node
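# Illustrative call (editor's note; the node names and resolution are assumptions):
#
#   node_info, name_to_node = get_compute_order(
#       graph_def, input_node_name='input_image', input_node_size=[224, 224])
#   conv1 = node_info['my_net/conv1/Conv2D']
#   conv1.order, conv1.input_size, conv1.output_size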
| apache-2.0 |
shahbaz17/zamboni | sites/prod/settings_mkt.py | 8 | 4322 | from mkt.settings import * # noqa
from settings_base import * # noqa
from .. import splitstrip
import private_mkt
SERVER_EMAIL = 'zmarketplaceprod@addons.mozilla.org'
SECRET_KEY = private_mkt.SECRET_KEY
DOMAIN = getattr(private_mkt, 'DOMAIN', 'marketplace.firefox.com')
SITE_URL = getattr(private_mkt, 'SITE_URL', 'https://' + DOMAIN)
BROWSERID_AUDIENCES = [SITE_URL]
STATIC_URL = os.getenv('CUSTOM_CDN', 'https://marketplace.cdn.mozilla.net/')
LOCAL_MIRROR_URL = '%s_files' % STATIC_URL
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + (STATIC_URL[:-1],)
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
MEDIA_URL = STATIC_URL + 'media/'
CACHE_PREFIX = 'marketplace.%s' % CACHE_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
SYSLOG_TAG = "http_app_mkt_prod"
SYSLOG_TAG2 = "http_app_mkt_prod_timer"
SYSLOG_CSP = "http_app_mkt_prod_csp"
# Celery
BROKER_URL = private_mkt.BROKER_URL
CELERY_ALWAYS_EAGER = False
CELERYD_PREFETCH_MULTIPLIER = 1
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.receipt': {'level': logging.ERROR},
'elasticsearch': {'level': logging.INFO},
})
STATSD_PREFIX = 'marketplace'
GRAPHITE_PREFIX = STATSD_PREFIX
CEF_PRODUCT = STATSD_PREFIX
IMPALA_BROWSE = True
IMPALA_REVIEWS = True
WEBAPPS_RECEIPT_KEY = private_mkt.WEBAPPS_RECEIPT_KEY
WEBAPPS_RECEIPT_URL = private_mkt.WEBAPPS_RECEIPT_URL
MIDDLEWARE_CLASSES = tuple(m for m in MIDDLEWARE_CLASSES if m not in (csp,))
WEBAPPS_UNIQUE_BY_DOMAIN = True
SENTRY_DSN = private_mkt.SENTRY_DSN
SOLITUDE_HOSTS = ('https://payments.firefox.com',)
SOLITUDE_OAUTH = {'key': private_mkt.SOLITUDE_OAUTH_KEY,
'secret': private_mkt.SOLITUDE_OAUTH_SECRET}
# Bug 748403
SIGNING_SERVER = private_mkt.SIGNING_SERVER
SIGNING_SERVER_ACTIVE = True
SIGNING_VALID_ISSUERS = ['marketplace.cdn.mozilla.net']
# Bug 793876
SIGNED_APPS_SERVER_ACTIVE = True
SIGNED_APPS_SERVER = private_mkt.SIGNED_APPS_SERVER
SIGNED_APPS_REVIEWER_SERVER_ACTIVE = True
SIGNED_APPS_REVIEWER_SERVER = private_mkt.SIGNED_APPS_REVIEWER_SERVER
CARRIER_URLS = splitstrip(private_mkt.CARRIER_URLS)
# Pass through the DSN to the Raven client and force signal
# registration so that exceptions are passed through to sentry
# RAVEN_CONFIG = {'dsn': SENTRY_DSN, 'register_signals': True}
MONOLITH_PASSWORD = private_mkt.MONOLITH_PASSWORD
# Payment settings.
APP_PURCHASE_KEY = DOMAIN
APP_PURCHASE_AUD = DOMAIN
APP_PURCHASE_TYP = 'mozilla/payments/pay/v1'
# This must match private.SECRET in webpay settings.
APP_PURCHASE_SECRET = private_mkt.APP_PURCHASE_SECRET
PRODUCT_ICON_PATH = NETAPP_STORAGE + '/product-icons'
DUMPED_APPS_PATH = NETAPP_STORAGE + '/dumped-apps'
DUMPED_USERS_PATH = NETAPP_STORAGE + '/dumped-users'
if NEWRELIC_ENABLE:
NEWRELIC_INI = '/etc/newrelic.d/marketplace.firefox.com.ini'
ES_DEFAULT_NUM_REPLICAS = 2
ES_USE_PLUGINS = False
BANGO_BASE_PORTAL_URL = 'https://mozilla.bango.com/login/al.aspx?'
ALLOWED_CLIENTS_EMAIL_API = private_mkt.ALLOWED_CLIENTS_EMAIL_API
POSTFIX_AUTH_TOKEN = private_mkt.POSTFIX_AUTH_TOKEN
POSTFIX_DOMAIN = DOMAIN
# IARC content ratings.
IARC_COMPANY = 'Mozilla'
IARC_ENV = 'prod'
IARC_MOCK = False
IARC_PASSWORD = private_mkt.IARC_PASSWORD
IARC_PLATFORM = 'Firefox'
IARC_SERVICE_ENDPOINT = 'https://www.globalratings.com/IARCProdService/IARCServices.svc' # noqa
IARC_STOREFRONT_ID = 4
IARC_SUBMISSION_ENDPOINT = 'https://www.globalratings.com/IARCProdRating/Submission.aspx' # noqa
IARC_ALLOW_CERT_REUSE = False
BOKU_SIGNUP_URL = 'https://developer.mozilla.org/en-US/Marketplace/Publishing/Pricing/Providers/Boku' # noqa
PRE_GENERATE_APKS = True
PRE_GENERATE_APK_URL = 'https://controller.apk.firefox.com/application.apk'
VALIDATOR_TIMEOUT = 180
FXA_AUTH_DOMAIN = 'api.accounts.firefox.com'
FXA_OAUTH_URL = 'https://oauth.accounts.firefox.com'
FXA_CLIENT_ID = getattr(private_mkt, 'FXA_CLIENT_ID', '')
FXA_CLIENT_SECRET = getattr(private_mkt, 'FXA_CLIENT_SECRET', '')
FXA_SECRETS = {
FXA_CLIENT_ID: FXA_CLIENT_SECRET,
}
DEFAULT_PAYMENT_PROVIDER = 'bango'
PAYMENT_PROVIDERS = ['bango']
RECOMMENDATIONS_API_URL = 'https://recommend.marketplace.firefox.com'
RECOMMENDATIONS_ENABLED = True
QA_APP_ID = 455996
DEV_PAY_PROVIDERS = None
# Bug 1145338
IAF_OVERRIDE_APPS = private_mkt.IAF_OVERRIDE_APPS
| bsd-3-clause |
odyaka341/pyglet | tests/resource/RES_LOAD_IMAGE.py | 32 | 2593 | #!/usr/bin/python
# $Id:$
import os
import sys
import unittest
from pyglet.gl import *
from pyglet import image
from pyglet import resource
from pyglet import window
__noninteractive = True
# Test image is laid out
# M R
# B G
# In this test the image is sampled at four points from top-right clockwise:
# R G B M (red, green, blue, magenta)
class TestCase(unittest.TestCase):
def setUp(self):
self.w = window.Window(width=10, height=10)
self.w.dispatch_events()
resource.path.append('@' + __name__)
resource.reindex()
def tearDown(self):
self.w.close()
def check(self, img, colors):
glClear(GL_COLOR_BUFFER_BIT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
img.blit(img.anchor_x, img.anchor_y)
buffer = image.get_buffer_manager().get_color_buffer().get_image_data()
bytes = buffer.get_data('RGBA', buffer.width * 4)
def sample(x, y):
i = y * buffer.pitch + x * len(buffer.format)
r, g, b, _ = bytes[i:i+len(buffer.format)]
if type(r) is str:
r, g, b = map(ord, (r, g, b))
return {
(255, 0, 0): 'r',
(0, 255, 0): 'g',
(0, 0, 255): 'b',
(255, 0, 255): 'm'}.get((r, g, b), 'x')
samples = ''.join([
sample(3, 3), sample(3, 0), sample(0, 0), sample(0, 3)])
self.assertTrue(samples == colors, samples)
def test0(self):
self.check(resource.image('rgbm.png'), 'rgbm')
def test2(self):
self.check(resource.image('rgbm.png', flip_x=True), 'mbgr')
def test3(self):
self.check(resource.image('rgbm.png', flip_y=True), 'grmb')
def test4(self):
self.check(resource.image('rgbm.png', flip_x=True, flip_y=True), 'bmrg')
def test5(self):
self.check(resource.image('rgbm.png', rotate=90), 'mrgb')
def test5a(self):
self.check(resource.image('rgbm.png', rotate=-270), 'mrgb')
def test6(self):
self.check(resource.image('rgbm.png', rotate=180), 'bmrg')
def test6a(self):
self.check(resource.image('rgbm.png', rotate=-180), 'bmrg')
def test7(self):
self.check(resource.image('rgbm.png', rotate=270), 'gbmr')
def test7a(self):
self.check(resource.image('rgbm.png', rotate=-90), 'gbmr')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
KiranJKurian/XScheduler | venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 618 | 21334 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""
This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
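# Illustrative example (editor's note, not part of the original module):
#
#   parse_header_links('<http://example.com/page/2>; rel="next"')
#   # -> [{'url': 'http://example.com/page/2', 'rel': 'next'}]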
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
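# Illustrative check (editor's note): '{"k": 1}'.encode('utf-16-le') has null bytes
# in the 2nd and 4th positions and no BOM, so guess_json_utf() returns 'utf-16-le'.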
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
| mit |
Bysmyyr/chromium-crosswalk | third_party/WebKit/LayoutTests/http/tests/websocket/permessage-deflate-manual_wsh.py | 43 | 4707 | # Copyright 2013, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import urlparse
import zlib
from mod_pywebsocket import common, util
from mod_pywebsocket.extensions import PerMessageDeflateExtensionProcessor
from mod_pywebsocket.extensions import ExtensionProcessorInterface
from mod_pywebsocket.common import ExtensionParameter
_GOODBYE_MESSAGE = u'Goodbye'
_ENABLE_MESSAGE = u'EnableCompression'
_DISABLE_MESSAGE = u'DisableCompression'
_bfinal = False
_client_max_window_bits = 15
def _get_permessage_deflate_extension_processor(request):
for extension_processor in request.ws_extension_processors:
if isinstance(extension_processor,
PerMessageDeflateExtensionProcessor):
return extension_processor
return None
def web_socket_do_extra_handshake(request):
global _bfinal
global _client_max_window_bits
processor = _get_permessage_deflate_extension_processor(request)
# Remove extension processors other than
# PerMessageDeflateExtensionProcessor to avoid conflict.
request.ws_extension_processors = [processor]
if not processor:
return
r = request.ws_resource.split('?', 1)
if len(r) == 1:
return
parameters = urlparse.parse_qs(r[1], keep_blank_values=True)
if 'client_max_window_bits' in parameters:
window_bits = int(parameters['client_max_window_bits'][0])
processor.set_client_max_window_bits(window_bits)
_client_max_window_bits = window_bits
if 'client_no_context_takeover' in parameters:
processor.set_client_no_context_takeover(True)
if 'set_bfinal' in parameters:
_bfinal = True
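# Editor's note - example resource strings the query parsing above understands
# (host, port and handler path are placeholders):
#   ws://localhost:8880/permessage-deflate-manual?client_max_window_bits=10
#   ws://localhost:8880/permessage-deflate-manual?client_no_context_takeover&set_bfinal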
def receive(request):
stream = request.ws_stream
possibly_compressed_body = b''
compress = False
while True:
frame = stream._receive_frame_as_frame_object()
if frame.opcode == common.OPCODE_CLOSE:
message = stream._get_message_from_frame(frame)
stream._process_close_message(message)
return (False, None)
compress = compress or frame.rsv1
possibly_compressed_body += frame.payload
if frame.fin:
break
if compress:
return (compress, possibly_compressed_body + b'\x00\x00\xff\xff')
else:
return (compress, possibly_compressed_body)
def web_socket_transfer_data(request):
processor = _get_permessage_deflate_extension_processor(request)
processor.set_bfinal(_bfinal)
inflater = util._Inflater(_client_max_window_bits)
while True:
compress, possibly_compressed_body = receive(request)
body = None
if possibly_compressed_body is None:
return
if compress:
inflater.append(possibly_compressed_body)
body = inflater.decompress(-1)
else:
body = possibly_compressed_body
text = body.decode('utf-8')
if processor:
if text == _ENABLE_MESSAGE:
processor.enable_outgoing_compression()
elif text == _DISABLE_MESSAGE:
processor.disable_outgoing_compression()
request.ws_stream.send_message(text, binary=False)
if text == _GOODBYE_MESSAGE:
return
# vi:sts=4 sw=4 et
| bsd-3-clause |
unho/pootle | pootle/core/models/revision.py | 11 | 2038 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from ..cache import get_cache
cache = get_cache('redis')
class NoRevision(Exception):
pass
class Revision(object):
"""Wrapper around the revision counter stored in Redis."""
CACHE_KEY = 'pootle:revision'
INITIAL = 0
@classmethod
def initialize(cls, force=False):
"""Initializes the revision with `cls.INITIAL`.
:param force: whether to overwrite the number if there's a
revision already set or not.
:return: `True` if the initial value was set, `False` otherwise.
"""
if force:
return cls.set(cls.INITIAL)
return cls.add(cls.INITIAL)
@classmethod
def get(cls):
"""Gets the current revision number.
:return: The current revision number, or `None` if
there's no revision set.
"""
return cache.get(cls.CACHE_KEY)
@classmethod
def set(cls, value):
"""Sets the revision number to `value`, regardless of whether
there's a value previously set or not.
:return: `True` if the value was set, `False` otherwise.
"""
return cache.set(cls.CACHE_KEY, value)
@classmethod
def add(cls, value):
"""Sets the revision number to `value`, only if there's no
revision already set.
:return: `True` if the value was set, `False` otherwise.
"""
return cache.add(cls.CACHE_KEY, value)
@classmethod
def incr(cls):
"""Increments the revision number.
:return: the new revision number after incrementing it, or the
initial number if there's no revision stored yet.
"""
try:
return cache.incr(cls.CACHE_KEY)
except ValueError:
raise NoRevision()
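# Illustrative usage sketch, not part of the original module: seeding and bumping the
# revision counter through the classmethods above (assumes the redis cache is reachable).
def _example_revision_usage():
    Revision.initialize()  # no-op if a revision is already stored
    try:
        return Revision.incr()
    except NoRevision:
        return Revision.INITIAL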
| gpl-3.0 |
jtg-gg/skia | platform_tools/android/gyp_gen/vars_dict_lib.py | 146 | 4422 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import types
# The goal of this class is to store a set of unique items in the order in
# which they are inserted. This is important for the final makefile, where
# we want to make sure the image decoders are in a particular order. See
# images.gyp for more information.
class OrderedSet(object):
"""Ordered set of unique items that supports addition and removal.
Retains the order in which items are inserted.
"""
def __init__(self):
self.__ordered_set = []
def add(self, item):
"""Add item, if it is not already in the set.
item is appended to the end if it is not already in the set.
Args:
item: The item to add.
"""
if item not in self.__ordered_set:
self.__ordered_set.append(item)
def __contains__(self, item):
"""Whether the set contains item.
Args:
item: The item to search for in the set.
Returns:
bool: Whether the item is in the set.
"""
return item in self.__ordered_set
def __iter__(self):
"""Iterator for the set.
"""
return self.__ordered_set.__iter__()
def remove(self, item):
"""
Remove item from the set.
Args:
item: Item to be removed.
Raises:
ValueError if item is not in the set.
"""
self.__ordered_set.remove(item)
def __len__(self):
"""Number of items in the set.
"""
return len(self.__ordered_set)
def __getitem__(self, index):
"""Return item at index.
"""
return self.__ordered_set[index]
def reset(self):
"""Reset to empty.
"""
self.__ordered_set = []
def set(self, other):
"""Replace this ordered set with another.
Args:
other: OrderedSet to replace this one. After this call, this OrderedSet
will contain exactly the same elements as other.
"""
self.__ordered_set = list(other.__ordered_set)
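# Illustrative sketch, not part of the original module: insertion order is preserved
# and duplicates are ignored, which the image-decoder ordering mentioned above relies on.
def _example_ordered_set():
  decoders = OrderedSet()
  for name in ('libpng', 'libjpeg', 'libpng'):
    decoders.add(name)
  # First-insertion order is kept: ['libpng', 'libjpeg']
  return list(decoders)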
VAR_NAMES = ['LOCAL_CFLAGS',
'LOCAL_CPPFLAGS',
'LOCAL_SRC_FILES',
'LOCAL_SHARED_LIBRARIES',
'LOCAL_STATIC_LIBRARIES',
'LOCAL_C_INCLUDES',
'LOCAL_EXPORT_C_INCLUDE_DIRS',
'DEFINES',
'KNOWN_TARGETS',
# These are not parsed by gyp, but set manually.
'LOCAL_MODULE_TAGS',
'LOCAL_MODULE']
class VarsDict(collections.namedtuple('VarsDict', VAR_NAMES)):
"""Custom class for storing the arguments to Android.mk variables.
Can also be treated as a dictionary with fixed keys.
"""
__slots__ = ()
def __new__(cls):
lists = []
    # TODO (scroggo): Is there a better way to add N items?
for __unused__ in range(len(VAR_NAMES)):
lists.append(OrderedSet())
return tuple.__new__(cls, lists)
def keys(self):
"""Return the field names as strings.
"""
return self._fields
def __getitem__(self, index):
"""Return an item, indexed by a number or a string.
"""
if type(index) == types.IntType:
# Treat the index as an array index into a tuple.
return tuple.__getitem__(self, index)
if type(index) == types.StringType:
# Treat the index as a key into a dictionary.
return eval('self.%s' % index)
return None
def intersect(var_dict_list):
"""Compute intersection of VarsDicts.
Find the intersection of a list of VarsDicts and trim each input to its
unique entries.
Args:
var_dict_list: list of VarsDicts. WARNING: each VarsDict will be
modified in place, to remove the common elements!
Returns:
VarsDict containing list entries common to all VarsDicts in
var_dict_list
"""
intersection = VarsDict()
# First VarsDict
var_dict_a = var_dict_list[0]
# The rest.
other_var_dicts = var_dict_list[1:]
for key in var_dict_a.keys():
# Copy A's list, so we can continue iterating after modifying the original.
a_list = list(var_dict_a[key])
for item in a_list:
# If item is in all lists, add to intersection, and remove from all.
in_all_lists = True
for var_dict in other_var_dicts:
if not item in var_dict[key]:
in_all_lists = False
break
if in_all_lists:
intersection[key].add(item)
for var_dict in var_dict_list:
var_dict[key].remove(item)
return intersection
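# Illustrative sketch, not part of the original module: intersect() moves the shared
# entries of two VarsDicts into a new VarsDict and leaves only the unique ones behind.
# The flag values are made up for the example.
def _example_intersect():
  debug_vars = VarsDict()
  release_vars = VarsDict()
  for var_dict, flag in ((debug_vars, '-g'), (release_vars, '-O2')):
    var_dict['LOCAL_CFLAGS'].add('-Wall')  # common to both configurations
    var_dict['LOCAL_CFLAGS'].add(flag)     # unique to one configuration
  common = intersect([debug_vars, release_vars])
  # common['LOCAL_CFLAGS'] now holds '-Wall'; each input keeps only its own flag.
  return common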
| bsd-3-clause |
uri-mog/dreampie | dreampielib/gui/file_dialogs.py | 3 | 4605 | # Copyright 2010 Noam Yorav-Raphael
#
# This file is part of DreamPie.
#
# DreamPie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DreamPie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DreamPie. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['open_dialog', 'save_dialog']
"""
Easy to use wrappers around GTK file dialogs.
"""
import os
from os.path import abspath, dirname, basename, exists
import gtk
# Support translation in the future
_ = lambda s: s
def open_dialog(func, title, parent, filter_name, filter_pattern):
"""
Display the Open dialog.
func - a function which gets a file name and does something. If it throws
    an IOError, it will be caught and the user will get another chance.
title - window title
parent - parent window, or None
filter_name - "HTML Files"
filter_pattern - "*.html"
"""
d = gtk.FileChooserDialog(
title, parent,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
fil = gtk.FileFilter()
fil.set_name(filter_name)
fil.add_pattern(filter_pattern)
d.add_filter(fil)
while True:
r = d.run()
if r != gtk.RESPONSE_OK:
break
filename = abspath(d.get_filename().decode('utf8'))
try:
func(filename)
except IOError, e:
m = gtk.MessageDialog(d, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING,
gtk.BUTTONS_OK)
m.props.text = _('Error when loading file: %s') % e
m.run()
m.destroy()
else:
break
d.destroy()
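# Illustrative sketch, not part of the original module: a caller hands open_dialog a
# loader callback; any IOError the callback raises is caught above and the user is
# re-prompted. The window title and file filter here are assumptions.
def _example_open_dialog(parent_window, load_session):
    open_dialog(load_session, _('Open Session'), parent_window,
                _('DreamPie Session Files'), '*.dp')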
def save_dialog(func, title, parent, filter_name, filter_pattern, auto_ext=None,
prev_dir=None, prev_name=None):
"""
Display the Save As dialog.
func - a function which gets a file name and does something. If it throws
    an IOError, it will be caught and the user will get another chance.
title - window title
parent - parent window, or None
filter_name - "HTML Files"
filter_pattern - "*.html"
auto_ext - "html", if not None will be added if no extension given.
prev_dir, prev_name - will set the default if given.
Return True if file was saved.
"""
d = gtk.FileChooserDialog(
title, parent,
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
fil = gtk.FileFilter()
fil.set_name(filter_name)
fil.add_pattern(filter_pattern)
d.add_filter(fil)
if prev_dir:
d.set_current_folder(prev_dir)
if prev_name:
d.set_current_name(prev_name)
saved = False
while True:
r = d.run()
if r != gtk.RESPONSE_OK:
break
filename = abspath(d.get_filename()).decode('utf8')
if auto_ext and not os.path.splitext(filename)[1]:
filename += os.path.extsep + auto_ext
if exists(filename):
m = gtk.MessageDialog(d, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION)
m.props.text = _('A file named "%s" already exists. Do '
'you want to replace it?'
) % basename(filename)
m.props.secondary_text = _(
'The file already exists in "%s". Replacing it will '
'overwrite its contents.'
) % basename(dirname(filename))
m.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
m.add_button(_('_Replace'), gtk.RESPONSE_OK)
m.set_default_response(gtk.RESPONSE_CANCEL)
mr = m.run()
m.destroy()
if mr == gtk.RESPONSE_CANCEL:
continue
try:
func(filename)
except IOError, e:
m = gtk.MessageDialog(d, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING,
gtk.BUTTONS_OK)
m.props.text = _('Error when saving file: %s') % e
m.run()
m.destroy()
else:
saved = True
break
d.destroy()
return saved | gpl-3.0 |
a4fr/my_nerd_bot | GoogleAppEngine/lib/requests/packages/urllib3/util/url.py | 553 | 5836 | from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
    __slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
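# Illustrative sketch, not part of the original module: building a Url directly and
# reading the derived properties defined above.
def _example_url_properties():
    u = Url(scheme='http', host='example.com', port=8080,
            path='/search', query='q=urllib3')
    # u.url         -> 'http://example.com:8080/search?q=urllib3'
    # u.request_uri -> '/search?q=urllib3'
    # u.netloc      -> 'example.com:8080'
    return u.url, u.request_uri, u.netloc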
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
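# Illustrative sketch, not part of the original module: the deprecated get_host()
# wrapper defaults the scheme to 'http' when the url does not carry one.
def _example_get_host():
    return get_host('google.com:80')  # -> ('http', 'google.com', 80)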
| apache-2.0 |
vitmod/enigma2-test | lib/python/Components/FanControl.py | 35 | 4097 | import os
from Components.config import config, ConfigSubList, ConfigSubsection, ConfigSlider
from Tools.BoundFunction import boundFunction
import NavigationInstance
from enigma import iRecordableService, pNavigation
from boxbranding import getBoxType
class FanControl:
# ATM there's only support for one fan
def __init__(self):
if os.path.exists("/proc/stb/fp/fan_vlt") or os.path.exists("/proc/stb/fp/fan_pwm") or os.path.exists("/proc/stb/fp/fan_speed"):
self.fancount = 1
else:
self.fancount = 0
self.createConfig()
config.misc.standbyCounter.addNotifier(self.standbyCounterChanged, initial_call = False)
def setVoltage_PWM(self):
for fanid in range(self.getFanCount()):
cfg = self.getConfig(fanid)
self.setVoltage(fanid, cfg.vlt.value)
self.setPWM(fanid, cfg.pwm.value)
print "[FanControl]: setting fan values: fanid = %d, voltage = %d, pwm = %d" % (fanid, cfg.vlt.value, cfg.pwm.value)
def setVoltage_PWM_Standby(self):
for fanid in range(self.getFanCount()):
cfg = self.getConfig(fanid)
self.setVoltage(fanid, cfg.vlt_standby.value)
self.setPWM(fanid, cfg.pwm_standby.value)
print "[FanControl]: setting fan values (standby mode): fanid = %d, voltage = %d, pwm = %d" % (fanid, cfg.vlt_standby.value, cfg.pwm_standby.value)
def getRecordEvent(self, recservice, event):
recordings = len(NavigationInstance.instance.getRecordings(False,pNavigation.isRealRecording))
if event == iRecordableService.evEnd:
if recordings == 0:
self.setVoltage_PWM_Standby()
elif event == iRecordableService.evStart:
if recordings == 1:
self.setVoltage_PWM()
def leaveStandby(self):
NavigationInstance.instance.record_event.remove(self.getRecordEvent)
recordings = NavigationInstance.instance.getRecordings(False,pNavigation.isRealRecording)
if not recordings:
self.setVoltage_PWM()
def standbyCounterChanged(self, configElement):
from Screens.Standby import inStandby
inStandby.onClose.append(self.leaveStandby)
recordings = NavigationInstance.instance.getRecordings(False,pNavigation.isRealRecording)
NavigationInstance.instance.record_event.append(self.getRecordEvent)
if not recordings:
self.setVoltage_PWM_Standby()
def createConfig(self):
def setVlt(fancontrol, fanid, configElement):
fancontrol.setVoltage(fanid, configElement.value)
def setPWM(fancontrol, fanid, configElement):
fancontrol.setPWM(fanid, configElement.value)
config.fans = ConfigSubList()
for fanid in range(self.getFanCount()):
fan = ConfigSubsection()
fan.vlt = ConfigSlider(default = 15, increment = 5, limits = (0, 255))
if getBoxType() == 'tm2t':
fan.pwm = ConfigSlider(default = 150, increment = 5, limits = (0, 255))
			elif getBoxType() == 'tmsingle':
fan.pwm = ConfigSlider(default = 100, increment = 5, limits = (0, 255))
else:
fan.pwm = ConfigSlider(default = 50, increment = 5, limits = (0, 255))
fan.vlt_standby = ConfigSlider(default = 5, increment = 5, limits = (0, 255))
fan.pwm_standby = ConfigSlider(default = 0, increment = 5, limits = (0, 255))
fan.vlt.addNotifier(boundFunction(setVlt, self, fanid))
fan.pwm.addNotifier(boundFunction(setPWM, self, fanid))
config.fans.append(fan)
def getConfig(self, fanid):
return config.fans[fanid]
def getFanCount(self):
return self.fancount
def hasRPMSensor(self, fanid):
return os.path.exists("/proc/stb/fp/fan_speed")
def hasFanControl(self, fanid):
return os.path.exists("/proc/stb/fp/fan_vlt") or os.path.exists("/proc/stb/fp/fan_pwm")
def getFanSpeed(self, fanid):
return int(open("/proc/stb/fp/fan_speed", "r").readline().strip()[:-4])
def getVoltage(self, fanid):
return int(open("/proc/stb/fp/fan_vlt", "r").readline().strip(), 16)
def setVoltage(self, fanid, value):
if value > 255:
return
open("/proc/stb/fp/fan_vlt", "w").write("%x" % value)
def getPWM(self, fanid):
return int(open("/proc/stb/fp/fan_pwm", "r").readline().strip(), 16)
def setPWM(self, fanid, value):
if value > 255:
return
open("/proc/stb/fp/fan_pwm", "w").write("%x" % value)
fancontrol = FanControl()
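# Illustrative sketch, not part of the original plugin: reading the current fan state
# through the module-level fancontrol instance, guarded by the capability checks above.
def _example_fan_status():
	if not fancontrol.getFanCount():
		return None
	has_control = fancontrol.hasFanControl(0)
	voltage = fancontrol.getVoltage(0) if has_control else None
	pwm = fancontrol.getPWM(0) if has_control else None
	rpm = fancontrol.getFanSpeed(0) if fancontrol.hasRPMSensor(0) else None
	return voltage, pwm, rpm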
| gpl-2.0 |
TeamEOS/external_chromium_org | net/tools/testserver/backoff_server.py | 189 | 2760 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP server for manually testing exponential
back-off functionality in Chrome.
"""
import BaseHTTPServer
import sys
import urlparse
AJAX_TEST_PAGE = '''
<html>
<head>
<script>
function reportResult(txt) {
var element = document.createElement('p');
element.innerHTML = txt;
document.body.appendChild(element);
}
function fetch() {
var response_code = document.getElementById('response_code');
xmlhttp = new XMLHttpRequest();
xmlhttp.open("GET",
"http://%s:%d/%s?code=" + response_code.value,
true);
xmlhttp.onreadystatechange = function() {
reportResult(
'readyState=' + xmlhttp.readyState + ', status=' + xmlhttp.status);
}
try {
xmlhttp.send(null);
} catch (e) {
reportResult('Exception: ' + e);
}
}
</script>
</head>
<body>
<form action="javascript:fetch()">
Response code to get: <input id="response_code" type="text" value="503">
<input type="submit">
</form>
</body>
</html>'''
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
keep_running = True
local_ip = ''
port = 0
def do_GET(self):
if self.path == '/quitquitquit':
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('QUITTING')
RequestHandler.keep_running = False
return
if self.path.startswith('/ajax/'):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(AJAX_TEST_PAGE % (self.local_ip,
self.port,
self.path[6:]))
return
params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
if not params or not 'code' in params or params['code'][0] == '200':
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('OK')
else:
status_code = int(params['code'][0])
self.send_response(status_code)
self.end_headers()
self.wfile.write('Error %d' % int(status_code))
def main():
if len(sys.argv) != 3:
print "Usage: %s LOCAL_IP PORT" % sys.argv[0]
sys.exit(1)
RequestHandler.local_ip = sys.argv[1]
port = int(sys.argv[2])
RequestHandler.port = port
print "To stop the server, go to http://localhost:%d/quitquitquit" % port
httpd = BaseHTTPServer.HTTPServer(('', port), RequestHandler)
while RequestHandler.keep_running:
httpd.handle_request()
if __name__ == '__main__':
main()
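# Illustrative sketch, not part of the original script: request paths a client can use
# once the server is running. Only the /ajax/ prefix, the code= parameter and
# /quitquitquit come from do_GET above; the status code value is an arbitrary example.
def _example_request_paths():
  return [
      '/?code=503',     # respond with the given status code
      '/ajax/',         # serve AJAX_TEST_PAGE, whose form fetches ?code=<n>
      '/quitquitquit',  # shut the server down
  ]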
| bsd-3-clause |
bogde/xbmc.serialepenet.ro | bs4/builder/_lxml.py | 446 | 8661 | __all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, unicode):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, unicode):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(markup, try_encodings, is_html)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
        # turn them into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
features = [LXML, HTML, FAST, PERMISSIVE]
is_xml = False
def default_parser(self, encoding):
return etree.HTMLParser
def feed(self, markup):
encoding = self.soup.original_encoding
try:
self.parser = self.parser_for(encoding)
self.parser.feed(markup)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
raise ParserRejectedMarkup(str(e))
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><body>%s</body></html>' % fragment
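# Illustrative sketch, not part of the original module: these builders are normally
# selected by feature name through the BeautifulSoup constructor rather than
# instantiated directly.
def _example_builder_selection():
    from bs4 import BeautifulSoup
    html_soup = BeautifulSoup('<p>Hello</p>', 'lxml')          # uses LXMLTreeBuilder
    xml_soup = BeautifulSoup('<root><a/></root>', 'xml')       # uses LXMLTreeBuilderForXML
    return html_soup.p.string, xml_soup.root.a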
| mit |
valentin-krasontovitsch/ansible | lib/ansible/playbook/base.py | 1 | 25893 | # Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import operator
from copy import copy as shallowcopy
from functools import partial
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.module_utils.six import iteritems, string_types, with_metaclass
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils._text import to_text, to_native
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.parsing.dataloader import DataLoader
from ansible.utils.display import Display
from ansible.utils.sentinel import Sentinel
from ansible.utils.vars import combine_vars, isidentifier, get_unique_id
display = Display()
def _generic_g(prop_name, self):
try:
value = self._attributes[prop_name]
except KeyError:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
if value is Sentinel:
value = self._attr_defaults[prop_name]
return value
def _generic_g_method(prop_name, self):
try:
if self._squashed:
return self._attributes[prop_name]
method = "_get_attr_%s" % prop_name
return getattr(self, method)()
except KeyError:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
def _generic_g_parent(prop_name, self):
try:
if self._squashed or self._finalized:
value = self._attributes[prop_name]
else:
try:
value = self._get_parent_attribute(prop_name)
except AttributeError:
value = self._attributes[prop_name]
except KeyError:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
if value is Sentinel:
value = self._attr_defaults[prop_name]
return value
def _generic_s(prop_name, self, value):
self._attributes[prop_name] = value
def _generic_d(prop_name, self):
del self._attributes[prop_name]
class BaseMeta(type):
"""
Metaclass for the Base object, which is used to construct the class
attributes based on the FieldAttributes available.
"""
def __new__(cls, name, parents, dct):
def _create_attrs(src_dict, dst_dict):
'''
Helper method which creates the attributes based on those in the
source dictionary of attributes. This also populates the other
attributes used to keep track of these attributes and via the
getter/setter/deleter methods.
'''
keys = list(src_dict.keys())
for attr_name in keys:
value = src_dict[attr_name]
if isinstance(value, Attribute):
if attr_name.startswith('_'):
attr_name = attr_name[1:]
# here we selectively assign the getter based on a few
# things, such as whether we have a _get_attr_<name>
# method, or if the attribute is marked as not inheriting
# its value from a parent object
method = "_get_attr_%s" % attr_name
if method in src_dict or method in dst_dict:
getter = partial(_generic_g_method, attr_name)
elif ('_get_parent_attribute' in dst_dict or '_get_parent_attribute' in src_dict) and value.inherit:
getter = partial(_generic_g_parent, attr_name)
else:
getter = partial(_generic_g, attr_name)
setter = partial(_generic_s, attr_name)
deleter = partial(_generic_d, attr_name)
dst_dict[attr_name] = property(getter, setter, deleter)
dst_dict['_valid_attrs'][attr_name] = value
dst_dict['_attributes'][attr_name] = Sentinel
dst_dict['_attr_defaults'][attr_name] = value.default
if value.alias is not None:
dst_dict[value.alias] = property(getter, setter, deleter)
dst_dict['_valid_attrs'][value.alias] = value
dst_dict['_alias_attrs'][value.alias] = attr_name
def _process_parents(parents, dst_dict):
'''
Helper method which creates attributes from all parent objects
recursively on through grandparent objects
'''
for parent in parents:
if hasattr(parent, '__dict__'):
_create_attrs(parent.__dict__, dst_dict)
new_dst_dict = parent.__dict__.copy()
new_dst_dict.update(dst_dict)
_process_parents(parent.__bases__, new_dst_dict)
# create some additional class attributes
dct['_attributes'] = {}
dct['_attr_defaults'] = {}
dct['_valid_attrs'] = {}
dct['_alias_attrs'] = {}
# now create the attributes based on the FieldAttributes
# available, including from parent (and grandparent) objects
_create_attrs(dct, dct)
_process_parents(parents, dct)
return super(BaseMeta, cls).__new__(cls, name, parents, dct)
class FieldAttributeBase(with_metaclass(BaseMeta, object)):
def __init__(self):
# initialize the data loader and variable manager, which will be provided
# later when the object is actually loaded
self._loader = None
self._variable_manager = None
# other internal params
self._validated = False
self._squashed = False
self._finalized = False
# every object gets a random uuid:
self._uuid = get_unique_id()
# we create a copy of the attributes here due to the fact that
# it was initialized as a class param in the meta class, so we
# need a unique object here (all members contained within are
# unique already).
self._attributes = self.__class__._attributes.copy()
self._attr_defaults = self.__class__._attr_defaults.copy()
for key, value in self._attr_defaults.items():
if callable(value):
self._attr_defaults[key] = value()
# and init vars, avoid using defaults in field declaration as it lives across plays
self.vars = dict()
def dump_me(self, depth=0):
''' this is never called from production code, it is here to be used when debugging as a 'complex print' '''
if depth == 0:
display.debug("DUMPING OBJECT ------------------------------------------------------")
display.debug("%s- %s (%s, id=%s)" % (" " * depth, self.__class__.__name__, self, id(self)))
if hasattr(self, '_parent') and self._parent:
self._parent.dump_me(depth + 2)
dep_chain = self._parent.get_dep_chain()
if dep_chain:
for dep in dep_chain:
dep.dump_me(depth + 2)
if hasattr(self, '_play') and self._play:
self._play.dump_me(depth + 2)
def preprocess_data(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
for base_class in self.__class__.mro():
method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
if method:
return method(ds)
return ds
def load_data(self, ds, variable_manager=None, loader=None):
''' walk the input datastructure and assign any values '''
if ds is None:
raise AnsibleAssertionError('ds (%s) should not be None but it is.' % ds)
# cache the datastructure internally
setattr(self, '_ds', ds)
# the variable manager class is used to manage and merge variables
# down to a single dictionary for reference in templating, etc.
self._variable_manager = variable_manager
# the data loader class is used to parse data from strings and files
if loader is not None:
self._loader = loader
else:
self._loader = DataLoader()
# call the preprocess_data() function to massage the data into
# something we can more easily parse, and then call the validation
# function on it to ensure there are no incorrect key values
ds = self.preprocess_data(ds)
self._validate_attributes(ds)
# Walk all attributes in the class. We sort them based on their priority
# so that certain fields can be loaded before others, if they are dependent.
for name, attr in sorted(iteritems(self._valid_attrs), key=operator.itemgetter(1)):
# copy the value over unless a _load_field method is defined
target_name = name
if name in self._alias_attrs:
target_name = self._alias_attrs[name]
if name in ds:
method = getattr(self, '_load_%s' % name, None)
if method:
self._attributes[target_name] = method(name, ds[name])
else:
self._attributes[target_name] = ds[name]
# run early, non-critical validation
self.validate()
# return the constructed object
return self
def get_ds(self):
try:
return getattr(self, '_ds')
except AttributeError:
return None
def get_loader(self):
return self._loader
def get_variable_manager(self):
return self._variable_manager
def _validate_debugger(self, attr, name, value):
valid_values = frozenset(('always', 'on_failed', 'on_unreachable', 'on_skipped', 'never'))
if value and isinstance(value, string_types) and value not in valid_values:
raise AnsibleParserError("'%s' is not a valid value for debugger. Must be one of %s" % (value, ', '.join(valid_values)), obj=self.get_ds())
return value
def _validate_attributes(self, ds):
'''
Ensures that there are no keys in the datastructure which do
not map to attributes for this object.
'''
valid_attrs = frozenset(self._valid_attrs.keys())
for key in ds:
if key not in valid_attrs:
raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
def validate(self, all_vars=None):
''' validation that is done at parse time, not load time '''
all_vars = {} if all_vars is None else all_vars
if not self._validated:
# walk all fields in the object
for (name, attribute) in iteritems(self._valid_attrs):
if name in self._alias_attrs:
name = self._alias_attrs[name]
# run validator only if present
method = getattr(self, '_validate_%s' % name, None)
if method:
method(attribute, name, getattr(self, name))
else:
# and make sure the attribute is of the type it should be
value = self._attributes[name]
if value is not None:
if attribute.isa == 'string' and isinstance(value, (list, dict)):
raise AnsibleParserError(
"The field '%s' is supposed to be a string type,"
" however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds()
)
self._validated = True
def squash(self):
'''
Evaluates all attributes and sets them to the evaluated version,
so that all future accesses of attributes do not need to evaluate
parent attributes.
'''
if not self._squashed:
for name in self._valid_attrs.keys():
self._attributes[name] = getattr(self, name)
self._squashed = True
def copy(self):
'''
Create a copy of this object and return it.
'''
new_me = self.__class__()
for name in self._valid_attrs.keys():
if name in self._alias_attrs:
continue
new_me._attributes[name] = shallowcopy(self._attributes[name])
new_me._attr_defaults[name] = shallowcopy(self._attr_defaults[name])
new_me._loader = self._loader
new_me._variable_manager = self._variable_manager
new_me._validated = self._validated
new_me._finalized = self._finalized
new_me._uuid = self._uuid
# if the ds value was set on the object, copy it to the new copy too
if hasattr(self, '_ds'):
new_me._ds = self._ds
return new_me
def post_validate(self, templar):
'''
we can't tell that everything is of the right type until we have
all the variables. Run basic types (from isa) as well as
any _post_validate_<foo> functions.
'''
# save the omit value for later checking
omit_value = templar._available_variables.get('omit')
for (name, attribute) in iteritems(self._valid_attrs):
if attribute.static:
value = getattr(self, name)
if templar.is_template(value):
display.warning('"%s" is not templatable, but we found: %s, '
'it will not be templated and will be used "as is".' % (name, value))
continue
if getattr(self, name) is None:
if not attribute.required:
continue
else:
raise AnsibleParserError("the field '%s' is required but was not set" % name)
elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
# Intermediate objects like Play() won't have their fields validated by
# default, as their values are often inherited by other objects and validated
# later, so we don't want them to fail out early
continue
try:
# Run the post-validator if present. These methods are responsible for
# using the given templar to template the values, if required.
method = getattr(self, '_post_validate_%s' % name, None)
if method:
value = method(attribute, getattr(self, name), templar)
elif attribute.isa == 'class':
value = getattr(self, name)
else:
# if the attribute contains a variable, template it now
value = templar.template(getattr(self, name))
# if this evaluated to the omit value, set the value back to
# the default specified in the FieldAttribute and move on
if omit_value is not None and value == omit_value:
if callable(attribute.default):
setattr(self, name, attribute.default())
else:
setattr(self, name, attribute.default)
continue
# and make sure the attribute is of the type it should be
if value is not None:
if attribute.isa == 'string':
value = to_text(value)
elif attribute.isa == 'int':
value = int(value)
elif attribute.isa == 'float':
value = float(value)
elif attribute.isa == 'bool':
value = boolean(value, strict=False)
elif attribute.isa == 'percent':
# special value, which may be an integer or float
# with an optional '%' at the end
if isinstance(value, string_types) and '%' in value:
value = value.replace('%', '')
value = float(value)
elif attribute.isa == 'list':
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
if attribute.listof is not None:
for item in value:
if not isinstance(item, attribute.listof):
raise AnsibleParserError("the field '%s' should be a list of %s, "
"but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
elif attribute.required and attribute.listof == string_types:
if item is None or item.strip() == "":
raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
elif attribute.isa == 'set':
if value is None:
value = set()
elif not isinstance(value, (list, set)):
if isinstance(value, string_types):
value = value.split(',')
else:
# Making a list like this handles strings of
# text and bytes properly
value = [value]
if not isinstance(value, set):
value = set(value)
elif attribute.isa == 'dict':
if value is None:
value = dict()
elif not isinstance(value, dict):
raise TypeError("%s is not a dictionary" % value)
elif attribute.isa == 'class':
if not isinstance(value, attribute.class_type):
raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
value.post_validate(templar=templar)
# and assign the massaged value back to the attribute field
setattr(self, name, value)
except (TypeError, ValueError) as e:
value = getattr(self, name)
raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
"The error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds(), orig_exc=e)
except (AnsibleUndefinedVariable, UndefinedError) as e:
if templar._fail_on_undefined_errors and name != 'name':
if name == 'args':
msg = "The task includes an option with an undefined variable. The error was: %s" % (to_native(e))
else:
msg = "The field '%s' has an invalid value, which includes an undefined variable. The error was: %s" % (name, to_native(e))
raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)
self._finalized = True
def _load_vars(self, attr, ds):
'''
Vars in a play can be specified either as a dictionary directly, or
        as a list of dictionaries. If the latter, this method will turn the
list into a single dictionary.
'''
def _validate_variable_keys(ds):
for key in ds:
if not isidentifier(key):
raise TypeError("'%s' is not a valid variable name" % key)
try:
if isinstance(ds, dict):
_validate_variable_keys(ds)
return combine_vars(self.vars, ds)
elif isinstance(ds, list):
all_vars = self.vars
for item in ds:
if not isinstance(item, dict):
raise ValueError
_validate_variable_keys(item)
all_vars = combine_vars(all_vars, item)
return all_vars
elif ds is None:
return {}
else:
raise ValueError
except ValueError as e:
raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__,
obj=ds, orig_exc=e)
except TypeError as e:
raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds, orig_exc=e)
def _extend_value(self, value, new_value, prepend=False):
'''
Will extend the value given with new_value (and will turn both
into lists if they are not so already). The values are run through
a set to remove duplicate values.
'''
if not isinstance(value, list):
value = [value]
if not isinstance(new_value, list):
new_value = [new_value]
# Due to where _extend_value may run for some attributes
# it is possible to end up with Sentinel in the list of values
# ensure we strip them
value[:] = [v for v in value if v is not Sentinel]
new_value[:] = [v for v in new_value if v is not Sentinel]
if prepend:
combined = new_value + value
else:
combined = value + new_value
return [i for i, _ in itertools.groupby(combined) if i is not None]
def dump_attrs(self):
'''
Dumps all attributes to a dictionary
'''
attrs = dict()
for (name, attribute) in iteritems(self._valid_attrs):
attr = getattr(self, name)
if attribute.isa == 'class' and attr is not None and hasattr(attr, 'serialize'):
attrs[name] = attr.serialize()
else:
attrs[name] = attr
return attrs
def from_attrs(self, attrs):
'''
Loads attributes from a dictionary
'''
for (attr, value) in iteritems(attrs):
if attr in self._valid_attrs:
attribute = self._valid_attrs[attr]
if attribute.isa == 'class' and isinstance(value, dict):
obj = attribute.class_type()
obj.deserialize(value)
setattr(self, attr, obj)
else:
setattr(self, attr, value)
def serialize(self):
'''
Serializes the object derived from the base object into
a dictionary of values. This only serializes the field
attributes for the object, so this may need to be overridden
for any classes which wish to add additional items not stored
as field attributes.
'''
repr = self.dump_attrs()
# serialize the uuid field
repr['uuid'] = self._uuid
repr['finalized'] = self._finalized
repr['squashed'] = self._squashed
return repr
def deserialize(self, data):
'''
Given a dictionary of values, load up the field attributes for
this object. As with serialize(), if there are any non-field
attribute data members, this method will need to be overridden
and extended.
'''
if not isinstance(data, dict):
raise AnsibleAssertionError('data (%s) should be a dict but is a %s' % (data, type(data)))
for (name, attribute) in iteritems(self._valid_attrs):
if name in data:
setattr(self, name, data[name])
else:
if callable(attribute.default):
setattr(self, name, attribute.default())
else:
setattr(self, name, attribute.default)
# restore the UUID field
setattr(self, '_uuid', data.get('uuid'))
self._finalized = data.get('finalized', False)
self._squashed = data.get('squashed', False)
class Base(FieldAttributeBase):
_name = FieldAttribute(isa='string', default='', always_post_validate=True, inherit=False)
# connection/transport
_connection = FieldAttribute(isa='string')
_port = FieldAttribute(isa='int')
_remote_user = FieldAttribute(isa='string')
# variables
_vars = FieldAttribute(isa='dict', priority=100, inherit=False)
# module default params
_module_defaults = FieldAttribute(isa='list', extend=True, prepend=True)
# flags and misc. settings
_environment = FieldAttribute(isa='list', extend=True, prepend=True)
_no_log = FieldAttribute(isa='bool')
_run_once = FieldAttribute(isa='bool')
_ignore_errors = FieldAttribute(isa='bool')
_ignore_unreachable = FieldAttribute(isa='bool')
_check_mode = FieldAttribute(isa='bool')
_diff = FieldAttribute(isa='bool')
_any_errors_fatal = FieldAttribute(isa='bool', default=C.ANY_ERRORS_FATAL)
# explicitly invoke a debugger on tasks
_debugger = FieldAttribute(isa='string')
# param names which have been deprecated/removed
DEPRECATED_ATTRIBUTES = [
'sudo', 'sudo_user', 'sudo_pass', 'sudo_exe', 'sudo_flags',
'su', 'su_user', 'su_pass', 'su_exe', 'su_flags',
]
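# Illustrative sketch, not part of the original module: how a playbook object built on
# Base declares FieldAttributes and loads raw data. The attribute names and values here
# are made up for the example.
def _example_field_attribute_usage():
    class ExampleObject(Base):
        _banner = FieldAttribute(isa='string', default='hello')
        _retries = FieldAttribute(isa='int', default=1)

    obj = ExampleObject()
    obj.load_data({'name': 'demo', 'retries': 3})
    # obj.retries -> 3 (taken from the data), obj.banner -> 'hello' (the declared default)
    return obj.banner, obj.retries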
| gpl-3.0 |
gentunian/tellapic | src/client/python/pyqt/main_test.py | 2 | 14074 | from PyQt4 import QtCore, QtGui
#from waiting import Ui_Waiting
from main import Ui_Main
import pytellapic
import select
import time
import Queue
import threading
import signal
import os
class MainTest(QtGui.QDialog):
ctl = { pytellapic.CTL_CL_FILEASK : 'CTL_CL_FILEASK',
pytellapic.CTL_CL_FILEOK : 'CTL_CL_FILEOK',
pytellapic.CTL_SV_PWDFAIL : 'CTL_SV_PWDFAIL',
pytellapic.CTL_SV_PWDOK : 'CTL_SV_PWDOK',
pytellapic.CTL_SV_PWDASK : 'CTL_SV_PWDASK',
pytellapic.CTL_SV_CLIST : 'CTL_SV_CLIST',
pytellapic.CTL_SV_CLRM : 'CTL_SV_CLRM',
pytellapic.CTL_SV_ID : 'CTL_SV_ID0',
pytellapic.CTL_SV_NAMEINUSE : 'CTL_SV_NAMEINUSE',
pytellapic.CTL_SV_AUTHOK : 'CTL_SV_AUTHOK'
}
ctli = { pytellapic.CTL_SV_FILE : 'CTL_SV_FILE',
pytellapic.CTL_CL_PWD : 'CTL_CL_PWD',
pytellapic.CTL_CL_NAME : 'CTL_CL_NAME',
pytellapic.CTL_SV_CLADD : 'CTL_SV_CLADD'
}
ctlchat = { pytellapic.CTL_CL_BMSG : 'CTL_CL_BMSG',
pytellapic.CTL_CL_PMSG : 'CTL_CL_PMSG'
}
ctldrawing = { pytellapic.CTL_CL_FIG : 'CTL_CL_FIG',
pytellapic.CTL_CL_DRW : 'CTL_CL_DRW'
}
cbyte = {pytellapic.CTL_CL_BMSG : 'CTL_CL_BMSG',
pytellapic.CTL_CL_PMSG : 'CTL_CL_PMSG',
pytellapic.CTL_CL_FIG : 'CTL_CL_FIG',
pytellapic.CTL_CL_DRW : 'CTL_CL_DRW',
pytellapic.CTL_CL_CLIST: 'CTL_CL_CLIST',
pytellapic.CTL_CL_PWD: 'CTL_CL_PWD',
pytellapic.CTL_CL_FILEASK: 'CTL_CL_FILEASK',
pytellapic.CTL_CL_FILEOK: 'CTL_CL_FILEOK',
pytellapic.CTL_CL_DISC: 'CTL_CL_DISC',
pytellapic.CTL_CL_NAME:'CTL_CL_NAME',
pytellapic.CTL_SV_CLRM:'CTL_SV_CLRM',
pytellapic.CTL_SV_CLADD:'CTL_SV_CLADD',
pytellapic.CTL_SV_CLIST:'CTL_SV_CLIST',
pytellapic.CTL_SV_PWDASK:'CTL_SV_PWDASK',
pytellapic.CTL_SV_PWDOK:'CTL_SV_PWDOK',
pytellapic.CTL_SV_PWDFAIL:'CTL_SV_PWDFAIL',
pytellapic.CTL_SV_FILE:'CTL_SV_FILE',
pytellapic.CTL_SV_ID:'CTL_SV_ID',
pytellapic.CTL_SV_NAMEINUSE: 'CTL_SV_NAMEINUSE',
pytellapic.CTL_SV_AUTHOK: 'CTL_SV_AUTHOK',
pytellapic.CTL_FAIL : 'CTL_FAIL'}
def __init__(self, queue, endcommand, fd):
QtGui.QDialog.__init__(self)
# Set up the user interface from Designer.
self.fd = fd
self.id = fd
self.ui = Ui_Main()
self.ui.setupUi(self)
self.setModal(False)
self.queue = queue
self.endcommand = endcommand
self.ui.receiveButton.setEnabled(False)
# Make some local modifications.
#self.ui.colorDepthCombo.addItem("2 colors (1 bit per pixel)")
# Connect up the buttons.
#self.connect(self.ui.exitButton, QtCore.SIGNAL("clicked()"), self, QtCore.SLOT("reject()"))
@QtCore.pyqtSlot()
def on_sendButton_clicked(self):
for value, name in self.cbyte.iteritems():
if name == self.ui.headerCByte.currentText():
break
if value in self.ctl:
pytellapic.tellapic_send_ctl(self.fd, int(self.ui.svcontrolIdFrom.text()), value)
elif value in self.ctli:
pytellapic.tellapic_send_ctle(self.fd, int(self.ui.svcontrolIdFrom.text()), value, self.ui.svcontrolInfo.toPlainText().length(), str(self.ui.svcontrolInfo.toPlainText()))
elif value in self.ctlchat:
try:
idto = int(self.ui.chatIdTo.text())
pytellapic.tellapic_send_chatp(self.fd, int(self.ui.chatIdFrom.text()), idto, self.ui.chatText.toPlainText().size(), str(self.ui.chatText.toPlainText()))
except:
idto = 0
pytellapic.tellapic_send_chatb(self.fd, int(self.ui.chatIdFrom.text()), self.ui.chatText.toPlainText().size(), str(self.ui.chatText.toPlainText()))
elif value in self.ctldrawing:
if value == pytellapic.CTL_CL_FIG:
stream = pytellapic.stream_t()
stream.header.endian = 0
stream.header.cbyte = value
stream.header.ssize = pytellapic.FIG_STREAM_SIZE
stream.data.drawing.idfrom = self.id
stream.data.drawing.dcbyte = int(self.ui.drawingDCByte.text())
stream.data.drawing.dnumber = int(self.ui.drawingNumber.text())
stream.data.drawing.width = float(self.ui.drawingWidth.text())
stream.data.drawing.opacity = float(self.ui.drawingOpacity.text())
stream.data.drawing.color.red = 255
stream.data.drawing.color.green = 125
stream.data.drawing.color.blue = 1
stream.data.drawing.point1.x = int(self.ui.drawingXCoordinate.text())
stream.data.drawing.point1.y = int(self.ui.drawingYCoordinate.text())
stream.data.drawing.type.figure.endcaps = int(self.ui.figureEndCaps.text())
stream.data.drawing.type.figure.linejoin = int(self.ui.figureLineJoin.text())
stream.data.drawing.type.figure.miterlimit = float(self.ui.figureMiterLimit.text())
stream.data.drawing.type.figure.dash_phase = float(self.ui.figureDashPhase.text())
stream.data.drawing.type.figure.point2.x = int(self.ui.figureEndXCoordinate.text())
stream.data.drawing.type.figure.point2.y = int(self.ui.figureEndYCoordinate.text())
pytellapic.tellapic_send(self.fd, stream)
else:
pass
@QtCore.pyqtSlot()
def on_exitButton_clicked(self):
print("exiting gui...")
pytellapic.tellapic_close_fd(self.fd)
self.endcommand()
@QtCore.pyqtSlot()
def on_receiveButton_clicked(self):
if self.fd == 0:
self.fd = pytellapic.tellapic_connect_to("arg1v1.dyndns.org", 4455)
if self.fd <= 0:
return 0
stream = pytellapic.tellapic_read_stream_b(self.fd);
self.updateUi(stream)
@QtCore.pyqtSlot(int)
def on_tabWidget_currentChanged(self, i):
if i == 0:
self.ui.headerCByte.setCurrentIndex(16)
elif i == 1:
self.ui.headerCByte.setCurrentIndex(0)
else:
self.ui.headerCByte.setCurrentIndex(2)
@QtCore.pyqtSlot(int)
def on_recieveTabWidget_currentChanged(self, i):
if i == 0:
self.ui.receiveHeaderCByte.setCurrentIndex(16)
elif i == 1:
self.ui.receiveHeaderCByte.setCurrentIndex(0)
else:
self.ui.receiveHeaderCByte.setCurrentIndex(2)
@QtCore.pyqtSlot(int)
def on_headerCByte_activated(self, i):
if i >= 0 and i <= 1:
self.ui.tabWidget.setEnabled(True)
self.ui.tabWidget.setCurrentIndex(1)
if i == 0:
self.ui.chatIdTo.setEnabled(False)
else:
self.ui.chatIdTo.setEnabled(True)
elif i >= 2 and i <= 3 :
self.ui.tabWidget.setEnabled(True)
self.ui.tabWidget.setCurrentIndex(2)
elif i > 3 and i < 19:
self.ui.tabWidget.setEnabled(True)
self.ui.tabWidget.setCurrentIndex(0)
else:
self.ui.tabWidget.setEnabled(False)
@QtCore.pyqtSlot(int)
def on_receiveHeaderCByte_activated(self, i):
if i >= 0 and i <= 1:
self.ui.receiveTabWidget.setCurrentIndex(1)
elif i >= 2 and i <= 3 :
self.ui.receiveTabWidget.setCurrentIndex(2)
else:
self.ui.receiveTabWidget.setCurrentIndex(0)
def updateUi(self):
#print("updateUi()")
while self.queue.qsize():
try:
msg = self.queue.get(0)
print("trying read...")
stream = pytellapic.tellapic_read_stream_b(self.fd)
print("stream cbyte read: ", stream.header.cbyte, "size: ", stream.header.ssize)
self.ui.receiveHeaderEndian.setChecked(False)
self.ui.receiveHeaderSSize.setText(str(stream.header.ssize))
self.ui.receiveHeaderCByte.setCurrentIndex(self.ui.receiveHeaderCByte.findText(self.cbyte[stream.header.cbyte]))
self.on_receiveHeaderCByte_activated(self.ui.receiveHeaderCByte.currentIndex())
if stream.header.cbyte in self.ctl:
self.ui.receiveSvcontrolIdFrom.setText(str(stream.data.control.idfrom))
elif stream.header.cbyte in self.ctli:
self.ui.receiveSvcontrolIdFrom.setText(str(stream.data.control.idfrom))
self.ui.receiveSvcontrolInfo.appendPlainText(str(stream.data.control.info))
elif stream.header.cbyte in self.ctlchat:
self.ui.receiveChatIdFrom.setText(str(stream.data.chat.idfrom))
if stream.header.cbyte == pytellapic.CTL_CL_BMSG:
self.ui.receiveChatText.appendPlainText(str(stream.data.chat.type.broadmsg))
else:
self.ui.receiveChatIdTo.setText(str(stream.data.chat.type.private.idto))
                        self.ui.receiveChatText.appendPlainText(str(stream.data.chat.type.privmsg.text))
elif stream.header.cbyte in self.ctldrawing:
if stream.header.cbyte == pytellapic.CTL_CL_FIG:
self.ui.receiveDrawingDCByte.setText(str(stream.data.drawing.dcbyte))
self.ui.receiveDrawingNumber.setText(str(stream.data.drawing.number))
self.ui.receiveDrawingWidth.setText(str(stream.data.drawing.width))
self.ui.receiveDrawingOpacity.setText(str(stream.data.drawing.opacity))
self.ui.receiveDrawingColor.setText(str(stream.data.drawing.color.red) + str(stream.data.drawing.color.green) + str(stream.data.drawing.color.blue))
self.ui.receiveDrawingXCoordinate.setText(str(stream.data.drawing.point1.x))
self.ui.receiveDrawingYCoordinate.setText(str(stream.data.drawing.point1.y))
self.ui.receiveFigureEndCaps.setText(str(stream.data.drawing.type.figure.endcaps))
self.ui.receiveFigureLineJoin.setText(str(stream.data.drawing.type.figure.linejoin))
self.ui.receiveFigureMiterLimit.setText(str(stream.data.drawing.type.figure.miterlimit))
self.ui.receiveFigureDashPhase.setText(str(stream.data.drawing.type.figure.dash_phase))
self.ui.receiveFigureEndXCoordinate.setText(str(stream.data.drawing.type.figure.point2.x))
self.ui.receiveFigureEndYCoordinate.setText(str(stream.data.drawing.type.figure.point2.y))
else:
pass
self.queue.task_done()
except Queue.Empty:
pass
# stolen from: http://www.informit.com/articles/article.aspx?p=30708&seqNum=3
class ThreadClient:
def __init__(self, host, port):
# Create the queue
self.queue = Queue.Queue()
self.fd = pytellapic.tellapic_connect_to(host, port)
# Set up the GUI part
self.gui = MainTest(self.queue, self.endApplication, self.fd)
self.gui.show()
# A timer to periodically call periodicCall :-)
self.timer = QtCore.QTimer()
QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.periodicCall)
# Start the timer -- this replaces the initial call
# to periodicCall
self.timer.start(100)
# Set up the thread to do asynchronous I/O
# More can be made if necessary
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
self.thread1.start()
def periodicCall(self):
"""
Check every 100 ms if there is something new in the queue.
"""
self.gui.updateUi()
if not self.running:
root.quit()
def endApplication(self):
while self.queue.qsize():
self.queue.get(0)
self.queue.task_done()
self.running = 0
pytellapic.tellapic_close_fd(self.fd)
print("ending thread")
os.kill(os.getpid(), signal.SIGTERM)
def workerThread1(self):
"""
This is where we handle the asynchronous I/O. For example,
it may be a 'select()'.
One important thing to remember is that the thread has to
yield control.
"""
while self.running:
# To simulate asynchronous I/O, we create a random number
# at random intervals. Replace the following 2 lines
# with the real thing.
try:
print("waiting on select")
r, w, e = select.select([self.fd], [], [])
self.queue.put(1)
self.queue.join()
except select.error, v:
print("error")
raise
break
def usage():
print("python2 main_test.py -h <hostname> -p <port>")
print("or")
print("python2 main_test.py --host=<hostname> --port=<port>")
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(description='Connects to a tellapic server')
parser.add_argument('-c', '--host', required=True, nargs=1, type=str, help='the host name to connect to.')
parser.add_argument('-p', '--port', required=True, nargs=1, type=int, help='the HOST port to use.')
args = parser.parse_args()
root = QtGui.QApplication(sys.argv)
client = ThreadClient(args.host[0], args.port[0])
sys.exit(root.exec_())
| gpl-3.0 |
partofthething/home-assistant | homeassistant/components/deconz/switch.py | 7 | 2449 | """Support for deCONZ switches."""
from homeassistant.components.switch import DOMAIN, SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import NEW_LIGHT, POWER_PLUGS, SIRENS
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches for deCONZ component.
Switches are based on the same device class as lights in deCONZ.
"""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_switch(lights=gateway.api.lights.values()):
"""Add switch from deCONZ."""
entities = []
for light in lights:
if (
light.type in POWER_PLUGS
and light.uniqueid not in gateway.entities[DOMAIN]
):
entities.append(DeconzPowerPlug(light, gateway))
elif (
light.type in SIRENS and light.uniqueid not in gateway.entities[DOMAIN]
):
entities.append(DeconzSiren(light, gateway))
if entities:
async_add_entities(entities)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_LIGHT), async_add_switch
)
)
async_add_switch()
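# Editor's illustrative sketch (hypothetical helper, not part of this platform):
# how a gateway event reaches async_add_switch above. The signal string is
# whatever gateway.async_signal_new_device(NEW_LIGHT) returns, and new_lights
# is assumed to be an iterable of deCONZ light objects.
def _example_notify_new_lights(hass, gateway, new_lights):
    """Fan freshly discovered deCONZ lights out to NEW_LIGHT listeners."""
    from homeassistant.helpers.dispatcher import async_dispatcher_send

    async_dispatcher_send(
        hass, gateway.async_signal_new_device(NEW_LIGHT), new_lights
    )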
class DeconzPowerPlug(DeconzDevice, SwitchEntity):
"""Representation of a deCONZ power plug."""
TYPE = DOMAIN
@property
def is_on(self):
"""Return true if switch is on."""
return self._device.state
async def async_turn_on(self, **kwargs):
"""Turn on switch."""
data = {"on": True}
await self._device.async_set_state(data)
async def async_turn_off(self, **kwargs):
"""Turn off switch."""
data = {"on": False}
await self._device.async_set_state(data)
class DeconzSiren(DeconzDevice, SwitchEntity):
"""Representation of a deCONZ siren."""
TYPE = DOMAIN
@property
def is_on(self):
"""Return true if switch is on."""
return self._device.is_on
async def async_turn_on(self, **kwargs):
"""Turn on switch."""
await self._device.turn_on()
async def async_turn_off(self, **kwargs):
"""Turn off switch."""
await self._device.turn_off()
| mit |
pu239ppy/carbon | lib/carbon/tests/test_cache.py | 5 | 9322 | from unittest import TestCase
from mock import Mock, PropertyMock, patch
from carbon.cache import _MetricCache, DrainStrategy, MaxStrategy, RandomStrategy, SortedStrategy
class MetricCacheTest(TestCase):
def setUp(self):
settings = {
'MAX_CACHE_SIZE': float('inf'),
'CACHE_SIZE_LOW_WATERMARK': float('inf')
}
self._settings_patch = patch.dict('carbon.conf.settings', settings)
self._settings_patch.start()
self.strategy_mock = Mock(spec=DrainStrategy)
self.metric_cache = _MetricCache(self.strategy_mock)
def tearDown(self):
self._settings_patch.stop()
def test_cache_is_a_dict(self):
self.assertTrue(issubclass(_MetricCache, dict))
def test_initial_size(self):
self.assertEqual(0, self.metric_cache.size)
def test_store_new_metric(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual(1, self.metric_cache.size)
self.assertEqual([(123456, 1.0)], self.metric_cache['foo'].items())
def test_store_multiple_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.assertEqual(2, self.metric_cache.size)
result = self.metric_cache['foo'].items()
self.assertTrue((123456, 1.0) in result)
self.assertTrue((123457, 2.0) in result)
def test_store_duplicate_timestamp(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123456, 2.0))
self.assertEqual(1, self.metric_cache.size)
self.assertEqual([(123456, 2.0)], self.metric_cache['foo'].items())
def test_store_checks_fullness(self):
is_full_mock = PropertyMock()
with patch.object(_MetricCache, 'is_full', is_full_mock):
with patch('carbon.cache.events'):
metric_cache = _MetricCache()
metric_cache.store('foo', (123456, 1.0))
self.assertEqual(1, is_full_mock.call_count)
def test_store_on_full_triggers_events(self):
is_full_mock = PropertyMock(return_value=True)
with patch.object(_MetricCache, 'is_full', is_full_mock):
with patch('carbon.cache.events') as events_mock:
self.metric_cache.store('foo', (123456, 1.0))
events_mock.cacheFull.assert_called_with()
def test_pop_multiple_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
result = self.metric_cache.pop('foo')
self.assertTrue((123456, 1.0) in result)
self.assertTrue((123457, 2.0) in result)
def test_pop_reduces_size(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.pop('foo')
self.assertEqual(0, self.metric_cache.size)
def test_pop_triggers_space_check(self):
with patch.object(self.metric_cache, '_check_available_space') as check_space_mock:
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.pop('foo')
self.assertEqual(1, check_space_mock.call_count)
def test_pop_returns_sorted_timestamps(self):
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('foo', (123456, 1.0))
result = self.metric_cache.pop('foo')
expected = [(123456, 1.0), (123457, 2.0), (123458, 3.0)]
self.assertEqual(expected, result)
def test_pop_raises_on_missing(self):
self.assertRaises(KeyError, self.metric_cache.pop, 'foo')
def test_get_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))
def test_get_datapoints_doesnt_pop(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))
self.assertEqual(1, self.metric_cache.size)
self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))
def test_get_datapoints_returns_empty_on_missing(self):
self.assertEqual([], self.metric_cache.get_datapoints('foo'))
def test_get_datapoints_returns_sorted_timestamps(self):
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('foo', (123456, 1.0))
result = self.metric_cache.get_datapoints('foo')
expected = [(123456, 1.0), (123457, 2.0), (123458, 3.0)]
self.assertEqual(expected, result)
def test_drain_metric_respects_strategy(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('bar', (123456, 1.0))
self.metric_cache.store('baz', (123456, 1.0))
self.strategy_mock.return_value.choose_item.side_effect = ['bar', 'baz', 'foo']
self.assertEqual('bar', self.metric_cache.drain_metric()[0])
self.assertEqual('baz', self.metric_cache.drain_metric()[0])
self.assertEqual('foo', self.metric_cache.drain_metric()[0])
def test_drain_metric_works_without_strategy(self):
metric_cache = _MetricCache() # No strategy
metric_cache.store('foo', (123456, 1.0))
self.assertEqual('foo', metric_cache.drain_metric()[0])
def test_is_full_short_circuits_on_inf(self):
with patch.object(self.metric_cache, 'size') as size_mock:
self.metric_cache.is_full
size_mock.assert_not_called()
def test_is_full(self):
self._settings_patch.values['MAX_CACHE_SIZE'] = 2.0
self._settings_patch.start()
with patch('carbon.cache.events'):
self.assertFalse(self.metric_cache.is_full)
self.metric_cache.store('foo', (123456, 1.0))
self.assertFalse(self.metric_cache.is_full)
self.metric_cache.store('foo', (123457, 1.0))
self.assertTrue(self.metric_cache.is_full)
def test_counts_one_datapoint(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual([('foo', 1)], self.metric_cache.counts)
def test_counts_two_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.assertEqual([('foo', 2)], self.metric_cache.counts)
def test_counts_multiple_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('bar', (123458, 3.0))
self.assertTrue(('foo', 2) in self.metric_cache.counts)
self.assertTrue(('bar', 1) in self.metric_cache.counts)
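# Editor's illustrative sketch (hypothetical helper): the store/drain cycle the
# tests above exercise, end to end. Assumes drain_metric() pops the chosen
# metric and returns a (name, datapoints) pair, as the drain and pop tests
# suggest.
def example_drain_all(cache):
    """Yield (metric_name, datapoints) in strategy order until the cache is empty."""
    while cache.size:
        yield cache.drain_metric()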
class DrainStrategyTest(TestCase):
def setUp(self):
self.metric_cache = _MetricCache()
def test_max_strategy(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
max_strategy = MaxStrategy(self.metric_cache)
# foo has 3
self.assertEqual('foo', max_strategy.choose_item())
# add 2 more 'bar' for 4 total
self.metric_cache.store('bar', (123462, 8.0))
self.metric_cache.store('bar', (123463, 9.0))
self.assertEqual('bar', max_strategy.choose_item())
self.metric_cache.pop('foo')
self.metric_cache.pop('bar')
self.assertEqual('baz', max_strategy.choose_item())
def test_sorted_strategy_static_cache(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
sorted_strategy = SortedStrategy(self.metric_cache)
# In order from most to least
self.assertEqual('foo', sorted_strategy.choose_item())
self.assertEqual('bar', sorted_strategy.choose_item())
self.assertEqual('baz', sorted_strategy.choose_item())
def test_sorted_strategy_changing_sizes(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
sorted_strategy = SortedStrategy(self.metric_cache)
# In order from most to least foo, bar, baz
self.assertEqual('foo', sorted_strategy.choose_item())
# 'baz' gets 2 more, now greater than 'bar'
self.metric_cache.store('baz', (123461, 6.0))
self.metric_cache.store('baz', (123461, 6.0))
# But 'bar' is popped anyway, because sort has already happened
self.assertEqual('bar', sorted_strategy.choose_item())
self.assertEqual('baz', sorted_strategy.choose_item())
# Sort happens again
self.assertEqual('foo', sorted_strategy.choose_item())
self.assertEqual('bar', sorted_strategy.choose_item())
self.assertEqual('baz', sorted_strategy.choose_item())
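# Editor's illustrative sketch (hypothetical strategy, not part of carbon): the
# contract these tests exercise is "construct with the cache, return a metric
# name from choose_item()". Assuming the base class keeps the cache on
# self.cache, an oldest-timestamp-first variant could look like this, wired in
# the same way as the built-ins: _MetricCache(OldestFirstStrategy).
class OldestFirstStrategy(DrainStrategy):
    """Drain the metric whose earliest buffered timestamp is oldest."""

    def choose_item(self):
        return min(
            self.cache.keys(),
            key=lambda metric: min(ts for ts, _ in self.cache[metric].items()),
        )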
class RandomStrategyTest(TestCase):
def setUp(self):
self.metric_cache = _MetricCache()
def test_random_strategy(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('bar', (123457, 2.0))
self.metric_cache.store('baz', (123458, 3.0))
strategy = RandomStrategy(self.metric_cache)
for _i in range(3):
item = strategy.choose_item()
self.assertTrue(item in self.metric_cache)
self.metric_cache.pop(item)
| apache-2.0 |