| code (string, lengths 3–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (1 class) | license (15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
from __future__ import unicode_literals
from django.apps import AppConfig
class ProfileConfig(AppConfig):
    name = "profiles"
    verbose_name = 'User Profiles'
    def ready(self):
        from . import signals # noqa
|
ramaseshan/symptomchecker
|
symptomcheck/src/profiles/apps.py
|
Python
|
gpl-2.0
| 226
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import sys
import unittest
import torch
from fairseq.distributed import utils as dist_utils
from .utils import objects_are_equal, spawn_and_init
class DistributedTest(unittest.TestCase):
    def setUp(self):
        if not torch.cuda.is_available():
            raise unittest.SkipTest("CUDA not available, skipping test")
        if sys.platform == "win32":
            raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
        if torch.cuda.device_count() < 2:
            raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")
class TestBroadcastObject(DistributedTest):
    def test_str(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object, "hello world"
            ),
            world_size=2,
        )
    def test_tensor(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object,
                torch.rand(5),
            ),
            world_size=2,
        )
    def test_complex(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object,
                {
                    "a": "1",
                    "b": [2, torch.rand(2, 3), 3],
                    "c": (torch.rand(2, 3), 4),
                    "d": {5, torch.rand(5)},
                    "e": torch.rand(5),
                    "f": torch.rand(5).int().cuda(),
                },
            ),
            world_size=2,
        )
    @staticmethod
    def _test_broadcast_object(ref_obj, rank, group):
        obj = dist_utils.broadcast_object(
            ref_obj if rank == 0 else None, src_rank=0, group=group
        )
        assert objects_are_equal(ref_obj, obj)
class TestAllGatherList(DistributedTest):
    def test_str_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                "hello world",
            ),
            world_size=2,
        )
    def test_tensor_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                torch.rand(5),
            ),
            world_size=2,
        )
    def test_complex_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                {
                    "a": "1",
                    "b": [2, torch.rand(2, 3), 3],
                    "c": (torch.rand(2, 3), 4),
                    "d": {5, torch.rand(5)},
                    "e": torch.rand(5),
                    "f": torch.rand(5).int(),
                },
            ),
            world_size=2,
        )
    @staticmethod
    def _test_all_gather_list_equality(ref_obj, rank, group):
        objs = dist_utils.all_gather_list(ref_obj, group)
        for obj in objs:
            assert objects_are_equal(ref_obj, obj)
    def test_rank_tensor(self):
        spawn_and_init(
            TestAllGatherList._test_all_gather_list_rank_tensor, world_size=2
        )
    @staticmethod
    def _test_all_gather_list_rank_tensor(rank, group):
        obj = torch.tensor([rank])
        objs = dist_utils.all_gather_list(obj, group)
        for i, obj in enumerate(objs):
            assert obj.item() == i
if __name__ == "__main__":
    unittest.main()
|
pytorch/fairseq
|
tests/distributed/test_utils.py
|
Python
|
mit
| 3,656
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class NewRelicMetricFilters(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'app_name': 'str',
        'metric_filter_regex': 'str'
    }
    attribute_map = {
        'app_name': 'appName',
        'metric_filter_regex': 'metricFilterRegex'
    }
    def __init__(self, app_name=None, metric_filter_regex=None, _configuration=None):  # noqa: E501
        """NewRelicMetricFilters - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._app_name = None
        self._metric_filter_regex = None
        self.discriminator = None
        if app_name is not None:
            self.app_name = app_name
        if metric_filter_regex is not None:
            self.metric_filter_regex = metric_filter_regex
    @property
    def app_name(self):
        """Gets the app_name of this NewRelicMetricFilters.  # noqa: E501
        :return: The app_name of this NewRelicMetricFilters.  # noqa: E501
        :rtype: str
        """
        return self._app_name
    @app_name.setter
    def app_name(self, app_name):
        """Sets the app_name of this NewRelicMetricFilters.
        :param app_name: The app_name of this NewRelicMetricFilters.  # noqa: E501
        :type: str
        """
        self._app_name = app_name
    @property
    def metric_filter_regex(self):
        """Gets the metric_filter_regex of this NewRelicMetricFilters.  # noqa: E501
        :return: The metric_filter_regex of this NewRelicMetricFilters.  # noqa: E501
        :rtype: str
        """
        return self._metric_filter_regex
    @metric_filter_regex.setter
    def metric_filter_regex(self, metric_filter_regex):
        """Sets the metric_filter_regex of this NewRelicMetricFilters.
        :param metric_filter_regex: The metric_filter_regex of this NewRelicMetricFilters.  # noqa: E501
        :type: str
        """
        self._metric_filter_regex = metric_filter_regex
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(NewRelicMetricFilters, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NewRelicMetricFilters):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, NewRelicMetricFilters):
            return True
        return self.to_dict() != other.to_dict()
|
wavefrontHQ/python-client
|
wavefront_api_client/models/new_relic_metric_filters.py
|
Python
|
apache-2.0
| 4,730
|
from biicode.common.utils.serializer import Serializer, SetDeserializer
from biicode.common.model.brl.block_name import BlockName
from biicode.common.find.policy import Policy
from biicode.common.model.symbolic.reference import ReferencedDependencies
from collections import defaultdict
from biicode.common.utils.bii_logging import logger
from biicode.common.model.declare.declaration import Declaration
from biicode.common.model.symbolic.block_version_table import BlockVersionTable
class FinderRequest(object):
    def __init__(self, policy=None):
        self.unresolved = set()  # Unresolved declarations to be found
        self.block_names = set()  # Current hive src blocks, to forbid cycles
        self.existing = ReferencedDependencies()  # Current resolved deps
        self.existing_common_table = BlockVersionTable()
        self.policy = policy
        self.find = True  # To find for UNRESOLVED
        self.update = False  # To activate UPDATES
        self.downgrade = False  # To activate DOWNGRADES
        self.modify = False  # Allow changing deps to non-descendant branches
    def __len__(self):
        l = 0
        if self.find:
            l += len(self.unresolved)
        if self.modify or self.update or self.downgrade:
            l += len(self.existing)
        return l
    def possible_blocks(self):
        '''
        Returns: { block_name: set(Declaration) }
        '''
        possible_blocks = defaultdict(set)
        for declaration in self.unresolved:
            try:
                block = declaration.block()
                if block and block not in self.block_names:
                    # FIXME: If block is in self.block_names the client could had filter that
                    possible_blocks[block].add(declaration)
            except Exception as e:
                logger.debug('Could not obtain block from decl %s: %s' % (declaration, str(e)))
        return possible_blocks
    def __repr__(self):
        result = []
        result.append('Unresolved: ' + str(self.unresolved))
        result.append('Existing: ' + str(self.existing))
        result.append('CurrentBlocks: ' + str(self.block_names))
        return '\n'.join(result)
    SERIAL_UNRESOLVED_KEY = "u"
    SERIAL_TRACKING_KEY = "t"
    SERIAL_EXISTING_KEY = "e"
    SERIAL_POLICY = "p"
    SERIAL_CRITERIA = 'c'
    SERIAL_DEP_COMMON_TABLE = 'd'
    @staticmethod
    def deserialize(data):
        '''From dictionary to object FinderRequest'''
        ret = FinderRequest()
        ret.block_names = SetDeserializer(BlockName).deserialize(data[FinderRequest.SERIAL_TRACKING_KEY])
        ret.existing = ReferencedDependencies.deserialize(data[FinderRequest.SERIAL_EXISTING_KEY])
        ret.unresolved = SetDeserializer(Declaration).deserialize(data[FinderRequest.SERIAL_UNRESOLVED_KEY])
        ret.policy = Policy.deserialize(data[FinderRequest.SERIAL_POLICY])
        criteria = data[FinderRequest.SERIAL_CRITERIA]
        ret.find, ret.update, ret.downgrade, ret.modify = criteria
        ret.existing_common_table = BlockVersionTable.deserialize(data[FinderRequest.SERIAL_DEP_COMMON_TABLE])
        return ret
    def serialize(self):
        return Serializer().build((FinderRequest.SERIAL_UNRESOLVED_KEY, self.unresolved),
                                  (FinderRequest.SERIAL_TRACKING_KEY, self.block_names),
                                  (FinderRequest.SERIAL_EXISTING_KEY, self.existing),
                                  (FinderRequest.SERIAL_POLICY, self.policy),
                                  (FinderRequest.SERIAL_DEP_COMMON_TABLE, self.existing_common_table),
                                  (FinderRequest.SERIAL_CRITERIA, (self.find,
                                                                   self.update,
                                                                   self.downgrade,
                                                                   self.modify)))
    def __eq__(self, other):
        if self is other:
            return True
        return isinstance(other, self.__class__) \
            and other.unresolved == self.unresolved \
            and other.block_names == self.block_names \
            and other.existing == self.existing \
            and other.policy == self.policy \
            and other.find == self.find \
            and other.update == self.update \
            and other.downgrade == self.downgrade \
            and other.modify == self.modify \
            and other.existing_common_table == self.existing_common_table
    def __ne__(self, other):
        return not self.__eq__(other)
|
zhangf911/common
|
find/finder_request.py
|
Python
|
mit
| 4,606
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD Style.
from numpy.testing import assert_array_almost_equal
import numpy as np
from scipy import sparse
from ..base import LinearRegression
from ...utils import check_random_state
def test_linear_regression():
    """
    Test LinearRegression on a simple dataset.
    """
    # a simple dataset
    X = [[1], [2]]
    Y = [1, 2]
    clf = LinearRegression()
    clf.fit(X, Y)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict(X), [1, 2])
    # test it also for degenerate input
    X = [[1]]
    Y = [0]
    clf = LinearRegression()
    clf.fit(X, Y)
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sparse(random_state=0):
    "Test that linear regression also works with sparse data"
    random_state = check_random_state(random_state)
    n = 100
    X = sparse.eye(n, n)
    beta = random_state.rand(n)
    y = X * beta[:, np.newaxis]
    ols = LinearRegression()
    ols.fit(X, y.ravel())
    assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
    assert_array_almost_equal(ols.residues_, 0)
|
cdegroc/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
Python
|
bsd-3-clause
| 1,363
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim.nets import inception
slim = tf.contrib.slim
class InceptionV3Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
final_endpoint, end_points = inception.inception_v3_base(inputs)
self.assertTrue(final_endpoint.op.name.startswith(
'InceptionV3/Mixed_7c'))
self.assertListEqual(final_endpoint.get_shape().as_list(),
[batch_size, 8, 8, 2048])
expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v3_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV3/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3_base(
inputs, final_endpoint='Mixed_7c')
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
'Mixed_5b': [batch_size, 35, 35, 256],
'Mixed_5c': [batch_size, 35, 35, 288],
'Mixed_5d': [batch_size, 35, 35, 288],
'Mixed_6a': [batch_size, 17, 17, 768],
'Mixed_6b': [batch_size, 17, 17, 768],
'Mixed_6c': [batch_size, 17, 17, 768],
'Mixed_6d': [batch_size, 17, 17, 768],
'Mixed_6e': [batch_size, 17, 17, 768],
'Mixed_7a': [batch_size, 8, 8, 1280],
'Mixed_7b': [batch_size, 8, 8, 2048],
'Mixed_7c': [batch_size, 8, 8, 2048]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v3_arg_scope()):
inception.inception_v3_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(21802784, total_params)
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Mixed_7c' in end_points)
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 2048])
self.assertTrue('PreLogits' in end_points)
pre_logits = end_points['PreLogits']
self.assertListEqual(pre_logits.get_shape().as_list(),
[batch_size, 1, 1, 2048])
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 2048])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 299, 299
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
feed_dict = {inputs: input_np}
tf.initialize_all_variables().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
def testUnknowBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v3(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False, reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 299, 299, 3])
logits, _ = inception.inception_v3(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.initialize_all_variables().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
tf.test.main()
|
naturali/tensorflow
|
tensorflow/contrib/slim/python/slim/nets/inception_v3_test.py
|
Python
|
apache-2.0
| 12,169
|
from datetime import timedelta
import numpy as np
import pytest
from pandas import (
DataFrame,
DatetimeIndex,
Timedelta,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
class TestToTimestamp:
    def test_frame_to_time_stamp(self):
        K = 5
        index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
        df = DataFrame(np.random.randn(len(index), K), index=index)
        df["mix"] = "a"
        exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
        exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
        result = df.to_timestamp("D", "end")
        tm.assert_index_equal(result.index, exp_index)
        tm.assert_numpy_array_equal(result.values, df.values)
        exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
        result = df.to_timestamp("D", "start")
        tm.assert_index_equal(result.index, exp_index)
        def _get_with_delta(delta, freq="A-DEC"):
            return date_range(
                to_datetime("1/1/2001") + delta,
                to_datetime("12/31/2009") + delta,
                freq=freq,
            )
        delta = timedelta(hours=23)
        result = df.to_timestamp("H", "end")
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        delta = timedelta(hours=23, minutes=59)
        result = df.to_timestamp("T", "end")
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        result = df.to_timestamp("S", "end")
        delta = timedelta(hours=23, minutes=59, seconds=59)
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        # columns
        df = df.T
        exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
        exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
        result = df.to_timestamp("D", "end", axis=1)
        tm.assert_index_equal(result.columns, exp_index)
        tm.assert_numpy_array_equal(result.values, df.values)
        exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
        result = df.to_timestamp("D", "start", axis=1)
        tm.assert_index_equal(result.columns, exp_index)
        delta = timedelta(hours=23)
        result = df.to_timestamp("H", "end", axis=1)
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
        tm.assert_index_equal(result.columns, exp_index)
        delta = timedelta(hours=23, minutes=59)
        result = df.to_timestamp("T", "end", axis=1)
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
        tm.assert_index_equal(result.columns, exp_index)
        result = df.to_timestamp("S", "end", axis=1)
        delta = timedelta(hours=23, minutes=59, seconds=59)
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
        tm.assert_index_equal(result.columns, exp_index)
        # invalid axis
        with pytest.raises(ValueError, match="axis"):
            df.to_timestamp(axis=2)
        result1 = df.to_timestamp("5t", axis=1)
        result2 = df.to_timestamp("t", axis=1)
        expected = date_range("2001-01-01", "2009-01-01", freq="AS")
        assert isinstance(result1.columns, DatetimeIndex)
        assert isinstance(result2.columns, DatetimeIndex)
        tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
        tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
        # PeriodIndex.to_timestamp always use 'infer'
        assert result1.columns.freqstr == "AS-JAN"
        assert result2.columns.freqstr == "AS-JAN"
|
TomAugspurger/pandas
|
pandas/tests/frame/methods/test_to_timestamp.py
|
Python
|
bsd-3-clause
| 4,020
|
"""
RequestOperation to create a tarball from a list of LFNs.
Download a list of files to local storage, then tars it and uploads it to a StorageElement
This operation requires the following arguments:
* ArchiveLFN: The LFN of the tarball
* SourceSE: Where the files to be archived are downloaded from
* TarballSE: Where the tarball will be uploaded to
* RegisterDescendent: If True the tarball will be registered as a descendent of the LFNs
"""
import os
import shutil
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
__RCSID__ = '$Id$'
class ArchiveFiles(OperationHandlerBase):
"""ArchiveFiles operation handler."""
def __init__(self, operation=None, csPath=None):
"""Initialize the ArchiveFiles handler.
:param self: self reference
:param Operation operation: Operation instance
:param string csPath: CS path for this handler
"""
OperationHandlerBase.__init__(self, operation, csPath)
gMonitor.registerActivity('ArchiveFilesAtt', 'Request attempt',
'RequestExecutingAgent', 'Files/min', gMonitor.OP_SUM)
gMonitor.registerActivity('ArchiveFilesOK', 'Requests successful',
'RequestExecutingAgent', 'Files/min', gMonitor.OP_SUM)
gMonitor.registerActivity('ArchiveFilesFail', 'Requests failed',
'RequestExecutingAgent', 'Files/min', gMonitor.OP_SUM)
self.cacheFolder = os.environ.get('AGENT_WORKDIRECTORY')
self.parameterDict = {}
self.waitingFiles = []
self.lfns = []
def __call__(self):
"""Process the ArchiveFiles operation."""
try:
gMonitor.addMark('ArchiveFilesAtt', 1)
self._run()
gMonitor.addMark('ArchiveFilesOK', 1)
except RuntimeError as e:
self.log.info('Failed to execute ArchiveFiles', repr(e))
gMonitor.addMark('ArchiveFilesFail', 1)
return S_ERROR(str(e))
except Exception as e:
self.log.exception('Failed to execute ArchiveFiles', repr(e), lException=e)
gMonitor.addMark('ArchiveFilesFail', 1)
return S_ERROR(str(e))
finally:
self._cleanup()
return S_OK()
def _run(self):
"""Execute the download and tarring."""
self.parameterDict = DEncode.decode(self.operation.Arguments)[0] # tuple: dict, number of characters
self.cacheFolder = os.path.join(self.cacheFolder, self.request.RequestName)
self._checkArchiveLFN()
for parameter, value in self.parameterDict.iteritems():
self.log.info('Parameters: %s = %s' % (parameter, value))
self.log.info('Cache folder: %r' % self.cacheFolder)
self.waitingFiles = self.getWaitingFilesList()
self.lfns = [opFile.LFN for opFile in self.waitingFiles]
self._checkReplicas()
self._downloadFiles()
self._tarFiles()
self._uploadTarBall()
self._registerDescendent()
self._markFilesDone()
def _checkArchiveLFN(self):
"""Make sure the archive LFN does not exist yet."""
archiveLFN = self.parameterDict['ArchiveLFN']
exists = returnSingleResult(self.fc.isFile(archiveLFN))
self.log.debug('Checking for Tarball existence %r' % exists)
if exists['OK'] and exists['Value']:
raise RuntimeError('Tarball %r already exists' % archiveLFN)
def _checkReplicas(self):
"""Make sure the source files are at the sourceSE."""
resReplica = self.fc.getReplicas(self.lfns)
if not resReplica['OK']:
self.log.error('Failed to get replica information:', resReplica['Message'])
raise RuntimeError('Failed to get replica information')
atSource = []
notAt = []
failed = []
sourceSE = self.parameterDict['SourceSE']
for lfn, replInfo in resReplica['Value']['Successful'].iteritems():
if sourceSE in replInfo:
atSource.append(lfn)
else:
self.log.warn('LFN %r not found at source, only at: %s' % (lfn, ','.join(replInfo.keys())))
notAt.append(lfn)
for lfn, errorMessage in resReplica['Value']['Failed'].iteritems():
self.log.warn('Failed to get replica info', '%s: %s' % (lfn, errorMessage))
if 'No such file or directory' in errorMessage:
continue
failed.append(lfn)
if failed:
self.log.error('LFNs failed to get replica info:', '%r' % ' '.join(failed))
raise RuntimeError('Failed to get some replica information')
if notAt:
self.log.error('LFNs not at sourceSE:', '%r' % ' '.join(notAt))
raise RuntimeError('Some replicas are not at the source')
def _downloadFiles(self):
"""Download the files."""
self._checkFilePermissions()
for index, opFile in enumerate(self.waitingFiles):
lfn = opFile.LFN
self.log.info('Processing file (%d/%d) %r' % (index, len(self.waitingFiles), lfn))
sourceSE = self.parameterDict['SourceSE']
attempts = 0
destFolder = os.path.join(self.cacheFolder, os.path.dirname(lfn)[1:])
self.log.debug('Local Cache Folder: %s' % destFolder)
if not os.path.exists(destFolder):
os.makedirs(destFolder)
while True:
attempts += 1
download = returnSingleResult(self.dm.getFile(lfn, destinationDir=destFolder, sourceSE=sourceSE))
if download['OK']:
self.log.info('Downloaded file %r to %r' % (lfn, destFolder))
break
errorString = download['Message']
self.log.error('Failed to download file:', errorString)
opFile.Error = errorString
opFile.Attempt += 1
self.operation.Error = opFile.Error
if 'No such file or directory' in opFile.Error:
# The File does not exist, we just ignore this and continue, otherwise we never archive the other files
opFile.Status = 'Done'
download = S_OK()
break
if attempts > 10:
self.log.error('Completely failed to download file:', errorString)
raise RuntimeError('Completely failed to download file: %s' % errorString)
return
def _checkFilePermissions(self):
"""Check that the request owner has permission to read and remove the files.
Otherwise the error might show up after considerable time was spent.
"""
permissions = self.fc.hasAccess(self.lfns, 'removeFile')
if not permissions['OK']:
raise RuntimeError('Could not resolve permissions')
if permissions['Value']['Failed']:
for lfn in permissions['Value']['Failed']:
self.log.error('Cannot archive file:', lfn)
for opFile in self.waitingFiles:
if opFile.LFN == lfn:
opFile.Status = 'Failed'
opFile.Error = 'Permission denied'
break
raise RuntimeError('Do not have sufficient permissions')
return
def _tarFiles(self):
"""Tar the files."""
tarFileName = os.path.splitext(os.path.basename(self.parameterDict['ArchiveLFN']))[0]
baseDir = self.parameterDict['ArchiveLFN'].strip('/').split('/')[0]
shutil.make_archive(tarFileName, format='tar', root_dir=self.cacheFolder, base_dir=baseDir,
dry_run=False, logger=self.log)
def _uploadTarBall(self):
"""Upload the tarball to specified LFN."""
lfn = self.parameterDict['ArchiveLFN']
self.log.info('Uploading tarball to %r' % lfn)
localFile = os.path.basename(lfn)
tarballSE = self.parameterDict['TarballSE']
upload = returnSingleResult(self.dm.putAndRegister(lfn, localFile, tarballSE))
if not upload['OK']:
raise RuntimeError('Failed to upload tarball: %s' % upload['Message'])
self.log.verbose('Uploading finished')
def _registerDescendent(self):
"""Register the tarball as a descendent of the archived LFNs.
Actually registers all LFNs as an ancestor to the Tarball.
"""
registerDescendents = self.parameterDict.get('RegisterDescendent', None)
if not registerDescendents:
self.log.verbose('Will not register tarball as descendent to the Archived LFNs.')
return
self.log.info('Will register tarball as descendent to the Archived LFNs.')
tarballLFN = self.parameterDict['ArchiveLFN']
ancestorDict = {tarballLFN: {'Ancestors': self.lfns}}
for _trial in range(3):
resAncestors = returnSingleResult(self.fc.addFileAncestors(ancestorDict))
if resAncestors['OK']:
break
else:
self.log.error('Failed to register ancestors', resAncestors['Message'])
raise RuntimeError('Failed to register ancestors')
self.log.info('Successfully registered ancestors')
def _markFilesDone(self):
"""Mark all the files as done."""
self.log.info('Marking files as done')
for opFile in self.waitingFiles:
opFile.Status = 'Done'
def _cleanup(self):
"""Remove the tarball and the downloaded files."""
self.log.info('Cleaning files and tarball')
try:
if 'ArchiveLFN' in self.parameterDict:
os.remove(os.path.basename(self.parameterDict['ArchiveLFN']))
except OSError as e:
self.log.debug('Error when removing tarball: %s' % str(e))
try:
shutil.rmtree(self.cacheFolder, ignore_errors=True)
except OSError as e:
self.log.debug('Error when removing cacheFolder: %s' % str(e))
|
fstagni/DIRAC
|
DataManagementSystem/Agent/RequestOperations/ArchiveFiles.py
|
Python
|
gpl-3.0
| 9,341
|
import pytest
@pytest.mark.page('forms_with_input_elements.html')
class TestInputType(object):
    def test_returns_an_email_type(self, browser):
        assert browser.input(name='html5_email').type == 'email'
|
lmtierney/watir-snake
|
tests/browser/input_tests.py
|
Python
|
mit
| 213
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs`."""
import warnings
from airflow.providers.google.cloud.operators.bigquery_to_gcs import BigQueryToGCSOperator
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs`.",
    DeprecationWarning, stacklevel=2
)
class BigQueryToCloudStorageOperator(BigQueryToGCSOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs.BigQueryToGCSOperator`.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.bigquery_to_gcs.BigQueryToGCSOperator`.""",
            DeprecationWarning, stacklevel=2
        )
        super().__init__(*args, **kwargs)
|
wileeam/airflow
|
airflow/contrib/operators/bigquery_to_gcs.py
|
Python
|
apache-2.0
| 1,678
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from unittest.mock import DEFAULT
from werkzeug.urls import url_parse, url_decode
from odoo import exceptions
from odoo.addons.test_mail.models.test_mail_models import MailTestSimple
from odoo.addons.test_mail.tests.common import TestMailCommon, TestRecipients
from odoo.tests.common import tagged, HttpCase
from odoo.tools import mute_logger
class TestChatterTweaks(TestMailCommon, TestRecipients):
@classmethod
def setUpClass(cls):
super(TestChatterTweaks, cls).setUpClass()
cls.test_record = cls.env['mail.test.simple'].with_context(cls._test_context).create({'name': 'Test', 'email_from': 'ignasse@example.com'})
def test_post_no_subscribe_author(self):
original = self.test_record.message_follower_ids
self.test_record.with_user(self.user_employee).with_context({'mail_create_nosubscribe': True}).message_post(
body='Test Body', message_type='comment', subtype_xmlid='mail.mt_comment')
self.assertEqual(self.test_record.message_follower_ids.mapped('partner_id'), original.mapped('partner_id'))
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_no_subscribe_recipients(self):
original = self.test_record.message_follower_ids
self.test_record.with_user(self.user_employee).with_context({'mail_create_nosubscribe': True}).message_post(
body='Test Body', message_type='comment', subtype_xmlid='mail.mt_comment', partner_ids=[self.partner_1.id, self.partner_2.id])
self.assertEqual(self.test_record.message_follower_ids.mapped('partner_id'), original.mapped('partner_id'))
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_subscribe_recipients(self):
original = self.test_record.message_follower_ids
self.test_record.with_user(self.user_employee).with_context({'mail_create_nosubscribe': True, 'mail_post_autofollow': True}).message_post(
body='Test Body', message_type='comment', subtype_xmlid='mail.mt_comment', partner_ids=[self.partner_1.id, self.partner_2.id])
self.assertEqual(self.test_record.message_follower_ids.mapped('partner_id'), original.mapped('partner_id') | self.partner_1 | self.partner_2)
def test_chatter_mail_create_nolog(self):
""" Test disable of automatic chatter message at create """
rec = self.env['mail.test.simple'].with_user(self.user_employee).with_context({'mail_create_nolog': True}).create({'name': 'Test'})
self.flush_tracking()
self.assertEqual(rec.message_ids, self.env['mail.message'])
rec = self.env['mail.test.simple'].with_user(self.user_employee).with_context({'mail_create_nolog': False}).create({'name': 'Test'})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 1)
def test_chatter_mail_notrack(self):
""" Test disable of automatic value tracking at create and write """
rec = self.env['mail.test.track'].with_user(self.user_employee).create({'name': 'Test', 'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 1,
"A creation message without tracking values should have been posted")
self.assertEqual(len(rec.message_ids.sudo().tracking_value_ids), 0,
"A creation message without tracking values should have been posted")
rec.with_context({'mail_notrack': True}).write({'user_id': self.user_admin.id})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 1,
"No new message should have been posted with mail_notrack key")
rec.with_context({'mail_notrack': False}).write({'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 2,
"A tracking message should have been posted")
self.assertEqual(len(rec.message_ids.sudo().mapped('tracking_value_ids')), 1,
"New tracking message should have tracking values")
def test_chatter_tracking_disable(self):
""" Test disable of all chatter features at create and write """
rec = self.env['mail.test.track'].with_user(self.user_employee).with_context({'tracking_disable': True}).create({'name': 'Test', 'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(rec.sudo().message_ids, self.env['mail.message'])
self.assertEqual(rec.sudo().mapped('message_ids.tracking_value_ids'), self.env['mail.tracking.value'])
rec.write({'user_id': self.user_admin.id})
self.flush_tracking()
self.assertEqual(rec.sudo().mapped('message_ids.tracking_value_ids'), self.env['mail.tracking.value'])
rec.with_context({'tracking_disable': False}).write({'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.sudo().mapped('message_ids.tracking_value_ids')), 1)
rec = self.env['mail.test.track'].with_user(self.user_employee).with_context({'tracking_disable': False}).create({'name': 'Test', 'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.sudo().message_ids), 1,
"Creation message without tracking values should have been posted")
self.assertEqual(len(rec.sudo().mapped('message_ids.tracking_value_ids')), 0,
"Creation message without tracking values should have been posted")
def test_cache_invalidation(self):
""" Test that creating a mail-thread record does not invalidate the whole cache. """
# make a new record in cache
record = self.env['res.partner'].new({'name': 'Brave New Partner'})
self.assertTrue(record.name)
# creating a mail-thread record should not invalidate the whole cache
self.env['res.partner'].create({'name': 'Actual Partner'})
self.assertTrue(record.name)
class TestDiscuss(TestMailCommon, TestRecipients):
@classmethod
def setUpClass(cls):
super(TestDiscuss, cls).setUpClass()
cls.test_record = cls.env['mail.test.simple'].with_context(cls._test_context).create({
'name': 'Test',
'email_from': 'ignasse@example.com'
})
@mute_logger('openerp.addons.mail.models.mail_mail')
def test_mark_all_as_read(self):
def _employee_crash(*args, **kwargs):
""" If employee is test employee, consider he has no access on document """
recordset = args[0]
if recordset.env.uid == self.user_employee.id and not recordset.env.su:
if kwargs.get('raise_exception', True):
raise exceptions.AccessError('Hop hop hop Ernest, please step back.')
return False
return DEFAULT
with patch.object(MailTestSimple, 'check_access_rights', autospec=True, side_effect=_employee_crash):
with self.assertRaises(exceptions.AccessError):
self.env['mail.test.simple'].with_user(self.user_employee).browse(self.test_record.ids).read(['name'])
employee_partner = self.env['res.partner'].with_user(self.user_employee).browse(self.partner_employee.ids)
# mark all as read clear needactions
msg1 = self.test_record.message_post(body='Test', message_type='comment', subtype_xmlid='mail.mt_comment', partner_ids=[employee_partner.id])
self._reset_bus()
with self.assertBus(
[(self.cr.dbname, 'res.partner', employee_partner.id)],
message_items=[{
'type': 'mail.message/mark_as_read',
'payload': {
'message_ids': [msg1.id],
'needaction_inbox_counter': 0,
},
}]):
employee_partner.env['mail.message'].mark_all_as_read(domain=[])
na_count = employee_partner._get_needaction_count()
self.assertEqual(na_count, 0, "mark all as read should conclude all needactions")
# mark all as read also clear inaccessible needactions
msg2 = self.test_record.message_post(body='Zest', message_type='comment', subtype_xmlid='mail.mt_comment', partner_ids=[employee_partner.id])
needaction_accessible = len(employee_partner.env['mail.message'].search([['needaction', '=', True]]))
self.assertEqual(needaction_accessible, 1, "a new message to a partner is readable to that partner")
msg2.sudo().partner_ids = self.env['res.partner']
employee_partner.env['mail.message'].search([['needaction', '=', True]])
needaction_length = len(employee_partner.env['mail.message'].search([['needaction', '=', True]]))
self.assertEqual(needaction_length, 1, "message should still be readable when notified")
na_count = employee_partner._get_needaction_count()
self.assertEqual(na_count, 1, "message not accessible is currently still counted")
self._reset_bus()
with self.assertBus(
[(self.cr.dbname, 'res.partner', employee_partner.id)],
message_items=[{
'type': 'mail.message/mark_as_read',
'payload': {
'message_ids': [msg2.id],
'needaction_inbox_counter': 0,
},
}]):
employee_partner.env['mail.message'].mark_all_as_read(domain=[])
na_count = employee_partner._get_needaction_count()
self.assertEqual(na_count, 0, "mark all read should conclude all needactions even inaccessible ones")
def test_set_message_done_user(self):
with self.assertSinglePostNotifications([{'partner': self.partner_employee, 'type': 'inbox'}], message_info={'content': 'Test'}):
message = self.test_record.message_post(
body='Test', message_type='comment', subtype_xmlid='mail.mt_comment',
partner_ids=[self.user_employee.partner_id.id])
message.with_user(self.user_employee).set_message_done()
self.assertMailNotifications(message, [{'notif': [{'partner': self.partner_employee, 'type': 'inbox', 'is_read': True}]}])
# TDE TODO: it seems bus notifications could be checked
def test_set_star(self):
msg = self.test_record.with_user(self.user_admin).message_post(body='My Body', subject='1')
msg_emp = self.env['mail.message'].with_user(self.user_employee).browse(msg.id)
# Admin set as starred
msg.toggle_message_starred()
self.assertTrue(msg.starred)
# Employee set as starred
msg_emp.toggle_message_starred()
self.assertTrue(msg_emp.starred)
# Do: Admin unstars msg
msg.toggle_message_starred()
self.assertFalse(msg.starred)
self.assertTrue(msg_emp.starred)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_mail_cc_recipient_suggestion(self):
record = self.env['mail.test.cc'].create({'email_cc': 'cc1@example.com, cc2@example.com, cc3 <cc3@example.com>'})
suggestions = record._message_get_suggested_recipients()[record.id]
self.assertEqual(sorted(suggestions), [
(False, '"cc3" <cc3@example.com>', None, 'CC Email'),
(False, 'cc1@example.com', None, 'CC Email'),
(False, 'cc2@example.com', None, 'CC Email'),
], 'cc should be in suggestions')
def test_inbox_message_fetch_needaction(self):
user1 = self.env['res.users'].create({'login': 'user1', 'name': 'User 1'})
user1.notification_type = 'inbox'
user2 = self.env['res.users'].create({'login': 'user2', 'name': 'User 2'})
user2.notification_type = 'inbox'
message1 = self.test_record.with_user(self.user_admin).message_post(body='Message 1', partner_ids=[user1.partner_id.id, user2.partner_id.id])
message2 = self.test_record.with_user(self.user_admin).message_post(body='Message 2', partner_ids=[user1.partner_id.id, user2.partner_id.id])
# both notified users should have the 2 messages in Inbox initially
messages = self.env['mail.message'].with_user(user1)._message_fetch(domain=[['needaction', '=', True]])
self.assertEqual(len(messages), 2)
messages = self.env['mail.message'].with_user(user2)._message_fetch(domain=[['needaction', '=', True]])
self.assertEqual(len(messages), 2)
# first user is marking one message as done: the other message is still Inbox, while the other user still has the 2 messages in Inbox
message1.with_user(user1).set_message_done()
messages = self.env['mail.message'].with_user(user1)._message_fetch(domain=[['needaction', '=', True]])
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].get('id'), message2.id)
messages = self.env['mail.message'].with_user(user2)._message_fetch(domain=[['needaction', '=', True]])
self.assertEqual(len(messages), 2)
def test_notification_has_error_filter(self):
"""Ensure message_has_error filter is only returning threads for which
the current user is author of a failed message."""
message = self.test_record.with_user(self.user_admin).message_post(
body='Test', message_type='comment', subtype_xmlid='mail.mt_comment',
partner_ids=[self.user_employee.partner_id.id]
)
self.assertFalse(message.has_error)
with self.mock_mail_gateway(sim_error='connect_smtp_notfound'):
self.user_admin.notification_type = 'email'
message2 = self.test_record.with_user(self.user_employee).message_post(
body='Test', message_type='comment', subtype_xmlid='mail.mt_comment',
partner_ids=[self.user_admin.partner_id.id]
)
self.assertTrue(message2.has_error)
# employee is author of message which has a failure
threads_employee = self.test_record.with_user(self.user_employee).search([('message_has_error', '=', True)])
self.assertEqual(len(threads_employee), 1)
# admin is also author of a message, but it doesn't have a failure
# and the failure from employee's message should not be taken into account for admin
threads_admin = self.test_record.with_user(self.user_admin).search([('message_has_error', '=', True)])
self.assertEqual(len(threads_admin), 0)
@tagged('-at_install', 'post_install')
class TestMultiCompany(HttpCase):
def test_redirect_to_records(self):
self.company_A = self.env['res.company'].create({
'name': 'Company A',
'user_ids': [(4, self.ref('base.user_admin'))],
})
self.company_B = self.env['res.company'].create({
'name': 'Company B',
})
self.multi_company_record = self.env['mail.test.multi.company'].create({
'name': 'Multi Company Record',
'company_id': self.company_A.id,
})
# Test Case 0
# Not logged, redirect to web/login
response = self.url_open('/mail/view?model=%s&res_id=%s' % (
self.multi_company_record._name,
self.multi_company_record.id), timeout=15)
path = url_parse(response.url).path
self.assertEqual(path, '/web/login')
self.authenticate('admin', 'admin')
# Test Case 1
# Logged into company 1, try accessing record in company A
# _redirect_to_record should add company A in allowed_company_ids
response = self.url_open('/mail/view?model=%s&res_id=%s' % (
self.multi_company_record._name,
self.multi_company_record.id), timeout=15)
self.assertEqual(response.status_code, 200)
fragment = url_parse(response.url).fragment
cids = url_decode(fragment)['cids']
self.assertEqual(cids, '1,%s' % (self.company_A.id))
# Test Case 2
# Logged into company 1, try accessing record in company B
# _redirect_to_record should redirect to messaging as the user
# doesn't have any access for this company
self.multi_company_record.company_id = self.company_B
response = self.url_open('/mail/view?model=%s&res_id=%s' % (
self.multi_company_record._name,
self.multi_company_record.id), timeout=15)
self.assertEqual(response.status_code, 200)
fragment = url_parse(response.url).fragment
action = url_decode(fragment)['action']
self.assertEqual(action, 'mail.action_discuss')
|
jeremiahyan/odoo
|
addons/test_mail/tests/test_mail_thread_internals.py
|
Python
|
gpl-3.0
| 16,837
|
"""
This file includes the monkey-patch for requests' PATCH method, as we are using
older version of django that does not contains the PATCH method in its test client.
"""
# pylint: disable=protected-access
from __future__ import unicode_literals
from urlparse import urlparse
from django.test.client import RequestFactory, Client, FakePayload
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
def request_factory_patch(self, path, data=None, content_type=MULTIPART_CONTENT, **extra):
    """
    Construct a PATCH request.
    """
    # pylint: disable=invalid-name
    patch_data = self._encode_data(data or {}, content_type)
    parsed = urlparse(path)
    r = {
        'CONTENT_LENGTH': len(patch_data),
        'CONTENT_TYPE': content_type,
        'PATH_INFO': self._get_path(parsed),
        'QUERY_STRING': parsed[4],
        'REQUEST_METHOD': 'PATCH',
        'wsgi.input': FakePayload(patch_data),
    }
    r.update(extra)
    return self.request(**r)
def client_patch(self, path, data=None, content_type=MULTIPART_CONTENT, follow=False, **extra):
    """
    Send a resource to the server using PATCH.
    """
    response = super(Client, self).patch(path, data=data or {}, content_type=content_type, **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response
if not hasattr(RequestFactory, 'patch'):
    setattr(RequestFactory, 'patch', request_factory_patch)
if not hasattr(Client, 'patch'):
    setattr(Client, 'patch', client_patch)
|
beni55/edx-platform
|
openedx/core/lib/django_test_client_utils.py
|
Python
|
agpl-3.0
| 1,555
|
#!/usr/bin/env python
"""import_dictionary.py: Imports key-size n-gram words from a collection of English n-gram words"""
import sys
import os
if not os.path.exists('.\dictionary'):
    os.makedirs('.\dictionary')
for i in range(2,6):
    infile = 'w' + str(i) + '_.txt'
    outfile = '.\dictionary\w' + str(i) + '.txt'
    if os.path.exists(outfile):
        os.remove(outfile)
    fin = open(infile, "r")
    fout = open(outfile, "w")
    for x in fin.readlines():
        line = x.split('\t')
        if (len(line) == 3):
            line = [w.replace('-', '') for w in line[1:3]]
            length = sum(len(s) for s in line[:2])
            if (length == 20):
                fout.write(''.join(line[:3]))
        elif (len(line) == 4):
            line = [w.replace('-', '') for w in line[1:4]]
            length = sum(len(s) for s in line[:4])
            if (length == 20):
                fout.write(''.join(line[:4]))
        elif (len(line) == 5):
            line = [w.replace('-', '') for w in line[1:5]]
            length = sum(len(s) for s in line[:5])
            if (length == 20):
                fout.write(''.join(line[:5]))
        elif (len(line) == 6):
            line = [w.replace('-', '') for w in line[1:6]]
            length = sum(len(s) for s in line[:6])
            if (length == 20):
                fout.write(''.join(line[:6]))
    print str(i) + '-gram data extracted.'
    fin.close()
    fout.close()
|
anish-shekhawat/double-transposition
|
import_dictionary.py
|
Python
|
mit
| 1,452
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
)
ANSIBLE_METADATA = {'status': 'preview',
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_groups
short_description: Module to manage groups in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage groups in oVirt"
options:
name:
description:
- "Name of the group to manage."
required: true
state:
description:
- "Should the group be present/absent."
choices: ['present', 'absent']
default: present
authz_name:
description:
- "Authorization provider of the group. In previous versions of oVirt known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where group resides."
required: false
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add group group1 from authorization provider example.com-authz
ovirt_groups:
name: group1
domain: example.com-authz
# Add group group1 from authorization provider example.com-authz
# In case of multi-domain Active Directory setup, you should pass
# also namespace, so it adds correct group:
ovirt_groups:
name: group1
namespace: dc=ad2,dc=example,dc=com
domain: example.com-authz
# Remove group group1 with authorization provider example.com-authz
ovirt_groups:
state: absent
name: group1
domain: example.com-authz
'''
RETURN = '''
id:
description: ID of the group which is managed
returned: On success if group is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
group:
description: "Dictionary of all the group attributes. Group attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/group."
returned: On success if group is found.
'''
def _group(connection, module):
groups = connection.system_service().groups_service().list(
search="name={name}".format(
name=module.params['name'],
)
)
# If found more groups, filter them by namespace and authz name:
# (filtering here, as oVirt backend doesn't support it)
if len(groups) > 1:
groups = [
g for g in groups if (
equal(module.params['namespace'], g.namespace) and
equal(module.params['authz_name'], g.domain.name)
)
]
return groups[0] if groups else None
class GroupsModule(BaseModule):
def build_entity(self):
return otypes.Group(
domain=otypes.Domain(
name=self._module.params['authz_name']
),
name=self._module.params['name'],
namespace=self._module.params['namespace'],
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(required=True),
authz_name=dict(required=True, aliases=['domain']),
namespace=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
connection = create_connection(module.params.pop('auth'))
groups_service = connection.system_service().groups_service()
groups_module = GroupsModule(
connection=connection,
module=module,
service=groups_service,
)
group = _group(connection, module)
state = module.params['state']
if state == 'present':
ret = groups_module.create(entity=group)
elif state == 'absent':
ret = groups_module.remove(entity=group)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == "__main__":
main()
|
Rajeshkumar90/ansible-modules-extras
|
cloud/ovirt/ovirt_groups.py
|
Python
|
gpl-3.0
| 5,266
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.utils import PY2
import logging
import re
import sys
from datetime import datetime
from path import Path
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('filesystem')
class Filesystem(object):
"""
Uses local path content as an input. Can use recursion if configured.
    Recursion is False by default. It can be set to True, or to an integer that specifies the max depth relative to
    the base folder.
    All files/dirs/symlinks are retrieved by default. This can be changed by using the 'retrieve' property.
Example 1:: Single path
filesystem: /storage/movies/
Example 2:: List of paths
filesystem:
- /storage/movies/
- /storage/tv/
Example 3:: Object with list of paths
filesystem:
path:
- /storage/movies/
- /storage/tv/
mask: '*.mkv'
Example 4::
filesystem:
path:
- /storage/movies/
- /storage/tv/
recursive: 4 # 4 levels deep from each base folder
retrieve: files # Only files will be retrieved
Example 5::
filesystem:
path:
- /storage/movies/
- /storage/tv/
recursive: yes # No limit to depth, all sub dirs will be accessed
retrieve: # Only files and dirs will be retrieved
- files
- dirs
"""
retrieval_options = ['files', 'dirs', 'symlinks']
paths = one_or_more({'type': 'string', 'format': 'path'}, unique_items=True)
schema = {
'oneOf': [
paths,
{'type': 'object',
'properties': {
'path': paths,
'mask': {'type': 'string'},
'regexp': {'type': 'string', 'format': 'regex'},
'recursive': {'oneOf': [{'type': 'integer', 'minimum': 2}, {'type': 'boolean'}]},
'retrieve': one_or_more({'type': 'string', 'enum': retrieval_options}, unique_items=True)
},
'required': ['path'],
'additionalProperties': False}]
}
def prepare_config(self, config):
from fnmatch import translate
config = config
# Converts config to a dict with a list of paths
if not isinstance(config, dict):
config = {'path': config}
if not isinstance(config['path'], list):
config['path'] = [config['path']]
config.setdefault('recursive', False)
# If mask was specified, turn it in to a regexp
if config.get('mask'):
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
config.setdefault('regexp', '.')
# Sets the default retrieval option to files
config.setdefault('retrieve', self.retrieval_options)
return config
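    # A minimal sketch of how a glob mask from the config is turned into a
    # regexp and matched, mirroring the fnmatch.translate() call in
    # prepare_config above; the filename used here is a made-up example.
    @staticmethod
    def _mask_match_example():
        from fnmatch import translate
        pattern = re.compile(translate('*.mkv'), re.IGNORECASE)
        return bool(pattern.match('Some.Movie.2016.mkv'))  # True for this mask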
def create_entry(self, filepath, test_mode):
"""
Creates a single entry using a filepath and a type (file/dir)
"""
filepath = filepath.abspath()
entry = Entry()
entry['location'] = filepath
if PY2:
import urllib
import urlparse
entry['url'] = urlparse.urljoin('file:', urllib.pathname2url(filepath.encode('utf8')))
else:
import pathlib
entry['url'] = pathlib.Path(filepath).absolute().as_uri()
entry['filename'] = filepath.name
if filepath.isfile():
entry['title'] = filepath.namebase
else:
entry['title'] = filepath.name
try:
entry['timestamp'] = datetime.fromtimestamp(filepath.getmtime())
except Exception as e:
log.warning('Error setting timestamp for %s: %s' % (filepath, e))
entry['timestamp'] = None
entry['accessed'] = datetime.fromtimestamp(filepath.getatime())
entry['modified'] = datetime.fromtimestamp(filepath.getmtime())
entry['created'] = datetime.fromtimestamp(filepath.getctime())
if entry.isvalid():
if test_mode:
log.info("Test mode. Entry includes:")
log.info(" Title: %s" % entry["title"])
log.info(" URL: %s" % entry["url"])
log.info(" Filename: %s" % entry["filename"])
log.info(" Location: %s" % entry["location"])
log.info(" Timestamp: %s" % entry["timestamp"])
return entry
else:
log.error('Non valid entry created: %s ' % entry)
return
def get_max_depth(self, recursion, base_depth):
if recursion is False:
return base_depth + 1
elif recursion is True:
return float('inf')
else:
return base_depth + recursion
def get_folder_objects(self, folder, recursion):
if recursion is False:
return folder.listdir()
else:
return folder.walk(errors='ignore')
def get_entries_from_path(self, path_list, match, recursion, test_mode, get_files, get_dirs, get_symlinks):
entries = []
for folder in path_list:
log.verbose('Scanning folder %s. Recursion is set to %s.' % (folder, recursion))
folder = Path(folder).expanduser()
log.debug('Scanning %s' % folder)
base_depth = len(folder.splitall())
max_depth = self.get_max_depth(recursion, base_depth)
folder_objects = self.get_folder_objects(folder, recursion)
for path_object in folder_objects:
log.debug('Checking if %s qualifies to be added as an entry.' % path_object)
try:
path_object.exists()
except UnicodeError:
log.error('File %s not decodable with filesystem encoding: %s' % (
path_object, sys.getfilesystemencoding()))
continue
entry = None
object_depth = len(path_object.splitall())
if object_depth <= max_depth:
if match(path_object):
if (path_object.isdir() and get_dirs) or (
path_object.islink() and get_symlinks) or (
path_object.isfile() and not path_object.islink() and get_files):
entry = self.create_entry(path_object, test_mode)
else:
log.debug("Path object's %s type doesn't match requested object types." % path_object)
if entry and entry not in entries:
entries.append(entry)
return entries
def on_task_input(self, task, config):
config = self.prepare_config(config)
path_list = config['path']
test_mode = task.options.test
match = re.compile(config['regexp'], re.IGNORECASE).match
recursive = config['recursive']
get_files = 'files' in config['retrieve']
get_dirs = 'dirs' in config['retrieve']
get_symlinks = 'symlinks' in config['retrieve']
log.verbose('Starting to scan folders.')
return self.get_entries_from_path(path_list, match, recursive, test_mode, get_files, get_dirs, get_symlinks)
@event('plugin.register')
def register_plugin():
plugin.register(Filesystem, 'filesystem', api_ver=2)
|
sean797/Flexget
|
flexget/plugins/input/filesystem.py
|
Python
|
mit
| 7,624
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import shutil
import tempfile
from airflow import configuration
from airflow import models, DAG
from airflow.exceptions import AirflowSensorTimeout
from airflow.contrib.sensors.file_sensor import FileSensor
from airflow.settings import Session
from airflow.utils.timezone import datetime
TEST_DAG_ID = 'unit_tests'
DEFAULT_DATE = datetime(2015, 1, 1)
configuration.load_test_config()
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class FileSensorTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
from airflow.contrib.hooks.fs_hook import FSHook
hook = FSHook()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'provide_context': True
}
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once', default_args=args)
dag.schedule_interval = '@once'
self.hook = hook
self.dag = dag
def test_simple(self):
with tempfile.NamedTemporaryFile() as tmp:
task = FileSensor(
task_id="test",
filepath=tmp.name[1:],
fs_conn_id='fs_default',
dag=self.dag,
timeout=0,
)
task._hook = self.hook
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_file_in_nonexistent_dir(self):
dir = tempfile.mkdtemp()
task = FileSensor(
task_id="test",
filepath=dir[1:] + "/file",
fs_conn_id='fs_default',
dag=self.dag,
timeout=0,
)
task._hook = self.hook
try:
with self.assertRaises(AirflowSensorTimeout):
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
finally:
shutil.rmtree(dir)
def test_empty_dir(self):
dir = tempfile.mkdtemp()
task = FileSensor(
task_id="test",
filepath=dir[1:],
fs_conn_id='fs_default',
dag=self.dag,
timeout=0,
)
task._hook = self.hook
try:
with self.assertRaises(AirflowSensorTimeout):
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
finally:
shutil.rmtree(dir)
def test_file_in_dir(self):
dir = tempfile.mkdtemp()
task = FileSensor(
task_id="test",
filepath=dir[1:],
fs_conn_id='fs_default',
dag=self.dag,
timeout=0,
)
task._hook = self.hook
try:
            # `touch` a file inside the dir
open(dir + "/file", "a").close()
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
finally:
shutil.rmtree(dir)
if __name__ == '__main__':
unittest.main()
|
danielvdende/incubator-airflow
|
tests/contrib/sensors/test_file_sensor.py
|
Python
|
apache-2.0
| 3,968
|
from django.db import models
class InfantCnsManager(models.Manager):
def get_by_natural_key(self, cns, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(cns=cns, congenital_anomalies=infant_congenital_anomalities)
class InfantFacialDefectManager(models.Manager):
def get_by_natural_key(self, facial_defect, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(facial_defect=facial_defect, congenital_anomalies=infant_congenital_anomalities)
class InfantCleftDisorderManager(models.Manager):
def get_by_natural_key(self, cleft_disorder, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(cleft_disorder=cleft_disorder, congenital_anomalies=infant_congenital_anomalities)
class InfantMouthUpGiManager(models.Manager):
def get_by_natural_key(self, mouth_up_gi, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(mouth_up_gi=mouth_up_gi, congenital_anomalies=infant_congenital_anomalities)
class InfantCardioDisorderManager(models.Manager):
def get_by_natural_key(self, cardio_disorder, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(cardio_disorder=cardio_disorder, congenital_anomalies=infant_congenital_anomalities)
class InfantRespiratoryDefectManager(models.Manager):
def get_by_natural_key(self, respiratory_defect, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(respiratory_defect=respiratory_defect, congenital_anomalies=infant_congenital_anomalities)
class InfantLowerGiManager(models.Manager):
def get_by_natural_key(self, lower_gi, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(lower_gi=lower_gi, congenital_anomalies=infant_congenital_anomalities)
class InfantFemaleGenitalManager(models.Manager):
def get_by_natural_key(self, female_genital, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(female_genital=female_genital, congenital_anomalies=infant_congenital_anomalities)
class InfantMaleGenitalManager(models.Manager):
def get_by_natural_key(self, male_genital, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(male_genital=male_genital, congenital_anomalies=infant_congenital_anomalities)
class InfantRenalManager(models.Manager):
def get_by_natural_key(self, InfantRenal, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(InfantRenal=InfantRenal, congenital_anomalies=infant_congenital_anomalities)
class InfantMusculoskeletalManager(models.Manager):
def get_by_natural_key(self, musculo_skeletal, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(musculo_skeletal=musculo_skeletal, congenital_anomalies=infant_congenital_anomalities)
class InfantSkinManager(models.Manager):
def get_by_natural_key(self, skin, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(skin=skin, congenital_anomalies=infant_congenital_anomalities)
class InfantTrisomiesManager(models.Manager):
def get_by_natural_key(self, trisomies, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(trisomies=trisomies, congenital_anomalies=infant_congenital_anomalities)
class InfantOtherAbnormalityItemsManager(models.Manager):
def get_by_natural_key(self, other_abnormalities, report_datetime, visit_instance, code, subject_identifier_as_pk):
InfantCongenitalAnomalies = models.get_model('mb_infant', 'InfantCongenitalAnomalies')
infant_congenital_anomalities = InfantCongenitalAnomalies.objects.get_by_natural_key(
report_datetime, visit_instance, code, subject_identifier_as_pk)
return self.get(other_abnormalities=other_abnormalities, congenital_anomalies=infant_congenital_anomalities)
|
TshepangRas/tshilo-dikotla
|
td_infant/managers/congenital_anomalities_manager.py
|
Python
|
gpl-2.0
| 7,487
|
"""
"""
import pygame
from pygame.locals import *
import pguglobals
import container
from const import *
class App(container.Container):
"""The top-level widget for an application.
<pre>App(theme=None)</pre>
<dl>
<dt>theme<dd>an instance of a Theme, optional as it will use the default Theme class.
</dl>
<strong>Basic Example</strong>
<code>
app = gui.App()
app.run(widget=widget,screen=screen)
</code>
<strong>Integrated Example</strong>
<code>
app = gui.App()
gui.init(widget=widget)
while 1:
for e in pygame.event.get():
app.event(e)
app.update(screen)
</code>
"""
def __init__(self,theme=None,**params):
self.set_global_app()
if theme == None:
from theme import Theme
theme = Theme()
self.theme = theme
params['decorate'] = 'app'
container.Container.__init__(self,**params)
self._quit = False
self.widget = None
self._chsize = False
self._repaint = False
self.screen = None
self.container = None
self.events = []
def set_global_app(self):
# Keep a global reference to this application instance so that PGU
# components can easily find it.
pguglobals.app = self
# For backwards compatibility we keep a reference in the class
# itself too.
App.app = self
def resize(self):
screen = self.screen
w = self.widget
wsize = 0
        #4 cases
#input screen is already set use its size
if screen:
self.screen = screen
width,height = screen.get_width(),screen.get_height()
#display.screen
elif pygame.display.get_surface():
screen = pygame.display.get_surface()
self.screen = screen
width,height = screen.get_width(),screen.get_height()
#app has width,height
elif self.style.width != 0 and self.style.height != 0:
screen = pygame.display.set_mode((self.style.width,self.style.height),SWSURFACE)
self.screen = screen
width,height = screen.get_width(),screen.get_height()
#widget has width,height, or its own size..
else:
wsize = 1
width,height = w.rect.w,w.rect.h = w.resize()
#w._resize()
screen = pygame.display.set_mode((width,height),SWSURFACE)
self.screen = screen
#use screen to set up size of this widget
self.style.width,self.style.height = width,height
self.rect.w,self.rect.h = width,height
self.rect.x,self.rect.y = 0,0
w.rect.x,w.rect.y = 0,0
w.rect.w,w.rect.h = w.resize(width,height)
for w in self.windows:
w.rect.w,w.rect.h = w.resize()
self._chsize = False
def init(self,widget=None,screen=None): #TODO widget= could conflict with module widget
"""Initialize the application.
<pre>App.init(widget=None,screen=None)</pre>
<dl>
<dt>widget<dd>main widget
<dt>screen<dd>pygame.Surface to render to
</dl>
"""
self.set_global_app()
if widget: self.widget = widget
if screen: self.screen = screen
self.resize()
w = self.widget
self.widgets = []
self.widgets.append(w)
w.container = self
self.focus(w)
pygame.key.set_repeat(500,30)
self._repaint = True
self._quit = False
self.send(INIT)
def event(self,e):
"""Pass an event to the main widget.
<pre>App.event(e)</pre>
<dl>
<dt>e<dd>event
</dl>
"""
self.set_global_app()
#NOTE: might want to deal with ACTIVEEVENT in the future.
self.send(e.type,e)
container.Container.event(self,e)
if e.type == MOUSEBUTTONUP:
if e.button not in (4,5): #ignore mouse wheel
sub = pygame.event.Event(CLICK,{
'button':e.button,
'pos':e.pos})
self.send(sub.type,sub)
container.Container.event(self,sub)
def loop(self):
self.set_global_app()
s = self.screen
for e in pygame.event.get():
if not (e.type == QUIT and self.mywindow):
self.event(e)
us = self.update(s)
pygame.display.update(us)
def paint(self,screen):
self.screen = screen
if self._chsize:
self.resize()
self._chsize = False
if hasattr(self,'background'):
self.background.paint(screen)
container.Container.paint(self,screen)
def update(self,screen):
"""Update the screen.
<dl>
<dt>screen<dd>pygame surface
</dl>
"""
self.screen = screen
if self._chsize:
self.resize()
self._chsize = False
if self._repaint:
self.paint(screen)
self._repaint = False
return [pygame.Rect(0,0,screen.get_width(),screen.get_height())]
else:
us = container.Container.update(self,screen)
return us
def run(self,widget=None,screen=None):
"""Run an application.
<p>Automatically calls <tt>App.init</tt> and then forever loops <tt>App.event</tt> and <tt>App.update</tt></p>
<dl>
<dt>widget<dd>main widget
<dt>screen<dd>pygame.Surface to render to
</dl>
"""
self.init(widget,screen)
while not self._quit:
self.loop()
pygame.time.wait(10)
def reupdate(self,w=None): pass
def repaint(self,w=None): self._repaint = True
def repaintall(self): self._repaint = True
def chsize(self):
self._chsize = True
self._repaint = True
def quit(self,value=None): self._quit = True
def open(self, w, pos=None):
w.container = self
if (w.rect.w == 0 or w.rect.h == 0):
w.rect.size = w.resize()
if (not pos):
# Auto-center the window
w.rect.center = self.rect.center
#w.rect.topleft = ((self.rect.w - w.rect.w)/2,
# (self.rect.h - w.rect.h)/2)
else:
# Show the window in a particular location
w.rect.topleft = pos
self.windows.append(w)
self.mywindow = w
self.focus(w)
self.repaint(w)
w.send(OPEN)
def close(self, w):
if self.myfocus is w: self.blur(w)
if w not in self.windows: return #no need to remove it twice! happens.
self.windows.remove(w)
self.mywindow = None
if self.windows:
self.mywindow = self.windows[-1]
self.focus(self.mywindow)
if not self.mywindow:
self.myfocus = self.widget #HACK: should be done fancier, i think..
if not self.myhover:
self.enter(self.widget)
self.repaintall()
w.send(CLOSE)
class Desktop(App):
"""Create an App using the <tt>desktop</tt> theme class.
<pre>Desktop()</pre>
"""
def __init__(self,**params):
params.setdefault('cls','desktop')
App.__init__(self,**params)
|
JordanMagnuson/Country-Connect
|
pgu/gui/app.py
|
Python
|
lgpl-2.1
| 7,728
|
#!/usr/bin/python
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_swarm
short_description: Manage Swarm cluster
version_added: "2.7"
description:
- Create a new Swarm cluster.
- Add/Remove nodes or managers to an existing cluster.
options:
advertise_addr:
description:
- Externally reachable address advertised to other nodes.
- This can either be an address/port combination
in the form C(192.168.1.1:4567), or an interface followed by a
port number, like C(eth0:4567).
- If the port number is omitted,
the port number from the listen address is used.
- If I(advertise_addr) is not specified, it will be automatically
detected when possible.
- Only used when swarm is initialised or joined. Because of this it's not
considered for idempotency checking.
type: str
default_addr_pool:
description:
- Default address pool in CIDR format.
- Only used when swarm is initialised. Because of this it's not considered
for idempotency checking.
- Requires API version >= 1.39.
type: list
elements: str
version_added: "2.8"
subnet_size:
description:
- Default address pool subnet mask length.
- Only used when swarm is initialised. Because of this it's not considered
for idempotency checking.
- Requires API version >= 1.39.
type: int
version_added: "2.8"
listen_addr:
description:
- Listen address used for inter-manager communication.
- This can either be an address/port combination in the form
C(192.168.1.1:4567), or an interface followed by a port number,
like C(eth0:4567).
- If the port number is omitted, the default swarm listening port
is used.
- Only used when swarm is initialised or joined. Because of this it's not
considered for idempotency checking.
type: str
default: 0.0.0.0:2377
force:
description:
- Use with state C(present) to force creating a new Swarm, even if already part of one.
- Use with state C(absent) to Leave the swarm even if this node is a manager.
type: bool
default: no
state:
description:
- Set to C(present), to create/update a new cluster.
- Set to C(join), to join an existing cluster.
- Set to C(absent), to leave an existing cluster.
- Set to C(remove), to remove an absent node from the cluster.
Note that removing requires Docker SDK for Python >= 2.4.0.
    - Set to C(inspect) to display swarm information.
type: str
default: present
choices:
- present
- join
- absent
- remove
- inspect
node_id:
description:
- Swarm id of the node to remove.
- Used with I(state=remove).
type: str
join_token:
description:
- Swarm token used to join a swarm cluster.
- Used with I(state=join).
type: str
remote_addrs:
description:
- Remote address of one or more manager nodes of an existing Swarm to connect to.
- Used with I(state=join).
type: list
elements: str
task_history_retention_limit:
description:
- Maximum number of tasks history stored.
- Docker default value is C(5).
type: int
snapshot_interval:
description:
- Number of logs entries between snapshot.
- Docker default value is C(10000).
type: int
keep_old_snapshots:
description:
- Number of snapshots to keep beyond the current snapshot.
- Docker default value is C(0).
type: int
log_entries_for_slow_followers:
description:
- Number of log entries to keep around to sync up slow followers after a snapshot is created.
type: int
heartbeat_tick:
description:
- Amount of ticks (in seconds) between each heartbeat.
- Docker default value is C(1s).
type: int
election_tick:
description:
- Amount of ticks (in seconds) needed without a leader to trigger a new election.
- Docker default value is C(10s).
type: int
dispatcher_heartbeat_period:
description:
- The delay for an agent to send a heartbeat to the dispatcher.
- Docker default value is C(5s).
type: int
node_cert_expiry:
description:
- Automatic expiry for nodes certificates.
- Docker default value is C(3months).
type: int
name:
description:
- The name of the swarm.
type: str
labels:
description:
- User-defined key/value metadata.
- Label operations in this module apply to the docker swarm cluster.
Use M(docker_node) module to add/modify/remove swarm node labels.
- Requires API version >= 1.32.
type: dict
signing_ca_cert:
description:
- The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
- This must not be a path to a certificate, but the contents of the certificate.
- Requires API version >= 1.30.
type: str
signing_ca_key:
description:
- The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
- This must not be a path to a key, but the contents of the key.
- Requires API version >= 1.30.
type: str
ca_force_rotate:
description:
- An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
if none have been specified.
- Docker default value is C(0).
- Requires API version >= 1.30.
type: int
autolock_managers:
description:
- If set, generate a key and use it to lock data stored on the managers.
- Docker default value is C(no).
- M(docker_swarm_info) can be used to retrieve the unlock key.
type: bool
rotate_worker_token:
description: Rotate the worker join token.
type: bool
default: no
rotate_manager_token:
description: Rotate the manager join token.
type: bool
default: no
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- Docker API >= 1.25
author:
- Thierry Bouvet (@tbouvet)
- Piotr Wojciechowski (@WojciechowskiPiotr)
'''
EXAMPLES = '''
- name: Init a new swarm with default parameters
docker_swarm:
state: present
- name: Update swarm configuration
docker_swarm:
state: present
election_tick: 5
- name: Add nodes
docker_swarm:
state: join
advertise_addr: 192.168.1.2
join_token: SWMTKN-1--xxxxx
remote_addrs: [ '192.168.1.1:2377' ]
- name: Leave swarm for a node
docker_swarm:
state: absent
- name: Remove a swarm manager
docker_swarm:
state: absent
force: true
- name: Remove node from swarm
docker_swarm:
state: remove
node_id: mynode
- name: Inspect swarm
docker_swarm:
state: inspect
register: swarm_info
'''
RETURN = '''
swarm_facts:
    description: Information about the swarm.
returned: success
type: dict
contains:
JoinTokens:
description: Tokens to connect to the Swarm.
returned: success
type: dict
contains:
Worker:
description: Token to create a new *worker* node
returned: success
type: str
example: SWMTKN-1--xxxxx
Manager:
description: Token to create a new *manager* node
returned: success
type: str
example: SWMTKN-1--xxxxx
UnlockKey:
description: The swarm unlock-key if I(autolock_managers) is C(true).
returned: on success if I(autolock_managers) is C(true)
and swarm is initialised, or if I(autolock_managers) has changed.
type: str
example: SWMKEY-1-xxx
actions:
description: Provides the actions done on the swarm.
returned: when action failed.
type: list
elements: str
example: "['This cluster is already a swarm cluster']"
'''
import json
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.docker.common import (
DockerBaseClass,
DifferenceTracker,
RequestException,
)
from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient
from ansible.module_utils._text import to_native
class TaskParameters(DockerBaseClass):
def __init__(self):
super(TaskParameters, self).__init__()
self.advertise_addr = None
self.listen_addr = None
self.remote_addrs = None
self.join_token = None
# Spec
self.snapshot_interval = None
self.task_history_retention_limit = None
self.keep_old_snapshots = None
self.log_entries_for_slow_followers = None
self.heartbeat_tick = None
self.election_tick = None
self.dispatcher_heartbeat_period = None
self.node_cert_expiry = None
self.name = None
self.labels = None
self.log_driver = None
self.signing_ca_cert = None
self.signing_ca_key = None
self.ca_force_rotate = None
self.autolock_managers = None
self.rotate_worker_token = None
self.rotate_manager_token = None
self.default_addr_pool = None
self.subnet_size = None
@staticmethod
def from_ansible_params(client):
result = TaskParameters()
for key, value in client.module.params.items():
if key in result.__dict__:
setattr(result, key, value)
result.update_parameters(client)
return result
def update_from_swarm_info(self, swarm_info):
spec = swarm_info['Spec']
ca_config = spec.get('CAConfig') or dict()
if self.node_cert_expiry is None:
self.node_cert_expiry = ca_config.get('NodeCertExpiry')
if self.ca_force_rotate is None:
self.ca_force_rotate = ca_config.get('ForceRotate')
dispatcher = spec.get('Dispatcher') or dict()
if self.dispatcher_heartbeat_period is None:
self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
raft = spec.get('Raft') or dict()
if self.snapshot_interval is None:
self.snapshot_interval = raft.get('SnapshotInterval')
if self.keep_old_snapshots is None:
self.keep_old_snapshots = raft.get('KeepOldSnapshots')
if self.heartbeat_tick is None:
self.heartbeat_tick = raft.get('HeartbeatTick')
if self.log_entries_for_slow_followers is None:
self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
if self.election_tick is None:
self.election_tick = raft.get('ElectionTick')
orchestration = spec.get('Orchestration') or dict()
if self.task_history_retention_limit is None:
self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
encryption_config = spec.get('EncryptionConfig') or dict()
if self.autolock_managers is None:
self.autolock_managers = encryption_config.get('AutoLockManagers')
if self.name is None:
self.name = spec['Name']
if self.labels is None:
self.labels = spec.get('Labels') or {}
if 'LogDriver' in spec['TaskDefaults']:
self.log_driver = spec['TaskDefaults']['LogDriver']
def update_parameters(self, client):
assign = dict(
snapshot_interval='snapshot_interval',
task_history_retention_limit='task_history_retention_limit',
keep_old_snapshots='keep_old_snapshots',
log_entries_for_slow_followers='log_entries_for_slow_followers',
heartbeat_tick='heartbeat_tick',
election_tick='election_tick',
dispatcher_heartbeat_period='dispatcher_heartbeat_period',
node_cert_expiry='node_cert_expiry',
name='name',
labels='labels',
signing_ca_cert='signing_ca_cert',
signing_ca_key='signing_ca_key',
ca_force_rotate='ca_force_rotate',
autolock_managers='autolock_managers',
log_driver='log_driver',
)
params = dict()
for dest, source in assign.items():
if not client.option_minimal_versions[source]['supported']:
continue
value = getattr(self, source)
if value is not None:
params[dest] = value
self.spec = client.create_swarm_spec(**params)
def compare_to_active(self, other, client, differences):
for k in self.__dict__:
if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
'rotate_worker_token', 'rotate_manager_token', 'spec',
'default_addr_pool', 'subnet_size'):
continue
if not client.option_minimal_versions[k]['supported']:
continue
value = getattr(self, k)
if value is None:
continue
other_value = getattr(other, k)
if value != other_value:
differences.add(k, parameter=value, active=other_value)
if self.rotate_worker_token:
differences.add('rotate_worker_token', parameter=True, active=False)
if self.rotate_manager_token:
differences.add('rotate_manager_token', parameter=True, active=False)
return differences
class SwarmManager(DockerBaseClass):
def __init__(self, client, results):
super(SwarmManager, self).__init__()
self.client = client
self.results = results
self.check_mode = self.client.check_mode
self.swarm_info = {}
self.state = client.module.params['state']
self.force = client.module.params['force']
self.node_id = client.module.params['node_id']
self.differences = DifferenceTracker()
self.parameters = TaskParameters.from_ansible_params(client)
self.created = False
def __call__(self):
choice_map = {
"present": self.init_swarm,
"join": self.join,
"absent": self.leave,
"remove": self.remove,
"inspect": self.inspect_swarm
}
if self.state == 'inspect':
self.client.module.deprecate(
"The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
version='2.12', collection_name='ansible.builtin')
choice_map.get(self.state)()
if self.client.module._diff or self.parameters.debug:
diff = dict()
diff['before'], diff['after'] = self.differences.get_before_after()
self.results['diff'] = diff
def inspect_swarm(self):
try:
data = self.client.inspect_swarm()
json_str = json.dumps(data, ensure_ascii=False)
self.swarm_info = json.loads(json_str)
self.results['changed'] = False
self.results['swarm_facts'] = self.swarm_info
unlock_key = self.get_unlock_key()
self.swarm_info.update(unlock_key)
except APIError:
return
def get_unlock_key(self):
default = {'UnlockKey': None}
if not self.has_swarm_lock_changed():
return default
try:
return self.client.get_unlock_key() or default
except APIError:
return default
def has_swarm_lock_changed(self):
return self.parameters.autolock_managers and (
self.created or self.differences.has_difference_for('autolock_managers')
)
def init_swarm(self):
if not self.force and self.client.check_if_swarm_manager():
self.__update_swarm()
return
if not self.check_mode:
init_arguments = {
'advertise_addr': self.parameters.advertise_addr,
'listen_addr': self.parameters.listen_addr,
'force_new_cluster': self.force,
'swarm_spec': self.parameters.spec,
}
if self.parameters.default_addr_pool is not None:
init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
if self.parameters.subnet_size is not None:
init_arguments['subnet_size'] = self.parameters.subnet_size
try:
self.client.init_swarm(**init_arguments)
except APIError as exc:
self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
if not self.client.check_if_swarm_manager():
if not self.check_mode:
self.client.fail("Swarm not created or other error!")
self.created = True
self.inspect_swarm()
self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
self.differences.add('state', parameter='present', active='absent')
self.results['changed'] = True
self.results['swarm_facts'] = {
'JoinTokens': self.swarm_info.get('JoinTokens'),
'UnlockKey': self.swarm_info.get('UnlockKey')
}
def __update_swarm(self):
try:
self.inspect_swarm()
version = self.swarm_info['Version']['Index']
self.parameters.update_from_swarm_info(self.swarm_info)
old_parameters = TaskParameters()
old_parameters.update_from_swarm_info(self.swarm_info)
self.parameters.compare_to_active(old_parameters, self.client, self.differences)
if self.differences.empty:
self.results['actions'].append("No modification")
self.results['changed'] = False
return
update_parameters = TaskParameters.from_ansible_params(self.client)
update_parameters.update_parameters(self.client)
if not self.check_mode:
self.client.update_swarm(
version=version, swarm_spec=update_parameters.spec,
rotate_worker_token=self.parameters.rotate_worker_token,
rotate_manager_token=self.parameters.rotate_manager_token)
except APIError as exc:
self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
return
self.inspect_swarm()
self.results['actions'].append("Swarm cluster updated")
self.results['changed'] = True
def join(self):
if self.client.check_if_swarm_node():
self.results['actions'].append("This node is already part of a swarm.")
return
if not self.check_mode:
try:
self.client.join_swarm(
remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
except APIError as exc:
self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
self.results['actions'].append("New node is added to swarm cluster")
self.differences.add('joined', parameter=True, active=False)
self.results['changed'] = True
def leave(self):
if not self.client.check_if_swarm_node():
self.results['actions'].append("This node is not part of a swarm.")
return
if not self.check_mode:
try:
self.client.leave_swarm(force=self.force)
except APIError as exc:
self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
self.results['actions'].append("Node has left the swarm cluster")
self.differences.add('joined', parameter='absent', active='present')
self.results['changed'] = True
def remove(self):
if not self.client.check_if_swarm_manager():
self.client.fail("This node is not a manager.")
try:
status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
except APIError:
return
if not status_down:
self.client.fail("Can not remove the node. The status node is ready and not down.")
if not self.check_mode:
try:
self.client.remove_node(node_id=self.node_id, force=self.force)
except APIError as exc:
self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
self.results['actions'].append("Node is removed from swarm cluster.")
self.differences.add('joined', parameter=False, active=True)
self.results['changed'] = True
def _detect_remove_operation(client):
return client.module.params['state'] == 'remove'
def main():
argument_spec = dict(
advertise_addr=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
force=dict(type='bool', default=False),
listen_addr=dict(type='str', default='0.0.0.0:2377'),
remote_addrs=dict(type='list', elements='str'),
join_token=dict(type='str'),
snapshot_interval=dict(type='int'),
task_history_retention_limit=dict(type='int'),
keep_old_snapshots=dict(type='int'),
log_entries_for_slow_followers=dict(type='int'),
heartbeat_tick=dict(type='int'),
election_tick=dict(type='int'),
dispatcher_heartbeat_period=dict(type='int'),
node_cert_expiry=dict(type='int'),
name=dict(type='str'),
labels=dict(type='dict'),
signing_ca_cert=dict(type='str'),
signing_ca_key=dict(type='str'),
ca_force_rotate=dict(type='int'),
autolock_managers=dict(type='bool'),
node_id=dict(type='str'),
rotate_worker_token=dict(type='bool', default=False),
rotate_manager_token=dict(type='bool', default=False),
default_addr_pool=dict(type='list', elements='str'),
subnet_size=dict(type='int'),
)
required_if = [
('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']),
('state', 'remove', ['node_id'])
]
option_minimal_versions = dict(
labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
autolock_managers=dict(docker_py_version='2.6.0'),
log_driver=dict(docker_py_version='2.6.0'),
remove_operation=dict(
docker_py_version='2.4.0',
detect_usage=_detect_remove_operation,
usage_msg='remove swarm nodes'
),
default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
)
client = AnsibleDockerSwarmClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
min_docker_version='1.10.0',
min_docker_api_version='1.25',
option_minimal_versions=option_minimal_versions,
)
try:
results = dict(
changed=False,
result='',
actions=[]
)
SwarmManager(client, results)()
client.module.exit_json(**results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
except RequestException as e:
client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
s-hertel/ansible
|
test/support/integration/plugins/modules/docker_swarm.py
|
Python
|
gpl-3.0
| 24,565
|
import collections
class Solution(object):
def findDuplicate(self, paths):
"""
:type paths: List[str]
:rtype: List[List[str]]
"""
M = collections.defaultdict(list)
for i in paths:
data = i.split()
base = data[0]
for file in data[1:]:
name, _, content = file.partition('(')
M[content[:-1]].append(base + '/' + name)
return [x for x in M.values() if len(x) > 1]
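# A minimal usage sketch; the directory strings below follow the
# "<dir> <file1>(<content1>) <file2>(<content2>) ..." input format assumed by
# findDuplicate above, and are made-up examples.
def _example_usage():
    paths = ["root/a 1.txt(abcd) 2.txt(efgh)",
             "root/c 3.txt(abcd)",
             "root 4.txt(efgh)"]
    # Expected groups (order may vary):
    # [['root/a/1.txt', 'root/c/3.txt'], ['root/a/2.txt', 'root/4.txt']]
    return Solution().findDuplicate(paths)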
|
danisfermi/CodingPractice
|
LeetCode/Find Duplicate File in System/FindDuplicateFileinSystem.py
|
Python
|
gpl-3.0
| 469
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-OpenAIRE.
This file is imported by ``invenio_openaire.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = "1.0.0a15"
|
inveniosoftware/invenio-openaire
|
invenio_openaire/version.py
|
Python
|
mit
| 451
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Services Cron jobs
"""
from treeio.services.models import TicketQueue, TicketRecord
from django.core.urlresolvers import reverse
import datetime
def tickets_escalate():
"Automatically move tickets to upper queues when no action taken"
# Collect queues which have waiting time and next queue specified
queues = TicketQueue.objects.filter(waiting_time__isnull=False, next_queue__isnull=False)
now = datetime.datetime.now()
for queue in queues:
if queue.waiting_time and queue.next_queue:
# Calculate the timeframe outside of which idle tickets should be escalated
delta = datetime.timedelta(seconds=int(queue.waiting_time))
timeframe = now-delta
            # Collect tickets outside the timeframe
tickets = queue.ticket_set.filter(date_created__lt=timeframe, status__active=True)
for ticket in tickets:
# Identify if any recent updates have been made on the ticket
updates = ticket.updates.filter(date_created__gte=timeframe).exists()
if not updates:
ticket.queue = queue.next_queue
ticket.auto_notify = False
ticket.save()
record = TicketRecord(record_type='update')
record.format_message = 'Ticket automatically escalated from <a href="' + \
reverse('services_queue_view', args=[queue.id]) + \
'">' + unicode(queue) + '</a> to <a href="' + \
reverse('services_queue_view', args=[queue.next_queue.id]) + \
'">' + unicode(queue.next_queue) + '</a>.'
record.author = ticket.creator
record.save()
record.about.add(ticket)
ticket.set_last_updated()
|
rogeriofalcone/treeio
|
services/cron.py
|
Python
|
mit
| 2,070
|
import numpy as np
def normalized(a, axis=-1, order=2):
l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
l2[l2==0] = 1
return a / np.expand_dims(l2, axis)
A = np.random.randn(3,3,3)
print normalized(A,0)
print normalized(A,1)
print normalized(A,2)
print normalized(np.arange(3)[:,None])
print normalized(np.arange(3))
import math
def P(prev_score,next_score,temperature):
if next_score < prev_score:
return 1.0
else:
return math.exp( -abs(next_score-prev_score)/temperature )
print(P(7,8,1.5))
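# A minimal sketch of how an acceptance probability like P() is typically used
# inside a simulated-annealing loop; score() and random_neighbor() are
# hypothetical callables supplied by the caller.
def anneal_sketch(initial, score, random_neighbor, temperature=10.0, cooling=0.95, steps=1000):
    import random
    current = initial
    for _ in range(steps):
        candidate = random_neighbor(current)
        # improvements are always accepted; worse moves pass with probability P
        if random.random() < P(score(current), score(candidate), temperature):
            current = candidate
        temperature *= cooling  # gradually lower the temperature
    return current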
|
sergeimoiseev/othodi_code
|
old2/test_normalized.py
|
Python
|
mit
| 538
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
import gzip
import os
import os.path as P
import subprocess
from unittest import mock
import sys
import pytest
import smart_open.hdfs
CURR_DIR = P.dirname(P.abspath(__file__))
if sys.platform.startswith("win"):
pytest.skip("these tests don't work under Windows", allow_module_level=True)
#
# We want our mocks to emulate the real implementation as close as possible,
# so we use a Popen call during each test. If we mocked using io.BytesIO, then
# it is possible the mocks would behave differently to what we expect in real
# use.
#
# Since these tests use cat, they will not work in an environment without cat,
# such as Windows. The main line of this test submodule contains a simple
# cat implementation. We need this because Windows' analog, type, does
# weird stuff with line endings (inserts CRLF). Also, I don't know of a way
# to get type to echo standard input.
#
def cat(path=None):
command = [sys.executable, P.abspath(__file__)]
if path:
command.append(path)
return subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
CAP_PATH = P.join(CURR_DIR, 'test_data', 'crime-and-punishment.txt')
with open(CAP_PATH, encoding='utf-8') as fin:
CRIME_AND_PUNISHMENT = fin.read()
def test_sanity_read_bytes():
with open(CAP_PATH, 'rb') as fin:
lines = [line for line in fin]
assert len(lines) == 3
def test_sanity_read_text():
with open(CAP_PATH, 'r', encoding='utf-8') as fin:
text = fin.read()
expected = 'В начале июля, в чрезвычайно жаркое время'
assert text[:len(expected)] == expected
@pytest.mark.parametrize('schema', [('hdfs', ), ('viewfs', )])
def test_read(schema):
with mock.patch('subprocess.Popen', return_value=cat(CAP_PATH)):
reader = smart_open.hdfs.CliRawInputBase(f'{schema}://dummy/url')
as_bytes = reader.read()
#
# Not 100% sure why this is necessary on Windows platforms, but the
# tests fail without it. It may be a bug, but I don't have time to
# investigate right now.
#
as_text = as_bytes.decode('utf-8').replace(os.linesep, '\n')
assert as_text == CRIME_AND_PUNISHMENT
@pytest.mark.parametrize('schema', [('hdfs', ), ('viewfs', )])
def test_read_75(schema):
with mock.patch('subprocess.Popen', return_value=cat(CAP_PATH)):
reader = smart_open.hdfs.CliRawInputBase(f'{schema}://dummy/url')
as_bytes = reader.read(75)
as_text = as_bytes.decode('utf-8').replace(os.linesep, '\n')
assert as_text == CRIME_AND_PUNISHMENT[:len(as_text)]
@pytest.mark.parametrize('schema', [('hdfs', ), ('viewfs', )])
def test_unzip(schema):
with mock.patch('subprocess.Popen', return_value=cat(CAP_PATH + '.gz')):
with gzip.GzipFile(fileobj=smart_open.hdfs.CliRawInputBase(f'{schema}://dummy/url')) as fin:
as_bytes = fin.read()
as_text = as_bytes.decode('utf-8')
assert as_text == CRIME_AND_PUNISHMENT
@pytest.mark.parametrize('schema', [('hdfs', ), ('viewfs', )])
def test_context_manager(schema):
with mock.patch('subprocess.Popen', return_value=cat(CAP_PATH)):
with smart_open.hdfs.CliRawInputBase(f'{schema}://dummy/url') as fin:
as_bytes = fin.read()
as_text = as_bytes.decode('utf-8').replace('\r\n', '\n')
assert as_text == CRIME_AND_PUNISHMENT
@pytest.mark.parametrize('schema', [('hdfs', ), ('viewfs', )])
def test_write(schema):
expected = 'мы в ответе за тех, кого приручили'
mocked_cat = cat()
with mock.patch('subprocess.Popen', return_value=mocked_cat):
with smart_open.hdfs.CliRawOutputBase(f'{schema}://dummy/url') as fout:
fout.write(expected.encode('utf-8'))
actual = mocked_cat.stdout.read().decode('utf-8')
assert actual == expected
@pytest.mark.parametrize('schema', [('hdfs', ), ('viewfs', )])
def test_write_zip(schema):
expected = 'мы в ответе за тех, кого приручили'
mocked_cat = cat()
with mock.patch('subprocess.Popen', return_value=mocked_cat):
with smart_open.hdfs.CliRawOutputBase(f'{schema}://dummy/url') as fout:
with gzip.GzipFile(fileobj=fout, mode='wb') as gz_fout:
gz_fout.write(expected.encode('utf-8'))
with gzip.GzipFile(fileobj=mocked_cat.stdout) as fin:
actual = fin.read().decode('utf-8')
assert actual == expected
def main():
try:
path = sys.argv[1]
except IndexError:
bytez = sys.stdin.buffer.read()
else:
with open(path, 'rb') as fin:
bytez = fin.read()
sys.stdout.buffer.write(bytez)
sys.stdout.flush()
if __name__ == '__main__':
main()
|
RaRe-Technologies/smart_open
|
smart_open/tests/test_hdfs.py
|
Python
|
mit
| 4,884
|
x = int(raw_input())
d = int(raw_input())
i = raw_input()  # keep the value a string so it can be sliced below
print i[d:]+i[:d]
print i[-d:]+i[:-d]
|
ganesh-95/python-programs
|
pypro/1hr.py
|
Python
|
mit
| 94
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MySQL Service Benchmarks.
This is a set of benchmarks that measures performance of MySQL Databases on
managed MySQL services.
- On AWS, we will use RDS+MySQL.
- On GCP, we will use Cloud SQL v2 (Performance Edition). As of July 2015, you
will need to request to whitelist your GCP project to get access to Cloud SQL
v2. Follow instructions on your GCP's project console to do that.
As other cloud providers deliver a managed MySQL service, we will add it here.
"""
import json
import logging
import re
import StringIO
import time
import uuid
from perfkitbenchmarker import benchmark_spec as benchmark_spec_class
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'mysql_svc_db_instance_cores', '4', ['1', '4', '8', '16'],
'The number of cores to be provisioned for the DB instance.')
flags.DEFINE_integer('mysql_svc_oltp_tables_count', 4,
'The number of tables used in sysbench oltp.lua tests')
flags.DEFINE_integer('mysql_svc_oltp_table_size', 100000,
'The number of rows of each table used in the oltp tests')
flags.DEFINE_integer('sysbench_warmup_seconds', 120,
'The duration of the warmup run in which results are '
'discarded, in seconds.')
flags.DEFINE_integer('sysbench_run_seconds', 480,
'The duration of the actual run in which results are '
'collected, in seconds.')
flags.DEFINE_integer('sysbench_thread_count', 16,
'The number of test threads on the client side.')
flags.DEFINE_integer('sysbench_latency_percentile', 99,
'The latency percentile we ask sysbench to compute.')
flags.DEFINE_integer('sysbench_report_interval', 2,
'The interval, in seconds, we ask sysbench to report '
'results.')
BENCHMARK_NAME = 'mysql_service'
BENCHMARK_CONFIG = """
mysql_service:
description: MySQL service benchmarks.
vm_groups:
default:
vm_spec: *default_single_core
"""
# Query DB creation status once every 15 seconds
DB_STATUS_QUERY_INTERVAL = 15
# How many times we will wait for the service to create the DB
# total wait time is therefore: "query interval * query limit"
DB_STATUS_QUERY_LIMIT = 200
# Map from FLAGs.mysql_svc_db_instance_cores to RDS DB Type
RDS_CORE_TO_DB_CLASS_MAP = {
'1': 'db.m3.medium',
'4': 'db.m3.xlarge',
'8': 'db.m3.2xlarge',
'16': 'db.r3.4xlarge', # m3 series doesn't have 16 core.
}
RDS_DB_ENGINE = 'MySQL'
RDS_DB_ENGINE_VERSION = '5.6.23'
RDS_DB_STORAGE_TYPE_GP2 = 'gp2'
# Storage IOPS capacity of the DB instance.
# Currently this is fixed because the cloud provider GCP does not support
# changing this setting. As soon as it supports changing the storage size, we
# will expose a flag here to allow caller to select a storage size.
# Default GCP storage size is 1TB PD-SSD which supports 10K Read or 15K Write
# IOPS (12.5K mixed).
# To support 12.5K IOPS on EBS-GP, we need 4170 GB disk.
RDS_DB_STORAGE_GP2_SIZE = '4170'
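# Worked sizing example for the constant above, assuming the usual EBS gp2
# baseline of 3 IOPS per provisioned GB (an assumption, not stated in this
# file): 12500 IOPS / 3 IOPS-per-GB ~= 4167 GB, rounded up to 4170 GB.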
# A list of status strings that are possible during RDS DB creation.
RDS_DB_CREATION_PENDING_STATUS = frozenset(
['creating', 'modifying', 'backing-up', 'rebooting'])
# Constants defined for Sysbench tests.
RAND_INIT_ON = 'on'
DISABLE = 'disable'
UNIFORM = 'uniform'
OFF = 'off'
MYSQL_ROOT_USER = 'root'
MYSQL_ROOT_PASSWORD_PREFIX = 'Perfkit8'
MYSQL_PORT = '3306'
NORMAL_SYSBENCH_PATH_PREFIX = '/usr'
PREPARE_SCRIPT_PATH = '/share/doc/sysbench/tests/db/parallel_prepare.lua'
OLTP_SCRIPT_PATH = '/share/doc/sysbench/tests/db/oltp.lua'
SYSBENCH_RESULT_NAME_DATA_LOAD = 'sysbench data load time'
SYSBENCH_RESULT_NAME_TPS = 'sysbench tps'
SYSBENCH_RESULT_NAME_LATENCY = 'sysbench latency'
NA_UNIT = 'NA'
SECONDS_UNIT = 'seconds'
MS_UNIT = 'milliseconds'
# These are the constants that should be specified in GCP's cloud SQL command.
DEFAULT_BACKUP_START_TIME = '07:00'
GCP_MY_SQL_VERSION = 'MYSQL_5_6'
GCP_PRICING_PLAN = 'PACKAGE'
RESPONSE_TIME_TOKENS = ['min', 'avg', 'max', 'percentile']
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
class DBStatusQueryError(Exception):
pass
def _GenerateRandomPassword():
""" Generates a random password to be used by the DB instance.
Args:
None
Returns:
A string that can be used as password to a DB instance.
"""
return '%s%s' % (MYSQL_ROOT_PASSWORD_PREFIX, str(uuid.uuid4())[-8:])
def ParseSysbenchOutput(sysbench_output, results, metadata):
"""Parses sysbench output.
Extract relevant TPS and latency numbers, and populate the final result
collection with these information.
Specifically, we are interested in tps numbers reported by each reporting
interval, and the summary latency numbers printed at the end of the run in
"General Statistics" -> "Response Time".
Example Sysbench output:
sysbench 0.5: multi-threaded system evaluation benchmark
<... lots of output we don't care here ...>
Threads started!
[ 2s] threads: 16, tps: 526.38, reads: 7446.79, writes: 2105.52, response
time: 210.67ms (99%), errors: 0.00, reconnects: 0.00
< .... lots of tps output every 2 second, we need all those>
< ... lots of other output we don't care for now...>
General statistics:
total time: 17.0563s
total number of events: 10000
total time taken by event execution: 272.6053s
response time:
min: 18.31ms
avg: 27.26ms
max: 313.50ms
approx. 99 percentile: 57.15ms
< We care about the response time section above, these are latency numbers>
< then there are some outputs after this, we don't care either>
Args:
sysbench_output: The output from sysbench.
results: The dictionary to store results based on sysbench output.
metadata: The metadata to be passed along to the Samples class.
"""
all_tps = []
seen_general_statistics = False
seen_response_time = False
response_times = {}
sysbench_output_io = StringIO.StringIO(sysbench_output)
for line in sysbench_output_io.readlines():
if re.match('^\[', line):
tps = re.findall('tps: (.*?),', line)
all_tps.append(float(tps[0]))
continue
if line.startswith('General statistics:'):
seen_general_statistics = True
continue
if seen_general_statistics:
if re.match('^ +response time:.*', line):
seen_response_time = True
continue
if seen_general_statistics and seen_response_time:
for token in RESPONSE_TIME_TOKENS:
search_string = '.*%s: +(.*)ms' % token
if re.findall(search_string, line):
response_times[token] = float(re.findall(search_string, line)[0])
tps_line = ', '.join(map(str, all_tps))
# Print all tps data points in the log for reference. And report
# percentiles of these tps data in the final result set.
logging.info('All TPS numbers: \n %s', tps_line)
tps_percentile = sample.PercentileCalculator(all_tps)
for percentile in sample.PERCENTILES_LIST:
percentile_string = 'p%s' % str(percentile)
logging.info('%s tps %f', percentile_string,
tps_percentile[percentile_string])
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, percentile_string)
results.append(sample.Sample(
metric_name,
tps_percentile[percentile_string],
NA_UNIT,
metadata))
# Also report average, stddev, and coefficient of variation
for token in ['average', 'stddev']:
logging.info('tps %s %f', token, tps_percentile[token])
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, token)
results.append(sample.Sample(
metric_name,
tps_percentile[token],
NA_UNIT,
metadata))
if tps_percentile['average'] > 0:
cv = tps_percentile['stddev'] / tps_percentile['average']
logging.info('tps coefficient of variation %f', cv)
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, 'cv')
results.append(sample.Sample(
metric_name,
cv,
NA_UNIT,
metadata))
# Now, report the latency numbers.
for token in RESPONSE_TIME_TOKENS:
logging.info('%s_response_time is %f', token, response_times[token])
metric_name = '%s %s' % (SYSBENCH_RESULT_NAME_LATENCY, token)
if token == 'percentile':
metric_name = '%s %s' % (metric_name, FLAGS.sysbench_latency_percentile)
results.append(sample.Sample(
metric_name,
response_times[token],
MS_UNIT,
metadata))
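# Illustrative sketch (not part of the benchmark flow): ParseSysbenchOutput is
# meant to be fed the raw stdout of a sysbench run plus a mutable results list
# and a metadata dict, e.g. (values hypothetical):
#
#   results = []
#   metadata = {'sysbench_thread_count': 16}
#   ParseSysbenchOutput(sysbench_stdout, results, metadata)
#   # results now holds sample.Sample entries for the tps percentiles,
#   # average, stddev, cv, and for the latency tokens (min/avg/max/percentile).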
def _GetSysbenchCommandPrefix():
""" Decides what the prefix is for sysbench command based on os type.
Args:
None.
Returns:
A string representing the sysbench command prefix.
"""
if FLAGS.os_type == 'rhel':
return vm_util.VM_TMP_DIR
else:
return NORMAL_SYSBENCH_PATH_PREFIX
def _IssueSysbenchCommand(vm, duration):
""" Issues a sysbench run command given a vm and a duration.
Does nothing if duration is <= 0
Args:
vm: The test VM to issue command to.
duration: the duration of the sysbench run.
Returns:
stdout, stderr: the result of the command.
"""
stdout = ''
stderr = ''
oltp_script_path = '%s%s' % (_GetSysbenchCommandPrefix(), OLTP_SCRIPT_PATH)
if duration > 0:
run_cmd_tokens = ['sysbench',
'--test=%s' % oltp_script_path,
'--mysql_svc_oltp_tables_count=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--oltp-table-size=%d' %
FLAGS.mysql_svc_oltp_table_size,
'--rand-init=%s' % RAND_INIT_ON,
'--db-ps-mode=%s' % DISABLE,
'--oltp-dist-type=%s' % UNIFORM,
'--oltp-read-only=%s' % OFF,
'--num-threads=%d' % FLAGS.sysbench_thread_count,
'--percentile=%d' % FLAGS.sysbench_latency_percentile,
'--report-interval=%d' %
FLAGS.sysbench_report_interval,
'--max-requests=0',
'--max-time=%d' % duration,
'--mysql-user=%s' % vm.db_instance_master_user,
'--mysql-password="%s"' %
vm.db_instance_master_password,
'--mysql-host=%s' % vm.db_instance_address,
'run']
run_cmd = ' '.join(run_cmd_tokens)
stdout, stderr = vm.RobustRemoteCommand(run_cmd)
logging.info('Sysbench results: \n stdout is:\n%s\nstderr is\n%s',
stdout, stderr)
return stdout, stderr
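# The assembled command (illustrative, values hypothetical) looks roughly like:
#   sysbench --test=/usr/share/doc/sysbench/tests/db/oltp.lua ... \
#            --max-time=480 --mysql-user=root --mysql-password="..." \
#            --mysql-host=10.0.0.5 run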
def _RunSysbench(vm, metadata):
""" Runs the Sysbench OLTP test.
The test is run on the DB instance as indicated by the vm.db_instance_address.
Args:
vm: The client VM that will issue the sysbench test.
metadata: The PKB metadata to be passed along to the final results.
Returns:
Results: A list of results of this run.
"""
results = []
if not hasattr(vm, 'db_instance_address'):
logging.error(
'Prepare has likely failed, db_instance_address is not found.')
raise DBStatusQueryError('RunSysbench: DB instance address not found.')
# Create the sbtest database for Sysbench.
create_sbtest_db_cmd = ('mysql -h %s -u %s -p%s '
'-e \'create database sbtest;\'') % (
vm.db_instance_address,
vm.db_instance_master_user,
vm.db_instance_master_password)
stdout, stderr = vm.RemoteCommand(create_sbtest_db_cmd)
logging.info('sbtest db created, stdout is %s, stderr is %s',
stdout, stderr)
# Provision the Sysbench test based on the input flags (load data into DB)
# Could take a long time if the data to be loaded is large.
data_load_start_time = time.time()
prepare_script_path = '%s%s' % (_GetSysbenchCommandPrefix(),
PREPARE_SCRIPT_PATH)
data_load_cmd_tokens = ['sysbench',
'--test=%s' % prepare_script_path,
'--mysql_svc_oltp_tables_count=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--oltp-table-size=%d' %
FLAGS.mysql_svc_oltp_table_size,
'--rand-init=%s' % RAND_INIT_ON,
'--num-threads=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--mysql-user=%s' % vm.db_instance_master_user,
'--mysql-password="%s"' %
vm.db_instance_master_password,
'--mysql-host=%s' % vm.db_instance_address,
'run']
data_load_cmd = ' '.join(data_load_cmd_tokens)
# Sysbench output is in stdout, but we also get stderr just in case
# something went wrong.
stdout, stderr = vm.RobustRemoteCommand(data_load_cmd)
load_duration = time.time() - data_load_start_time
logging.info('It took %d seconds to finish the data loading step',
load_duration)
logging.info('data loading results: \n stdout is:\n%s\nstderr is\n%s',
stdout, stderr)
results.append(sample.Sample(
SYSBENCH_RESULT_NAME_DATA_LOAD,
load_duration,
SECONDS_UNIT,
metadata))
# Now run the sysbench OLTP test and parse the results.
for phase in ['warm-up', 'run']:
# First step is to run the test long enough to cover the warmup period
# as requested by the caller. Then we do the "real" run, parse and report
# the results.
duration = 0
if phase == 'warm-up' and FLAGS.sysbench_warmup_seconds > 0:
duration = FLAGS.sysbench_warmup_seconds
logging.info('Sysbench warm-up run, duration is %d', duration)
elif phase == 'run':
duration = FLAGS.sysbench_run_seconds
logging.info('Sysbench real run, duration is %d', duration)
stdout, stderr = _IssueSysbenchCommand(vm, duration)
if phase == 'run':
# We only need to parse the results for the "real" run.
logging.info('\n Parsing Sysbench Results...\n')
ParseSysbenchOutput(stdout, results, metadata)
return results
def _RDSParseDBInstanceStatus(json_response):
"""Parses a JSON response from an RDS DB status query command.
Args:
json_response: The response from the DB status query command in JSON.
Returns:
The DB instance status string, or an empty string if it cannot be found.
"""
status = ''
# Sometimes the status is under 'DBInstance'; other times it is under
# 'DBInstances' and we need to take the first element.
if 'DBInstance' in json_response:
status = json_response['DBInstance']['DBInstanceStatus']
else:
if 'DBInstances' in json_response:
status = json_response['DBInstances'][0]['DBInstanceStatus']
return status
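# For reference, the two (abbreviated, hypothetical) response shapes handled
# above look roughly like:
#   {"DBInstance":  {"DBInstanceStatus": "creating", ...}}    # create-db-instance
#   {"DBInstances": [{"DBInstanceStatus": "available", ...}]} # describe-db-instances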
class RDSMySQLBenchmark(object):
"""MySQL benchmark based on the RDS service on AWS."""
def Prepare(self, vm):
"""Prepares the DB and everything for the AWS-RDS provider.
Args:
vm: The VM to be used as the test client.
"""
logging.info('Preparing MySQL Service benchmarks for RDS.')
# TODO: Refactor the RDS DB instance creation and deletion logic out
# to a new class called RDSDBInstance that Inherits from
# perfkitbenchmarker.resource.BaseResource.
# And do the same for GCP.
# First is to create another subnet in the same VPC as the VM but in a
# different zone. RDS requires two subnets in two different zones to create
# a DB instance, EVEN IF you do not specify multi-AZ in your DB creation
# request.
# Get a list of zones and pick one that's different from the zone VM is in.
new_subnet_zone = None
get_zones_cmd = util.AWS_PREFIX + ['ec2', 'describe-availability-zones']
stdout, _, _ = vm_util.IssueCommand(get_zones_cmd)
response = json.loads(stdout)
all_zones = response['AvailabilityZones']
for zone in all_zones:
if zone['ZoneName'] != vm.zone:
new_subnet_zone = zone['ZoneName']
break
if new_subnet_zone is None:
raise DBStatusQueryError('Cannot find a zone to create the required '
'second subnet for the DB instance.')
# Now create a new subnet in the zone that's different from where the VM is
logging.info('Creating a second subnet in zone %s', new_subnet_zone)
new_subnet = aws_network.AwsSubnet(new_subnet_zone, vm.network.vpc.id,
'10.0.1.0/24')
new_subnet.Create()
logging.info('Successfully created a new subnet, subnet id is: %s',
new_subnet.id)
# Remember this so we can cleanup properly.
vm.extra_subnet_for_db = new_subnet
# Now we can create a new DB subnet group that has two subnets in it.
db_subnet_group_name = 'pkb%s' % FLAGS.run_uri
create_db_subnet_group_cmd = util.AWS_PREFIX + [
'rds',
'create-db-subnet-group',
'--db-subnet-group-name', db_subnet_group_name,
'--db-subnet-group-description', 'pkb_subnet_group_for_db',
'--subnet-ids', vm.network.subnet.id, new_subnet.id]
stdout, stderr, _ = vm_util.IssueCommand(create_db_subnet_group_cmd)
logging.info('Created a DB subnet group, stdout is:\n%s\nstderr is:\n%s',
stdout, stderr)
vm.db_subnet_group_name = db_subnet_group_name
# Open up TCP port 3306 in the VPC's security group; we need that to connect
# to the DB.
open_port_cmd = util.AWS_PREFIX + [
'ec2',
'authorize-security-group-ingress',
'--group-id', vm.group_id,
'--source-group', vm.group_id,
'--protocol', 'tcp',
'--port', MYSQL_PORT]
stdout, stderr, _ = vm_util.IssueCommand(open_port_cmd)
logging.info('Granted DB port ingress, stdout is:\n%s\nstderr is:\n%s',
stdout, stderr)
# Finally, it's time to create the DB instance!
vm.db_instance_id = 'pkb-DB-%s' % FLAGS.run_uri
db_class = \
RDS_CORE_TO_DB_CLASS_MAP['%s' % FLAGS.mysql_svc_db_instance_cores]
vm.db_instance_master_user = MYSQL_ROOT_USER
vm.db_instance_master_password = _GenerateRandomPassword()
create_db_cmd = util.AWS_PREFIX + [
'rds',
'create-db-instance',
'--db-instance-identifier', vm.db_instance_id,
'--db-instance-class', db_class,
'--engine', RDS_DB_ENGINE,
'--engine-version', RDS_DB_ENGINE_VERSION,
'--storage-type', RDS_DB_STORAGE_TYPE_GP2,
'--allocated-storage', RDS_DB_STORAGE_GP2_SIZE,
'--vpc-security-group-ids', vm.group_id,
'--master-username', vm.db_instance_master_user,
'--master-user-password', vm.db_instance_master_password,
'--availability-zone', vm.zone,
'--db-subnet-group-name', vm.db_subnet_group_name]
status_query_cmd = util.AWS_PREFIX + [
'rds',
'describe-db-instances',
'--db-instance-identifier', vm.db_instance_id]
stdout, stderr, _ = vm_util.IssueCommand(create_db_cmd)
logging.info('Request to create the DB has been issued, stdout:\n%s\n'
'stderr:%s\n', stdout, stderr)
response = json.loads(stdout)
db_creation_status = _RDSParseDBInstanceStatus(response)
for status_query_count in xrange(1, DB_STATUS_QUERY_LIMIT + 1):
if db_creation_status == 'available':
break
if db_creation_status not in RDS_DB_CREATION_PENDING_STATUS:
raise DBStatusQueryError('Invalid status in DB creation response. '
' stdout is\n%s, stderr is\n%s' % (
stdout, stderr))
logging.info('Querying db creation status, current state is %s, query '
'count is %d', db_creation_status, status_query_count)
time.sleep(DB_STATUS_QUERY_INTERVAL)
stdout, stderr, _ = vm_util.IssueCommand(status_query_cmd)
response = json.loads(stdout)
db_creation_status = _RDSParseDBInstanceStatus(response)
else:
raise DBStatusQueryError('DB creation timed-out, we have '
'waited at least %s * %s seconds.' % (
DB_STATUS_QUERY_INTERVAL,
DB_STATUS_QUERY_LIMIT))
# We are good now, db has been created. Now get the endpoint address.
# On RDS, you always connect with a DNS name, if you do that from a EC2 VM,
# that DNS name will be resolved to an internal IP address of the DB.
if 'DBInstance' in response:
vm.db_instance_address = response['DBInstance']['Endpoint']['Address']
else:
if 'DBInstances' in response:
vm.db_instance_address = \
response['DBInstances'][0]['Endpoint']['Address']
logging.info('Successfully created an RDS DB instance. Address is %s',
vm.db_instance_address)
logging.info('Complete output is:\n %s', response)
def Cleanup(self, vm):
"""Clean up RDS instances, cleanup the extra subnet created for the
creation of the RDS instance.
Args:
vm: The VM that was used as the test client, which also stores states
for clean-up.
"""
# Now, we can delete the DB instance. vm.db_instance_id is the id to call.
# We need to keep querying the status of the deletion here before we let
# this go. RDS DB deletion takes some time to finish. And we have to
# wait until this DB is deleted before we proceed because this DB holds
# references to various other resources: subnet groups, subnets, vpc, etc.
delete_db_cmd = util.AWS_PREFIX + [
'rds',
'delete-db-instance',
'--db-instance-identifier', vm.db_instance_id,
'--skip-final-snapshot']
logging.info('Deleting db instance %s...', vm.db_instance_id)
# Note below, the status of this deletion command is validated below in the
# loop. both stdout and stderr are checked.
stdout, stderr, _ = vm_util.IssueCommand(delete_db_cmd)
logging.info('Request to delete the DB has been issued, stdout:\n%s\n'
'stderr:%s\n', stdout, stderr)
status_query_cmd = util.AWS_PREFIX + [
'rds',
'describe-db-instances',
'--db-instance-identifier', vm.db_instance_id]
db_status = None
for status_query_count in xrange(1, DB_STATUS_QUERY_LIMIT + 1):
try:
response = json.loads(stdout)
db_status = _RDSParseDBInstanceStatus(response)
if db_status == 'deleting':
logging.info('DB is still in the deleting state, status_query_count '
'is %d', status_query_count)
# Wait for a few seconds and query status
time.sleep(DB_STATUS_QUERY_INTERVAL)
stdout, stderr, _ = vm_util.IssueCommand(status_query_cmd)
else:
logging.info('DB deletion status is no longer in deleting, it is %s',
db_status)
break
except:
# stdout cannot be parsed into json, it might simply be empty because
# deletion has been completed.
break
else:
logging.warn('DB is still in deleting state after long wait, bail.')
db_instance_deletion_failed = False
if db_status == 'deleted' or re.findall('DBInstanceNotFound', stderr):
# Sometimes we get a 'deleted' status from DB status query command,
# but even more times, the DB status query command would fail with
# an "not found" error, both are positive confirmation that the DB has
# been deleted.
logging.info('DB has been successfully deleted, got confirmation.')
else:
# We did not get a positive confirmation that the DB is deleted even after
# long wait, we have to bail. But we will log an error message, and
# then raise an exception at the end of this function so this particular
# run will show as a failed run to the user and allow them to examine
# the logs
db_instance_deletion_failed = True
logging.error(
'RDS DB instance %s failed to be deleted, we did not get '
'final confirmation from stderr, which is:\n %s', vm.db_instance_id,
stderr)
if hasattr(vm, 'db_subnet_group_name'):
delete_db_subnet_group_cmd = util.AWS_PREFIX + [
'rds',
'delete-db-subnet-group',
'--db-subnet-group-name', vm.db_subnet_group_name]
stdout, stderr, _ = vm_util.IssueCommand(delete_db_subnet_group_cmd)
logging.info('Deleted the db subnet group. stdout is:\n%s, stderr: \n%s',
stdout, stderr)
if hasattr(vm, 'extra_subnet_for_db'):
vm.extra_subnet_for_db.Delete()
if db_instance_deletion_failed:
raise DBStatusQueryError('Failed to get confirmation of DB instance '
'deletion! Check the log for details!')
class GoogleCloudSQLBenchmark(object):
"""MySQL benchmark based on the Google Cloud SQL service."""
def Prepare(self, vm):
"""Prepares the DB and everything for the provider GCP (Cloud SQL)
Args:
vm: The VM to be used as the test client
"""
# TODO: Refactor the GCP Cloud SQL instance creation and deletion logic out
# to a new class called GCPCloudSQLInstance that Inherits from
# perfkitbenchmarker.resource.BaseResource.
logging.info('Preparing MySQL Service benchmarks for Google Cloud SQL.')
vm.db_instance_name = 'pkb%s' % FLAGS.run_uri
db_tier = 'db-n1-standard-%s' % FLAGS.mysql_svc_db_instance_cores
# Currently, we create DB instance in the same zone as the test VM.
db_instance_zone = vm.zone
# Currently GCP REQUIRES you to connect to the DB instance via external IP
# (i.e., using external IPs of the DB instance AND the VM instance).
authorized_network = '%s/32' % vm.ip_address
create_db_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'create', vm.db_instance_name,
'--quiet',
'--format=json',
'--async',
'--activation-policy=ALWAYS',
'--assign-ip',
'--authorized-networks=%s' % authorized_network,
'--backup-start-time=%s' % DEFAULT_BACKUP_START_TIME,
'--enable-bin-log',
'--tier=%s' % db_tier,
'--gce-zone=%s' % db_instance_zone,
'--database-version=%s' % GCP_MY_SQL_VERSION,
'--pricing-plan=%s' % GCP_PRICING_PLAN]
stdout, _, _ = vm_util.IssueCommand(create_db_cmd)
response = json.loads(stdout)
if response['operation'] is None or response['operationType'] != 'CREATE':
raise DBStatusQueryError('Invalid operation or unrecognized '
'operationType in DB creation response. '
' stdout is %s' % stdout)
status_query_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'describe', vm.db_instance_name,
'--format', 'json']
stdout, _, _ = vm_util.IssueCommand(status_query_cmd)
response = json.loads(stdout)
query_count = 1
while True:
state = response['state']
if state is None:
raise ValueError('Cannot parse response from status query command. '
'The state is missing. stdout is %s' % stdout)
if state == 'RUNNABLE':
break
else:
if query_count > DB_STATUS_QUERY_LIMIT:
raise DBStatusQueryError('DB creation timed-out, we have '
'waited at least %s * %s seconds.' % (
DB_STATUS_QUERY_INTERVAL,
DB_STATUS_QUERY_LIMIT))
logging.info('Querying db creation status, current state is %s, query '
'count is %d', state, query_count)
time.sleep(DB_STATUS_QUERY_INTERVAL)
stdout, _, _ = vm_util.IssueCommand(status_query_cmd)
response = json.loads(stdout)
query_count += 1
logging.info('Successfully created the DB instance. Complete response is '
'%s', response)
vm.db_instance_address = response['ipAddresses'][0]['ipAddress']
logging.info('DB IP address is: %s', vm.db_instance_address)
# Set the root password to a common one that can be referred to in common
# code across providers.
vm.db_instance_master_user = MYSQL_ROOT_USER
vm.db_instance_master_password = _GenerateRandomPassword()
set_password_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'set-root-password',
vm.db_instance_name,
'--password', vm.db_instance_master_password]
stdout, stderr, _ = vm_util.IssueCommand(set_password_cmd)
logging.info('Set root password completed. Stdout:\n%s\nStderr:\n%s',
stdout, stderr)
def Cleanup(self, vm):
if hasattr(vm, 'db_instance_name'):
delete_db_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'delete', vm.db_instance_name,
'--quiet']
stdout, stderr, status = vm_util.IssueCommand(delete_db_cmd)
logging.info('DB cleanup command issued, stdout is %s, stderr is %s '
'status is %s', stdout, stderr, status)
else:
logging.info('db_instance_name does not exist, no need to cleanup.')
MYSQL_SERVICE_BENCHMARK_DICTIONARY = {
benchmark_spec_class.GCP: GoogleCloudSQLBenchmark(),
benchmark_spec_class.AWS: RDSMySQLBenchmark()}
def Prepare(benchmark_spec):
"""Prepare the MySQL DB Instances, configures it.
Prepare the client test VM, installs SysBench, configures it.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
# We would like to always cleanup server side states.
# If we don't set this, our cleanup function will only be called when the VM
# is static VM, but we have server side states to cleanup regardless of the
# VM type.
benchmark_spec.always_call_cleanup = True
vms = benchmark_spec.vms
# Setup common test tools required on the client VM
vms[0].Install('sysbench05plus')
# Prepare service specific states (create DB instance, configure it, etc)
MYSQL_SERVICE_BENCHMARK_DICTIONARY[FLAGS.cloud].Prepare(vms[0])
def Run(benchmark_spec):
"""Run the MySQL Service benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Results.
"""
logging.info('Start benchmarking MySQL Service, '
'Cloud Provider is %s.', FLAGS.cloud)
vms = benchmark_spec.vms
metadata = {
'mysql_svc_oltp_tables_count': FLAGS.mysql_svc_oltp_tables_count,
'mysql_svc_oltp_table_size': FLAGS.mysql_svc_oltp_table_size,
'mysql_svc_db_instance_cores': FLAGS.mysql_svc_db_instance_cores,
'sysbench_warm_up_seconds': FLAGS.sysbench_warmup_seconds,
'sysbench_run_seconds': FLAGS.sysbench_run_seconds,
'sysbench_thread_count': FLAGS.sysbench_thread_count,
'sysbench_latency_percentile': FLAGS.sysbench_latency_percentile,
'sysbench_report_interval': FLAGS.sysbench_report_interval
}
# The run phase is common across providers. The VMs[0] object contains all
# information and states necessary to carry out the run.
results = _RunSysbench(vms[0], metadata)
print results
return results
def Cleanup(benchmark_spec):
"""Clean up MySQL Service benchmark related states on server and client.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
MYSQL_SERVICE_BENCHMARK_DICTIONARY[FLAGS.cloud].Cleanup(vms[0])
|
mateusz-blaszkowski/PerfKitBenchmarker
|
perfkitbenchmarker/linux_benchmarks/mysql_service_benchmark.py
|
Python
|
apache-2.0
| 33,031
|
from types import TupleType
from fontTools.misc.textTools import safeEval
def buildConverters(tableSpec, tableNamespace):
"""Given a table spec from otData.py, build a converter object for each
field of the table. This is called for each table in otData.py, and
the results are assigned to the corresponding class in otTables.py."""
converters = []
convertersByName = {}
for tp, name, repeat, repeatOffset, descr in tableSpec:
if name.startswith("ValueFormat"):
assert tp == "uint16"
converterClass = ValueFormat
elif name == "DeltaValue":
assert tp == "uint16"
converterClass = DeltaValue
elif name.endswith("Count"):
assert tp == "uint16"
converterClass = Count
elif name == "SubTable":
converterClass = SubTable
elif name == "ExtSubTable":
converterClass = ExtSubTable
else:
converterClass = converterMapping[tp]
tableClass = tableNamespace.get(name)
conv = converterClass(name, repeat, repeatOffset, tableClass)
if name in ["SubTable", "ExtSubTable"]:
conv.lookupTypes = tableNamespace['lookupTypes']
# also create reverse mapping
for t in conv.lookupTypes.values():
for cls in t.values():
convertersByName[cls.__name__] = Table(name, repeat, repeatOffset, cls)
converters.append(conv)
assert not convertersByName.has_key(name)
convertersByName[name] = conv
return converters, convertersByName
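# Illustrative example (hypothetical spec row): given an otData.py entry such as
#   ("uint16", "GlyphCount", None, None, "Number of glyphs"),
# the loop above picks the Count converter (name ends with "Count"), while a
# row typed "Offset" would map to the Table converter via converterMapping.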
class BaseConverter:
"""Base class for converter objects. Apart from the constructor, this
is an abstract class."""
def __init__(self, name, repeat, repeatOffset, tableClass):
self.name = name
self.repeat = repeat
self.repeatOffset = repeatOffset
self.tableClass = tableClass
self.isCount = name.endswith("Count")
def read(self, reader, font, tableStack):
"""Read a value from the reader."""
raise NotImplementedError, self
def write(self, writer, font, tableStack, value, repeatIndex=None):
"""Write a value to the writer."""
raise NotImplementedError, self
def xmlRead(self, attrs, content, font):
"""Read a value from XML."""
raise NotImplementedError, self
def xmlWrite(self, xmlWriter, font, value, name, attrs):
"""Write a value to XML."""
raise NotImplementedError, self
class SimpleValue(BaseConverter):
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.simpletag(name, attrs + [("value", value)])
xmlWriter.newline()
def xmlRead(self, attrs, content, font):
return attrs["value"]
class IntValue(SimpleValue):
def xmlRead(self, attrs, content, font):
return int(attrs["value"])
class Long(IntValue):
def read(self, reader, font, tableStack):
return reader.readLong()
def write(self, writer, font, tableStack, value, repeatIndex=None):
writer.writeLong(value)
class Fixed(IntValue):
def read(self, reader, font, tableStack):
return float(reader.readLong()) / 0x10000
def write(self, writer, font, tableStack, value, repeatIndex=None):
writer.writeLong(int(round(value * 0x10000)))
def xmlRead(self, attrs, content, font):
return float(attrs["value"])
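# (Illustrative: the 16.16 fixed-point value 0x00018000 reads back as
#  98304 / 0x10000 = 1.5, and writing 1.5 emits 0x00018000 again.)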
class Short(IntValue):
def read(self, reader, font, tableStack):
return reader.readShort()
def write(self, writer, font, tableStack, value, repeatIndex=None):
writer.writeShort(value)
class UShort(IntValue):
def read(self, reader, font, tableStack):
return reader.readUShort()
def write(self, writer, font, tableStack, value, repeatIndex=None):
writer.writeUShort(value)
class Count(Short):
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.comment("%s=%s" % (name, value))
xmlWriter.newline()
class Tag(SimpleValue):
def read(self, reader, font, tableStack):
return reader.readTag()
def write(self, writer, font, tableStack, value, repeatIndex=None):
writer.writeTag(value)
class GlyphID(SimpleValue):
def read(self, reader, font, tableStack):
value = reader.readUShort()
value = font.getGlyphName(value)
return value
def write(self, writer, font, tableStack, value, repeatIndex=None):
value = font.getGlyphID(value)
writer.writeUShort(value)
class Struct(BaseConverter):
def read(self, reader, font, tableStack):
table = self.tableClass()
table.decompile(reader, font, tableStack)
return table
def write(self, writer, font, tableStack, value, repeatIndex=None):
value.compile(writer, font, tableStack)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
if value is None:
pass # NULL table, ignore
else:
value.toXML(xmlWriter, font, attrs)
def xmlRead(self, attrs, content, font):
table = self.tableClass()
Format = attrs.get("Format")
if Format is not None:
table.Format = int(Format)
for element in content:
if type(element) == TupleType:
name, attrs, content = element
table.fromXML((name, attrs, content), font)
else:
pass
return table
class Table(Struct):
def read(self, reader, font, tableStack):
offset = reader.readUShort()
if offset == 0:
return None
if offset <= 3:
# XXX hack to work around buggy pala.ttf
print "*** Warning: offset is not 0, yet suspiciously low (%s). table: %s" \
% (offset, self.tableClass.__name__)
return None
subReader = reader.getSubReader(offset)
table = self.tableClass()
table.decompile(subReader, font, tableStack)
return table
def write(self, writer, font, tableStack, value, repeatIndex=None):
if value is None:
writer.writeUShort(0)
else:
subWriter = writer.getSubWriter()
subWriter.name = self.name
if repeatIndex is not None:
subWriter.repeatIndex = repeatIndex
value.preCompile()
writer.writeSubTable(subWriter)
value.compile(subWriter, font, tableStack)
class SubTable(Table):
def getConverter(self, tableType, lookupType):
lookupTypes = self.lookupTypes[tableType]
tableClass = lookupTypes[lookupType]
return SubTable(self.name, self.repeat, self.repeatOffset, tableClass)
class ExtSubTable(Table):
def getConverter(self, tableType, lookupType):
lookupTypes = self.lookupTypes[tableType]
tableClass = lookupTypes[lookupType]
return ExtSubTable(self.name, self.repeat, self.repeatOffset, tableClass)
def read(self, reader, font, tableStack):
offset = reader.readULong()
if offset == 0:
return None
subReader = reader.getSubReader(offset)
table = self.tableClass()
table.reader = subReader
table.font = font
table.compileStatus = 1
table.start = table.reader.offset
return table
def write(self, writer, font, tableStack, value, repeatIndex=None):
writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer.
if value is None:
writer.writeULong(0)
else:
# If the subtable has not yet been decompiled, we need to do so.
if value.compileStatus == 1:
value.decompile(value.reader, value.font, tableStack)
subWriter = writer.getSubWriter()
subWriter.name = self.name
writer.writeSubTable(subWriter)
# If the subtable has been sorted and we can just write the original
# data, then do so.
if value.compileStatus == 3:
data = value.reader.data[value.start:value.end]
subWriter.writeData(data)
else:
value.compile(subWriter, font, tableStack)
class ValueFormat(IntValue):
def __init__(self, name, repeat, repeatOffset, tableClass):
BaseConverter.__init__(self, name, repeat, repeatOffset, tableClass)
self.which = name[-1] == "2"
def read(self, reader, font, tableStack):
format = reader.readUShort()
reader.setValueFormat(format, self.which)
return format
def write(self, writer, font, tableStack, format, repeatIndex=None):
writer.writeUShort(format)
writer.setValueFormat(format, self.which)
class ValueRecord(ValueFormat):
def read(self, reader, font, tableStack):
return reader.readValueRecord(font, self.which)
def write(self, writer, font, tableStack, value, repeatIndex=None):
writer.writeValueRecord(value, font, self.which)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
if value is None:
pass # NULL table, ignore
else:
value.toXML(xmlWriter, font, self.name, attrs)
def xmlRead(self, attrs, content, font):
from otBase import ValueRecord
value = ValueRecord()
value.fromXML((None, attrs, content), font)
return value
class DeltaValue(BaseConverter):
def read(self, reader, font, tableStack):
table = tableStack.getTop()
StartSize = table["StartSize"]
EndSize = table["EndSize"]
DeltaFormat = table["DeltaFormat"]
assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
nItems = EndSize - StartSize + 1
nBits = 1 << DeltaFormat
minusOffset = 1 << nBits
mask = (1 << nBits) - 1
signMask = 1 << (nBits - 1)
DeltaValue = []
tmp, shift = 0, 0
for i in range(nItems):
if shift == 0:
tmp, shift = reader.readUShort(), 16
shift = shift - nBits
value = (tmp >> shift) & mask
if value & signMask:
value = value - minusOffset
DeltaValue.append(value)
return DeltaValue
def write(self, writer, font, tableStack, value, repeatIndex=None):
table = tableStack.getTop()
StartSize = table["StartSize"]
EndSize = table["EndSize"]
DeltaFormat = table["DeltaFormat"]
DeltaValue = table["DeltaValue"]
assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
nItems = EndSize - StartSize + 1
nBits = 1 << DeltaFormat
assert len(DeltaValue) == nItems
mask = (1 << nBits) - 1
tmp, shift = 0, 16
for value in DeltaValue:
shift = shift - nBits
tmp = tmp | ((value & mask) << shift)
if shift == 0:
writer.writeUShort(tmp)
tmp, shift = 0, 16
if shift <> 16:
writer.writeUShort(tmp)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
# XXX this could do with a nicer format
xmlWriter.simpletag(name, attrs + [("value", value)])
xmlWriter.newline()
def xmlRead(self, attrs, content, font):
return safeEval(attrs["value"])
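# Worked example (illustrative): with DeltaFormat=2 each delta occupies
# 1 << 2 = 4 bits, so the deltas [1, -2, 0, 3] pack into one uint16 as
# 0x1E03 (1 -> 0x1, -2 -> 0xE via two's complement in 4 bits, 0 -> 0x0,
# 3 -> 0x3), which read() unpacks back to [1, -2, 0, 3].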
converterMapping = {
# type class
"int16": Short,
"uint16": UShort,
"ULONG": Long,
"Fixed": Fixed,
"Tag": Tag,
"GlyphID": GlyphID,
"struct": Struct,
"Offset": Table,
"LOffset": ExtSubTable,
"ValueRecord": ValueRecord,
}
# equivalents:
converterMapping["USHORT"] = converterMapping["uint16"]
converterMapping["fixed32"] = converterMapping["Fixed"]
|
olivierdalang/stdm
|
third_party/FontTools/fontTools/ttLib/tables/otConverters.py
|
Python
|
gpl-2.0
| 10,542
|
import numpy as np
from bokeh.sampledata.stocks import AAPL, FB, GOOG, IBM, MSFT
from bokeh.plotting import *
output_server('stocks')
hold()
figure(x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
line(np.array(AAPL['date'], 'M64'), AAPL['adj_close'], color='#A6CEE3', legend='AAPL')
line(np.array(FB['date'], 'M64'), FB['adj_close'], color='#1F78B4', legend='FB')
line(np.array(GOOG['date'], 'M64'), GOOG['adj_close'], color='#B2DF8A', legend='GOOG')
line(np.array(IBM['date'], 'M64'), IBM['adj_close'], color='#33A02C', legend='IBM')
line(np.array(MSFT['date'], 'M64'), MSFT['adj_close'], color='#FB9A99', legend='MSFT')
curplot().title = "Stock Closing Prices"
grid().grid_line_alpha=0.3
aapl = np.array(AAPL['adj_close'])
aapl_dates = np.array(AAPL['date'], dtype=np.datetime64)
window_size = 30
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')
figure(x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
scatter(aapl_dates, aapl, size=4, color='#A6CEE3', legend='close')
line(aapl_dates, aapl_avg, color='red', legend='avg')
curplot().title = "AAPL One-Month Average"
grid().grid_line_alpha=0.3
show() # open a browser
|
sahat/bokeh
|
examples/plotting/server/stocks.py
|
Python
|
bsd-3-clause
| 1,233
|
import os
from fabric.decorators import task
from fabric.api import local, run, cd, env, prefix, hide
from fabric.colors import cyan, red, green, yellow
import app
import git
import virtualenv
@task
def init():
"""Execute init tasks for all components (virtualenv, pip)."""
print(yellow("# Setting up development environment...\n", True))
virtualenv.init()
virtualenv.update()
print(green("\n# DONE.", True))
print(green("Type ") + green("activate", True) + green(" to enable your dev virtual environment."))
@task
def update():
"""Update virtual env with requirements packages."""
virtualenv.update()
@task
def dev():
"""Setting up Development mode."""
print(yellow("# Setting up development environment...\n", True))
virtualenv.init()
virtualenv.update()
print(green("\n# DONE.", True))
print(green("Type ") + green("activate", True) + green(" to enable your dev virtual environment."))
@task
def clean():
"""Clean .pyc files"""
app.clean()
|
ronhanson/python-tbx
|
fabfile/__init__.py
|
Python
|
mit
| 1,018
|
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
import sale_quotation
import crm
import wizard
|
Elico-Corp/openerp-7.0
|
sale_quotation/__init__.py
|
Python
|
agpl-3.0
| 190
|
from fabric.api import env, local, require
project_name = 'eraldoenergy'
def backup():
"""fab [environment] backup"""
require('environment')
if env.environment == "development":
local('pg_dump -Fc {project} > backups/{project}-{environment}_`date +%Y-%m-%d_%H%M%S`.dump'.format(
project=project_name, environment=env.environment))
elif env.environment in ['staging', 'production']:
local(
'curl -o backups/{project}-{environment}_`date +%Y-%m-%d_%H%M%S`.dump `heroku pg:backups public-url --app {app}`'.format(
project=project_name, environment=env.environment, app=env.app))
def deploy():
"""fab [environment] deploy"""
require('environment')
maintenance_on()
push()
migrate()
maintenance_off()
ps()
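# Example invocation (illustrative): "fab staging deploy" enables maintenance
# mode on the staging Heroku app, pushes the staging branch, runs migrations,
# disables maintenance mode again and prints the dyno status.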
def maintenance_on():
"""fab [environment] maintenance_on"""
require('environment')
local('heroku maintenance:on --app %s' % env.app)
def maintenance_off():
"""fab [environment] maintenance_off"""
require('environment')
local('heroku maintenance:off --app %s' % env.app)
def push():
"""fab [environment] push"""
require('environment')
local('git push origin {branch}'.format(branch=env.branch))
def migrate(app=None):
"""fab [environment] migrate"""
require('environment')
if env.environment == "development":
if app is not None:
local('python manage.py migrate %s' % app)
else:
local('python manage.py migrate')
else:
if app is not None:
local('heroku run python manage.py migrate %s --app %s' % (app, env.app))
else:
local('heroku run python manage.py migrate --app %s' % env.app)
def ps():
"""fab [environment] ps"""
require('environment')
local('heroku ps --app %s' % env.app)
def open():
"""fab [environment] open"""
require('environment')
local('heroku open --app %s' % env.app)
def development():
"""fab development [command]"""
env.environment = 'development'
env.app = 'local'
env.branch = 'development'
def staging():
"""fab staging [command]"""
env.environment = 'staging'
env.app = 'eraldoenergy-staging'
env.branch = 'staging'
def production():
"""fab production [command]"""
env.environment = 'production'
env.app = 'eraldoenergy'
env.branch = 'master'
|
Eraldo/eraldoenergy
|
fabfile.py
|
Python
|
bsd-3-clause
| 2,395
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Molcas(CMakePackage):
"""Molcas is an ab initio quantum chemistry software package
developed by scientists to be used by scientists.
Please set the path to the license file with the following command:
export MOLCAS_LICENSE=/path/to/molcas/license/"""
homepage = "http://www.molcas.org/"
url = "file://{0}/molcas8.2.tar.gz".format(os.getcwd())
version('8.2', '25b5fb8e1338b458a3eaea0b3d3b5e58')
# Licensing
license_required = True
license_vars = ['MOLCAS_LICENSE']
depends_on('openmpi')
depends_on('openblas')
depends_on('hdf5')
patch('install_driver.patch')
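# Illustrative usage (assumes the MOLCAS 8.2 tarball sits in the current
# working directory, as the file:// url above requires):
#   export MOLCAS_LICENSE=/path/to/molcas/license/
#   spack install molcas@8.2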
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/molcas/package.py
|
Python
|
lgpl-2.1
| 1,895
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import ddt
import mock
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as test_utils
from cinder.volume.drivers import remotefs
from cinder.volume.drivers.windows import smbfs
@ddt.ddt
class WindowsSmbFsTestCase(test.TestCase):
_FAKE_SHARE = '//1.2.3.4/share1'
_FAKE_SHARE_HASH = 'db0bf952c1734092b83e8990bd321131'
_FAKE_MNT_BASE = r'c:\openstack\mnt'
_FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, _FAKE_SHARE_HASH)
_FAKE_VOLUME_ID = '4f711859-4928-4cb7-801a-a50c37ceaccc'
_FAKE_VOLUME_NAME = 'volume-%s.vhdx' % _FAKE_VOLUME_ID
_FAKE_SNAPSHOT_ID = '50811859-4928-4cb7-801a-a50c37ceacba'
_FAKE_SNAPSHOT_NAME = 'volume-%s-%s.vhdx' % (_FAKE_VOLUME_ID,
_FAKE_SNAPSHOT_ID)
_FAKE_SNAPSHOT_PATH = os.path.join(_FAKE_MNT_POINT,
_FAKE_SNAPSHOT_NAME)
_FAKE_VOLUME_SIZE = 1
_FAKE_TOTAL_SIZE = 2048
_FAKE_TOTAL_AVAILABLE = 1024
_FAKE_TOTAL_ALLOCATED = 1024
_FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
_FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT,
_FAKE_VOLUME_NAME)
@mock.patch.object(smbfs, 'utilsfactory')
@mock.patch.object(smbfs, 'remotefs_brick')
def setUp(self, mock_remotefs, mock_utilsfactory):
super(WindowsSmbFsTestCase, self).setUp()
self.context = context.get_admin_context()
self._FAKE_SMBFS_CONFIG = mock.MagicMock(
smbfs_shares_config=mock.sentinel.share_config_file,
smbfs_default_volume_format='vhdx',
nas_volume_prov_type='thin')
self._smbfs_driver = smbfs.WindowsSmbfsDriver(
configuration=self._FAKE_SMBFS_CONFIG)
self._smbfs_driver._delete = mock.Mock()
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver.base = self._FAKE_MNT_BASE
self._diskutils = self._smbfs_driver._diskutils
self._vhdutils = self._smbfs_driver._vhdutils
self.volume = self._simple_volume()
self.snapshot = self._simple_snapshot(volume=self.volume)
def _simple_volume(self, **kwargs):
updates = {'id': self._FAKE_VOLUME_ID,
'size': self._FAKE_VOLUME_SIZE,
'provider_location': self._FAKE_SHARE}
updates.update(kwargs)
ctxt = context.get_admin_context()
volume = test_utils.create_volume(ctxt, **updates)
return volume
def _simple_snapshot(self, **kwargs):
volume = kwargs.pop('volume', None) or self._simple_volume()
ctxt = context.get_admin_context()
updates = {'id': self._FAKE_SNAPSHOT_ID,
'volume_id': volume.id}
updates.update(kwargs)
snapshot = test_utils.create_snapshot(ctxt, **updates)
return snapshot
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_check_os_platform')
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, 'do_setup')
@mock.patch('os.path.exists')
@mock.patch('os.path.isabs')
@mock.patch.object(image_utils, 'check_qemu_img_version')
def _test_setup(self, mock_check_qemu_img_version,
mock_is_abs, mock_exists,
mock_remotefs_do_setup,
mock_check_os_platform,
config, share_config_exists=True):
mock_exists.return_value = share_config_exists
fake_ensure_mounted = mock.MagicMock()
self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
self._smbfs_driver._setup_pool_mappings = mock.Mock()
self._smbfs_driver.configuration = config
if not (config.smbfs_shares_config and share_config_exists):
self.assertRaises(exception.SmbfsException,
self._smbfs_driver.do_setup,
mock.sentinel.context)
else:
self._smbfs_driver.do_setup(mock.sentinel.context)
mock_check_qemu_img_version.assert_called_once_with(
self._smbfs_driver._MINIMUM_QEMU_IMG_VERSION)
mock_is_abs.assert_called_once_with(self._smbfs_driver.base)
self.assertEqual({}, self._smbfs_driver.shares)
fake_ensure_mounted.assert_called_once_with()
self._smbfs_driver._setup_pool_mappings.assert_called_once_with()
self.assertTrue(self._smbfs_driver._thin_provisioning_support)
mock_check_os_platform.assert_called_once_with()
def test_setup_pools(self):
pool_mappings = {
'//ip/share0': 'pool0',
'//ip/share1': 'pool1',
}
self._smbfs_driver.configuration.smbfs_pool_mappings = pool_mappings
self._smbfs_driver.shares = {
'//ip/share0': None,
'//ip/share1': None,
'//ip/share2': None
}
expected_pool_mappings = pool_mappings.copy()
expected_pool_mappings['//ip/share2'] = 'share2'
self._smbfs_driver._setup_pool_mappings()
self.assertEqual(expected_pool_mappings,
self._smbfs_driver._pool_mappings)
def test_setup_pool_duplicates(self):
self._smbfs_driver.configuration.smbfs_pool_mappings = {
'share0': 'pool0',
'share1': 'pool0'
}
self.assertRaises(exception.SmbfsException,
self._smbfs_driver._setup_pool_mappings)
def test_initialize_connection(self):
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
self._smbfs_driver._get_mount_point_base = mock.Mock(
return_value=self._FAKE_MNT_BASE)
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver.get_volume_format = mock.Mock(
return_value=mock.sentinel.format)
fake_data = {'export': self._FAKE_SHARE,
'format': mock.sentinel.format,
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS}
expected = {
'driver_volume_type': 'smbfs',
'data': fake_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection(
self.volume, None)
self.assertEqual(expected, ret_val)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_snapshot_backing_file')
@mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_mount_point_base')
def test_initialize_connection_snapshot(self, mock_get_mount_base,
mock_get_volume_format,
mock_get_snap_by_backing_file):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
mock_get_snap_by_backing_file.return_value = self._FAKE_VOLUME_NAME
mock_get_volume_format.return_value = 'vhdx'
mock_get_mount_base.return_value = self._FAKE_MNT_BASE
exp_data = {'export': self._FAKE_SHARE,
'format': 'vhdx',
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS,
'access_mode': 'ro'}
expected = {
'driver_volume_type': 'smbfs',
'data': exp_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection_snapshot(
self.snapshot, mock.sentinel.connector)
self.assertEqual(expected, ret_val)
mock_get_snap_by_backing_file.assert_called_once_with(self.snapshot)
mock_get_volume_format.assert_called_once_with(self.snapshot.volume)
mock_get_mount_base.assert_called_once_with()
def test_setup(self):
self._test_setup(config=self._FAKE_SMBFS_CONFIG)
def test_setup_missing_shares_config_option(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_shares_config = None
self._test_setup(config=fake_config,
share_config_exists=False)
def test_setup_missing_shares_config_file(self):
self._test_setup(config=self._FAKE_SMBFS_CONFIG,
share_config_exists=False)
@mock.patch.object(smbfs, 'context')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_pool_name_from_share')
def test_get_total_allocated(self, mock_get_pool_name, mock_ctxt):
fake_pool_name = 'pool0'
fake_host_name = 'fake_host@fake_backend'
fake_vol_sz_sum = 5
mock_db = mock.Mock()
mock_db.volume_data_get_for_host.return_value = [
mock.sentinel.vol_count, fake_vol_sz_sum]
self._smbfs_driver.host = fake_host_name
self._smbfs_driver.db = mock_db
mock_get_pool_name.return_value = fake_pool_name
allocated = self._smbfs_driver._get_total_allocated(
mock.sentinel.share)
self.assertEqual(fake_vol_sz_sum << 30,
allocated)
mock_get_pool_name.assert_called_once_with(mock.sentinel.share)
mock_db.volume_data_get_for_host.assert_called_once_with(
context=mock_ctxt.get_admin_context.return_value,
host='fake_host@fake_backend#pool0')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_local_volume_path_template')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path')
@mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format')
def _test_get_volume_path(self, mock_get_volume_format, mock_lookup_volume,
mock_get_path_template, volume_exists=True):
drv = self._smbfs_driver
(mock_get_path_template.return_value,
ext) = os.path.splitext(self._FAKE_VOLUME_PATH)
volume_format = ext.strip('.')
mock_lookup_volume.return_value = (
self._FAKE_VOLUME_PATH if volume_exists else None)
mock_get_volume_format.return_value = volume_format
ret_val = drv.local_path(self.volume)
if volume_exists:
self.assertFalse(mock_get_volume_format.called)
else:
mock_get_volume_format.assert_called_once_with(self.volume)
self.assertEqual(self._FAKE_VOLUME_PATH, ret_val)
def test_get_existing_volume_path(self):
self._test_get_volume_path()
def test_get_new_volume_path(self):
self._test_get_volume_path(volume_exists=False)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir')
def test_get_local_volume_path_template(self, mock_get_local_dir):
mock_get_local_dir.return_value = self._FAKE_MNT_POINT
ret_val = self._smbfs_driver._get_local_volume_path_template(
self.volume)
exp_template = os.path.splitext(self._FAKE_VOLUME_PATH)[0]
self.assertEqual(exp_template, ret_val)
@mock.patch('os.path.exists')
def test_lookup_local_volume_path(self, mock_exists):
expected_path = self._FAKE_VOLUME_PATH + '.vhdx'
mock_exists.side_effect = lambda x: x == expected_path
ret_val = self._smbfs_driver._lookup_local_volume_path(
self._FAKE_VOLUME_PATH)
extensions = [
".%s" % ext
for ext in self._smbfs_driver._VALID_IMAGE_EXTENSIONS]
possible_paths = [self._FAKE_VOLUME_PATH + ext
for ext in extensions]
mock_exists.assert_has_calls(
[mock.call(path) for path in possible_paths])
self.assertEqual(expected_path, ret_val)
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_local_volume_path_template')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_volume_format_spec')
def _test_get_volume_format(self, mock_get_format_spec,
mock_lookup_volume, mock_get_path_template,
qemu_format=False, volume_format='vhdx',
expected_vol_fmt=None,
volume_exists=True):
expected_vol_fmt = expected_vol_fmt or volume_format
vol_path = '%s.%s' % (os.path.splitext(self._FAKE_VOLUME_PATH)[0],
volume_format)
mock_get_path_template.return_value = vol_path
mock_lookup_volume.return_value = (
vol_path if volume_exists else None)
mock_get_format_spec.return_value = volume_format
supported_fmts = self._smbfs_driver._SUPPORTED_IMAGE_FORMATS
if volume_format.lower() not in supported_fmts:
self.assertRaises(exception.SmbfsException,
self._smbfs_driver.get_volume_format,
self.volume,
qemu_format)
else:
ret_val = self._smbfs_driver.get_volume_format(self.volume,
qemu_format)
if volume_exists:
self.assertFalse(mock_get_format_spec.called)
else:
mock_get_format_spec.assert_called_once_with(self.volume)
self.assertEqual(expected_vol_fmt, ret_val)
def test_get_volume_format_invalid_extension(self):
self._test_get_volume_format(volume_format='fake')
def test_get_existing_vhdx_volume_format(self):
self._test_get_volume_format()
def test_get_new_vhd_volume_format(self):
fmt = 'vhd'
self._test_get_volume_format(volume_format=fmt,
volume_exists=False,
expected_vol_fmt=fmt)
def test_get_new_vhd_legacy_volume_format(self):
img_fmt = 'vhd'
expected_fmt = 'vpc'
self._test_get_volume_format(volume_format=img_fmt,
volume_exists=False,
qemu_format=True,
expected_vol_fmt=expected_fmt)
@ddt.data([False, False],
[True, True],
[False, True])
@ddt.unpack
def test_get_volume_format_spec(self,
volume_meta_contains_fmt,
volume_type_contains_fmt):
self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_vol_meta_fmt = 'vhd'
fake_vol_type_fmt = 'vhdx'
volume_metadata = {}
volume_type_extra_specs = {}
if volume_meta_contains_fmt:
volume_metadata['volume_format'] = fake_vol_meta_fmt
elif volume_type_contains_fmt:
volume_type_extra_specs['smbfs:volume_format'] = fake_vol_type_fmt
volume_type = fake_volume.fake_volume_type_obj(self.context)
volume = fake_volume.fake_volume_obj(self.context)
# Optional arguments are not set in _from_db_object,
# so have to set explicitly here
volume.volume_type = volume_type
volume.metadata = volume_metadata
# Same for extra_specs and VolumeType
volume_type.extra_specs = volume_type_extra_specs
resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume)
if volume_meta_contains_fmt:
expected_fmt = fake_vol_meta_fmt
elif volume_type_contains_fmt:
expected_fmt = fake_vol_type_fmt
else:
expected_fmt = self._FAKE_SMBFS_CONFIG.smbfs_default_volume_format
self.assertEqual(expected_fmt, resulted_fmt)
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed,
'create_volume')
def test_create_volume_base(self, mock_create_volume):
self._smbfs_driver.create_volume(self.volume)
mock_create_volume.assert_called_once_with(self.volume)
@mock.patch('os.path.exists')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def _test_create_volume(self, mock_get_vhd_type, mock_exists,
volume_exists=False, volume_format='vhdx'):
mock_exists.return_value = volume_exists
self._smbfs_driver.create_vhd = mock.MagicMock()
fake_create = self._smbfs_driver._vhdutils.create_vhd
self._smbfs_driver.get_volume_format = mock.Mock(
return_value=volume_format)
if volume_exists or volume_format not in ('vhd', 'vhdx'):
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._do_create_volume,
self.volume)
else:
fake_vol_path = self._FAKE_VOLUME_PATH
self._smbfs_driver._do_create_volume(self.volume)
fake_create.assert_called_once_with(
fake_vol_path, mock_get_vhd_type.return_value,
max_internal_size=self.volume.size << 30)
def test_create_volume(self):
self._test_create_volume()
def test_create_existing_volume(self):
self._test_create_volume(True)
def test_create_volume_invalid_volume(self):
self._test_create_volume(volume_format="qcow")
def test_delete_volume(self):
drv = self._smbfs_driver
fake_vol_info = self._FAKE_VOLUME_PATH + '.info'
drv._ensure_share_mounted = mock.MagicMock()
fake_ensure_mounted = drv._ensure_share_mounted
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._delete = mock.Mock()
drv._local_path_volume_info = mock.Mock(
return_value=fake_vol_info)
with mock.patch('os.path.exists', lambda x: True):
drv.delete_volume(self.volume)
fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
drv._delete.assert_any_call(
self._FAKE_VOLUME_PATH)
drv._delete.assert_any_call(fake_vol_info)
def test_ensure_mounted(self):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
self._FAKE_SHARE, self._FAKE_SHARE_OPTS)
def test_get_capacity_info(self):
self._diskutils.get_disk_capacity.return_value = (
self._FAKE_TOTAL_SIZE, self._FAKE_TOTAL_AVAILABLE)
self._smbfs_driver._get_mount_point_for_share = mock.Mock(
return_value=mock.sentinel.mnt_point)
self._smbfs_driver._get_total_allocated = mock.Mock(
return_value=self._FAKE_TOTAL_ALLOCATED)
ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
expected_ret_val = [int(x) for x in [self._FAKE_TOTAL_SIZE,
self._FAKE_TOTAL_AVAILABLE,
self._FAKE_TOTAL_ALLOCATED]]
self.assertEqual(expected_ret_val, ret_val)
self._smbfs_driver._get_mount_point_for_share.assert_called_once_with(
self._FAKE_SHARE)
self._diskutils.get_disk_capacity.assert_called_once_with(
mock.sentinel.mnt_point)
self._smbfs_driver._get_total_allocated.assert_called_once_with(
self._FAKE_SHARE)
def _test_get_img_info(self, backing_file=None):
self._smbfs_driver._vhdutils.get_vhd_parent_path.return_value = (
backing_file)
image_info = self._smbfs_driver._qemu_img_info(self._FAKE_VOLUME_PATH)
self.assertEqual(self._FAKE_VOLUME_NAME,
image_info.image)
backing_file_name = backing_file and os.path.basename(backing_file)
self.assertEqual(backing_file_name, image_info.backing_file)
def test_get_img_info_without_backing_file(self):
self._test_get_img_info()
def test_get_snapshot_info(self):
self._test_get_img_info(self._FAKE_VOLUME_PATH)
@ddt.data('attached', 'detached')
def test_create_snapshot(self, attach_status):
self.snapshot.volume.attach_status = attach_status
self.snapshot.volume.save()
self._smbfs_driver._vhdutils.create_differencing_vhd = (
mock.Mock())
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
fake_create_diff = (
self._smbfs_driver._vhdutils.create_differencing_vhd)
self._smbfs_driver._do_create_snapshot(
self.snapshot,
os.path.basename(self._FAKE_VOLUME_PATH),
self._FAKE_SNAPSHOT_PATH)
if attach_status != 'attached':
fake_create_diff.assert_called_once_with(self._FAKE_SNAPSHOT_PATH,
self._FAKE_VOLUME_PATH)
else:
fake_create_diff.assert_not_called()
self.assertEqual(os.path.basename(self._FAKE_VOLUME_PATH),
self.snapshot.metadata['backing_file'])
# Ensure that the changes have been saved.
self.assertFalse(bool(self.snapshot.obj_what_changed()))
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_check_extend_volume_support')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_local_path_active_image')
def test_extend_volume(self, mock_get_active_img,
mock_check_ext_support):
volume = fake_volume.fake_volume_obj(self.context)
new_size = volume.size + 1
self._smbfs_driver.extend_volume(volume, new_size)
mock_check_ext_support.assert_called_once_with(volume, new_size)
mock_get_active_img.assert_called_once_with(volume)
self._vhdutils.resize_vhd.assert_called_once_with(
mock_get_active_img.return_value,
new_size * units.Gi,
is_file_max_size=False)
@ddt.data({'snapshots_exist': True},
{'vol_fmt': smbfs.WindowsSmbfsDriver._DISK_FORMAT_VHD,
'snapshots_exist': True,
'expected_exc': exception.InvalidVolume})
@ddt.unpack
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'get_volume_format')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_snapshots_exist')
def test_check_extend_support(self, mock_snapshots_exist,
mock_get_volume_format,
vol_fmt=None, snapshots_exist=False,
share_eligible=True,
expected_exc=None):
vol_fmt = vol_fmt or self._smbfs_driver._DISK_FORMAT_VHDX
volume = fake_volume.fake_volume_obj(
self.context, provider_location='fake_provider_location')
new_size = volume.size + 1
mock_snapshots_exist.return_value = snapshots_exist
mock_get_volume_format.return_value = vol_fmt
if expected_exc:
self.assertRaises(expected_exc,
self._smbfs_driver._check_extend_volume_support,
volume, new_size)
else:
self._smbfs_driver._check_extend_volume_support(volume, new_size)
mock_get_volume_format.assert_called_once_with(volume)
mock_snapshots_exist.assert_called_once_with(volume)
@ddt.data({},
{'delete_latest': True},
{'attach_status': 'detached'},
{'snap_info_contains_snap_id': False})
@ddt.unpack
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed,
'_delete_snapshot')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_path_volume_info')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_write_info_file')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_read_info_file')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_nova_assisted_vol_snap_delete')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_snapshot_by_backing_file')
def test_delete_snapshot(self, mock_get_snap_by_backing_file,
mock_nova_assisted_snap_del,
mock_read_info_file, mock_write_info_file,
mock_local_path_volume_info,
mock_get_local_dir,
mock_remotefs_snap_delete,
attach_status='attached',
snap_info_contains_snap_id=True,
delete_latest=False):
self.snapshot.volume.attach_status = attach_status
self.snapshot.metadata['backing_file'] = os.path.basename(
self._FAKE_VOLUME_PATH)
higher_snapshot = self._simple_snapshot(id=None,
volume=self.volume)
fake_snap_file = 'snap_file'
fake_snap_parent_path = os.path.join(self._FAKE_MNT_POINT,
'snap_file_parent')
active_img = 'active_img' if not delete_latest else fake_snap_file
snap_info = dict(active=active_img)
if snap_info_contains_snap_id:
snap_info[self.snapshot.id] = fake_snap_file
mock_get_snap_by_backing_file.return_value = (
higher_snapshot if not delete_latest else None)
mock_info_path = mock_local_path_volume_info.return_value
mock_read_info_file.return_value = snap_info
mock_get_local_dir.return_value = self._FAKE_MNT_POINT
self._vhdutils.get_vhd_parent_path.return_value = (
fake_snap_parent_path)
expected_delete_info = {'file_to_merge': fake_snap_file,
'volume_id': self.snapshot.volume.id}
self._smbfs_driver._delete_snapshot(self.snapshot)
if attach_status != 'attached':
mock_remotefs_snap_delete.assert_called_once_with(self.snapshot)
elif snap_info_contains_snap_id:
mock_local_path_volume_info.assert_called_once_with(
self.snapshot.volume)
mock_read_info_file.assert_called_once_with(
mock_info_path, empty_if_missing=True)
mock_nova_assisted_snap_del.assert_called_once_with(
self.snapshot._context, self.snapshot, expected_delete_info)
exp_merged_img_path = os.path.join(self._FAKE_MNT_POINT,
fake_snap_file)
self._smbfs_driver._delete.assert_called_once_with(
exp_merged_img_path)
if delete_latest:
self._vhdutils.get_vhd_parent_path.assert_called_once_with(
exp_merged_img_path)
exp_active = os.path.basename(fake_snap_parent_path)
else:
exp_active = active_img
self.assertEqual(exp_active, snap_info['active'])
            self.assertNotIn(self.snapshot.id, snap_info)
mock_write_info_file.assert_called_once_with(mock_info_path,
snap_info)
if attach_status != 'attached' or not snap_info_contains_snap_id:
mock_nova_assisted_snap_del.assert_not_called()
mock_write_info_file.assert_not_called()
if not delete_latest and snap_info_contains_snap_id:
self.assertEqual(os.path.basename(self._FAKE_VOLUME_PATH),
higher_snapshot.metadata['backing_file'])
self.assertFalse(bool(higher_snapshot.obj_what_changed()))
@ddt.data(True, False)
def test_get_snapshot_by_backing_file(self, metadata_set):
backing_file = 'fake_backing_file'
if metadata_set:
self.snapshot.metadata['backing_file'] = backing_file
self.snapshot.save()
for idx in range(2):
# We're adding a few other snapshots.
self._simple_snapshot(id=None,
volume=self.volume)
snapshot = self._smbfs_driver._get_snapshot_by_backing_file(
self.volume, backing_file)
if metadata_set:
self.assertEqual(self.snapshot.id, snapshot.id)
else:
self.assertIsNone(snapshot)
@ddt.data(True, False)
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed,
'_get_snapshot_backing_file')
def test_get_snapshot_backing_file_md_set(self, md_set,
remotefs_get_backing_file):
backing_file = 'fake_backing_file'
if md_set:
self.snapshot.metadata['backing_file'] = backing_file
ret_val = self._smbfs_driver._get_snapshot_backing_file(
self.snapshot)
# If the metadata is not set, we expect the super class method to
# be used, which is supposed to query the image.
if md_set:
self.assertEqual(backing_file, ret_val)
else:
self.assertEqual(remotefs_get_backing_file.return_value,
ret_val)
remotefs_get_backing_file.assert_called_once_with(
self.snapshot)
def test_create_volume_from_unavailable_snapshot(self):
self.snapshot.status = fields.SnapshotStatus.ERROR
self.assertRaises(
exception.InvalidSnapshot,
self._smbfs_driver.create_volume_from_snapshot,
self.volume, self.snapshot)
@ddt.data(True, False)
def test_copy_volume_to_image(self, has_parent=False):
drv = self._smbfs_driver
fake_image_meta = {'id': 'fake-image-id'}
fake_img_format = self._smbfs_driver._DISK_FORMAT_VHDX
if has_parent:
fake_volume_path = self._FAKE_SNAPSHOT_PATH
fake_parent_path = self._FAKE_VOLUME_PATH
else:
fake_volume_path = self._FAKE_VOLUME_PATH
fake_parent_path = None
fake_active_image = os.path.basename(fake_volume_path)
drv.get_active_image_from_info = mock.Mock(
return_value=fake_active_image)
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_volume_format = mock.Mock(
return_value=fake_img_format)
drv._vhdutils.get_vhd_parent_path.return_value = (
fake_parent_path)
with mock.patch.object(image_utils, 'upload_volume') as (
fake_upload_volume):
drv.copy_volume_to_image(
mock.sentinel.context, self.volume,
mock.sentinel.image_service, fake_image_meta)
if has_parent:
fake_temp_image_name = '%s.temp_image.%s.%s' % (
self.volume.id,
fake_image_meta['id'],
fake_img_format)
fake_temp_image_path = os.path.join(
self._FAKE_MNT_POINT,
fake_temp_image_name)
fake_active_image_path = os.path.join(
self._FAKE_MNT_POINT,
fake_active_image)
upload_path = fake_temp_image_path
drv._vhdutils.convert_vhd.assert_called_once_with(
fake_active_image_path,
fake_temp_image_path)
drv._delete.assert_called_once_with(
fake_temp_image_path)
else:
upload_path = fake_volume_path
fake_upload_volume.assert_called_once_with(
mock.sentinel.context, mock.sentinel.image_service,
fake_image_meta, upload_path, fake_img_format)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def test_copy_image_to_volume(self, mock_get_vhd_type):
drv = self._smbfs_driver
drv.get_volume_format = mock.Mock(
return_value=mock.sentinel.volume_format)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv.configuration = mock.MagicMock()
drv.configuration.volume_dd_blocksize = mock.sentinel.block_size
with mock.patch.object(image_utils,
'fetch_to_volume_format') as fake_fetch:
drv.copy_image_to_volume(
mock.sentinel.context, self.volume,
mock.sentinel.image_service,
mock.sentinel.image_id)
fake_fetch.assert_called_once_with(
mock.sentinel.context,
mock.sentinel.image_service,
mock.sentinel.image_id,
self._FAKE_VOLUME_PATH, mock.sentinel.volume_format,
mock.sentinel.block_size,
mock_get_vhd_type.return_value)
drv._vhdutils.resize_vhd.assert_called_once_with(
self._FAKE_VOLUME_PATH,
self.volume.size * units.Gi,
is_file_max_size=False)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def test_copy_volume_from_snapshot(self, mock_get_vhd_type):
drv = self._smbfs_driver
drv._get_snapshot_backing_file = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.local_path = mock.Mock(
return_value=mock.sentinel.new_volume_path)
drv._copy_volume_from_snapshot(self.snapshot,
self.volume, self.volume.size)
drv._get_snapshot_backing_file.assert_called_once_with(
self.snapshot)
drv._delete.assert_called_once_with(mock.sentinel.new_volume_path)
drv._vhdutils.convert_vhd.assert_called_once_with(
self._FAKE_VOLUME_PATH,
mock.sentinel.new_volume_path,
vhd_type=mock_get_vhd_type.return_value)
drv._vhdutils.resize_vhd.assert_called_once_with(
mock.sentinel.new_volume_path,
self.volume.size * units.Gi,
is_file_max_size=False)
def test_rebase_img(self):
drv = self._smbfs_driver
drv._rebase_img(
self._FAKE_SNAPSHOT_PATH,
self._FAKE_VOLUME_NAME, 'vhdx')
drv._vhdutils.reconnect_parent_vhd.assert_called_once_with(
self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_PATH)
def test_copy_volume_image(self):
self._smbfs_driver._copy_volume_image(mock.sentinel.src,
mock.sentinel.dest)
self._smbfs_driver._pathutils.copy.assert_called_once_with(
mock.sentinel.src, mock.sentinel.dest)
def test_get_pool_name_from_share(self):
self._smbfs_driver._pool_mappings = {
mock.sentinel.share: mock.sentinel.pool}
pool = self._smbfs_driver._get_pool_name_from_share(
mock.sentinel.share)
self.assertEqual(mock.sentinel.pool, pool)
def test_get_share_from_pool_name(self):
self._smbfs_driver._pool_mappings = {
mock.sentinel.share: mock.sentinel.pool}
share = self._smbfs_driver._get_share_from_pool_name(
mock.sentinel.pool)
self.assertEqual(mock.sentinel.share, share)
    def test_get_share_from_pool_name_exception(self):
self._smbfs_driver._pool_mappings = {}
self.assertRaises(exception.SmbfsException,
self._smbfs_driver._get_share_from_pool_name,
mock.sentinel.pool)
def test_get_vhd_type(self):
drv = self._smbfs_driver
mock_type = drv._get_vhd_type(qemu_subformat=True)
self.assertEqual(mock_type, 'dynamic')
mock_type = drv._get_vhd_type(qemu_subformat=False)
self.assertEqual(mock_type, 3)
self._smbfs_driver.configuration.nas_volume_prov_type = (
'thick')
mock_type = drv._get_vhd_type(qemu_subformat=True)
self.assertEqual(mock_type, 'fixed')
def test_get_managed_vol_expected_path(self):
self._vhdutils.get_vhd_format.return_value = 'vhdx'
vol_location = dict(vol_local_path=mock.sentinel.image_path,
mountpoint=self._FAKE_MNT_POINT)
path = self._smbfs_driver._get_managed_vol_expected_path(
self.volume, vol_location)
self.assertEqual(self._FAKE_VOLUME_PATH, path)
self._vhdutils.get_vhd_format.assert_called_once_with(
mock.sentinel.image_path)
|
phenoxim/cinder
|
cinder/tests/unit/windows/test_smbfs.py
|
Python
|
apache-2.0
| 37,650
|
from pyramid.config import Configurator
from karmaid.redisio import init_redis
from karmaid.resources import StuffResource
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
init_redis(settings, 'redis.')
config = Configurator(settings=settings)
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('top', '/')
config.add_route('button', '/button/1/', factory=StuffResource)
config.add_route('api_karma', '/api/karma', factory=StuffResource)
config.add_route('api_ranking', '/api/ranking')
config.add_route('js_widget', 'widget.js', factory=StuffResource)
config.scan('.views')
return config.make_wsgi_app()
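# Illustrative sketch (not part of the original package): one way to serve the
# app factory above for local experimentation. The Redis settings keys shown
# here (redis.host / redis.port / redis.db) are assumptions inferred only from
# the 'redis.' prefix passed to init_redis; adjust them to whatever
# karmaid.redisio.init_redis actually expects.
if __name__ == "__main__":  # pragma: no cover
    from wsgiref.simple_server import make_server
    example_settings = {
        'redis.host': 'localhost',
        'redis.port': '6379',
        'redis.db': '0',
    }
    application = main({}, **example_settings)
    make_server('0.0.0.0', 6543, application).serve_forever()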
|
hirokiky/karmaid
|
karmaid/__init__.py
|
Python
|
agpl-3.0
| 732
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from .models import Service, Category
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
pass
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
pass
|
Semillas/semillas_backend
|
services/admin.py
|
Python
|
mit
| 379
|
# Copyright 2015 - Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import six
import wsme
from wsme import types as wtypes
from solum.api.controllers.v1.datamodel import types as api_types
class App(api_types.Base):
"""Representation of an App.
The App is the primary resource managed by Solum.
"""
# Inherited fields:
# uri
# uuid
# name is overridden.
# type
# description
# tags
# project_id
# user_id
version = wtypes.wsattr(int)
id = wtypes.text
name = wtypes.text
deleted = bool
languagepack = wtypes.text
stack_id = wtypes.text
ports = api_types.MultiType(api_types.PortType,
[api_types.PortType],
{wtypes.text: api_types.PortType})
source = {wtypes.text: wtypes.text}
workflow_config = {wtypes.text: wtypes.text}
trigger_uuid = wtypes.text
trigger_actions = [wtypes.text]
trigger_uri = wtypes.text
trust_id = wtypes.text
trust_user = wtypes.text
app_url = wtypes.text
status = wtypes.text
repo_token = wtypes.text
created_at = datetime.datetime
raw_content = wtypes.text
scale_config = wtypes.DictType(
wtypes.text,
wtypes.DictType(wtypes.text, wtypes.text))
parameters = wtypes.DictType(
wtypes.text,
wtypes.DictType(wtypes.text,
api_types.MultiType(
wtypes.text,
six.integer_types,
bool,
float)))
def __init__(self, *args, **kwargs):
super(App, self).__init__(*args, **kwargs)
@classmethod
def sample(cls):
return cls(
name="sampleapp",
description="sample app",
)
@classmethod
def from_db_model(cls, m, host_url):
json = m.as_dict()
json['type'] = m.__tablename__
json['uri'] = '%s/v1/apps/%s' % (host_url, m.id)
json['trigger_uri'] = ('%s/v1/triggers/%s' %
(host_url, m.trigger_uuid))
return cls(**(json))
def as_dict(self, db_model):
attrs = [
'name',
'id',
'description',
'languagepack',
'project_id',
'user_id',
'deleted',
'source',
'ports',
'trigger_actions',
'workflow_config',
'stack_id',
'raw_content'
]
base = super(App, self).as_dict(db_model)
if self.parameters is not wsme.Unset:
base.update({'parameters': self.parameters})
for a in attrs:
if getattr(self, a) is wsme.Unset:
continue
if getattr(self, a) is None:
continue
base[a] = getattr(self, a)
return base
|
devdattakulkarni/test-solum
|
solum/api/controllers/v1/datamodel/app.py
|
Python
|
apache-2.0
| 3,434
|
"""
Format and compress XML documents
"""
import getopt
import re
import sys
import xml.parsers.expat
__version__ = "0.2.4"
DEFAULT_BLANKS = False
DEFAULT_COMPRESS = False
DEFAULT_SELFCLOSE = False
DEFAULT_CORRECT = True
DEFAULT_INDENT = 2
DEFAULT_INDENT_CHAR = " "
DEFAULT_INLINE = True
DEFAULT_ENCODING_INPUT = None
DEFAULT_ENCODING_OUTPUT = None
DEFAULT_EOF_NEWLINE = False
class Formatter:
# Use internal encoding:
encoding_internal = None
def __init__(
self,
indent=DEFAULT_INDENT,
preserve=[],
blanks=DEFAULT_BLANKS,
compress=DEFAULT_COMPRESS,
selfclose=DEFAULT_SELFCLOSE,
indent_char=DEFAULT_INDENT_CHAR,
encoding_input=DEFAULT_ENCODING_INPUT,
encoding_output=DEFAULT_ENCODING_OUTPUT,
inline=DEFAULT_INLINE,
correct=DEFAULT_CORRECT,
eof_newline=DEFAULT_EOF_NEWLINE,
):
# Minify the XML document:
self.compress = compress
# Use self-closing tags
self.selfclose = selfclose
# Correct text nodes
self.correct = correct
# Decode the XML document:
self.encoding_input = self.enc_normalize(encoding_input)
        # Encode output by:
self.encoding_output = self.enc_normalize(encoding_output)
# Insert indent = indent*level*indent_char:
self.indent = int(indent)
# Indent by char:
self.indent_char = indent_char
# Format inline objects:
self.inline = inline
        # Don't compress these elements or their descendants:
self.preserve = preserve
# Preserve blanks lines (collapse multiple into one)
self.blanks = blanks
# Always add a newline character at EOF
self.eof_newline = eof_newline
@property
    def encoding_effective(self):
if self.encoding_output:
return self.encoding_output
elif self.encoding_internal:
return self.encoding_internal
elif self.encoding_input:
return self.encoding_input
else:
return "UTF-8"
def enc_normalize(self, string):
""" Format an Encoding identifier to upper case. """
if isinstance(string, str):
return string.upper()
return None
def enc_encode(self, strg):
""" Encode a formatted XML document in target"""
if sys.version_info > (3, 0):
return strg.encode(self.encoding_effective) # v3
return strg.decode("utf-8").encode(self.encoding_effective) # v2
def enc_output(self, path, strg):
""" Output according to encoding """
fh = sys.stdout
if strg is not None:
if path is not None:
open(path, "w+b").write(strg)
elif sys.version_info > (3, 0):
fh.buffer.write(strg)
else:
fh.write(strg)
def format_string(self, xmldoc=""):
""" Format a XML document given by xmldoc """
token_list = Formatter.TokenList(self)
token_list.parser.Parse(xmldoc)
return self.enc_encode(str(token_list))
def format_file(self, file):
""" Format a XML document given by path name """
fh = open(file, "rb")
token_list = Formatter.TokenList(self)
token_list.parser.ParseFile(fh)
fh.close()
return self.enc_encode(str(token_list))
class TokenList:
# Being in a cdata section:
cdata_section = False
# Lock deletion of leading whitespace:
desc_mixed_level = None
# Lock indenting:
indent_level = None
# Reference the Formatter:
formatter = None
# Count levels:
level_counter = 0
# Lock deletion of whitespaces:
preserve_level = None
def __init__(self, formatter):
# Keep tokens in a list:
self._list = []
self.formatter = formatter
self.parser = xml.parsers.expat.ParserCreate(
encoding=self.formatter.encoding_input
)
self.parser.specified_attributes = 1
self.parser.buffer_text = True
# Push tokens to buffer:
for pattern in [
"XmlDecl%s",
"ElementDecl%s",
"AttlistDecl%s",
"EntityDecl%s",
"StartElement%s",
"EndElement%s",
"ProcessingInstruction%s",
"CharacterData%s",
"Comment%s",
"Default%s",
"StartDoctypeDecl%s",
"EndDoctypeDecl%s",
"StartCdataSection%s",
"EndCdataSection%s",
"NotationDecl%s",
]:
setattr(
self.parser, pattern % "Handler", self.xml_handler(pattern % "")
)
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __getitem__(self, pos):
if 0 <= pos < len(self._list):
return self._list[pos]
else:
raise IndexError
def __setitem__(self, pos, value):
if 0 <= pos < len(self._list):
self._list[pos] = value
else:
raise IndexError
def __str__(self):
""" Returns the formatted XML document in UTF-8. """
for step in ["configure", "pre_operate", "post_operate"]:
for tk in iter(self):
getattr(tk, step)()
result = ""
for tk in iter(self):
result += str(tk)
if self.formatter.eof_newline and not result.endswith("\n"):
result += "\n"
return result
def append(self, tk):
""" Add token to tokenlist. """
tk.pos = len(self._list)
self._list.append(tk)
def level_increment(self):
""" Increment level counter. """
self.level_counter += 1
def level_decrement(self):
""" Decrement level counter. """
self.level_counter -= 1
def token_descendant_mixed(self, tk):
""" Mark descendants of mixed content. """
if tk.name == "StartElement":
# Mark every descendant:
if tk.content_model in [2, 3] and self.desc_mixed_level is None:
self.desc_mixed_level = tk.level
return False
return self.desc_mixed_level is not None
elif tk.name == "EndElement":
# Stop marking every descendant:
if tk.level is self.desc_mixed_level:
self.desc_mixed_level = None
elif self.desc_mixed_level is not None:
return True
return False
elif self.desc_mixed_level is None:
return False
return self.desc_mixed_level >= tk.level - 1
def sequence(self, tk, scheme=None):
"""Returns sublist of token list.
None: next to last
EndElement: first to previous"""
if scheme == "EndElement" or (scheme is None and tk.end):
return reversed(self._list[: tk.pos])
return self._list[(tk.pos + 1) :]
        def token_indent(self, tk):
            """ Indent outside of text or mixed content. """
            if self.formatter.inline:
                return self.token_indent_inline(tk)
            if tk.name == "StartElement":
# Block indenting for descendants of text and mixed content:
if tk.content_model in [2, 3] and self.indent_level is None:
self.indent_level = tk.level
elif self.indent_level is not None:
return False
return True
elif tk.name == "EndElement":
# Unblock indenting for descendants of text and mixed content:
if tk.level == self.indent_level:
self.indent_level = None
elif self.indent_level is None:
return True
return False
return self.indent_level is None
def token_indent_inline(self, tk):
""" Indent every element content - no matter enclosed by text or mixed content. """
for itk in iter(self.sequence(tk, "EndElement")):
if itk.level < tk.level and itk.name == "StartElement":
if itk.content_model == 1:
return True
return False
if (
itk.level == tk.level
and tk.name == "EndElement"
and itk.name == "StartElement"
):
if itk.content_model == 1:
return True
return False
return True
def token_model(self, tk):
"""Returns code for content model.
0: empty
1: element
2: text
3: mixed"""
eflag = tflag = 0
for itk in iter(self.sequence(tk)):
# Element boundary found:
if itk.level <= tk.level:
break
# Direct child found:
elif (itk.level - 1) == tk.level:
if itk.start:
eflag = 1
elif itk.not_empty:
tflag = 2
return eflag + tflag
def token_preserve(self, tk):
"""Preseve eyery descendant of an preserved element.
0: not locked
1: just (un)locked
2: locked"""
            # Lock preserving for StartElements:
if tk.name == "StartElement":
if self.preserve_level is not None:
return 2
if tk.arg[0] in self.formatter.preserve:
self.preserve_level = tk.level
return 1
return 0
# Unlock preserving for EndElements:
elif tk.name == "EndElement":
if (
tk.arg[0] in self.formatter.preserve
and tk.level == self.preserve_level
):
self.preserve_level = None
return 1
elif self.preserve_level is None:
return 0
return 2
return self.preserve_level is not None
def whitespace_append_trailing(self, tk):
""" Add a trailing whitespace to previous character data. """
if self.formatter.correct and tk.leading and tk.not_empty:
self.whitespace_append(tk, "EndElement", "StartElement", True)
def whitespace_append_leading(self, tk):
""" Add a leading whitespace to previous character data. """
if self.formatter.correct and tk.trailing and tk.not_empty:
self.whitespace_append(tk)
def whitespace_append(
self, tk, start="StartElement", stop="EndElement", direct=False
):
""" Add a whitspace to token list. """
for itk in self.sequence(tk, start):
if (
itk.empty
or (itk.name == stop and itk.descendant_mixed is False)
or (itk.name == start and abs(tk - itk) == 1)
):
break
elif itk.not_empty or (itk.name == start and itk.descendant_mixed):
self.insert_empty(itk, direct)
break
def whitespace_delete_leading(self, tk):
""" Returns True, if no next token or all empty (up to next end element)"""
if (
self.formatter.correct
and tk.leading
and not tk.preserve
and not tk.cdata_section
):
for itk in self.sequence(tk, "EndElement"):
if itk.trailing:
return True
elif itk.name in ["EndElement", "CharacterData", "EndCdataSection"]:
return False
return True
return False
def whitespace_delete_trailing(self, tk):
"""Returns True, if no next token or all empty (up to next end element)"""
if (
self.formatter.correct
and tk.trailing
and not tk.preserve
and not tk.cdata_section
):
for itk in self.sequence(tk, "StartElement"):
if itk.end:
return True
elif (
itk.name in ["StartElement", "StartCdataSection"]
or itk.not_empty
):
return False
return True
return False
def insert_empty(self, tk, before=True):
""" Insert an Empty Token into token list - before or after tk. """
if not (0 < tk.pos < (len(self) - 1)):
return False
ptk = self[tk.pos - 1]
ntk = self.formatter.CharacterData(self, [" "])
ntk.level = max(ptk.level, tk.level)
ntk.descendant_mixed = tk.descendant_mixed
ntk.preserve = ptk.preserve * tk.preserve
ntk.cdata_section = ptk.cdata_section or tk.cdata_section
if before:
self._list.insert(tk.pos + 1, ntk)
else:
self._list.insert(tk.pos, ntk)
for i in range((tk.pos - 1), len(self._list)):
self._list[i].pos = i
def xml_handler(self, key):
""" Returns lambda function which adds token to token list"""
return lambda *arg: self.append(getattr(self.formatter, key)(self, arg))
class Token(object):
def __init__(self, tklist, arg):
# Reference Token List:
self.list = tklist
# Token datas:
self.arg = list(arg)
# Token is placed in an CDATA section:
self.cdata_section = False
# Token has content model:
self.content_model = None
            # Remove trailing whitespaces:
self.delete_trailing = False
# Remove leading whitespaces:
self.delete_leading = False
# Token is descendant of text or mixed content element:
self.descendant_mixed = False
# Reference to formatter:
self.formatter = tklist.formatter
# Insert indenting white spaces:
self.indent = False
# N-th generation of roots descendants:
self.level = self.list.level_counter
# Token class:
self.name = self.__class__.__name__
# Preserve white spaces within enclosed tokens:
self.preserve = False
# Position in token list:
self.pos = None
def __sub__(self, other):
return self.pos - other.pos
def __unicode__(self):
return ""
# Workaround, see http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/:
if sys.version_info > (3, 0):
__str__ = lambda x: x.__unicode__()
else:
__str__ = lambda x: unicode(x).encode("utf-8")
@property
def end(self):
return self.name == "EndElement"
@property
def empty(self):
return self.name == "CharacterData" and re.match(
r"^[\t\s\n]*$", self.arg[0]
)
@property
def leading(self):
return self.name == "CharacterData" and re.search(
r"^[\t\s\n]+", self.arg[0]
)
@property
def not_empty(self):
return (
self.name == "CharacterData"
and not self.cdata_section
and not re.match(r"^[\t\s\n]+$", self.arg[0])
)
@property
def trailing(self):
return self.name == "CharacterData" and re.search(
r"[\t\s\n]+$", self.arg[0]
)
@property
def start(self):
return self.name == "StartElement"
@property
def correct(self):
return self.formatter.correct
def attribute(self, key, value):
if key and value:
return ' %s="%s"' % (key, value)
elif key:
return ' %s=""' % (key)
return ""
def indent_insert(self):
""" Indent token. """
# Child of root and no empty node
if (
self.level > 0 and not (self.end and self.list[self.pos - 1].start)
) or ( # not empty node:
self.end and not self.list[self.pos - 1].start
):
return self.indent_create(self.level)
return ""
def indent_create(self, times=1):
""" Returns indent string. """
if not self.formatter.compress and self.formatter.indent:
return "\n%s" % (
(times * self.formatter.indent) * self.formatter.indent_char
)
return ""
def identifier(self, systemid, publicid):
# TODO add base parameter:
if publicid and systemid:
return ' PUBLIC "%s" "%s"' % (publicid, systemid)
elif publicid:
return ' PUBLIC "%s"' % publicid
elif systemid:
return ' SYSTEM "%s"' % systemid
return ""
def configure(self):
""" Set token properties. """
self.descendant_mixed = self.list.token_descendant_mixed(self)
self.preserve = self.list.token_preserve(self)
self.cdata_section = self.list.cdata_section
def pre_operate(self):
pass
def post_operate(self):
pass
class AttlistDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!ATTLIST %s %s" % (self.arg[0], self.arg[1])
if self.arg[2] is not None:
str += " %s" % self.arg[2]
if self.arg[4] and not self.arg[3]:
str += " #REQUIRED"
elif self.arg[3] and self.arg[4]:
str += " #FIXED"
elif not self.arg[4] and not self.arg[3]:
str += " #IMPLIED"
if self.arg[3]:
str += ' "%s"' % self.arg[3]
str += ">"
return str
class CharacterData(Token):
def __unicode__(self):
str = self.arg[0]
if not self.preserve and not self.cdata_section:
# remove empty tokens always in element content!
if self.empty and not self.descendant_mixed:
if self.formatter.blanks and not self.formatter.compress and re.match(r"\s*\n\s*\n\s*", str):
str = "\n"
else:
str = ""
else:
if self.correct:
str = re.sub(r"\r\n", "\n", str)
str = re.sub(r"\r|\n|\t", " ", str)
str = re.sub(r"\s+", " ", str)
if self.delete_leading:
str = re.sub(r"^\s", "", str)
if self.delete_trailing:
str = re.sub(r"\s$", "", str)
if not self.cdata_section:
str = re.sub(r"&", "&", str)
str = re.sub(r"<", "<", str)
return str
def pre_operate(self):
self.list.whitespace_append_trailing(self)
self.list.whitespace_append_leading(self)
def post_operate(self):
self.delete_leading = self.list.whitespace_delete_leading(self)
self.delete_trailing = self.list.whitespace_delete_trailing(self)
class Comment(Token):
def __unicode__(self):
str = ""
if self.preserve in [0, 1] and self.indent:
str += self.indent_insert()
str += "<!--%s-->" % re.sub(
r"^[\r\n]+$", "\n", re.sub(r"^[\r\n]+", "\n", self.arg[0])
)
return str
def configure(self):
super(Formatter.Comment, self).configure()
self.indent = self.list.token_indent(self)
class Default(Token):
pass
class EndCdataSection(Token):
def __unicode__(self):
return "]]>"
def configure(self):
self.list.cdata_section = False
class ElementDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!ELEMENT %s%s>" % (self.arg[0], self.evaluate_model(self.arg[1]))
return str
def evaluate_model(self, model, modelStr="", concatStr=""):
childSeq = []
mixed = model[0] == xml.parsers.expat.model.XML_CTYPE_MIXED
hasChilds = len(model[3]) or mixed
if model[0] == xml.parsers.expat.model.XML_CTYPE_EMPTY: # 1
modelStr += " EMPTY"
elif model[0] == xml.parsers.expat.model.XML_CTYPE_ANY: # 2
modelStr += " ANY"
elif model[0] == xml.parsers.expat.model.XML_CTYPE_NAME: # 4
modelStr = "%s" % model[2] # new start
elif model[0] in (
xml.parsers.expat.model.XML_CTYPE_CHOICE,
xml.parsers.expat.model.XML_CTYPE_MIXED,
): # 5
concatStr = "|"
elif model[0] == xml.parsers.expat.model.XML_CTYPE_SEQ: # 6
concatStr = ","
if hasChilds:
modelStr += " ("
if mixed:
childSeq.append("#PCDATA")
for child in model[3]:
childSeq.append(self.evaluate_model(child))
modelStr += concatStr.join(childSeq)
if hasChilds:
modelStr += ")"
modelStr += {
xml.parsers.expat.model.XML_CQUANT_NONE: "",
xml.parsers.expat.model.XML_CQUANT_OPT: "?",
xml.parsers.expat.model.XML_CQUANT_PLUS: "+",
xml.parsers.expat.model.XML_CQUANT_REP: "*",
}[model[1]]
return modelStr
class EndDoctypeDecl(Token):
def __unicode__(self):
str = ""
if self.list[self.pos - 1].name != "StartDoctypeDecl":
str += self.indent_create(0)
str += "]"
str += ">"
str += self.indent_create(0)
return str
class EndElement(Token):
def __init__(self, list, arg):
list.level_decrement()
super(Formatter.EndElement, self).__init__(list, arg)
def __unicode__(self):
str = ""
# Don't close empty nodes on compression mode:
if (
not (self.formatter.compress or self.formatter.selfclose)
or self.list[self.pos - 1].name != "StartElement"
):
if self.preserve in [0] and self.indent:
str += self.indent_insert()
str += "</%s>" % self.arg[0]
return str
def configure(self):
self.descendant_mixed = self.list.token_descendant_mixed(self)
self.preserve = self.list.token_preserve(self)
self.indent = self.list.token_indent(self)
class EntityDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!ENTITY "
if self.arg[1]:
str += "% "
str += "%s " % self.arg[0]
if self.arg[2]:
str += '"%s"' % self.arg[2]
else:
str += "%s " % self.identifier(self.arg[4], self.arg[5])
if self.arg[6]:
str += "NDATA %s" % self.arg[6]
str += ">"
return str
class NotationDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!NOTATION %s%s>" % (
self.arg[0],
self.identifier(self.arg[2], self.arg[3]),
)
return str
class ProcessingInstruction(Token):
def __unicode__(self):
str = ""
if self.preserve in [0, 1] and self.indent:
str += self.indent_insert()
str += "<?%s %s?>" % (self.arg[0], self.arg[1])
return str
def configure(self):
super(Formatter.ProcessingInstruction, self).configure()
self.indent = self.list.token_indent(self)
class StartCdataSection(Token):
def __unicode__(self):
return "<![CDATA["
def configure(self):
self.list.cdata_section = True
class StartDoctypeDecl(Token):
def __unicode__(self):
str = "<!DOCTYPE %s" % (self.arg[0])
if self.arg[1]:
str += self.identifier(self.arg[1], self.arg[2])
if self.arg[3]:
str += " ["
return str
class StartElement(Token):
def __init__(self, list, arg):
super(Formatter.StartElement, self).__init__(list, arg)
self.list.level_increment()
def __unicode__(self):
str = ""
if self.preserve in [0, 1] and self.indent:
str += self.indent_insert()
str += "<%s" % self.arg[0]
for attr in sorted(self.arg[1].keys()):
str += self.attribute(attr, self.arg[1][attr])
if self.list[self.pos + 1].end and (self.formatter.compress or self.formatter.selfclose):
str += "/>"
else:
str += ">"
return str
def configure(self):
self.content_model = self.list.token_model(self)
self.descendant_mixed = self.list.token_descendant_mixed(self)
self.preserve = self.list.token_preserve(self)
self.indent = self.list.token_indent(self)
class XmlDecl(Token):
def __init__(self, list, arg):
super(Formatter.XmlDecl, self).__init__(list, arg)
if len(self.arg) > 1:
self.formatter.encoding_internal = self.arg[1]
def __unicode__(self):
str = "<?xml%s%s" % (
self.attribute("version", self.arg[0]),
self.attribute("encoding", self.formatter.encoding_effective),
)
if self.arg[2] > -1:
str += self.attribute("standalone", "yes")
str += "?>\n"
return str
def cli_usage(msg=""):
""" Output usage for command line tool. """
sys.stderr.write(msg + "\n")
sys.stderr.write(
'Usage: xmlformat [--preserve "pre,literal"] [--blanks]\
[--compress] [--selfclose] [--indent num] [--indent-char char]\
[--outfile file] [--encoding enc] [--outencoding enc]\
[--disable-inlineformatting] [--overwrite] [--disable-correction]\
[--eof-newline]\
[--help] <--infile file | file | - >\n'
)
sys.exit(2)
def cli():
""" Launch xmlformatter from command line. """
res = None
indent = DEFAULT_INDENT
indent_char = DEFAULT_INDENT_CHAR
outfile = None
overwrite = False
preserve = []
blanks = False
compress = DEFAULT_COMPRESS
selfclose = DEFAULT_SELFCLOSE
infile = None
encoding = DEFAULT_ENCODING_INPUT
outencoding = DEFAULT_ENCODING_OUTPUT
inline = DEFAULT_INLINE
correct = DEFAULT_CORRECT
eof_newline = DEFAULT_EOF_NEWLINE
try:
opts, args = getopt.getopt(
sys.argv[1:],
"",
[
"compress",
"selfclose",
"disable-correction",
"disable-inlineformatting",
"encoding=",
"help",
"infile=",
"indent=",
"indent-char=",
"outfile=",
"outencoding=",
"overwrite",
"preserve=",
"blanks",
"eof-newline"
],
)
except getopt.GetoptError as err:
cli_usage(str(err))
for key, value in opts:
if key in ["--indent"]:
indent = value
elif key in ["--preserve"]:
preserve = value.replace(",", " ").split()
elif key in ["--blanks"]:
blanks = True
elif key in ["--help"]:
cli_usage()
elif key in ["--compress"]:
compress = True
elif key in ["--selfclose"]:
selfclose = True
elif key in ["--outfile"]:
outfile = value
elif key in ["--infile"]:
infile = value
elif key in ["--encoding"]:
encoding = value
elif key in ["--outencoding"]:
outencoding = value
elif key in ["--indent-char"]:
indent_char = value
elif key in ["--disable-inlineformatting"]:
inline = False
elif key in ["--disable-correction"]:
correct = False
elif key in ["--overwrite"]:
overwrite = True
elif key in ["--eof-newline"]:
eof_newline = True
try:
formatter = Formatter(
indent=indent,
preserve=preserve,
blanks=blanks,
compress=compress,
selfclose=selfclose,
encoding_input=encoding,
encoding_output=outencoding,
indent_char=indent_char,
inline=inline,
correct=correct,
eof_newline=eof_newline,
)
input_file = None
if infile:
input_file = infile
res = formatter.format_file(input_file)
elif len(args) > 0:
if args[0] == "-":
res = formatter.format_string("".join(sys.stdin.readlines()))
else:
input_file = args[0]
res = formatter.format_file(input_file)
except xml.parsers.expat.ExpatError as err:
cli_usage("XML error: %s" % err)
except IOError as err:
cli_usage("IO error: %s" % err)
except:
cli_usage("Unkonwn error")
if overwrite:
formatter.enc_output(input_file, res)
else:
formatter.enc_output(outfile, res)
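# Illustrative usage sketch (not part of the original module): driving Formatter
# from Python instead of through cli(). The sample document, the two-space
# indent and the preserved element name ("pre") are arbitrary assumptions made
# only for demonstration.
def _example_usage():
    """Format a small document in memory and return it as a str."""
    formatter = Formatter(indent=2, indent_char=" ", preserve=["pre"])
    raw = "<root> <pre>  keep   this  </pre><item>  collapse   me  </item> </root>"
    # format_string() returns bytes in the effective output encoding (UTF-8 here).
    return formatter.format_string(raw).decode("UTF-8")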
|
pamoller/xmlformatter
|
xmlformatter.py
|
Python
|
mit
| 30,777
|
# setup.py - distutils configuration for esm and esmre modules
# Copyright (C) 2007 Tideway Systems Limited.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from setuptools import setup, Extension
module1 = Extension("esm",
#define_macros=[("HEAP_CHECK", 1)],
sources = ['src/esm.c',
'src/aho_corasick.c',
'src/ac_heap.c',
'src/ac_list.c'])
setup (name = "esmre",
version = '0.3.1',
description = 'Regular expression accelerator',
long_description = " ".join("""
Modules used to accelerate execution of a large collection of regular
expressions using the Aho-Corasick algorithms.
""".strip().split()),
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: '
'GNU Library or Lesser General Public License (LGPL)',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Indexing'
],
install_requires=['setuptools'],
author = 'Will Harris',
author_email = 'esmre@greatlibrary.net',
url = 'http://code.google.com/p/esmre/',
license = 'GNU LGPL',
platforms = ['POSIX'],
ext_modules = [module1],
package_dir = {'': 'src'},
py_modules = ["esmre"])
|
lewcpe/esmre
|
setup.py
|
Python
|
lgpl-2.1
| 2,269
|
# This file is part of Moksha.
# Copyright (C) 2008-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`moksha.hub.api.consumer` - The Moksha Consumer API
========================================================
Moksha provides a simple API for creating "consumers" of message topics.
This means that your consumer is instantiated when the MokshaHub is initially
loaded, and receives each message for the specified topic through the
:meth:`Consumer.consume` method.
.. moduleauthor:: Luke Macken <lmacken@redhat.com>
.. moduleauthor:: Ralph Bean <rbean@redhat.com>
"""
import json
import threading
import time
import logging
log = logging.getLogger('moksha.hub')
import six.moves.queue as queue
from collections import deque
from kitchen.iterutils import iterate
from moksha.common.lib.helpers import create_app_engine
from moksha.common.lib.converters import asbool
import moksha.hub.reactor
class Consumer(object):
""" A message consumer """
topic = ''
# Automatically decode JSON data
jsonify = True
# Internal use only
_initialized = False
_exception_count = 0
def __init__(self, hub):
self.hub = hub
self.log = log
# Set up a queue to communicate between the main twisted thread
# receiving raw messages, and a worker thread that pulls items off
# the queue to do "consume" work.
self.incoming = queue.Queue()
self.headcount_in = self.headcount_out = 0
self._times = deque(maxlen=1024)
callback = self._consume
if self.jsonify:
callback = self._consume_json
for topic in iterate(self.topic):
log.debug('Subscribing to consumer topic %s' % topic)
self.hub.subscribe(topic, callback)
# If the consumer specifies an 'app', then setup `self.engine` to
# be a SQLAlchemy engine, along with a configured DBSession
app = getattr(self, 'app', None)
self.engine = self.DBSession = None
if app:
log.debug("Setting up individual engine for consumer")
from sqlalchemy.orm import sessionmaker
self.engine = create_app_engine(app, hub.config)
self.DBSession = sessionmaker(bind=self.engine)()
self.blocking_mode = asbool(self.hub.config.get('moksha.blocking_mode', False))
if self.blocking_mode:
log.info("Blocking mode true for %r. "
"Messages handled as they arrive." % self)
else:
self.N = int(self.hub.config.get('moksha.workers_per_consumer', 1))
log.info("Blocking mode false for %r. "
"Messages to be queued and distributed to %r threads." % (
self, self.N))
for i in range(self.N):
moksha.hub.reactor.reactor.callInThread(self._work_loop)
self._initialized = True
def __json__(self):
if self._initialized:
backlog = self.incoming.qsize()
headcount_out = self.headcount_out
headcount_in = self.headcount_in
times = list(self._times)
else:
backlog = None
headcount_out = headcount_in = 0
times = []
results = {
"name": type(self).__name__,
"module": type(self).__module__,
"topic": self.topic,
"initialized": self._initialized,
"exceptions": self._exception_count,
"jsonify": self.jsonify,
"backlog": backlog,
"headcount_out": headcount_out,
"headcount_in": headcount_in,
"times": times,
}
# Reset these counters before returning.
self.headcount_out = self.headcount_in = 0
self._exception_count = 0
self._times.clear()
return results
def debug(self, message):
idx = threading.current_thread().ident
log.debug("%r thread %r | %s" % (type(self).__name__, idx, message))
def _consume_json(self, message):
""" Convert our AMQP messages into a consistent dictionary format.
This method exists because our STOMP & AMQP message brokers consume
messages in different formats. This causes our messaging abstraction
to leak into the consumers themselves.
:Note: We do not pass the message headers to the consumer (in this AMQP consumer)
because the current AMQP.js bindings do not allow the client to change them.
Thus, we need to throw any topic/queue details into the JSON body itself.
"""
try:
body = json.loads(message.body)
except:
log.debug("Unable to decode message body to JSON: %r" % message.body)
body = message.body
topic = None
# Try some stuff for AMQP:
try:
topic = message.headers[0].routing_key
except TypeError:
# We didn't get a JSON dictionary
pass
except AttributeError:
# We didn't get headers or a routing key?
pass
# If that didn't work, it might be zeromq
if not topic:
try:
topic = message.topic
except AttributeError:
# Weird. I have no idea...
pass
message_as_dict = {'body': body, 'topic': topic}
return self._consume(message_as_dict)
def _consume(self, message):
self.headcount_in += 1
if self.blocking_mode:
# Do the work right now
return self._do_work(message)
else:
# Otherwise, put the message in a queue for other threads to handle
self.incoming.put(message)
def _work_loop(self):
while True:
# This is a blocking call. It waits until a message is available.
message = self.incoming.get()
            # If we receive StopIteration, we are being asked to quit.
if message is StopIteration:
break
self._do_work(message)
self.debug("Worker thread exiting.")
def _do_work(self, message):
self.headcount_out += 1
start = time.time()
handled = True
self.debug("Worker thread picking a message.")
try:
self.validate(message)
except Exception as e:
log.warning("Received invalid message %r" % e)
return False # Not handled
try:
self.pre_consume(message)
except Exception as e:
self.log.exception(message)
try:
self.consume(message)
except Exception as e:
handled = False # Not handled. Return this later.
self.log.exception(message)
# Keep track of how many exceptions we've hit in a row
self._exception_count += 1
try:
self.post_consume(message)
except Exception as e:
self.log.exception(message)
# Record how long it took to process this message (for stats)
self._times.append(time.time() - start)
self.debug("Going back to waiting on the incoming queue. Message handled: %r" % handled)
return handled
def validate(self, message):
""" Override to implement your own validation scheme. """
pass
def pre_consume(self, message):
pass
def consume(self, message):
raise NotImplementedError
def post_consume(self, message):
pass
def send_message(self, topic, message):
try:
self.hub.send_message(topic, message)
except Exception as e:
log.error('Cannot send message: %s' % e)
def stop(self):
for i in range(getattr(self, 'N', 0)):
self.incoming.put(StopIteration)
if hasattr(self, 'hub'):
self.hub.close()
if getattr(self, 'DBSession', None):
self.DBSession.close()
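# Illustrative sketch (not shipped with Moksha): a minimal consumer of the kind
# described in the module docstring above. The topic string is an arbitrary
# assumption; real consumers are typically registered through an entry point so
# the hub can discover and instantiate them at startup.
class ExampleLoggingConsumer(Consumer):
    """Log the body of every message published on an example topic."""
    topic = 'org.example.test'
    jsonify = True
    def consume(self, message):
        # Messages arrive as {'body': ..., 'topic': ...} (see _consume_json above).
        self.log.info("Received on %s: %r", message['topic'], message['body'])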
|
mokshaproject/moksha
|
moksha.hub/moksha/hub/api/consumer.py
|
Python
|
apache-2.0
| 8,457
|
'''
All of these views are predicated on the user already being logged in to
valid session.
django_ug/views.py
John Whelchel
Summer 2013
These are the views for the User Gateway section of the administration
site. They are all decorated with @authenticate to make sure that a user is
logged in; if not, they are redirected to the login page. Some are decorated
with precheck, a decorator that makes sure the passed g_id and passwords
are valid.
'''
import logging
from django_lib.decorators import precheck
from django_lib.auth import authenticate
from django_lib import gatewayforms
from django_lib import forms as libforms
from django.http import HttpResponse, HttpResponseRedirect
from django.template import Context, loader, RequestContext
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from storage.storagetypes import transactional, transaction
import storage.storage as db
from MS.volume import Volume
from MS.user import SyndicateUser as User
from MS.gateway import UserGateway as UG
# This is the view to be redirected to when precheck fails; i.e.
# the given password or g_id is wrong.
PRECHECK_REDIRECT = 'django_ug.views.viewgateway'
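# Illustrative sketch only (the real decorators live in django_lib.auth and
# django_lib.decorators and are what the views below actually use): the general
# shape an @authenticate-style decorator takes. The '/login' redirect target is
# an assumption made for demonstration.
def _authenticate_sketch(view):
    """Redirect to a login page unless the session already has a login_email."""
    def wrapper(request, *args, **kwargs):
        if 'login_email' not in request.session:
            return HttpResponseRedirect('/login')
        return view(request, *args, **kwargs)
    return wrapper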
@authenticate
def viewgateway(request, g_id=0):
'''
The view for viewing and changing any of the main settings on any UG. Passes
forms for changing different settings, and the volumes attached to the gateway.
'''
session = request.session
username = session['login_email']
message = session.pop('message', "")
g_id = int(g_id)
g = db.read_user_gateway(g_id)
if not g:
logging.error("Error reading gateway %d : Does note exist" % g_id)
message = "No user gateway with the ID %d exists." % g_id
t = loader.get_template("gateway_templates/viewgateway_failure.html")
c = Context({'message':message, 'username':username})
return HttpResponse(t.render(c))
# Create necessary forms
location_form = gatewayforms.ModifyGatewayLocation(initial={'host':g.host,
'port':g.port})
change_form = gatewayforms.ChangeVolume()
password_form = libforms.Password()
change_password_form = libforms.ChangePassword()
vol = db.read_volume(g.volume_id)
if not vol:
vol = None
owner = None
"""
if g.volume_id != 0:
logging.error("Volume ID %s in gateways volume_ids does not map to volume. Gateway: %s" % (g.volume_id, g_id))
return redirect('django_ug.views.allgateways')
else:
vol = None
owner = None
"""
else:
attrs = {"SyndicateUser.owner_id ==": vol.owner_id}
owner = db.get_user(attrs)
logging.info(owner)
logging.info(vol.owner_id)
t = loader.get_template("gateway_templates/viewusergateway.html")
c = RequestContext(request, {'username':username,
'gateway':g,
'message':message,
'vol':vol,
'owner':owner,
'location_form':location_form,
'change_form':change_form,
'password_form':password_form,
'change_password_form':change_password_form})
return HttpResponse(t.render(c))
# Doesn't use precheck() because doesn't use Password() form, just ChangePassword() form.
@authenticate
def changepassword(request, g_id):
session = request.session
username = session['login_email']
user = db.read_user(username)
g_id = int(g_id)
if request.method != "POST":
return redirect('django_ug.views.viewgateway', g_id)
g = db.read_user_gateway(g_id)
if not g:
logging.error("Error reading gateway %d : Gateway does not exist." % g_id)
message = "No user gateway by the name of %d exists." % g_id
t = loader.get_template("gateway_templates/viewgateway_failure.html")
c = Context({'message':message, 'username':username})
return HttpResponse(t.render(c))
form = libforms.ChangePassword(request.POST)
if not form.is_valid():
session['message'] = "You must fill out all password fields."
return redirect('django_ug.views.viewgateway', g_id)
else:
# Check password hash
if not UG.authenticate(g, form.cleaned_data['oldpassword']):
session['message'] = "Incorrect password."
return redirect('django_ug.views.viewgateway', g_id)
elif form.cleaned_data['newpassword_1'] != form.cleaned_data['newpassword_2']:
session['message'] = "Your new passwords did not match each other."
return redirect('django_ug.views.viewgateway', g_id)
# Ok to change password
else:
new_hash = UG.generate_password_hash(form.cleaned_data['newpassword_1'])
fields = {'ms_password_hash':new_hash}
try:
db.update_user_gateway(g_id, **fields)
except Exception as e:
logging.error("Unable to update user gateway %s. Exception %s" % (g_name, e))
session['message'] = "Unable to update gateway."
return redirect('django_ug.views.viewgateway', g_id)
session['new_change'] = "We've changed your gateways's password."
session['next_url'] = '/syn/UG/viewgateway/' + str(g_id)
session['next_message'] = "Click here to go back to your volume."
return HttpResponseRedirect('/syn/thanks')
@authenticate
@precheck("UG", PRECHECK_REDIRECT)
def changelocation(request, g_id):
'''
View to enable changing host and port of UG
'''
session = request.session
g_id = int(g_id)
form = gatewayforms.ModifyGatewayLocation(request.POST)
if form.is_valid():
new_host = form.cleaned_data['host']
new_port = form.cleaned_data['port']
fields = {'host':new_host, 'port':new_port}
try:
db.update_user_gateway(g_id, **fields)
except Exception as e:
logging.error("Unable to update UG with ID %s. Error was %s." % (g_id, e))
session['message'] = "Error. Unable to change user gateway."
return redirect('django_ug.views.viewgateway', g_id)
session['new_change'] = "We've updated your UG."
session['next_url'] = '/syn/UG/viewgateway/' + str(g_id)
session['next_message'] = "Click here to go back to your gateway."
return HttpResponseRedirect('/syn/thanks')
else:
session['message'] = "Invalid form entries for gateway location."
return redirect('django_ug.views.viewgateway', g_id)
@authenticate
@precheck("UG", PRECHECK_REDIRECT)
def changevolume(request, g_id):
'''
View to enable changing volume attached to UG
'''
session = request.session
username = session['login_email']
user = db.read_user(username)
g_id = int(g_id)
g = db.read_user_gateway(g_id)
# Isolate DB calls to allow roll-backs via transactions
@transactional(xg=True)
def update_ug_and_vol(g_id, gfields, vol1_id, vol2_id):
db.update_user_gateway(g_id, **gfields)
attrs = {"UG_version":1}
# Force update of UG version
db.update_volume(vol1_id, **attrs)
db.update_volume(vol2_id, **attrs)
form = gatewayforms.ChangeVolume(request.POST)
if form.is_valid():
attrs = {"Volume.name ==":form.cleaned_data['volume_name'].strip().replace(" ", "_")}
vols = db.list_volumes(attrs, limit=1)
if vols:
new_vol = vols[0]
else:
session['message'] = "No volume %s exists." % form.cleaned_data['volume_name']
return redirect('django_ug.views.viewgateway', g_id)
if (new_vol.volume_id not in user.volumes_r) and (new_vol.volume_id not in user.volumes_rw):
session['message'] = "Must have read rights to volume %s to assign UG to it." % form.cleaned_data['volume_name']
return redirect('django_ug.views.viewgateway', g_id)
old_vol = g.volume_id
#fields = {"volume_id":new_vol.volume_id, "cert_version": True}
try:
db.update_user_gateway( g_id, volume_id=new_vol.volume_id, cert_version=True )
db.update_volume( new_vol.volume_id, version=True, cert_version=True )
if g.is_bound_to_volume():
# update the old Volume
db.update_volume( old_vol, version=True, cert_version=True )
#update_ug_and_vol(g_id, fields, old_vol, new_vol.volume_id)
        except Exception as e:
logging.error("Unable to update UG with ID %s. Error was %s." % (g_id, e))
session['message'] = "Error. Unable to change user gateway."
return redirect('django_ug.views.viewgateway', g_id)
session['new_change'] = "We've updated your UG."
session['next_url'] = '/syn/UG/viewgateway/' + str(g_id)
session['next_message'] = "Click here to go back to your gateway."
return HttpResponseRedirect('/syn/thanks')
else:
session['message'] = "Invalid field entries. Volume name required."
return redirect('django_ug.views.viewgateway', g_id)
@authenticate
@precheck("UG", PRECHECK_REDIRECT)
def changewrite(request, g_id):
'''
View to change write capabilities of UG
'''
session = request.session
g_id = int(g_id)
g = db.read_user_gateway(g_id)
if not g:
return redirect('django_ug.views.viewgateway', g_id)
if g.read_write:
attrs = {'read_write':False}
else:
attrs = {'read_write':True}
try:
db.update_user_gateway(g_id, **attrs)
except Exception as e:
logging.error("Unable to update UG with ID %s. Error was %s." % (g_id, e))
session['message'] = "Error. Unable to change user gateway."
return redirect('django_ug.views.viewgateway', g_id)
session['new_change'] = "We've updated your UG."
session['next_url'] = '/syn/UG/viewgateway/' + str(g_id)
session['next_message'] = "Click here to go back to your gateway."
return HttpResponseRedirect('/syn/thanks')
@authenticate
def allgateways(request):
'''
List all UG gateways view
'''
session = request.session
username = session['login_email']
try:
qry = db.list_user_gateways()
except:
qry = []
gateways = []
for g in qry:
gateways.append(g)
vols = []
for g in gateways:
add_vol = db.read_volume(g.volume_id)
if add_vol:
vols.append(add_vol)
else:
vols.append([])
owners = []
for v in vols:
if v:
volume_owner = v.owner_id
attrs = {"SyndicateUser.owner_id ==":volume_owner}
owners.append(db.get_user(attrs))
else:
owners.append("")
gateway_vols_owners = zip(gateways, vols, owners)
t = loader.get_template('gateway_templates/allusergateways.html')
c = RequestContext(request, {'username':username, 'gateway_vols_owners':gateway_vols_owners})
return HttpResponse(t.render(c))
@authenticate
def mygateways(request):
'''
Show all of logged in user's UG's
'''
session = request.session
username = session['login_email']
user = db.read_user(username)
try:
attrs = {"UserGateway.owner_id ==":user.owner_id}
gateways = db.list_user_gateways(attrs)
except:
gateways = []
vols = []
for g in gateways:
vols.append(db.read_volume(g.volume_id))
gateway_vols = zip(gateways, vols)
t = loader.get_template('gateway_templates/myusergateways.html')
c = RequestContext(request, {'username':username, 'gateway_vols':gateway_vols})
return HttpResponse(t.render(c))
@authenticate
def create(request):
'''
View for creating UG's
'''
session = request.session
username = session['login_email']
user = db.read_user(username)
# Helper method that simplifies returning forms after user error.
def give_create_form(username, session):
message = session.pop('message', "")
form = gatewayforms.CreateUG()
t = loader.get_template('gateway_templates/create_user_gateway.html')
c = RequestContext(request, {'username':username,'form':form, 'message':message})
return HttpResponse(t.render(c))
if request.POST:
# Validate input forms
form = gatewayforms.CreateUG(request.POST)
if form.is_valid():
if not form.cleaned_data['volume_name']:
logging.info("DLFKJSDF")
vol = None
else:
attrs = {"Volume.name ==":form.cleaned_data['volume_name'].strip().replace(" ", "_")}
vols = db.list_volumes(attrs, limit=1)
if vols:
vol = vols[0]
else:
session['message'] = "No volume %s exists." % form.cleaned_data['volume_name']
return give_create_form(username, session)
            if vol and (vol.volume_id not in user.volumes_r) and (vol.volume_id not in user.volumes_rw):
                session['message'] = "Must have read rights to volume %s to create UG for it." % form.cleaned_data['volume_name']
                return give_create_form(username, session)
            # Force update of UG version (only when a volume is attached)
            if vol:
                attrs = {"UG_version":1}
                try:
                    transaction( lambda: db.update_volume(vol.volume_id, **attrs) )
                except Exception as E:
                    session['message'] = "UG creation error: %s" % E
                    return give_create_form( username, session )
try:
# Prep kwargs
kwargs = {}
kwargs['read_write'] = form.cleaned_data['read_write']
kwargs['ms_username'] = form.cleaned_data['g_name']
kwargs['port'] = form.cleaned_data['port']
kwargs['host'] = form.cleaned_data['host']
kwargs['ms_password'] = form.cleaned_data['g_password']
# Create
new_ug = db.create_user_gateway(user, vol, **kwargs)
except Exception as E:
session['message'] = "UG creation error: %s" % E
return give_create_form(username, session)
session['new_change'] = "Your new gateway is ready."
session['next_url'] = '/syn/UG/mygateways'
session['next_message'] = "Click here to see your gateways."
return HttpResponseRedirect('/syn/thanks/')
else:
# Prep returned form values (so they don't have to re-enter stuff)
if 'g_name' in form.errors:
oldname = ""
else:
oldname = request.POST['g_name']
if 'volume_name' in form.errors:
oldvolume_name = ""
else:
oldvolume_name = request.POST['volume_name']
if 'host' in form.errors:
oldhost = ""
else:
oldhost = request.POST['host']
if 'port' in form.errors:
oldport = ""
else:
oldport = request.POST['port']
# Prep error message
message = "Invalid form entry: "
for k, v in form.errors.items():
message = message + "\"" + k + "\"" + " -> "
for m in v:
message = message + m + " "
# Give then the form again
form = gatewayforms.CreateUG(initial={'g_name': oldname,
'volume_name': oldvolume_name,
'host': oldhost,
'port': oldport,
})
t = loader.get_template('gateway_templates/create_user_gateway.html')
c = RequestContext(request, {'username':username,'form':form, 'message':message})
return HttpResponse(t.render(c))
else:
# Not a POST, give them blank form
return give_create_form(username, session)
@authenticate
def delete(request, g_id):
'''
View for deleting UGs
'''
# Helper method that simplifies returning forms after user error.
def give_delete_form(username, g_name, session):
        message = session.pop('message', "")
form = gatewayforms.DeleteGateway()
t = loader.get_template('gateway_templates/delete_user_gateway.html')
c = RequestContext(request, {'username':username, 'g_name':g_name, 'form':form, 'message':message})
return HttpResponse(t.render(c))
session = request.session
username = session['login_email']
    user = db.read_user(username)
    g_id = int(g_id)
ug = db.read_user_gateway(g_id)
if not ug:
t = loader.get_template('gateway_templates/delete_user_gateway_failure.html')
c = RequestContext(request, {'username':username})
return HttpResponse(t.render(c))
g_name = ug.ms_username
if ug.owner_id != user.owner_id:
t = loader.get_template('gateway_templates/delete_user_gateway_failure.html')
c = RequestContext(request, {'username':username})
return HttpResponse(t.render(c))
if request.POST:
# Validate input forms
form = gatewayforms.DeleteGateway(request.POST)
if form.is_valid():
if not UG.authenticate(ug, form.cleaned_data['g_password']):
session['message'] = "Incorrect User Gateway password"
return give_delete_form(username, g_name, session)
if not form.cleaned_data['confirm_delete']:
session['message'] = "You must tick the delete confirmation box."
return give_delete_form(username, g_name, session)
db.delete_user_gateway(g_id)
session['new_change'] = "Your gateway has been deleted."
session['next_url'] = '/syn/UG/mygateways'
session['next_message'] = "Click here to see your gateways."
return HttpResponseRedirect('/syn/thanks/')
# invalid forms
else:
# Prep error message
session['message'] = "Invalid form entry: "
for k, v in form.errors.items():
session['message'] = session['message'] + "\"" + k + "\"" + " -> "
for m in v:
session['message'] = session['message'] + m + " "
return give_delete_form(username, g_name, session)
else:
# Not a POST, give them blank form
return give_delete_form(username, g_name, session)
'''
BEYOND HERE DEPRECATED
'''
@csrf_exempt
@authenticate
def urlcreate(request, volume_name, g_name, g_password, host, port, read_write):
session = request.session
username = session['login_email']
user = db.read_user(username)
kwargs = {}
kwargs['port'] = int(port)
kwargs['host'] = host
kwargs['ms_username'] = g_name
kwargs['ms_password'] = g_password
kwargs['read_write'] = read_write
vol = db.read_volume(volume_name)
if not vol:
return HttpResponse("No volume %s exists." % volume_name)
if (vol.volume_id not in user.volumes_r) and (vol.volume_id not in user.volumes_rw):
return HttpResponse("Must have read rights to volume %s to create UG for it." % volume_name)
try:
new_ug = db.create_user_gateway(user, vol, **kwargs)
except Exception as E:
return HttpResponse("UG creation error: %s" % E)
return HttpResponse("UG succesfully created: " + str(new_ug))
@csrf_exempt
@authenticate
def urldelete(request, g_id, g_password):
session = request.session
username = session['login_email']
user = db.read_user(username)
ug = db.read_user_gateway(g_id)
if not ug:
return HttpResponse("UG %d does not exist." % g_id)
if ug.owner_id != user.owner_id:
return HttpResponse("You must own this UG to delete it.")
if not UG.authenticate(ug, g_password):
return HttpResponse("Incorrect UG password.")
db.delete_user_gateway(g_id)
return HttpResponse("Gateway succesfully deleted.")
|
jcnelson/syndicate
|
old/ms/django_ug/views.py
|
Python
|
apache-2.0
| 20,371
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import textwrap
import unittest
from skydoc import load_extractor
class LoadExtractorTest(unittest.TestCase):
def check_symbols(self, src, expected):
tf = tempfile.NamedTemporaryFile(mode='w+', delete=False)
tf.write(src)
tf.flush()
tf.close()
extractor = load_extractor.LoadExtractor()
load_symbols = extractor.extract(tf.name)
os.remove(tf.name)
self.assertEqual(expected, load_symbols)
def test_load(self):
src = textwrap.dedent("""\
load("//foo/bar:bar.bzl", "foo_library")
load("//foo/bar:baz.bzl", "foo_test", orig_foo_binary = "foo_binary")
""")
expected = [
load_extractor.LoadSymbol('//foo/bar:bar.bzl', 'foo_library', None),
load_extractor.LoadSymbol('//foo/bar:baz.bzl', 'foo_test', None),
load_extractor.LoadSymbol('//foo/bar:baz.bzl', 'foo_binary',
'orig_foo_binary'),
]
self.check_symbols(src, expected)
def raises_error(self, src):
tf = tempfile.NamedTemporaryFile(mode='w+', delete=False)
tf.write(src)
tf.flush()
tf.close()
extractor = load_extractor.LoadExtractor()
self.assertRaises(load_extractor.LoadExtractorError,
extractor.extract, tf.name)
os.remove(tf.name)
def test_invalid_non_string_literal_in_label(self):
src = textwrap.dedent("""\
load(load_label, "foo_library")
""")
self.raises_error(src)
def test_invalid_non_string_literal_in_keywords(self):
src = textwrap.dedent("""\
load("//foo/bar:bar.bzl", loaded_symbol)
""")
self.raises_error(src)
def test_invalid_symbol_conflict(self):
src = textwrap.dedent("""\
load("//foo:bar.bzl", "foo_binary", "foo_library")
load("//foo:baz.bzl", "foo_library")
""")
self.raises_error(src)
def test_invalid_symbol_alias_conflict(self):
src = textwrap.dedent("""\
load("//foo:bar.bzl", foo_library="some_foo_library")
load("//foo:baz.bzl", "foo_library")
""")
self.raises_error(src)
def test_invalid_duplicate_symbol_loaded(self):
src = textwrap.dedent("""\
load("//foo:bar.bzl", "foo_library", "foo_library")
""")
self.raises_error(src)
if __name__ == '__main__':
unittest.main()
|
bazelbuild/skydoc
|
skydoc/load_extractor_test.py
|
Python
|
apache-2.0
| 2,898
|
#---------------------------------------------------------------------------
#
# Cell.py: hierarchical compartments containing molecules that react,
# and possibly other compartments too, like in P Systems
#
# by Lidia Yamamoto, July 2013
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright (C) 2015 Lidia A. R. Yamamoto
# Contact: http://www.artificial-chemistries.org/
#
# This file is part of PyCellChemistry.
#
# PyCellChemistry is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 3, as published by the Free Software Foundation.
#
# PyCellChemistry is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyCellChemistry, see file COPYING. If not, see
# http://www.gnu.org/licenses/
#
import sys
import numpy as np
class Cell():
def __init__( self ):
""" initialize empty cell """
self.mycells = [] # list of internal compartments within this cell
self.cyto = {} # molecules in the cytoplasm of this cell
self.parent = {} # parent cell
self.prop = [] # list of propensities, from cytoplast to subcells
self.wt = 0.0 # sum of propensities
self.vt = 0.0 # virtual time
def set_cytoplasm( self, c):
""" assign a cytoplasm object to this cell; a cytoplasm represents
the set of molecules floating inside the cell, which are not
inside another subcell; it is typically implemented as a Multiset
or a more elaborated class derived from it.
"""
self.cyto = c
def get_cytoplasm( self ):
""" get cytoplasm object for this cell """
return self.cyto
def add( self, c ):
""" add a new internal compartment c to this cell """
self.mycells.append(c)
    def get_cells( self ):
""" get list of internal compartments for this cell """
return self.mycells
    def clear_cells( self ):
""" delete all the internal compartments from this cell """
self.mycells = []
def dissolve( self, i ):
""" dissolve internal compartment [i], and release its content to the
cytoplasm
"""
icell = self.mycells[i]
icyto = icell.get_cytoplasm()
if icyto != {}:
self.cyto.absorb(icyto)
icomp = icell.get_cells()
if icomp != []:
for c in icomp:
self.add(c)
icell.clear_cells()
def divide( self ):
""" cell division: approximately half of the objects (chosen randomly)
goes to the daughter cell; returns the newly created daughter cell
"""
newcell = Cell() # create daughter cell
if self.cyto != {}:
# divide cytoplasm molecules evenly among the two cells
newcyto = self.cyto.divide()
newcell.set_cytoplasm(newcyto)
if self.mycells != []:
# send half of the internal compartments to daughter cell
n = len(self.mycells) / 2
for i in range(n):
p = np.random.randint(len(self.mycells))
c = self.mycells.pop(p)
                newcell.add(c)
        return newcell
def inert( self ):
""" true if none of the vessels in this cell (including the
cytoplasm and all the subcells, recursively) has reactions to
perform
"""
if self.cyto != {}:
if not self.cyto.inert(): return False
for cell in self.mycells:
if not cell.inert(): return False
return True
def propensity( self ):
""" calculate propensities for the elements within this cell """
self.wt = 0.0
self.prop = []
if self.cyto != {}:
w = self.cyto.propensity()
self.prop.append(w)
self.wt += w
for cell in self.mycells:
w = cell.propensity()
self.prop.append(w)
self.wt += w
return self.wt
def gillespie( self ):
""" hierarchical gillespie for cell and its subcells """
if self.wt <= 0: return
w = np.random.random() * self.wt
i = 0
if self.cyto != {}:
if self.prop[0] > 0 and w < self.prop[0]:
self.cyto.react(w)
return
w -= self.prop[0]
i += 1
for cell in self.mycells:
if (self.prop[i] > 0 and w < self.prop[i]):
cell.react(w)
return
w -= self.prop[i]
i += 1
# pending: self.vt += ... IN TOP CELL ONLY!!
def run( self, niter ):
""" run 'niter' iterations of gillespie for this cell, or until cell
becomes inert
"""
for i in range(niter):
print >> sys.stderr, "ITER=", i
self.propensity()
if self.inert(): return
self.gillespie()
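# Illustrative construction of a nested compartment hierarchy (comment-only
# sketch; it assumes a Multiset-like reaction vessel from the rest of
# PyCellChemistry, which is not defined in this file):
#   outer = Cell()
#   inner = Cell()
#   outer.add(inner)
#   outer.set_cytoplasm(vessel)  # 'vessel' is a hypothetical Multiset-based object
#   outer.run(1000)              # iterate Gillespie until inert or 1000 steps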
|
danielrcardenas/ac-course-2017
|
frameworks/pycellchem-2.0/src/artchem/Cell.py
|
Python
|
apache-2.0
| 5,204
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRmysql(RPackage):
"""Implements 'DBI' Interface to 'MySQL' and 'MariaDB' Databases."""
homepage = "https://github.com/rstats-db/rmysql"
url = "https://cran.r-project.org/src/contrib/RMySQL_0.10.9.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/RMySQL"
version('0.10.9', '3628200a1864ac3005cfd55cc7cde17a')
depends_on('r-dbi', type=('build', 'run'))
depends_on('mariadb')
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-rmysql/package.py
|
Python
|
lgpl-2.1
| 1,690
|
# Copyright (C) 2013-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class TestResult(object):
"""Base class to record and report test results.
Method record is to record the results of test case, and report
method is to report the recorded results by a given reporter.
"""
def record(self, parameter, result):
raise NotImplementedError("Abstract Method:record.")
def report(self, reporter, name):
"""Report the test results by reporter."""
raise NotImplementedError("Abstract Method:report.")
class SingleStatisticTestResult(TestResult):
"""Test results for the test case with a single statistic."""
def __init__(self):
super (SingleStatisticTestResult, self).__init__ ()
self.results = dict ()
def record(self, parameter, result):
if parameter in self.results:
self.results[parameter].append(result)
else:
self.results[parameter] = [result]
def report(self, reporter, name):
reporter.start()
for key in sorted(self.results.iterkeys()):
reporter.report(name, key, self.results[key])
reporter.end()
class ResultFactory(object):
"""A factory to create an instance of TestResult."""
def create_result(self):
"""Create an instance of TestResult."""
raise NotImplementedError("Abstract Method:create_result.")
class SingleStatisticResultFactory(ResultFactory):
"""A factory to create an instance of SingleStatisticTestResult."""
def create_result(self):
return SingleStatisticTestResult()
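# Illustrative usage (comment-only sketch; 'my_reporter' is a hypothetical
# object exposing start()/report()/end() -- the real reporters live elsewhere
# in the perftest package):
#   factory = SingleStatisticResultFactory()
#   result = factory.create_result()
#   result.record(8, 0.21)
#   result.record(8, 0.19)
#   result.report(my_reporter, "backtrace")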
|
cupertinomiranda/binutils
|
gdb/testsuite/gdb.perf/lib/perftest/testresult.py
|
Python
|
gpl-2.0
| 2,216
|
# -*-coding:Utf-8 -*
import xml.etree.cElementTree as ET
__classRegister={}
def RegisterType(cls):
clsName=cls.__name__
__classRegister[clsName]=cls
def GetType(name):
return __classRegister[name]
def GetSubTypes(cls):
res=[]
for T in __classRegister.values():
if issubclass(T, cls):
res.append(T)
return res
class AType():
"""
base type declaration
such as str, float, bool, ...
But custom types can be
handled as well
"""
BaseType = str
def __init__(self,*a,**k):
"""
AType constructor creates a
BaseType object
"""
self._baseValue=self.BaseType(*a,**k)
def getBase(self):
"""
@return BaseType object to get
"""
return self._baseValue
def setBase(self,value):
"""
@param BaseType object to set
"""
self._baseValue=value
def __str__(self,it=""):
return "%s(%s)"%(self._baseValue,
self.__class__.__name__)
def torootxml(self,parameter,*a,**k):
root = ET.Element("root")
self.toxml(parameter,root,*a,**k)
return root
def tofilexml(self,parameter,filename,*a,**k):
root = self.torootxml(parameter,*a,**k)
tree = ET.ElementTree(root)
tree.write(filename)
def tostringxml(self,parameter,*a,**k):
root = self.torootxml(parameter,*a,**k)
return ET.tostring(root)
def toxml(self,parameter,et,*a,**k):
"""
Performs XML serialization
@param parameter: str
parameter name
@param et: ElementTree
parent element tree to populate
@param k: dict
optional attributes (for derived class)
"""
subEt = self.getSubEt(parameter,et,*a,**k)
self.populate(subEt,*a,**k)
def getSubEt(self,parameter,et,*a,**k):
"""
Create a sub element for this from
a parent element
@param parameter: str
parameter name
@param et: ElementTree
element tree to populate
@param k: dict
optional attributes (for derived class)
@return ElementTree
"""
subEt = ET.SubElement(et, parameter)
subEt.set("type",self.__class__.__name__)
return subEt
def populate(self,subEt,*a,**k):
"""
Parse this on sub element
@param subEt: ElementTree
element tree dedicated to parse value
@param k: dict
optional attributes (for derived class)
"""
subEt.text = unicode(self._baseValue)
@staticmethod
def fromfilexml(filename,*a,**k):
tree = ET.ElementTree(file=filename)
return AType.fromxml(tree.getroot(),*a,**k)
@staticmethod
def fromstringxml(text,*a,**k):
root = ET.fromstring(text)
return AType.fromxml(root[0],*a,**k)
@staticmethod
def fromxml(et,*a,**k):
"""
Return the attribute value
@param et: Element
element tree to decode
@param k: dict
optional attributes (for derived class)
"""
clsName = et.attrib["type"]
cls = GetType(clsName)
return cls.fromxmlclass(et,*a,**k)
@classmethod
def fromxmlclass(cls,et,*a,**k):
"""
Return the attribute value
@param et: Element
element tree to decode
@param k: dict
optional attributes (for derived class)
"""
text = et.text if et.text is not None else u""
return cls(text)
class STRING(AType):
pass
class COLOR(AType):
pass
class INT(AType):
"""
>>> valRef = INT(4)
>>> msg=valRef.tostringxml("max")
>>> print msg
<root><max type="INT">4</max></root>
>>> valExp=AType.fromstringxml(msg)
>>> print valExp.getBase()
4
"""
BaseType = int
class FLOAT(AType):
"""
>>> valRef = FLOAT(4.5)
>>> msg=valRef.tostringxml("max")
>>> print msg
<root><max type="FLOAT">4.5</max></root>
>>> valExp=AType.fromstringxml(msg)
>>> print valExp.getBase()
4.5
"""
BaseType = float
class BOOL(AType):
"""
>>> valRef = BOOL(True)
>>> msg=valRef.tostringxml("max")
>>> print msg
<root><max type="BOOL">1</max></root>
>>> valExp=AType.fromstringxml(msg)
>>> print valExp.getBase()
True
"""
BaseType = bool
def populate(self,subEt,*a,**k):
subEt.text = u"%o"%self._baseValue
@classmethod
def fromxmlclass(cls,et,*a,**k):
val = True if et.text==u"1" else False
return cls(val)
class DICT(AType):
"""
>>> root = ET.Element("root")
>>> adi = DICT()
>>> d = adi.getBase()
>>> d["status"]=BOOL(False)
>>> d["id"]=INT(36)
>>> adi.toxml("mydict",root)
>>> print ET.tostring(root)
<root><mydict type="DICT"><status type="BOOL">0</status><id type="INT">36</id></mydict></root>
>>> valExp=AType.fromxml(root[0])
>>> dictExp=valExp.getBase()
>>> dictExp["status"].getBase()
False
>>> dictExp["id"].getBase()
36
"""
BaseType = dict
def populate(self,subEt,*a,**k):
for key,value in self._baseValue.items():
value.toxml(key,subEt,*a,**k)
@classmethod
def fromxmlclass(cls,et,*a,**k):
obj=cls(*a,**k)
dico=obj.getBase()
for subEt in et:
dico[subEt.tag]=AType.fromxml(subEt,*a,**k)
return obj
class LIST(AType):
"""
>>> root = ET.Element("root")
>>> ali = LIST()
>>> l = ali.getBase()
>>> l.append(BOOL(False))
>>> l.append(INT(36))
>>> ali.toxml("mylist",root)
>>> print ET.tostring(root)
<root><mylist type="LIST"><elem type="BOOL">0</elem><elem type="INT">36</elem></mylist></root>
>>> valExp=AType.fromxml(root[0])
>>> ll=valExp.getBase()
>>> ll[0].getBase()
False
>>> ll[1].getBase()
36
"""
BaseType = list
def populate(self,subEt,*a,**k):
for value in self._baseValue:
value.toxml("elem",subEt,*a,**k)
@classmethod
def fromxmlclass(cls,et,*a,**k):
obj=cls(**k)
ll=obj.getBase()
for subEt in et:
ll.append(AType.fromxml(subEt,*a,**k))
return obj
# Atype Registration
RegisterType(STRING)
RegisterType(COLOR)
RegisterType(INT)
RegisterType(FLOAT)
RegisterType(BOOL)
RegisterType(DICT)
RegisterType(LIST)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
astyl/wxPlotLab
|
mplotlab/utils/abctypes.py
|
Python
|
mit
| 6,855
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""The code in this module is mostly copy/pasted out of the distutils2 source
code, as recommended by Tarek Ziade. As such, it may be subject to some change
as distutils2 development continues, and will have to be kept up to date.
I didn't want to use it directly from distutils2 itself, since I do not want it
to be an installation dependency for our packages yet--it is still too unstable
(the latest version on PyPI doesn't even install).
"""
# These first two imports are not used, but are needed to get around an
# irritating Python bug that can crop up when using ./setup.py test.
# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html
try:
import multiprocessing # noqa
except ImportError:
pass
import logging # noqa
from collections import defaultdict
import os
import re
import sys
import traceback
import distutils.ccompiler
from distutils import errors
from distutils import log
import pkg_resources
from setuptools import dist as st_dist
from setuptools import extension
try:
import ConfigParser as configparser
except ImportError:
import configparser
from pbr import extra_files
import pbr.hooks
# A simplified RE for this; just checks that the line ends with version
# predicates in ()
_VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$')
# Mappings from setup() keyword arguments to setup.cfg options;
# The values are (section, option) tuples, or simply (section,) tuples if
# the option has the same name as the setup() argument
D1_D2_SETUP_ARGS = {
"name": ("metadata",),
"version": ("metadata",),
"author": ("metadata",),
"author_email": ("metadata",),
"maintainer": ("metadata",),
"maintainer_email": ("metadata",),
"url": ("metadata", "home_page"),
"project_urls": ("metadata",),
"description": ("metadata", "summary"),
"keywords": ("metadata",),
"long_description": ("metadata", "description"),
"long_description_content_type": ("metadata", "description_content_type"),
"download_url": ("metadata",),
"classifiers": ("metadata", "classifier"),
"platforms": ("metadata", "platform"), # **
"license": ("metadata",),
# Use setuptools install_requires, not
# broken distutils requires
"install_requires": ("metadata", "requires_dist"),
"setup_requires": ("metadata", "setup_requires_dist"),
"python_requires": ("metadata",),
"provides": ("metadata", "provides_dist"), # **
"provides_extras": ("metadata",),
"obsoletes": ("metadata", "obsoletes_dist"), # **
"package_dir": ("files", 'packages_root'),
"packages": ("files",),
"package_data": ("files",),
"namespace_packages": ("files",),
"data_files": ("files",),
"scripts": ("files",),
"py_modules": ("files", "modules"), # **
"cmdclass": ("global", "commands"),
# Not supported in distutils2, but provided for
# backwards compatibility with setuptools
"use_2to3": ("backwards_compat", "use_2to3"),
"zip_safe": ("backwards_compat", "zip_safe"),
"tests_require": ("backwards_compat", "tests_require"),
"dependency_links": ("backwards_compat",),
"include_package_data": ("backwards_compat",),
}
# setup() arguments that can have multiple values in setup.cfg
MULTI_FIELDS = ("classifiers",
"platforms",
"install_requires",
"provides",
"obsoletes",
"namespace_packages",
"packages",
"package_data",
"data_files",
"scripts",
"py_modules",
"dependency_links",
"setup_requires",
"tests_require",
"keywords",
"cmdclass",
"provides_extras")
# setup() arguments that can have mapping values in setup.cfg
MAP_FIELDS = ("project_urls",)
# setup() arguments that contain boolean values
BOOL_FIELDS = ("use_2to3", "zip_safe", "include_package_data")
CSV_FIELDS = ()
def resolve_name(name):
"""Resolve a name like ``module.object`` to an object and return it.
Raise ImportError if the module or name is not found.
"""
parts = name.split('.')
cursor = len(parts) - 1
module_name = parts[:cursor]
attr_name = parts[-1]
while cursor > 0:
try:
ret = __import__('.'.join(module_name), fromlist=[attr_name])
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
attr_name = parts[cursor]
ret = ''
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
def cfg_to_args(path='setup.cfg', script_args=()):
"""Distutils2 to distutils1 compatibility util.
This method uses an existing setup.cfg to generate a dictionary of
keywords that can be used by distutils.core.setup(kwargs**).
:param path:
The setup.cfg path.
:param script_args:
List of commands setup.py was called with.
:raises DistutilsFileError:
When the setup.cfg file is not found.
"""
# The method source code really starts here.
if sys.version_info >= (3, 2):
parser = configparser.ConfigParser()
else:
parser = configparser.SafeConfigParser()
if not os.path.exists(path):
raise errors.DistutilsFileError("file '%s' does not exist" %
os.path.abspath(path))
try:
parser.read(path, encoding='utf-8')
except TypeError:
# Python 2 doesn't accept the encoding kwarg
parser.read(path)
config = {}
for section in parser.sections():
config[section] = dict()
for k, value in parser.items(section):
config[section][k.replace('-', '_')] = value
# Run setup_hooks, if configured
setup_hooks = has_get_option(config, 'global', 'setup_hooks')
package_dir = has_get_option(config, 'files', 'packages_root')
# Add the source package directory to sys.path in case it contains
# additional hooks, and to make sure it's on the path before any existing
# installations of the package
if package_dir:
package_dir = os.path.abspath(package_dir)
sys.path.insert(0, package_dir)
try:
if setup_hooks:
setup_hooks = [
hook for hook in split_multiline(setup_hooks)
if hook != 'pbr.hooks.setup_hook']
for hook in setup_hooks:
hook_fn = resolve_name(hook)
try:
hook_fn(config)
except SystemExit:
                    log.error('setup hook %s terminated the installation' % hook)
except Exception:
e = sys.exc_info()[1]
log.error('setup hook %s raised exception: %s\n' %
(hook, e))
log.error(traceback.format_exc())
sys.exit(1)
# Run the pbr hook
pbr.hooks.setup_hook(config)
kwargs = setup_cfg_to_setup_kwargs(config, script_args)
# Set default config overrides
kwargs['include_package_data'] = True
kwargs['zip_safe'] = False
register_custom_compilers(config)
ext_modules = get_extension_modules(config)
if ext_modules:
kwargs['ext_modules'] = ext_modules
entry_points = get_entry_points(config)
if entry_points:
kwargs['entry_points'] = entry_points
# Handle the [files]/extra_files option
files_extra_files = has_get_option(config, 'files', 'extra_files')
if files_extra_files:
extra_files.set_extra_files(split_multiline(files_extra_files))
finally:
# Perform cleanup if any paths were added to sys.path
if package_dir:
sys.path.pop(0)
return kwargs
def setup_cfg_to_setup_kwargs(config, script_args=()):
"""Convert config options to kwargs.
Processes the setup.cfg options and converts them to arguments accepted
by setuptools' setup() function.
"""
kwargs = {}
# Temporarily holds install_requires and extra_requires while we
# parse env_markers.
all_requirements = {}
for arg in D1_D2_SETUP_ARGS:
if len(D1_D2_SETUP_ARGS[arg]) == 2:
# The distutils field name is different than distutils2's.
section, option = D1_D2_SETUP_ARGS[arg]
elif len(D1_D2_SETUP_ARGS[arg]) == 1:
            # The distutils field name is the same as distutils2's.
section = D1_D2_SETUP_ARGS[arg][0]
option = arg
in_cfg_value = has_get_option(config, section, option)
if not in_cfg_value:
# There is no such option in the setup.cfg
if arg == "long_description":
in_cfg_value = has_get_option(config, section,
"description_file")
if in_cfg_value:
in_cfg_value = split_multiline(in_cfg_value)
value = ''
for filename in in_cfg_value:
description_file = open(filename)
try:
value += description_file.read().strip() + '\n\n'
finally:
description_file.close()
in_cfg_value = value
else:
continue
if arg in CSV_FIELDS:
in_cfg_value = split_csv(in_cfg_value)
if arg in MULTI_FIELDS:
in_cfg_value = split_multiline(in_cfg_value)
elif arg in MAP_FIELDS:
in_cfg_map = {}
for i in split_multiline(in_cfg_value):
k, v = i.split('=', 1)
in_cfg_map[k.strip()] = v.strip()
in_cfg_value = in_cfg_map
elif arg in BOOL_FIELDS:
# Provide some flexibility here...
if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'):
in_cfg_value = True
else:
in_cfg_value = False
if in_cfg_value:
if arg in ('install_requires', 'tests_require'):
# Replaces PEP345-style version specs with the sort expected by
# setuptools
in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred)
for pred in in_cfg_value]
if arg == 'install_requires':
# Split install_requires into package,env_marker tuples
# These will be re-assembled later
install_requires = []
requirement_pattern = (
r'(?P<package>[^;]*);?(?P<env_marker>[^#]*?)(?:\s*#.*)?$')
for requirement in in_cfg_value:
m = re.match(requirement_pattern, requirement)
requirement_package = m.group('package').strip()
env_marker = m.group('env_marker').strip()
install_requires.append((requirement_package, env_marker))
all_requirements[''] = install_requires
elif arg == 'package_dir':
in_cfg_value = {'': in_cfg_value}
elif arg in ('package_data', 'data_files'):
data_files = {}
firstline = True
prev = None
for line in in_cfg_value:
if '=' in line:
key, value = line.split('=', 1)
key, value = (key.strip(), value.strip())
if key in data_files:
# Multiple duplicates of the same package name;
# this is for backwards compatibility of the old
# format prior to d2to1 0.2.6.
prev = data_files[key]
prev.extend(value.split())
else:
prev = data_files[key.strip()] = value.split()
elif firstline:
raise errors.DistutilsOptionError(
'malformed package_data first line %r (misses '
'"=")' % line)
else:
prev.extend(line.strip().split())
firstline = False
if arg == 'data_files':
# the data_files value is a pointlessly different structure
# from the package_data value
data_files = data_files.items()
in_cfg_value = data_files
elif arg == 'cmdclass':
cmdclass = {}
dist = st_dist.Distribution()
for cls_name in in_cfg_value:
cls = resolve_name(cls_name)
cmd = cls(dist)
cmdclass[cmd.get_command_name()] = cls
in_cfg_value = cmdclass
kwargs[arg] = in_cfg_value
# Transform requirements with embedded environment markers to
# setuptools' supported marker-per-requirement format.
#
# install_requires are treated as a special case of extras, before
# being put back in the expected place
#
# fred =
# foo:marker
# bar
# -> {'fred': ['bar'], 'fred:marker':['foo']}
if 'extras' in config:
requirement_pattern = (
r'(?P<package>[^:]*):?(?P<env_marker>[^#]*?)(?:\s*#.*)?$')
extras = config['extras']
# Add contents of test-requirements, if any, into an extra named
# 'test' if one does not already exist.
if 'test' not in extras:
from pbr import packaging
extras['test'] = "\n".join(packaging.parse_requirements(
packaging.TEST_REQUIREMENTS_FILES)).replace(';', ':')
for extra in extras:
extra_requirements = []
requirements = split_multiline(extras[extra])
for requirement in requirements:
m = re.match(requirement_pattern, requirement)
extras_value = m.group('package').strip()
env_marker = m.group('env_marker')
extra_requirements.append((extras_value, env_marker))
all_requirements[extra] = extra_requirements
# Transform the full list of requirements into:
# - install_requires, for those that have no extra and no
# env_marker
# - named extras, for those with an extra name (which may include
# an env_marker)
# - and as a special case, install_requires with an env_marker are
# treated as named extras where the name is the empty string
extras_require = {}
for req_group in all_requirements:
for requirement, env_marker in all_requirements[req_group]:
if env_marker:
extras_key = '%s:(%s)' % (req_group, env_marker)
# We do not want to poison wheel creation with locally
# evaluated markers. sdists always re-create the egg_info
# and as such do not need guarded, and pip will never call
# multiple setup.py commands at once.
if 'bdist_wheel' not in script_args:
try:
if pkg_resources.evaluate_marker('(%s)' % env_marker):
extras_key = req_group
except SyntaxError:
log.error(
"Marker evaluation failed, see the following "
"error. For more information see: "
"http://docs.openstack.org/"
"pbr/latest/user/using.html#environment-markers"
)
raise
else:
extras_key = req_group
extras_require.setdefault(extras_key, []).append(requirement)
kwargs['install_requires'] = extras_require.pop('', [])
kwargs['extras_require'] = extras_require
return kwargs
def register_custom_compilers(config):
"""Handle custom compilers.
This has no real equivalent in distutils, where additional compilers could
only be added programmatically, so we have to hack it in somehow.
"""
compilers = has_get_option(config, 'global', 'compilers')
if compilers:
compilers = split_multiline(compilers)
for compiler in compilers:
compiler = resolve_name(compiler)
# In distutils2 compilers these class attributes exist; for
# distutils1 we just have to make something up
if hasattr(compiler, 'name'):
name = compiler.name
else:
name = compiler.__name__
if hasattr(compiler, 'description'):
desc = compiler.description
else:
desc = 'custom compiler %s' % name
module_name = compiler.__module__
# Note; this *will* override built in compilers with the same name
# TODO(embray): Maybe display a warning about this?
cc = distutils.ccompiler.compiler_class
cc[name] = (module_name, compiler.__name__, desc)
# HACK!!!! Distutils assumes all compiler modules are in the
# distutils package
sys.modules['distutils.' + module_name] = sys.modules[module_name]
def get_extension_modules(config):
"""Handle extension modules"""
EXTENSION_FIELDS = ("sources",
"include_dirs",
"define_macros",
"undef_macros",
"library_dirs",
"libraries",
"runtime_library_dirs",
"extra_objects",
"extra_compile_args",
"extra_link_args",
"export_symbols",
"swig_opts",
"depends")
ext_modules = []
for section in config:
if ':' in section:
labels = section.split(':', 1)
else:
# Backwards compatibility for old syntax; don't use this though
labels = section.split('=', 1)
labels = [l.strip() for l in labels]
if (len(labels) == 2) and (labels[0] == 'extension'):
ext_args = {}
for field in EXTENSION_FIELDS:
value = has_get_option(config, section, field)
# All extension module options besides name can have multiple
# values
if not value:
continue
value = split_multiline(value)
if field == 'define_macros':
macros = []
for macro in value:
macro = macro.split('=', 1)
if len(macro) == 1:
macro = (macro[0].strip(), None)
else:
macro = (macro[0].strip(), macro[1].strip())
macros.append(macro)
value = macros
ext_args[field] = value
if ext_args:
if 'name' not in ext_args:
ext_args['name'] = labels[1]
ext_modules.append(extension.Extension(ext_args.pop('name'),
**ext_args))
return ext_modules
def get_entry_points(config):
"""Process the [entry_points] section of setup.cfg.
Processes setup.cfg to handle setuptools entry points. This is, of course,
not a standard feature of distutils2/packaging, but as there is not
currently a standard alternative in packaging, we provide support for them.
"""
if 'entry_points' not in config:
return {}
return dict((option, split_multiline(value))
for option, value in config['entry_points'].items())
def has_get_option(config, section, option):
if section in config and option in config[section]:
return config[section][option]
else:
return False
def split_multiline(value):
"""Special behaviour when we have a multi line options"""
value = [element for element in
(line.strip() for line in value.split('\n'))
if element and not element.startswith('#')]
return value
def split_csv(value):
"""Special behaviour when we have a comma separated options"""
value = [element for element in
(chunk.strip() for chunk in value.split(','))
if element]
return value
# The following classes are used to hack Distribution.command_options a bit
class DefaultGetDict(defaultdict):
"""Like defaultdict, but get() also sets and returns the default value."""
def get(self, key, default=None):
if default is None:
default = self.default_factory()
return super(DefaultGetDict, self).setdefault(key, default)
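# Illustrative behaviour of DefaultGetDict (comment-only sketch, not part of
# pbr itself): unlike dict.get(), a miss stores the default and returns it.
#   opts = DefaultGetDict(dict)
#   build_opts = opts.get('build_py')   # creates and stores {} on first access
#   build_opts['optimize'] = ('setup.cfg', '1')
#   assert opts['build_py'] is build_opts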
|
ctrlaltdel/neutrinator
|
vendor/pbr/util.py
|
Python
|
gpl-3.0
| 23,333
|
r"""
**openpnm.materials**
----
This module provides a library of preconfigured Network-Geometry combinations.
In most case the topology and geometry cannot be considered in isolation.
This module provides recipes that create both the Network and Geometry objects
simultaneously to ensure sensible correspondence between things like lattice
spacing and pore sizes. Some of the classes in this module have a significant
amount of custom code (e.g. ``VoronoiFibers``), while others are simple
recipes that combine existing models in OpenPNM (e.g. ``BereaCubic``).
The table below gives a list of available Material generators:
+---------------------+-------------------------------------------------------+
| Material Name | Description |
+=====================+=======================================================+
| VoronoiFibers | Resembles a fibrous paper or mat with straight |
| | intersecting fibers. |
+---------------------+-------------------------------------------------------+
"""
from .VoronoiFibers import VoronoiFibers
from .BundleOfTubes import BundleOfTubes
|
TomTranter/OpenPNM
|
openpnm/materials/__init__.py
|
Python
|
mit
| 1,200
|
"""Find modules used by a script, using introspection."""
import dis
import imp
import importlib.machinery
import marshal
import os
import sys
import types
import struct
# XXX Clean up once str8's cstor matches bytes.
LOAD_CONST = bytes([dis.opname.index('LOAD_CONST')])
IMPORT_NAME = bytes([dis.opname.index('IMPORT_NAME')])
STORE_NAME = bytes([dis.opname.index('STORE_NAME')])
STORE_GLOBAL = bytes([dis.opname.index('STORE_GLOBAL')])
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = bytes([dis.HAVE_ARGUMENT])
# ModuleFinder does a good job of simulating Python's import machinery, but it
# cannot handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping to lists of paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
replacePackageMap[oldname] = newname
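# Illustrative calls for the two hooks above (hypothetical package names,
# shown only for documentation; they are not executed here):
#   AddPackagePath("mypkg", "/opt/plugins/mypkg")
#   ReplacePackage("_xmlplus", "xml")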
class Module:
def __init__(self, name, file=None, path=None):
self.__name__ = name
self.__file__ = file
self.__path__ = path
self.__code__ = None
# The set of global names that are assigned to in the module.
# This includes those names imported through starimports of
# Python modules.
self.globalnames = {}
# The set of starimports this module did that could not be
# resolved, ie. a starimport from a non-Python module.
self.starimports = {}
def __repr__(self):
s = "Module(%r" % (self.__name__,)
if self.__file__ is not None:
s = s + ", %r" % (self.__file__,)
if self.__path__ is not None:
s = s + ", %r" % (self.__path__,)
s = s + ")"
return s
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print(" ", end=' ')
print(str, end=' ')
for arg in args:
print(repr(arg), end=' ')
print()
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
with open(pathname) as fp:
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
with open(pathname) as fp:
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
def determine_parent(self, caller, level=-1):
self.msgin(4, "determine_parent", caller, level)
if not caller or level == 0:
self.msgout(4, "determine_parent -> None")
return None
pname = caller.__name__
if level >= 1: # relative import
if caller.__path__:
level -= 1
if level == 0:
parent = self.modules[pname]
assert parent is caller
self.msgout(4, "determine_parent ->", parent)
return parent
if pname.count(".") < level:
raise ImportError("relative importpath too deep")
pname = ".".join(pname.split(".")[:-level])
parent = self.modules[pname]
self.msgout(4, "determine_parent ->", parent)
return parent
if caller.__path__:
parent = self.modules[pname]
assert caller is parent
self.msgout(4, "determine_parent ->", parent)
return parent
if '.' in pname:
i = pname.rfind('.')
pname = pname[:i]
parent = self.modules[pname]
assert parent.__name__ == pname
self.msgout(4, "determine_parent ->", parent)
return parent
self.msgout(4, "determine_parent -> None")
return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError("No module named " + qname)
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError("No module named " + mname)
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
self.msg(4, "ensure_fromlist", m, fromlist, recursive)
for sub in fromlist:
if sub == "*":
if not recursive:
all = self.find_all_submodules(m)
if all:
self.ensure_fromlist(m, all, 1)
elif not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_module(sub, subname, m)
if not submod:
raise ImportError("No module named " + subname)
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
# 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
suffixes = []
suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
for dir in m.__path__:
try:
names = os.listdir(dir)
except os.error:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
def import_module(self, partname, fqname, parent):
self.msgin(3, "import_module", partname, fqname, parent)
try:
m = self.modules[fqname]
except KeyError:
pass
else:
self.msgout(3, "import_module ->", m)
return m
if fqname in self.badmodules:
self.msgout(3, "import_module -> None")
return None
if parent and parent.__path__ is None:
self.msgout(3, "import_module -> None")
return None
try:
fp, pathname, stuff = self.find_module(partname,
parent and parent.__path__, parent)
except ImportError:
self.msgout(3, "import_module ->", None)
return None
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
if fp:
fp.close()
if parent:
setattr(parent, partname, m)
self.msgout(3, "import_module ->", m)
return m
def load_module(self, fqname, fp, pathname, file_info):
suffix, mode, type = file_info
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if type == imp.PKG_DIRECTORY:
m = self.load_package(fqname, pathname)
self.msgout(2, "load_module ->", m)
return m
if type == imp.PY_SOURCE:
co = compile(fp.read()+'\n', pathname, 'exec')
elif type == imp.PY_COMPILED:
if fp.read(4) != imp.get_magic():
self.msgout(2, "raise ImportError: Bad magic number", pathname)
raise ImportError("Bad magic number in %s" % pathname)
fp.read(8) # Skip mtime and size.
co = marshal.load(fp)
else:
co = None
m = self.add_module(fqname)
m.__file__ = pathname
if co:
if self.replace_paths:
co = self.replace_paths_in_code(co)
m.__code__ = co
self.scan_code(co, m)
self.msgout(2, "load_module ->", m)
return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
# wrapper for self.import_hook() that won't raise ImportError
if name in self.badmodules:
self._add_badmodule(name, caller)
return
try:
self.import_hook(name, caller, level=level)
except ImportError as msg:
self.msg(2, "ImportError:", str(msg))
self._add_badmodule(name, caller)
else:
if fromlist:
for sub in fromlist:
if sub in self.badmodules:
self._add_badmodule(sub, caller)
continue
try:
self.import_hook(name, caller, [sub], level=level)
except ImportError as msg:
self.msg(2, "ImportError:", str(msg))
fullname = name + "." + sub
self._add_badmodule(fullname, caller)
def scan_opcodes_25(self, co,
unpack = struct.unpack):
# Scan the code, and yield 'interesting' opcode combinations
# Python 2.5 version (has absolute and relative imports)
code = co.co_code
names = co.co_names
consts = co.co_consts
LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
while code:
c = bytes([code[0]])
if c in STORE_OPS:
oparg, = unpack('<H', code[1:3])
yield "store", (names[oparg],)
code = code[3:]
continue
if code[:9:3] == LOAD_LOAD_AND_IMPORT:
oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
level = consts[oparg_1]
if level == 0: # absolute import
yield "absolute_import", (consts[oparg_2], names[oparg_3])
else: # relative import
yield "relative_import", (level, consts[oparg_2], names[oparg_3])
code = code[9:]
continue
if c >= HAVE_ARGUMENT:
code = code[3:]
else:
code = code[1:]
def scan_code(self, co, m):
code = co.co_code
scanner = self.scan_opcodes_25
for what, args in scanner(co):
if what == "store":
name, = args
m.globalnames[name] = 1
elif what == "absolute_import":
fromlist, name = args
have_star = 0
if fromlist is not None:
if "*" in fromlist:
have_star = 1
fromlist = [f for f in fromlist if f != "*"]
self._safe_import_hook(name, m, fromlist, level=0)
if have_star:
# We've encountered an "import *". If it is a Python module,
# the code has already been parsed and we can suck out the
# global names.
mm = None
if m.__path__:
# At this point we don't know whether 'name' is a
# submodule of 'm' or a global module. Let's just try
# the full name first.
mm = self.modules.get(m.__name__ + "." + name)
if mm is None:
mm = self.modules.get(name)
if mm is not None:
m.globalnames.update(mm.globalnames)
m.starimports.update(mm.starimports)
if mm.__code__ is None:
m.starimports[name] = 1
else:
m.starimports[name] = 1
elif what == "relative_import":
level, fromlist, name = args
if name:
self._safe_import_hook(name, m, fromlist, level=level)
else:
parent = self.determine_parent(m, level=level)
self._safe_import_hook(parent.__name__, None, fromlist, level=0)
else:
# We don't expect anything else from the generator.
raise RuntimeError(what)
for c in co.co_consts:
if isinstance(c, type(co)):
self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
try:
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
return m
finally:
if fp:
fp.close()
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
def find_module(self, name, path, parent=None):
if parent is not None:
# assert path is not None
fullname = parent.__name__+'.'+name
else:
fullname = name
if fullname in self.excludes:
self.msgout(3, "find_module -> Excluded", fullname)
raise ImportError(name)
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = self.path
return imp.find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print()
print(" %-25s %s" % ("Name", "File"))
print(" %-25s %s" % ("----", "----"))
# Print modules found
keys = sorted(self.modules.keys())
for key in keys:
m = self.modules[key]
if m.__path__:
print("P", end=' ')
else:
print("m", end=' ')
print("%-25s" % key, m.__file__ or "")
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print()
print("Missing modules:")
for name in missing:
mods = sorted(self.badmodules[name].keys())
print("?", name, "imported from", ', '.join(mods))
# Print modules that may be missing, but then again, maybe not...
if maybe:
print()
print("Submodules that appear to be missing, but could also be", end=' ')
print("global names in the parent package:")
for name in maybe:
mods = sorted(self.badmodules[name].keys())
print("?", name, "imported from", ', '.join(mods))
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
def any_missing_maybe(self):
"""Return two lists, one with modules that are certainly missing
and one with modules that *may* be missing. The latter names could
either be submodules *or* just global names in the package.
The reason it can't always be determined is that it's impossible to
tell which names are imported when "from module import *" is done
with an extension module, short of actually importing it.
"""
missing = []
maybe = []
for name in self.badmodules:
if name in self.excludes:
continue
i = name.rfind(".")
if i < 0:
missing.append(name)
continue
subname = name[i+1:]
pkgname = name[:i]
pkg = self.modules.get(pkgname)
if pkg is not None:
if pkgname in self.badmodules[name]:
# The package tried to import this module itself and
# failed. It's definitely missing.
missing.append(name)
elif subname in pkg.globalnames:
# It's a global in the package: definitely not missing.
pass
elif pkg.starimports:
# It could be missing, but the package did an "import *"
# from a non-Python module, so we simply can't be sure.
maybe.append(name)
else:
# It's not a global in the package, the package didn't
# do funny star imports, it's very likely to be missing.
# The symbol could be inserted into the package from the
# outside, but since that's not good style we simply list
# it missing.
missing.append(name)
else:
missing.append(name)
missing.sort()
maybe.sort()
return missing, maybe
def replace_paths_in_code(self, co):
new_filename = original_filename = os.path.normpath(co.co_filename)
for f, r in self.replace_paths:
if original_filename.startswith(f):
new_filename = r + original_filename[len(f):]
break
if self.debug and original_filename not in self.processed_paths:
if new_filename != original_filename:
self.msgout(2, "co_filename %r changed to %r" \
% (original_filename,new_filename,))
else:
self.msgout(2, "co_filename %r remains unchanged" \
% (original_filename,))
self.processed_paths.append(original_filename)
consts = list(co.co_consts)
for i in range(len(consts)):
if isinstance(consts[i], type(co)):
consts[i] = self.replace_paths_in_code(consts[i])
return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, tuple(consts), co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
def test():
# Parse command line
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
except getopt.error as msg:
print(msg)
return
# Process options
debug = 1
domods = 0
addpath = []
exclude = []
for o, a in opts:
if o == '-d':
debug = debug + 1
if o == '-m':
domods = 1
if o == '-p':
addpath = addpath + a.split(os.pathsep)
if o == '-q':
debug = 0
if o == '-x':
exclude.append(a)
# Provide default arguments
if not args:
script = "hello.py"
else:
script = args[0]
# Set the path based on sys.path and the script directory
path = sys.path[:]
path[0] = os.path.dirname(script)
path = addpath + path
if debug > 1:
print("path:")
for item in path:
print(" ", repr(item))
# Create the module finder and turn its crank
mf = ModuleFinder(path, debug, exclude)
for arg in args[1:]:
if arg == '-m':
domods = 1
continue
if domods:
if arg[-2:] == '.*':
mf.import_hook(arg[:-2], None, ["*"])
else:
mf.import_hook(arg)
else:
mf.load_file(arg)
mf.run_script(script)
mf.report()
return mf # for -i debugging
if __name__ == '__main__':
try:
mf = test()
except KeyboardInterrupt:
print("\n[interrupted]")
|
timm/timmnix
|
pypy3-v5.5.0-linux64/lib-python/3/modulefinder.py
|
Python
|
mit
| 23,198
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import contextlib
import warnings
from ...tests.helper import catch_warnings
@contextlib.contextmanager
def ignore_non_integer_warning():
# We need to ignore this warning on Scipy < 0.14.
# When our minimum version of Scipy is bumped up, this can be
# removed.
with catch_warnings():
warnings.filterwarnings(
"always", "using a non-integer number instead of an integer "
"will result in an error in the future", DeprecationWarning)
yield
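# Illustrative usage (comment-only sketch; the call below stands for any Scipy
# routine that emits the non-integer-index DeprecationWarning on Scipy < 0.14):
#   with ignore_non_integer_warning():
#       some_scipy_call_with_float_indices()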
|
DougBurke/astropy
|
astropy/modeling/tests/utils.py
|
Python
|
bsd-3-clause
| 586
|
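A hedged usage sketch for the ignore_non_integer_warning helper above: wrapping a SciPy call so that the deprecation warning about non-integer numbers is suppressed on SciPy < 0.14. The scipy.signal.resample call and the sample array are purely illustrative and not taken from the file above.
import numpy as np
from scipy import signal  # assumed available in the test environment
data = np.arange(10.0)
with ignore_non_integer_warning():
    # Inside the block the "non-integer number instead of an integer"
    # DeprecationWarning is filtered; outside it, warnings behave normally.
    resampled = signal.resample(data, 5)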
#!/usr/bin/env python
import unittest
from ct.crypto import error
from ct.crypto.asn1 import x509_time
class TimeTest(unittest.TestCase):
def verify_time(self, time_struct, year, month, day, hour, minute, sec):
self.assertEqual(year, time_struct.tm_year)
self.assertEqual(month, time_struct.tm_mon)
self.assertEqual(day, time_struct.tm_mday)
self.assertEqual(hour, time_struct.tm_hour)
self.assertEqual(minute, time_struct.tm_min)
self.assertEqual(sec, time_struct.tm_sec)
def test_time(self):
t = x509_time.UTCTime(value="130822153902Z").gmtime()
self.verify_time(t, 2013, 8, 22, 15, 39, 2)
t = x509_time.GeneralizedTime(value="20130822153902Z").gmtime()
self.verify_time(t, 2013, 8, 22, 15, 39, 2)
def test_utc_time_1900(self):
t = x509_time.UTCTime(value="500822153902Z").gmtime()
self.verify_time(t, 1950, 8, 22, 15, 39, 2)
def test_time_invalid(self):
self.assertRaises(error.ASN1Error, x509_time.UTCTime,
value="131322153902Z")
self.assertRaises(error.ASN1Error, x509_time.UTCTime,
value="201301322153902Z")
t = x509_time.UTCTime(value="131322153902Z", strict=False)
self.assertRaises(error.ASN1Error, t.gmtime)
t = x509_time.UTCTime(value="201301322153902Z", strict=False)
self.assertRaises(error.ASN1Error, t.gmtime)
def test_time_no_seconds(self):
t = x509_time.UTCTime(value="0001010000Z").gmtime()
self.verify_time(t, 2000, 1, 1, 0, 0, 0)
def test_time_alt_gmt(self):
t = x509_time.UTCTime(value="121214093107+0000").gmtime()
self.verify_time(t, 2012, 12, 14, 9, 31, 7)
def test_time_alt_tz(self):
"""
Test parsing a timezone with old +HHMM offset format
Right now, it is ignored.
"""
t = x509_time.UTCTime(value="121214093107+1234").gmtime()
self.verify_time(t, 2012, 12, 14, 9, 31, 7)
def test_time_missing_z(self):
        self.assertRaises(error.ASN1Error, x509_time.UTCTime, value="130822153902", strict=True)
t2 = x509_time.UTCTime(value="130822153902", strict=False).gmtime()
self.verify_time(t2, 2013, 8, 22, 15, 39, 2)
if __name__ == "__main__":
unittest.main()
|
php-coder/origin
|
vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/x509_time_test.py
|
Python
|
apache-2.0
| 2,308
|
"""
Find the nth digit of the infinite integer sequence 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ...
Note:
n is positive and will fit within the range of a 32-bit signed integer (n < 2^31).
Example 1:
Input:
3
Output:
3
Example 2:
Input:
11
Output:
0
Explanation:
The 11th digit of the sequence 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ... is a 0, which is part of the number 10.
"""
__author__ = 'Daniel'
class Solution(object):
def findNthDigit(self, n):
"""
Math, quotient and remainder
:type n: int
:rtype: int
"""
digit_cnt = 1
num_cnt = 9
while n > digit_cnt * num_cnt:
n -= digit_cnt * num_cnt
digit_cnt += 1
num_cnt *= 10
        n -= 1  # convert to a 0-based offset; without the -1 we would step past the target digit
        q, r = n // digit_cnt, n % digit_cnt
        target = num_cnt // 9 + q  # first number with digit_cnt digits, advanced by the quotient
return int(str(target)[r])
|
algorhythms/LeetCode
|
400 Nth Digit.py
|
Python
|
mit
| 919
|
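A short trace of the quotient/remainder step in findNthDigit above, following the second example from the docstring (n = 11), plus two asserts that exercise both documented examples:
# n = 11: the 1-digit block 1..9 contributes 1 * 9 = 9 digits, so the loop
# leaves n = 2 with digit_cnt = 2 and num_cnt = 90.  After n -= 1, n = 1.
# q, r = 1 // 2, 1 % 2 = (0, 1); target = 90 // 9 + 0 = 10; str(10)[1] == '0'.
s = Solution()
assert s.findNthDigit(3) == 3
assert s.findNthDigit(11) == 0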
# Merge the copy identifiers from the blastn and blastx result files and
# print the unique entries, one per line.
blastn = open('LTRs_5copies.txt', 'r')
blastx = open('LTR.blastx.copies.txt', 'r')
biglist = []
for line in blastn:
    line = line.strip()
    biglist.append(line)
for elto in blastx:
    elto = elto.strip()
    biglist.append(elto)
# A set keeps only the unique identifiers collected from both files.
unicos = set(biglist)
for item in unicos:
    print item
|
stajichlab/localizaTE
|
scripts/joinlists.py
|
Python
|
mit
| 270
|
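The merge-and-deduplicate step in the script above can also be written with set unions and context-managed file handles; a minimal sketch in the same Python 2 style, assuming the same two input files exist:
with open('LTRs_5copies.txt') as blastn, open('LTR.blastx.copies.txt') as blastx:
    # Strip each line and take the union of the two files' identifiers.
    unicos = {line.strip() for line in blastn} | {line.strip() for line in blastx}
for item in unicos:
    print item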
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
class SaleAdvancePaymentInv(models.TransientModel):
_name = "sale.advance.payment.inv"
_description = "Sales Advance Payment Invoice"
@api.model
def _count(self):
return len(self._context.get('active_ids', []))
@api.model
def _get_advance_payment_method(self):
if self._count() == 1:
sale_obj = self.env['sale.order']
order = sale_obj.browse(self._context.get('active_ids'))[0]
if all([line.product_id.invoice_policy == 'order' for line in order.order_line]):
return 'all'
return 'delivered'
advance_payment_method = fields.Selection([
('delivered', 'Invoiceable lines'),
('all', 'Invoiceable lines (deduct down payments)'),
('percentage', 'Down payment (percentage)'),
('fixed', 'Down payment (fixed amount)')
], string='What do you want to invoice?', default=_get_advance_payment_method, required=True)
product_id = fields.Many2one('product.product', string='Down Payment Product', domain=[('type', '=', 'service')],\
default=lambda self: self.env['ir.values'].get_default('sale.config.settings', 'deposit_product_id_setting'))
count = fields.Integer(default=_count, string='# of Orders')
amount = fields.Float('Down Payment Amount', digits=dp.get_precision('Account'), help="The amount to be invoiced in advance, taxes excluded.")
deposit_account_id = fields.Many2one("account.account", string="Income Account", domain=[('deprecated', '=', False)],\
help="Account used for deposits")
deposit_taxes_id = fields.Many2many("account.tax", string="Customer Taxes", help="Taxes used for deposits")
@api.onchange('advance_payment_method')
def onchange_advance_payment_method(self):
if self.advance_payment_method == 'percentage':
return {'value': {'amount':0, 'product_id':False}}
return {}
@api.multi
def _create_invoice(self, order, so_line, amount):
inv_obj = self.env['account.invoice']
ir_property_obj = self.env['ir.property']
account_id = False
if self.product_id.id:
account_id = self.product_id.property_account_income_id.id
if not account_id:
prop = ir_property_obj.get('property_account_income_categ_id', 'product.category')
prop_id = prop and prop.id or False
account_id = order.fiscal_position_id.map_account(prop_id)
if not account_id:
raise UserError(
                        _('There is no income account defined for this product: "%s". You may have to install a chart of accounts from the Accounting app, settings menu.') % \
(self.product_id.name,))
if self.amount <= 0.00:
raise UserError(_('The value of the down payment amount must be positive.'))
if self.advance_payment_method == 'percentage':
amount = order.amount_untaxed * self.amount / 100
name = _("Down payment of %s%%") % (self.amount,)
else:
amount = self.amount
name = _('Down Payment')
invoice = inv_obj.create({
'name': order.client_order_ref or order.name,
'origin': order.name,
'type': 'out_invoice',
'reference': False,
'account_id': order.partner_id.property_account_receivable_id.id,
'partner_id': order.partner_invoice_id.id,
'invoice_line_ids': [(0, 0, {
'name': name,
'origin': order.name,
'account_id': account_id,
'price_unit': amount,
'quantity': 1.0,
'discount': 0.0,
'uom_id': self.product_id.uom_id.id,
'product_id': self.product_id.id,
'sale_line_ids': [(6, 0, [so_line.id])],
'invoice_line_tax_ids': [(6, 0, [x.id for x in self.product_id.taxes_id])],
'account_analytic_id': order.project_id.id or False,
})],
'currency_id': order.pricelist_id.currency_id.id,
'payment_term_id': order.payment_term_id.id,
'fiscal_position_id': order.fiscal_position_id.id or order.partner_id.property_account_position_id.id,
'team_id': order.team_id.id,
})
invoice.compute_taxes()
return invoice
@api.multi
def create_invoices(self):
sale_orders = self.env['sale.order'].browse(self._context.get('active_ids', []))
if self.advance_payment_method == 'delivered':
sale_orders.action_invoice_create()
elif self.advance_payment_method == 'all':
sale_orders.action_invoice_create(final=True)
else:
# Create deposit product if necessary
if not self.product_id:
vals = self._prepare_deposit_product()
self.product_id = self.env['product.product'].create(vals)
self.env['ir.values'].sudo().set_default('sale.config.settings', 'deposit_product_id_setting', self.product_id.id)
sale_line_obj = self.env['sale.order.line']
for order in sale_orders:
if self.advance_payment_method == 'percentage':
amount = order.amount_untaxed * self.amount / 100
else:
amount = self.amount
if self.product_id.invoice_policy != 'order':
raise UserError(_('The product used to invoice a down payment should have an invoice policy set to "Ordered quantities". Please update your deposit product to be able to create a deposit invoice.'))
if self.product_id.type != 'service':
raise UserError(_("The product used to invoice a down payment should be of type 'Service'. Please use another product or update this product."))
so_line = sale_line_obj.create({
'name': _('Advance: %s') % (time.strftime('%m %Y'),),
'price_unit': amount,
'product_uom_qty': 0.0,
'order_id': order.id,
'discount': 0.0,
'product_uom': self.product_id.uom_id.id,
'product_id': self.product_id.id,
'tax_id': [(6, 0, self.product_id.taxes_id.ids)],
})
self._create_invoice(order, so_line, amount)
if self._context.get('open_invoices', False):
return sale_orders.action_view_invoice()
return {'type': 'ir.actions.act_window_close'}
def _prepare_deposit_product(self):
return {
'name': 'Down payment',
'type': 'service',
'invoice_policy': 'order',
'property_account_income_id': self.deposit_account_id.id,
'taxes_id': [(6, 0, self.deposit_taxes_id.ids)],
}
|
QinerTech/QinerApps
|
openerp/addons/sale/wizard/sale_make_invoice_advance.py
|
Python
|
gpl-3.0
| 7,101
|
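A hedged sketch of how the sale.advance.payment.inv wizard above can be invoked from server-side code; the env handle, the order variable and the 30 percent figure are illustrative assumptions, and in practice the wizard is normally launched from the sale order's invoicing button:
# Assumes `env` is an Odoo environment and `order` an existing sale.order record.
wizard = env['sale.advance.payment.inv'].with_context(
    active_ids=order.ids,                    # create_invoices() reads active_ids from the context
).create({
    'advance_payment_method': 'percentage',  # one of the selection keys defined above
    'amount': 30.0,                          # illustrative down payment percentage
})
wizard.create_invoices()                     # creates the down payment invoice for the order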
# Make Makefile
#
# Jul 28, 2005
# Markus Chimani, markus.chimani@cs.uni-dortmund.de
#########################################################
import os, sys, fnmatch, ConfigParser
class versionclass:
def call(self):
return '$(' + self.var + ')'
def library(self):
return self.call() + '/' + libName
def objects(self):
return '$(' +self.var + '_OBJS)'
def path(self):
return '_' + self.var
def bailout(msg):
print msg
print 'Please use the original makeMakefile.config as a template'
sys.exit()
def loadConfig(sect, key, noError = False ):
if config.has_option(sect, key):
v = config.get(sect, key)
print ' [', sect, ']', key, '=', v
return v
else:
if noError:
return None
else:
bailout('Option "' + key + '" in section "' + sect + '" is missing')
#########################################################
# LOAD CONFIGURATION
config = ConfigParser.ConfigParser()
print 'Loading makeMakefile.config...'
try:
config.readfp( open('makeMakefile.config') )
except IOError:
bailout('makeMakefile.config not found')
if not config.has_section('GENERAL'):
bailout('Section "GENERAL" is missing')
if not config.has_section('VERSIONS'):
bailout('Section "VERSIONS" is missing')
if not config.has_section('COIN'):
bailout('Section "COIN" is missing')
if not config.has_section('ABACUS'):
bailout('Section "ABACUS" is missing')
libName = loadConfig('GENERAL', 'libName')
compilerCommand = loadConfig('GENERAL', 'compilerCommand')
compilerParams = loadConfig('GENERAL', 'compilerParams')
libCommand = loadConfig('GENERAL', 'libCommand')
rmCommand = loadConfig('GENERAL', 'rmCommand')
mkdirCommand = loadConfig('GENERAL', 'mkdirCommand')
includeLegacyCode = loadConfig('GENERAL', 'includeLegacyCode').startswith('t')
useOwnLpSolver = loadConfig('GENERAL', 'useOwnLpSolver').startswith('t')
gccMessageLength = loadConfig('GENERAL', 'gccMessageLength', True)
if gccMessageLength == None:
gccMessageLength = ''
else:
gccMessageLength = '-fmessage-length=' + gccMessageLength
compiler = ' '.join( [ compilerCommand, gccMessageLength, compilerParams, ' ' ] )
if useOwnLpSolver:
compiler = ' '.join( [compiler, '-DOGDF_OWN_LPSOLVER', ' '] )
useCoin = loadConfig('COIN', 'useCoin').startswith('t')
if useCoin:
coinIncl = loadConfig('COIN', 'coinIncl')
# coinLib = loadConfig('COIN', 'coinLib')
solver_name = loadConfig('COIN', 'solver_name')
solver_incl = loadConfig('COIN', 'solver_incl')
# solver_lib = loadConfig('COIN', 'solver_lib')
si2 = ' '
if solver_incl.strip() != '':
si2 = '-I'+solver_incl
compiler = ' '.join( [ compiler, '-I'+coinIncl, si2, '-D'+solver_name, '-DUSE_COIN', ' ' ] )
useAbacus = loadConfig('ABACUS', 'useAbacus').startswith('t')
if useAbacus:
abacusDef = loadConfig('ABACUS', 'abacusDef')
abacusIncl = loadConfig('ABACUS', 'abacusIncl')
# abacusLib = loadConfig('ABACUS', 'abacusLib')
compiler = ' '.join( [ compiler, abacusDef, '-I'+abacusIncl, '-DUSE_ABACUS', ' ' ] )
versions = []
V = config.items('VERSIONS')
if len(V) == 0:
bailout('Versions missing')
else:
for ve in V:
v = versionclass()
v.var, v.params = ve
print ' [ VERSIONS ] Name:', v.var, ', Cmd:',v.params
versions.append(v)
print 'Resulting compiler call:', compiler
print 'Finished loading makeMakefile.config'
#########################################################
# ANALYZE & GENERATE
print 'Analyzing sources & generating Makefile...'
makefile = open('Makefile','w')
# add header
header = open('Makefile.header')
headercontent = header.read()
header.close()
makefile.write(headercontent)
# define release & debug
for v in versions:
makefile.write(v.var + ' = ' + v.path() + '\n')
makefile.write('\n')
# just the def. nothing happens yet
def Walk( curdir ):
objs = []
names = os.listdir( curdir)
names.sort()
for name in names:
if name.startswith('.') or name.startswith('_') or (name=='legacy' and not includeLegacyCode):
continue
fullname = os.path.normpath(os.path.join(curdir, name))
if os.path.isdir(fullname) and not os.path.islink(fullname):
objs = objs + Walk( fullname )
else:
for pat in [ '*.c', '*.cpp' ]:
if fnmatch.fnmatch(name, pat):
objfullname = fullname[:-len(pat)+2] + 'o'
objs.append(objfullname)
callForDeps = callForDepsBase + fullname + ' > targetAndDepend'
os.system( callForDeps )
t = open('targetAndDepend')
targetAndDepend = t.read()
t.close()
for v in versions:
# print target&depend: add full path spec, incl. version & ignore extra line
path = v.call() + '/' +fullname[:-len(name)]
makefile.write(path + targetAndDepend[:-1] + '\n')
# ensure folder
makefile.write('\t' + mkdirCommand + ' ' + v.call() + '/' + fullname[:-len(name)-1] + '\n')
# what to do: call the compiler
makefile.write('\t' + compiler + v.params + ' -o ' + v.call() + '/' + objfullname + ' -c ' + fullname + '\n\n')
# pattern found: don't try other suffix
break
return objs
callForDepsBase = compiler + ' -MM '
if useCoin:
callForDepsBase += '-DUSE_COIN -D' + solver_name + ' '
if useAbacus:
callForDepsBase += '-DUSE_ABACUS -DABACUS_COMPILER_GCC '
# Call recursive function
objs = Walk( '.' )
# Clean up
os.system(rmCommand + ' targetAndDepend')
# List all Objs for use in lib-generation and clear
for v in versions:
makefile.write(v.objects()[2:-1] + ' = \\\n')
for o in objs:
makefile.write(v.call() + '/' + o + ' \\\n')
makefile.write('\n')
# generate alls and cleans etc...
for v in versions:
makefile.write(v.var + ': ' + v.library() + '\n\n')
makefile.write(v.library() + ': ' + v.objects() + '\n')
makefile.write('\t' + libCommand + ' -r ' + v.library() + ' ' + v.objects() + ' $(LIBS)\n\n')
makefile.write('clean' + v.var + ':\n')
# makefile.write('\t' + rmCommand + ' ' + v.objects() + ' ' + v.library() + '\n\n')
makefile.write('\t' + rmCommand + ' ' + v.path() + '\n\n')
makefile.close()
print 'Makefile generated'
|
kdbanman/browseRDF
|
tulip-3.8.0-src/thirdparty/OGDF/makeMakefile.py
|
Python
|
gpl-3.0
| 5,988
|
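The script above expects a makeMakefile.config file next to it; the following illustrative config shows the sections and keys that loadConfig reads when both COIN and ABACUS support are disabled (every value here is an assumption, not taken from the original template):
[GENERAL]
libName = libOGDF.a
compilerCommand = g++
compilerParams = -O2 -I.
libCommand = ar
rmCommand = rm -rf
mkdirCommand = mkdir -p
includeLegacyCode = false
useOwnLpSolver = false
[VERSIONS]
release = -O2
debug = -g -DOGDF_DEBUG
[COIN]
useCoin = false
[ABACUS]
useAbacus = false
Each entry in [VERSIONS] becomes a build flavour: the key is used as the Makefile variable and output directory (prefixed with an underscore), and the value is appended to the compiler call for that flavour.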
from math import factorial
from itertools import permutations
import multiprocessing
class Puzzle:
def __init__(self, raw_puzzle):
self.raw_puzzle = raw_puzzle
self.rows = []
self.columns = []
self.boxes = []
self.valid = True
if self.check_raw_puzzle() != True:
self.valid = False
print "[-] Failed raw puzzle check"
return
self.populate()
if check_puzzle(self) != True:
self.valid = False
print "[-] Failed global puzzle check"
return
print "[+] Puzzle valid & populated!"
def check_raw_puzzle(self):
if len(self.raw_puzzle) != 9:
return False
for row in self.raw_puzzle:
if len(row) != 9:
return False
return True
def populate(self):
for row in self.raw_puzzle:
i_row = []
for i in row:
try:
i_row.append(int(i))
except:
i_row.append(i)
self.rows.append(i_row)
for i in range(9):
col = []
for row in self.rows:
col.append(row[i])
self.columns.append(col)
for j in range(3):
for i in range(3):
box = []
for k in range(9):
x = (k % 3) + (i * 3)
y = int(k / 3) + (j * 3)
cell = self.rows[y][x]
box.append(cell)
self.boxes.append(box)
def repopulate(self):
self.columns = []
for i in range(9):
col = []
for row in self.rows:
col.append(row[i])
self.columns.append(col)
self.boxes = []
for j in range(3):
for i in range(3):
box = []
for k in range(9):
x = (k % 3) + (i * 3)
y = int(k / 3) + (j * 3)
cell = self.rows[y][x]
box.append(cell)
self.boxes.append(box)
def debugPrint(self):
for r in self.rows:
print r
print ''
class MultiSolver:
def __init__(self, puzzle):
self.puzzle = puzzle
self.valid_possibilities = []
self.initialize()
def initialize(self):
pass
def check_row_1(self, row):
guess = row_guesses.get_permutation()
while guess != None:
row_guesses.make_attempt(guess)
|
thaReal/sodoku
|
src/multisolver.py
|
Python
|
mit
| 1,965
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import time
import types
import openerp
import openerp.modules.registry
from openerp import SUPERUSER_ID
from openerp import netsvc, pooler, tools
from openerp.osv import fields,osv
from openerp.osv.orm import Model
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools import config
from openerp.tools.translate import _
from openerp.osv.orm import except_orm, browse_record, MAGIC_COLUMNS
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
# break on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool.get("ir.module.module")
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if self.pool.get(model.model):
res[model.id] = self.pool.get(model.model).is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool.get("ir.ui.view").search(cr, uid, [('model', '=', model.model)])
return res
_columns = {
'name': fields.char('Model Description', size=64, translate=True, required=True),
'model': fields.char('Model', size=64, required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type',readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', size=128, string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool.get(model.model)
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
pooler.restart_pool(cr.dbname)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
self.instanciate(cr, user, vals['model'], context)
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
self.pool.get(vals['model'])._auto_init(cr, ctx)
self.pool.get(vals['model'])._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
class x_custom_model(osv.osv):
_custom = True
x_custom_model._name = model
x_custom_model._module = False
a = x_custom_model.create_instance(self.pool, cr)
if not a._columns:
x_name = 'id'
elif 'x_name' in a._columns.keys():
x_name = 'x_name'
else:
x_name = a._columns.keys()[0]
x_custom_model._rec_name = x_name
a._rec_name = x_name
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_columns = {
'name': fields.char('Name', required=True, size=64, select=1),
'model': fields.char('Object Name', size=64, required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation', size=64,
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field', size=64,
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True, size=256),
'ttype': fields.selection(_get_fields_type, 'Field Type',size=64, required=True),
'selection': fields.char('Selection Options',size=128, help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade','Cascade'),('set null','Set NULL')], 'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', size=256, help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'view_load': fields.boolean('View Auto-Load'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', size=128, string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'view_load': 0,
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool.get(field.model)
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
model._columns.pop(field.name, None)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
cr.commit()
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool.get('ir.model').browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool.get('ir.model').search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
if self.pool.get(vals['model']):
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
self.pool[vals['model']]._rec_name = 'x_name'
self.pool.get(vals['model']).__init__(self.pool, cr)
#Added context to _auto_init for special treatment to custom field for select_level
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
self.pool.get(vals['model'])._auto_init(cr, ctx)
self.pool.get(vals['model'])._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
column_rename = None # if set, *one* column can be renamed here
obj = None
models_patch = {} # structs of (obj, [(field, prop, change_to),..])
# data to be updated on the orm model
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', tools.ustr),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', '_domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('view_load', 'view_load', bool),
('selectable', 'selectable', bool),
('select_level', 'select', int),
('selection', 'selection', eval),
]
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
if not (obj and obj._name == item.model):
obj = self.pool.get(item.model)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj:
models_patch.setdefault(obj._name, (obj,[]))
# find out which properties (per model) we need to update
for field_name, field_property, set_fn in model_props:
if field_name in vals:
property_value = set_fn(vals[field_name])
if getattr(obj._columns[item.name], field_property) != property_value:
models_patch[obj._name][1].append((final_name, field_property, property_value))
# our dict is ready here, but no properties are changed so far
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % column_rename[1])
# This is VERY risky, but let us have this feature:
# we want to change the key of column in obj._columns dict
col = column_rename[0]._columns.pop(column_rename[1][1]) # take object out, w/o copy
column_rename[0]._columns[column_rename[1][2]] = col
if models_patch:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context, select=vals.get('select_level', '0'),
update_custom_fields=True)
for __, patch_struct in models_patch.items():
obj = patch_struct[0]
for col_name, col_prop, val in patch_struct[1]:
setattr(obj._columns[col_name], col_prop, val)
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, size=128, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool.get('ir.model.access').check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool.get(model)
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, size=128, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool.get('ir.model.access').check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', size=64, required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, browse_record):
assert model._table_name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
# User root have all accesses
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, browse_record):
assert model._table_name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if not self.pool.get(model_name):
_logger.error('Missing model %s' % (model_name, ))
elif self.pool.get(model_name).is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise except_orm(_('Access Denied'), msg % msg_params)
return r or False
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
object_ = self.pool.get(model)
if object_:
getattr(object_, method)()
#
# Check rights on actions
#
def write(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, *args, **argv)
return res
def create(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, *args, **argv)
return res
def unlink(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, *args, **argv)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def _display_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
result2 = {}
for res in self.browse(cr, uid, ids, context=context):
if res.id:
result.setdefault(res.model, {})
result[res.model][res.res_id] = res.id
result2[res.id] = False
for model in result:
try:
r = dict(self.pool.get(model).name_get(cr, uid, result[model].keys(), context=context))
for key,val in result[model].items():
result2[val] = r.get(key, False)
except:
# some object have no valid name_get implemented, we accept this
pass
return result2
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, size=128, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'display_name': fields.function(_display_name_get, type='char', string='Record Name'),
'model': fields.char('Model Name', required=True, size=64, select=1),
'module': fields.char('Module', required=True, size=64, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'date_update': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
self.doinit = True
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
self.loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
@tools.ormcache()
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
ids = self.search(cr, uid, [('module','=',module), ('name','=', xml_id)])
if not ids:
raise ValueError('No such external ID currently defined in the system: %s.%s' % (module, xml_id))
# the sql constraints ensure us we have only one result
return ids[0]
@tools.ormcache()
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
data_id = self._get_id(cr, uid, module, xml_id)
res = self.read(cr, uid, data_id, ['model', 'res_id'])
if not res['res_id']:
raise ValueError('No such external ID currently defined in the system: %s.%s' % (module, xml_id))
return res['model'], res['res_id']
def get_object(self, cr, uid, module, xml_id, context=None):
"""Returns a browsable record for the given module name and xml_id or raise ValueError if not found"""
res_model, res_id = self.get_object_reference(cr, uid, module, xml_id)
result = self.pool.get(res_model).browse(cr, uid, res_id, context=context)
if not result.exists():
raise ValueError('No record found for unique ID %s.%s. It may have been deleted.' % (module, xml_id))
return result
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self._get_id.clear_cache(self)
self.get_object_reference.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool.get(model)
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
if (not xml_id) and (not self.doinit):
return False
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model in results:
if not real_id2:
self._get_id.clear_cache(self, uid, module, xml_id)
self.get_object_reference.clear_cache(self, uid, module, xml_id)
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, res_id,
[inherit_field])[inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
if not res:
ir_values_obj = pooler.get_pool(cr.dbname).get('ir.values')
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
        ``ids``, along with their corresponding database backing (including
        dropping tables, columns, FKs, etc.), as long as there is no other
        ir.model.data entry holding a reference to them (which indicates that
        they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool.get('ir.model.access').check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
wf_service = netsvc.LocalService("workflow")
for model,res_id in wkf_todo:
try:
wf_service.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool.get(model).unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool.get('ir.model.relation')
ir_module_module = self.pool.get('ir.module.module')
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)])
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
It is meant to remove records that are no longer present in the
updated data. Such records are recognised as the ones with an xml id
and a module in ir_model_data and noupdate set to false, but not
present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s
ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if self.pool.get(model):
_logger.info('Deleting %s@%s', res_id, model)
self.pool.get(model).unlink(cr, uid, [res_id])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gqwest-erp/server
|
openerp/addons/base/ir/ir_model.py
|
Python
|
agpl-3.0
| 55,319
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog for editing the IRC server configuration.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from .Ui_IrcServerEditDialog import Ui_IrcServerEditDialog
class IrcServerEditDialog(QDialog, Ui_IrcServerEditDialog):
"""
Class implementing a dialog for editing the IRC server configuration.
"""
def __init__(self, server, parent=None):
"""
Constructor
@param server reference to the IRC server object (IrcServer)
@param parent reference to the parent widget (QWidget)
"""
super(IrcServerEditDialog, self).__init__(parent)
self.setupUi(self)
self.__okButton = self.buttonBox.button(QDialogButtonBox.Ok)
if server:
self.serverEdit.setText(server.getName())
self.portSpinBox.setValue(server.getPort())
self.passwordEdit.setText(server.getPassword())
self.sslCheckBox.setChecked(server.useSSL())
self.__updateOkButton()
msh = self.minimumSizeHint()
self.resize(max(self.width(), msh.width()), msh.height())
def __updateOkButton(self):
"""
Private method to update the OK button state.
"""
self.__okButton.setEnabled(self.serverEdit.text() != "")
@pyqtSlot(str)
def on_serverEdit_textChanged(self, name):
"""
Private slot handling changes of the server name.
@param name current name of the server (string)
"""
self.__updateOkButton()
def getServer(self):
"""
Public method to create a server object from the data entered into
the dialog.
@return server object (IrcServer)
"""
from .IrcNetworkManager import IrcServer
server = IrcServer(self.serverEdit.text())
server.setPort(self.portSpinBox.value())
server.setPassword(self.passwordEdit.text())
server.setUseSSL(self.sslCheckBox.isChecked())
return server
|
testmana2/test
|
Network/IRC/IrcServerEditDialog.py
|
Python
|
gpl-3.0
| 2,240
|
__author__ = 'labx'
|
lucarebuffi/OASYS1
|
oasys/menus/__init__.py
|
Python
|
gpl-3.0
| 20
|
# -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Task """
import gettext
from inginious.common.base import id_checker
class Task(object):
""" Contains the data for a task """
def __init__(self, course, taskid, content, task_fs, hook_manager, task_problem_types):
"""
Init the task. course is a Course object, taskid the task id, and content is a dictionary containing the data needed to initialize the Task object.
If init_data is None, the data will be taken from the course tasks' directory.
"""
self._course = course
self._taskid = taskid
self._fs = task_fs
self._hook_manager = hook_manager
self._data = content
self._environment = self._data.get('environment', None)
# Response is HTML
self._response_is_html = self._data.get("responseIsHTML", False)
# Limits
self._limits = {"time": 20, "memory": 1024, "disk": 1024}
if "limits" in self._data:
try:
self._limits['time'] = int(self._data["limits"].get("time", 20))
self._limits['hard_time'] = int(self._data["limits"].get("hard_time", 3 * self._limits['time']))
self._limits['memory'] = int(self._data["limits"].get("memory", 1024))
self._limits['disk'] = int(self._data["limits"].get("disk", 1024))
if self._limits['time'] <= 0 or self._limits['hard_time'] <= 0 or self._limits['memory'] <= 0 or self._limits['disk'] <= 0:
raise Exception("Invalid limit")
except:
raise Exception("Invalid limit")
if "problems" not in self._data:
raise Exception("Tasks must have some problems descriptions")
# Network access in grading container?
self._network_grading = self._data.get("network_grading", False)
# i18n
self._translations = {}
translations_fs = self._fs.from_subfolder("$i18n")
if translations_fs.exists():
for f in translations_fs.list(folders=False, files=True, recursive=False):
lang = f[0:len(f) - 3]
if translations_fs.exists(lang + ".mo"):
self._translations[lang] = gettext.GNUTranslations(translations_fs.get_fd(lang + ".mo"))
else:
self._translations[lang] = gettext.NullTranslations()
# Check all problems
self._problems = []
for problemid in self._data['problems']:
self._problems.append(self._create_task_problem(problemid, self._data['problems'][problemid], task_problem_types))
# Order
self._order = int(self._data.get('order', -1))
def gettext(self, language, *args, **kwargs):
translation = self._translations.get(language, gettext.NullTranslations())
return translation.gettext(*args, **kwargs)
def input_is_consistent(self, task_input, default_allowed_extension, default_max_size):
""" Check if an input for a task is consistent. Return true if this is case, false else """
for problem in self._problems:
if not problem.input_is_consistent(task_input, default_allowed_extension, default_max_size):
return False
return True
def get_order(self):
""" Get the position of this task in the course """
return self._order
def get_environment(self):
""" Returns the environment in which the agent have to launch this task"""
return self._environment
def get_id(self):
""" Get the id of this task """
return self._taskid
def get_problems(self):
""" Get problems contained in this task """
return self._problems
def get_course_id(self):
""" Return the courseid of the course that contains this task """
return self._course.get_id()
def get_course(self):
""" Return the course that contains this task """
return self._course
def get_limits(self):
""" Return the limits of this task """
vals = self._hook_manager.call_hook('task_limits', course=self.get_course(), task=self, default=self._limits)
return vals[0] if len(vals) else self._limits
def allow_network_access_grading(self):
""" Return True if the grading container should have access to the network """
vals = self._hook_manager.call_hook('task_network_grading', course=self.get_course(), task=self, default=self._network_grading)
return vals[0] if len(vals) else self._network_grading
def get_response_type(self):
""" Returns the method used to parse the output of the task: HTML or rst """
return "HTML" if self._response_is_html else "rst"
def get_fs(self):
""" Returns a FileSystemProvider which points to the folder of this task """
return self._fs
def use_linter_automatically(self):
""" Returns True if Linter is allowed to be used automatically in course settings """
course = self.get_course()
enable_linter_option = course.enable_automatic_linter()
return enable_linter_option
def check_answer(self, task_input, language):
"""
Verify the answers in task_input. Returns six values
1st: True if the input is **currently** valid (it may become invalid after running the code), False otherwise
2nd: True if the input needs to be run in the VM, False otherwise
3rd: Main message, as a list (that can be joined with \n or <br/> for example)
4th: Problem-specific messages, as a dictionary (tuple of result/text)
5th: Number of subproblems that (already) contain errors. <= Number of subproblems
6th: Number of errors in MCQ problems. Not linked to the number of subproblems
"""
valid = True
need_launch = False
main_message = []
problem_messages = {}
error_count = 0
multiple_choice_error_count = 0
for problem in self._problems:
problem_is_valid, problem_main_message, problem_s_messages, problem_mc_error_count = problem.check_answer(task_input, language)
if problem_is_valid is None:
need_launch = True
elif problem_is_valid == False:
error_count += 1
valid = False
if problem_main_message is not None:
main_message.append(problem_main_message)
if problem_s_messages is not None:
problem_messages[problem.get_id()] = (("success" if problem_is_valid else "failed"), problem_s_messages)
multiple_choice_error_count += problem_mc_error_count
return valid, need_launch, main_message, problem_messages, error_count, multiple_choice_error_count
def _create_task_problem(self, problemid, problem_content, task_problem_types):
"""Creates a new instance of the right class for a given problem."""
# Basic checks
if not id_checker(problemid):
raise Exception("Invalid problem _id: " + problemid)
if problem_content.get('type', "") not in task_problem_types:
raise Exception("Invalid type for problem " + problemid)
return task_problem_types.get(problem_content.get('type', ""))(self, problemid, problem_content, self._translations)
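# Illustrative note (not part of the original module): the constructor above reads
# the task description dict ('content'); a minimal, hedged example of the fields it
# consumes -- 'environment', the optional 'limits' block (hard_time defaults to
# 3 * time when omitted) and the mandatory 'problems' mapping. All concrete values
# below, including the problem type, are made up:
#
#     content = {
#         "environment": "default",
#         "limits": {"time": 30, "memory": 512, "disk": 256},
#         "problems": {"q1": {"type": "code"}},
#     }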
|
JuezUN/INGInious
|
inginious/common/tasks.py
|
Python
|
agpl-3.0
| 7,488
|
from setuptools import setup, find_packages
setup(
name = 'athletic_pandas',
packages = find_packages(),
version = '0.8.0',
description = 'Workout analysis',
author='Aart Goossens',
author_email='aart@goossens.me',
url='https://github.com/AartGoossens/athletic_pandas',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
]
)
|
AartGoossens/athletic_pandas
|
setup.py
|
Python
|
mit
| 690
|
import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='poloniex',
version='0.1',
packages=[
'poloniex',
'poloniex.wamp',
'poloniex.api'
],
include_package_data=True,
description='Python Poloniex API',
long_description=README,
url='https://github.com/absortium/poloniex.git',
author='Andrey Samokhvalov',
license='MIT',
author_email='andrew.shvv@gmail.com',
install_requires=[
'asyncio',
'aiohttp',
'autobahn',
'pp-ez',
'requests'
],
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
],
)
|
absortium/poloniex-api
|
setup.py
|
Python
|
mit
| 909
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham, Martin Hawlisch
# Copyright (C) 2009 Douglas S. Blank
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
## Based on the normal fanchart
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Pango
from gi.repository import Gtk
import math
from gi.repository import Gdk
try:
import cairo
except ImportError:
pass
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.plug import Gramplet
from gramps.gen.errors import WindowActiveError
from gramps.gui.editors import EditPerson
from gramps.gui.widgets.fanchartdesc import (FanChartDescWidget, FanChartDescGrampsGUI,
ANGLE_WEIGHT)
from gramps.gui.widgets.fanchart import FORM_HALFCIRCLE, BACKGROUND_SCHEME1
class FanChartDescGramplet(FanChartDescGrampsGUI, Gramplet):
"""
The Gramplet code that realizes the FanChartWidget.
"""
def __init__(self, gui, nav_group=0):
Gramplet.__init__(self, gui, nav_group)
FanChartDescGrampsGUI.__init__(self, self.on_childmenu_changed)
self.maxgen = 6
self.background = BACKGROUND_SCHEME1
self.fonttype = 'Sans'
self.grad_start = '#0000FF'
self.grad_end = '#FF0000'
self.dupcolor = '#888A85' #light grey
self.generic_filter = None
self.alpha_filter = 0.2
self.form = FORM_HALFCIRCLE
self.angle_algo = ANGLE_WEIGHT
self.set_fan(FanChartDescWidget(self.dbstate, self.uistate, self.on_popup))
# Replace the standard textview with the fan chart widget:
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add_with_viewport(self.fan)
# Make sure it is visible:
self.fan.show()
def init(self):
self.set_tooltip(_("Click to expand/contract person\nRight-click for options\nClick and drag in open area to rotate"))
def active_changed(self, handle):
"""
Method called when active person changes.
"""
# Reset everything but rotation angle (leave it as is)
self.update()
def on_childmenu_changed(self, obj, person_handle):
"""Callback for the pulldown menu selection, changing to the person
attached with menu item."""
self.set_active('Person', person_handle)
return True
|
pmghalvorsen/gramps_branch
|
gramps/plugins/gramplet/fanchartdescgramplet.py
|
Python
|
gpl-2.0
| 3,450
|
class Solution:
def numTrees(self, n: int) -> int:
dp = [0] * (n + 1)
dp[0] = 1
for j in range(1, n + 1):
for i in range(j):
dp[j] += dp[i] * dp[j - i - 1]
return dp[n]
# Catalan Number
class Solution2:
def numTrees(self, n: int) -> int:
result = 1
for i in range(1, n + 1):
result *= n + i
result //= i
return result // (n + 1)
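# Illustrative addition (not from the original file): the DP above implements the
# recurrence C(n) = sum_{i=0}^{n-1} C(i) * C(n-1-i), whose closed form is the n-th
# Catalan number that Solution2 computes iteratively. A minimal, hedged sanity
# check of both classes against the first few Catalan numbers:
if __name__ == '__main__':
    expected = [1, 2, 5, 14, 42]
    assert [Solution().numTrees(k) for k in range(1, 6)] == expected
    assert [Solution2().numTrees(k) for k in range(1, 6)] == expected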
|
jiadaizhao/LeetCode
|
0001-0100/0096-Unique Binary Search Trees/0096-Unique Binary Search Trees.py
|
Python
|
mit
| 447
|
import unittest
from mox import MoxTestBase, IsA
from gevent.socket import socket
from slimta.smtp.datareader import DataReader
from slimta.smtp.io import IO
from slimta.smtp import ConnectionLost, MessageTooBig
class TestSmtpDataReader(MoxTestBase, unittest.TestCase):
def setUp(self):
super(TestSmtpDataReader, self).setUp()
self.sock = self.mox.CreateMock(socket)
self.sock.fileno = lambda: -1
def test_append_line(self):
dr = DataReader(None)
dr._append_line(b'asdf')
dr._append_line(b'jkl\r\n')
dr.i += 1
dr._append_line(b'qwerty')
self.assertEqual([b'asdfjkl\r\n', b'qwerty'], dr.lines)
def test_from_recv_buffer(self):
io = IO(None)
io.recv_buffer = b'test\r\ndata'
dr = DataReader(io)
dr.from_recv_buffer()
self.assertEqual([b'test\r\n', b'data'], dr.lines)
def test_handle_finished_line_EOD(self):
dr = DataReader(None)
dr.lines = [b'.\r\n']
dr.handle_finished_line()
self.assertEqual(0, dr.EOD)
def test_handle_finished_line_initial_period(self):
dr = DataReader(None)
dr.lines = [b'..stuff\r\n']
dr.handle_finished_line()
self.assertEqual(b'.stuff\r\n', dr.lines[0])
def test_add_lines(self):
dr = DataReader(None)
dr.add_lines(b'\r\ntwo\r\n.three\r\nfour')
self.assertEqual([b'\r\n', b'two\r\n', b'three\r\n', b'four'], dr.lines)
self.assertEqual(3, dr.i)
self.assertEqual(None, dr.EOD)
def test_recv_piece(self):
self.sock.recv(IsA(int)).AndReturn(b'one\r\ntwo')
self.sock.recv(IsA(int)).AndReturn(b'\r\nthree\r\n.\r\nstuff\r\n')
self.mox.ReplayAll()
dr = DataReader(IO(self.sock))
self.assertTrue(dr.recv_piece())
self.assertFalse(dr.recv_piece())
self.assertEqual([b'one\r\n', b'two\r\n', b'three\r\n',
b'.\r\n', b'stuff\r\n', b''], dr.lines)
self.assertEqual(3, dr.EOD)
self.assertEqual(5, dr.i)
def test_recv_piece_already_eod(self):
dr = DataReader(None)
dr.EOD = 2
self.assertFalse(dr.recv_piece())
def test_recv_piece_connectionlost(self):
self.sock.recv(IsA(int)).AndReturn(b'')
self.mox.ReplayAll()
dr = DataReader(IO(self.sock))
self.assertRaises(ConnectionLost, dr.recv_piece)
def test_recv_piece_messagetoobig(self):
self.sock.recv(IsA(int)).AndReturn(b'1234567890')
self.mox.ReplayAll()
dr = DataReader(IO(self.sock), 9)
self.assertRaises(MessageTooBig, dr.recv_piece)
def test_return_all(self):
io = IO(None)
dr = DataReader(io)
dr.lines = [b'one\r\n', b'two\r\n', b'.\r\n', b'three\r\n']
dr.EOD = 2
self.assertEqual(b'one\r\ntwo\r\n', dr.return_all())
self.assertEqual(b'three\r\n', io.recv_buffer)
def test_recv(self):
self.sock.recv(IsA(int)).AndReturn(b'\r\nthree\r\n')
self.sock.recv(IsA(int)).AndReturn(b'.\r\nstuff\r\n')
self.mox.ReplayAll()
io = IO(self.sock)
io.recv_buffer = b'one\r\ntwo'
dr = DataReader(io)
self.assertEqual(b'one\r\ntwo\r\nthree\r\n', dr.recv())
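# Illustrative note (not part of the original tests): these cases exercise the
# SMTP DATA termination rules -- a line consisting of just "." CRLF marks the end
# of data, and any other line starting with "." has that leading dot stripped
# (dot-unstuffing, RFC 5321 section 4.5.2). A minimal, hedged sketch of the rule
# the tests above expect from DataReader (the helper name below is hypothetical):
#
#     def _unstuff(line):
#         if line == b'.\r\n':
#             return None          # end-of-data marker
#         if line.startswith(b'.'):
#             return line[1:]      # strip the stuffed dot
#         return line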
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
slimta/python-slimta
|
test/test_slimta_smtp_datareader.py
|
Python
|
mit
| 3,309
|
from flask import Flask, url_for, render_template
app = Flask(__name__)
@app.route('/')
def hello_world(name=None):
hello = 'hello'
#return render_template('static/index.html', name=name)
#return url_for('static', filename='index.html')
return render_template('index.html', name=name)
if __name__ == '__main__':
app.debug = True
app.use_debugger = True
app.run()
|
jpdoyle/OmegaGo
|
server/hello.py
|
Python
|
mit
| 394
|
# Created by Albert Aparicio on 21/10/16
# coding: utf-8
# This import makes Python use 'print' as in Python 3.x
from __future__ import print_function
import h5py
import numpy as np
from tfglib.utils import kronecker_delta
def parse_file(param_len, file_path, offset=0):
# TODO Document the offset parameter
"""This function parses a vocoder data file.
INPUTS:
param_len: The length of the parameters vector of each vocoder frame
file_path: The file path of the data
(each frame's parameters are located in a single row)
OUTPUTS:
file_params: NumPy.ndarray with the file's parameters organized by rows
its size is (number of lines in file_path minus offset) x param_len."""
# Source
open_file = open(file_path, 'r')
file_lines = open_file.readlines()
# Preallocate matrix for an increased memory efficiency
file_params = np.empty([len(file_lines) - offset, param_len])
for index in range(offset, len(file_lines), 1):
aux = file_lines[index].split('\n')
file_params[index - offset, :] = aux[0].split('\t')
return file_params
def align_frames(dtw_frames, source_params, target_params):
"""Align source and target frames according to the rows of 'dtw_frames'.
INPUTS:
dtw_frames: 2-column 'ndarray' with the DTW matching for each frame
source_params: 'ndarray' with concatenated parameters of the source file
target_params: 'ndarray' with concatenated parameters of the target file
OUTPUT:
An 'ndarray' with the aligned data.
NOTE: the length of the aligned array may be greater than that of the source and target files"""
assert source_params.shape[1] == target_params.shape[1]
data = np.empty(
[dtw_frames.shape[0], source_params.shape[1] + target_params.shape[1]]
)
for row, matching in enumerate(dtw_frames):
data[row, :] = np.concatenate((
source_params[int(dtw_frames[row, 0]), :],
target_params[int(dtw_frames[row, 1]), :]
))
return data
def build_file_table(basename, source_dir, target_dir, dtw_dir):
"""This function builds a datatable from the aligned vocoder frames.
It reads and parses the input files, concatenates the data and aligns it.
INPUTS:
basename: the name without extension of the file to be prepared
source_dir: directory path to the source files. (it must end in '/')
target_dir: directory path to the target files. (it must end in '/')
dtw_dir: dir path to DTW frame matchings file. (it must end in '/')"""
# Parse files
source_f0 = parse_file(1, source_dir + basename + '.' + 'lf0' + '.dat')
source_f0_i = parse_file(
1,
source_dir + basename + '.' + 'lf0' + '.i.dat'
) # Interpolated data
source_mcp = parse_file(40, source_dir + basename + '.' + 'mcp' + '.dat')
source_vf = parse_file(1, source_dir + basename + '.' + 'vf' + '.dat')
source_vf_i = parse_file(
1,
source_dir + basename + '.' + 'vf' + '.i.dat'
) # Use interpolated data
target_f0 = parse_file(1, target_dir + basename + '.' + 'lf0' + '.dat')
target_f0_i = parse_file(
1,
target_dir + basename + '.' + 'lf0' + '.i.dat'
) # Use interpolated data
target_mcp = parse_file(40, target_dir + basename + '.' + 'mcp' + '.dat')
target_vf = parse_file(1, target_dir + basename + '.' + 'vf' + '.dat')
target_vf_i = parse_file(
1,
target_dir + basename + '.' + 'vf' + '.i.dat'
) # Use interpolated data
dtw_frames = parse_file(2, dtw_dir + basename + '.dtw', 5)
# Build voiced/unvoiced flag arrays
# The flags are:
# 1 -> voiced
# 0 -> unvoiced
assert source_vf.shape == source_f0.shape
source_voiced = np.empty(source_vf.shape)
for index, vf in enumerate(source_vf):
source_voiced[index] = 1 - kronecker_delta(source_vf[index])
assert target_vf.shape == target_f0.shape
target_voiced = np.empty(target_vf.shape)
for index, vf in enumerate(target_vf):
target_voiced[index] = 1 - kronecker_delta(target_vf[index])
# Concatenate source and target params
source_params = np.concatenate((
source_mcp,
source_f0_i,
source_vf_i,
source_voiced
), axis=1)
target_params = np.concatenate((
target_mcp,
target_f0_i,
target_vf_i,
target_voiced
), axis=1)
# Align parameters
return align_frames(dtw_frames, source_params, target_params)
def construct_datatable(basenames_list, source_dir, target_dir, dtw_dir):
# Parse basenames list
basenames_file = open(basenames_list, 'r')
basenames_lines = basenames_file.readlines()
# Strip '\n' characters
basenames_lines = [line.split('\n')[0] for line in basenames_lines]
# Construct table of 1st file
datatable = build_file_table(
basenames_lines[0],
source_dir,
target_dir,
dtw_dir
)
# Iterate through the rest of files and concatenate them below
for i in range(1, len(basenames_lines), 1):
datatable = np.concatenate((
datatable,
build_file_table(
basenames_lines[i],
source_dir,
target_dir,
dtw_dir
)
))
# Return result
return datatable
def save_datatable(data_dir, dataset_name, datatable_out_file):
"""This function constructs and saves the datatable in an .h5 file
INPUTS:
data_dir: directory of data to be used in the datatable.
This path must end in a '/'
dataset_name: name of the dataset in the .h5 file
datatable_out_file: path to the output .h5 file (no extension)
OUTPUTS: the constructed datatable; it is also written to the output .h5 file"""
# Construct datatable
data = construct_datatable(
data_dir + 'basenames.list',
data_dir + 'vocoded/SF1/',
data_dir + 'vocoded/TF1/',
data_dir + 'dtw/beam2/'
)
# Save and compress with gzip to save space
with h5py.File(datatable_out_file + '.h5', 'w') as f:
f.create_dataset(
dataset_name,
data=data,
compression="gzip",
compression_opts=9
)
f.close()
return data
def load_datatable(datatable_file, dataset_name):
"""This function loads a datatable from a previously saved file
INPUTS:
datatable_file: path to the datatable .h5 file
dataset_name: name of the dataset to retrieve from the .h5 file
OUTPUT: a NumPy.ndarray with the datatable"""
with h5py.File(datatable_file, 'r') as file:
dataset = file[dataset_name][:, :]
file.close()
return dataset
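# Illustrative usage sketch (not part of the original module): build a datatable
# from a data directory and read it back. The directory layout expected by
# save_datatable ('basenames.list', 'vocoded/SF1/', 'vocoded/TF1/', 'dtw/beam2/')
# is documented above; the concrete paths and dataset name below are hypothetical.
if __name__ == '__main__':
    data = save_datatable('data/training/', 'train_data', 'data/train_datatable')
    reloaded = load_datatable('data/train_datatable.h5', 'train_data')
    assert (data == reloaded).all()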
|
albertaparicio/tfglib
|
tfglib/construct_table.py
|
Python
|
lgpl-3.0
| 6,807
|
while True:
print('Enter your age:')
age = input()
if age.isdecimal():
break
print('Please enter a number for your age.')
while True:
print('Select a new password (letters and numbers only):')
password = input()
if password.isalnum():
break
print('Passwords can only have letters and numbers.')
|
ir0nb8t/tutorials
|
automateTheBoringStuff/vailidateInput.py
|
Python
|
gpl-3.0
| 344
|
import locale
import logging
from hashlib import sha1
from django import http
from django.conf.urls import url
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.mail import mail_managers
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.translation import get_language, to_locale, ugettext_lazy as _
from mooch.base import BaseMoocher, csrf_exempt_m, require_POST_m
from mooch.signals import post_charge
logger = logging.getLogger("mooch.postfinance")
class PostFinanceMoocher(BaseMoocher):
identifier = "postfinance"
title = _("Pay with PostFinance")
def __init__(
self, *, pspid, live, sha1_in, sha1_out, payment_methods=None, **kwargs
):
if any(x is None for x in (pspid, live, sha1_in, sha1_out)):
raise ImproperlyConfigured(
"%s: None is not allowed in (%r, %r, %r, %r)"
% (self.__class__.__name__, pspid, live, sha1_in, sha1_out)
)
self.pspid = pspid
# Which payment options should be shown
# Options: PostFinance Card, PostFinance e-finance, TWINT, PAYPAL
self.payment_methods = (
["PostFinance Card", "PostFinance e-finance"]
if payment_methods is None
else payment_methods
)
self.live = live
self.sha1_in = sha1_in
self.sha1_out = sha1_out
super().__init__(**kwargs)
def get_urls(self):
return [
url(
r"^postfinance_success/$", self.success_view, name="postfinance_success"
),
url(
r"^postfinance_postsale/$",
self.postsale_view,
name="postfinance_postsale",
),
]
def payment_form(self, request, payment):
postfinance = {
# Add a random suffix, because PostFinance does not like
# processing the same order ID over and over.
"orderID": "%s-%s" % (payment.id.hex, get_random_string(4)),
"amount": str(payment.amount_cents),
"currency": "CHF",
"PSPID": self.pspid,
"language": locale.normalize(to_locale(get_language())).split(".")[0],
"EMAIL": payment.email,
}
postfinance["SHASign"] = sha1(
(
"".join(
(
postfinance["orderID"],
postfinance["amount"],
postfinance["currency"],
postfinance["PSPID"],
self.sha1_in,
)
)
).encode("utf-8")
).hexdigest()
return render_to_string(
"mooch/postfinance_payment_form.html",
{
"moocher": self,
"payment": payment,
"postfinance": postfinance,
"mode": "prod" if self.live else "test",
"payment_methods": self.payment_methods,
"success_url": request.build_absolute_uri(
reverse("%s:postfinance_success" % self.app_name)
),
"failure_url": request.build_absolute_uri(str(self.failure_url)),
},
request=request,
)
def _process_query(self, data, request):
try:
parameters_repr = repr(data).encode("utf-8")
logger.info("IPN: Processing request data %s" % parameters_repr)
try:
orderID = data["orderID"]
currency = data["currency"]
amount = data["amount"]
PM = data["PM"]
ACCEPTANCE = data["ACCEPTANCE"]
STATUS = data["STATUS"]
CARDNO = data["CARDNO"]
PAYID = data["PAYID"]
NCERROR = data["NCERROR"]
BRAND = data["BRAND"]
SHASIGN = data["SHASIGN"]
except KeyError:
logger.error("IPN: Missing data in %s" % parameters_repr)
raise ValidationError("Missing data")
sha1_source = "".join(
(
orderID,
currency,
amount,
PM,
ACCEPTANCE,
STATUS,
CARDNO,
PAYID,
NCERROR,
BRAND,
self.sha1_out,
)
)
sha1_out = sha1(sha1_source.encode("utf-8")).hexdigest()
if sha1_out.lower() != SHASIGN.lower():
logger.error("IPN: Invalid hash in %s" % parameters_repr)
raise ValidationError("Hash did not validate")
try:
instance = self.model.objects.get(pk=orderID.split("-")[0])
except self.model.DoesNotExist:
logger.error("IPN: Instance %s does not exist" % orderID)
raise ValidationError("Instance %s does not exist" % orderID)
if STATUS in ("5", "9"):
instance.charged_at = timezone.now()
instance.payment_service_provider = self.identifier
instance.transaction = parameters_repr
instance.save()
post_charge.send(sender=self, payment=instance, request=request)
except Exception as e:
logger.error("IPN: Processing failure %s" % e)
raise
def success_view(self, request):
try:
self._process_query(request.GET.copy(), request)
except ValidationError as exc:
mail_managers(
"Validation error in PostFinance success view",
"\n".join(
[request.build_absolute_uri(), ""] + [m for m in exc.messages]
),
)
for m in exc.messages:
messages.error(request, m)
return redirect(self.failure_url)
else:
return redirect(self.success_url)
@csrf_exempt_m
@require_POST_m
def postsale_view(self, request):
try:
self._process_query(request.POST.copy(), request)
except ValidationError as exc:
return http.HttpResponseForbidden(exc.message)
return http.HttpResponse("OK")
|
matthiask/django-mooch
|
mooch/postfinance.py
|
Python
|
mit
| 6,553
|
from src.feat_extractor import *
from glob import *
import sys
from config import *
def get_features_dict(pkts, fileName=None):
feat_list_key = parse_cfg(fileName)
feature_set = get_conv_feature(pkts)
vec = dict()
for entry in feat_list_key:
try:
vec[entry] = feature_set[entry]
except Exception as e:
# TODO: pass
# don't set the entry if none
vec[entry] = None
return vec
def get_features_vec(pkts, fileName=None):
feat_list_key = parse_cfg(fileName)
feature_set = get_conv_feature(pkts)
vec = list()
for entry in feat_list_key:
try:
vec.append(feature_set[entry])
except Exception as e:
vec.append(None)
return vec
def get_features_set(pkts, fileName=None):
feat_list_key = parse_cfg(fileName)
feature_set = get_conv_feature(pkts)
vec = dict()
for entry in feat_list_key:
try:
vec[entry] = feature_set[entry]
except Exception as e:
vec[entry] = 'err'
return vec
|
xh2310/traffic_feature_extractor
|
__init__.py
|
Python
|
gpl-2.0
| 1,097
|
# -*- coding: utf-8 -*-
from django.conf.urls import include, url
from rest_framework import routers
from django.contrib import admin
from orea.settings import base
from defcdb import views
from defcdb import api_views
import autocomplete_light.shortcuts as al
al.autodiscover()
router = routers.DefaultRouter()
router.register(r'geojson', api_views.GeoJsonViewSet, base_name='places')
router.register(r'dc_finds_lithics_raw_material', api_views.DC_finds_lithics_raw_materialViewSet)
router.register(
r'dc_finds_lithics_retouched_tools', api_views.DC_finds_lithics_retouched_toolsViewSet
)
router.register(
    r'dc_finds_lithics_unretouched_tools', api_views.DC_finds_lithics_unretouched_toolsViewSet
)
router.register(r'dc_finds_lithics_core_shape', api_views.DC_finds_lithics_core_shapeViewSet)
router.register(r'dc_finds_lithics_industry', api_views.DC_finds_lithics_industryViewSet)
router.register(r'dc_area_agegroups', api_views.DC_area_agegroupsViewSet)
router.register(r'name', api_views.NameViewSet)
router.register(r'dc_country', api_views.DC_countryViewSet)
router.register(r'dc_region', api_views.DC_regionViewSet)
router.register(r'dc_province', api_views.DC_provinceViewSet)
router.register(r'dc_site_topography', api_views.DC_site_topographyViewSet)
router.register(r'dc_researchevent_researchtype', api_views.DC_researchevent_researchtypeViewSet)
router.register(r'dc_researchevent_institution', api_views.DC_researchevent_institutionViewSet)
router.register(
r'dc_researchevent_special_analysis', api_views.DC_researchevent_special_analysisViewSet
)
router.register(
r'dc_site_geographicalreferencesystem', api_views.DC_site_geographicalreferencesystemViewSet
)
router.register(r'dc_area_areatype', api_views.DC_area_areatypeViewSet)
router.register(r'dc_area_settlementtype', api_views.DC_area_settlementtypeViewSet)
router.register(r'dc_area_settlementstructure', api_views.DC_area_settlementstructureViewSet)
router.register(r'dc_area_constructiontype', api_views.DC_area_constructiontypeViewSet)
router.register(r'dc_area_constructionshape', api_views.DC_area_constructionshapeViewSet)
router.register(r'dc_area_buildingtechnique', api_views.DC_area_buildingtechniqueViewSet)
router.register(r'dc_area_specialfeatures', api_views.DC_area_specialfeaturesViewSet)
router.register(
r'dc_area_evidenceofgraveshumanremains', api_views.DC_area_evidenceofgraveshumanremainsViewSet
)
router.register(r'dc_area_evidenceofoccupation', api_views.DC_area_evidenceofoccupationViewSet)
router.register(r'dc_area_caverockshelterstype', api_views.DC_area_caverockshelterstypeViewSet)
router.register(r'dc_area_rawmaterial', api_views.DC_area_rawmaterialViewSet)
router.register(r'dc_area_exploitationtype', api_views.DC_area_exploitationtypeViewSet)
router.register(r'dc_area_topography', api_views.DC_area_topographyViewSet)
router.register(r'dc_area_mortuaryfeatures', api_views.DC_area_mortuaryfeaturesViewSet)
router.register(r'dc_area_gravetype', api_views.DC_area_gravetypeViewSet)
router.register(r'dc_area_typeofhumanremains', api_views.DC_area_typeofhumanremainsViewSet)
router.register(r'dc_area_sexes', api_views.DC_area_sexesViewSet)
router.register(r'dc_area_manipulationofgraves', api_views.DC_area_manipulationofgravesViewSet)
router.register(r'dc_finds_type', api_views.DC_finds_typeViewSet)
router.register(r'dc_finds_material', api_views.DC_finds_materialViewSet)
router.register(r'dc_finds_amount', api_views.DC_finds_amountViewSet)
router.register(r'dc_finds_small_finds_type', api_views.DC_finds_small_finds_typeViewSet)
router.register(r'dc_finds_small_finds_category', api_views.DC_finds_small_finds_categoryViewSet)
router.register(r'dc_finds_botany_species', api_views.DC_finds_botany_speciesViewSet)
router.register(
r'dc_finds_animal_remains_species', api_views.DC_finds_animal_remains_speciesViewSet
)
router.register(
r'dc_finds_animal_remains_completeness', api_views.DC_finds_animal_remains_completenessViewSet
)
router.register(r'dc_finds_animal_remains_part', api_views.DC_finds_animal_remains_partViewSet)
router.register(r'dc_finds_lithics_technology', api_views.DC_finds_lithics_technologyViewSet)
router.register(r'dc_finds_pottery_form', api_views.DC_finds_pottery_formViewSet)
router.register(r'dc_finds_pottery_detail', api_views.DC_finds_pottery_detailViewSet)
router.register(r'dc_finds_pottery_decoration', api_views.DC_finds_pottery_decorationViewSet)
router.register(
r'dc_interpretation_productiontype', api_views.DC_interpretation_productiontypeViewSet
)
router.register(
r'dc_interpretation_subsistencetype', api_views.DC_interpretation_subsistencetypeViewSet
)
router.register(r'dc_chronological_system', api_views.DC_chronological_systemViewSet)
router.register(r'dc_period_datingmethod', api_views.DC_period_datingmethodViewSet)
router.register(r'dc_period_datedby', api_views.DC_period_datedbyViewSet)
router.register(r'dc_site_coordinatesource', api_views.DC_site_coordinatesource)
router.register(r'Book', api_views.BookViewSet)
router.register(r'ResearchEvent', api_views.ResearchEventViewSet)
router.register(r'Site', api_views.SiteViewSet)
router.register(r'Area', api_views.AreaViewSet)
router.register(r'Finds', api_views.FindsViewSet)
router.register(r'Interpretation', api_views.InterpretationViewSet)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls')),
url(r'^defcdb/', include('defcdb.urls', namespace='defcdb')),
url(r'^geolocation/', include('geolocation.urls', namespace="geolocation")),
url(r'^login/$', views.user_login, name='user_login'),
url(r'^accounts/login/$', views.user_login, name='user_login'),
url(r'^logout/$', views.user_logout, name='user_logout'),
url(r'^autocomplete/', include('autocomplete_light.urls')),
url(r'^', include('webpage.urls', namespace='webpage')),
url(r'^bib/', include('bib.urls', namespace='bib')),
url(
r'^media/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': base.MEDIA_ROOT, }, name='media_root_url'
),
url(r'^image_gallery/', include('images_metadata.urls', namespace="image_gallery")),
url(r'^publicrecords/', include('publicrecords.urls', namespace="publicrecords")),
url(r'^3Dmodels/', include('threedmodels.urls', namespace="3Dmodels")),
url(r'^browsing/', include('browsing.urls', namespace='browsing')),
url(r'^datamodel/', include('django_spaghetti.urls', namespace="datamodel")),
url(r'^sparql/', include('sparql.urls', namespace='sparql')),
]
|
acdh-oeaw/defc-app
|
orea/urls.py
|
Python
|
mit
| 6,607
|
from django.contrib import admin
from commoner.server.models import TrustedRelyingParty, TrustedMetadata
class TrustedMetadataAdmin(admin.TabularInline):
model = TrustedMetadata
class TrustedRelyingPartyAdmin(admin.ModelAdmin):
list_display = ('user', 'root')
inlines = [TrustedMetadataAdmin]
admin.site.register(TrustedRelyingParty,
TrustedRelyingPartyAdmin)
|
cc-archive/commoner
|
src/commoner/server/admin.py
|
Python
|
agpl-3.0
| 395
|
from setuptools import setup
from setuptools import find_packages
setup(name='keras_extensions',
version='1.1',
description='Extension library for Keras',
author='Yoshiaki Takahashi',
author_email='',
url='https://github.com/bnsnapper/keras_bn_library',
download_url='',
license='MIT',
install_requires=[],
packages=find_packages())
|
bnsnapper/keras_bn_library
|
setup.py
|
Python
|
mit
| 403
|
# META: timeout=long
import json
_window_id = "window-fcc6-11e5-b4f8-330a88ab9d7f"
_frame_id = "frame-075b-4da1-b6ba-e579c2d3230a"
def test_initial_window(session):
# non-auxiliary top-level browsing context
raw_json = session.execute_script("return window;")
obj = json.loads(raw_json)
assert len(obj) == 1
assert _window_id in obj
handle = obj[_window_id]
assert handle in session.window_handles
def test_window_open(session):
# auxiliary browsing context
session.execute_script("window.foo = window.open()")
raw_json = session.execute_script("return window.foo;")
obj = json.loads(raw_json)
assert len(obj) == 1
assert _window_id in obj
handle = obj[_window_id]
assert handle in session.window_handles
def test_frame(session):
# nested browsing context
append = """
window.frame = document.createElement('iframe');
document.body.appendChild(frame);
"""
session.execute_script(append)
raw_json = session.execute_script("return frame.contentWindow;")
obj = json.loads(raw_json)
assert len(obj) == 1
assert _frame_id in obj
handle = obj[_frame_id]
assert handle not in session.window_handles
|
anthgur/servo
|
tests/wpt/web-platform-tests/webdriver/tests/contexts/json_serialize_windowproxy.py
|
Python
|
mpl-2.0
| 1,216
|
x01 = 1 in [1, 2, 3]
x02 = 'a' in [1, 2, 3]
x03 = 'a' in {'a': 1, 'b': 2}
x04 = 1 in (1, 2, 3)
x05 = 1 in 2 in 3
x06 = 1 in 2
|
clark800/pystarch
|
test/testcases/in.py
|
Python
|
mit
| 126
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-23 11:21
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service', '0011_auto_20170723_1657'),
]
operations = [
migrations.AlterField(
model_name='play',
name='time',
field=models.DateTimeField(default=datetime.datetime(2017, 7, 23, 18, 21, 24, 829667)),
),
]
|
arifspica/oldTraffordTreasureHunt
|
webservice/service/migrations/0012_auto_20170723_1821.py
|
Python
|
gpl-3.0
| 533
|
from __future__ import absolute_import, unicode_literals
from stravalib import model, attributes, exc, unithelper as uh
from stravalib.client import Client
from stravalib.tests.functional import FunctionalTestBase
import datetime
import requests
class ClientTest(FunctionalTestBase):
def test_get_starred_segment(self):
"""
Test get_starred_segment
"""
i = 0
for segment in self.client.get_starred_segment(limit=5):
self.assertIsInstance(segment, model.Segment)
i+=1
self.assertGreater(i, 0) # star at least one segment
self.assertLessEqual(i, 5)
def test_get_activity(self):
""" Test basic activity fetching. """
activity = self.client.get_activity(96089609)
self.assertEquals('El Dorado County, CA, USA', activity.location_city)
self.assertIsInstance(activity.start_latlng, attributes.LatLon)
self.assertAlmostEquals(-120.4357631, activity.start_latlng.lon, places=2)
self.assertAlmostEquals(38.74263759999999, activity.start_latlng.lat, places=2)
self.assertIsInstance(activity.map, model.Map)
self.assertIsInstance(activity.athlete, model.Athlete)
self.assertEquals(1513, activity.athlete.id)
#self.assertAlmostEqual(first, second, places, msg, delta)
# Ensure that it was read in with correct units
self.assertEquals(22.5308, float(uh.kilometers(activity.distance)))
def test_get_activity_and_segments(self):
""" Test include_all_efforts parameter on activity fetching. """
if not self.activity_id:
self.fail("Include an activity_id in test.ini to test segment_efforts")
activity = self.client.get_activity(self.activity_id, include_all_efforts=True)
self.assertTrue(isinstance(activity.segment_efforts, list))
# Check also when we have no parameters segment_efforts is None
activity_no_segments = self.client.get_activity(self.activity_id)
self.assertTrue(activity.segment_efforts, None)
def test_get_activity_laps(self):
activity = self.client.get_activity(165094211)
laps = list(self.client.get_activity_laps(165094211))
self.assertEquals(5, len(laps))
# This obviously is far from comprehensive, just a sanity check
self.assertEquals(u'Lap 1', laps[0].name)
self.assertEquals(178.0, laps[0].max_heartrate)
def test_get_activity_zones(self):
"""
Test loading zones for activity.
"""
zones = self.client.get_activity_zones(99895560)
print zones
self.assertEquals(1, len(zones))
self.assertIsInstance(zones[0], model.PaceActivityZone)
# Indirectly
activity = self.client.get_activity(99895560)
self.assertEquals(len(zones), len(activity.zones))
self.assertEquals(zones[0].score, activity.zones[0].score)
def test_activity_comments(self):
"""
Test loading comments for already-loaded activity.
"""
activity = self.client.get_activity(2290897)
self.assertTrue(activity.comment_count > 0)
comments= list(activity.comments)
self.assertEquals(3, len(comments))
self.assertEquals("I love Gordo's. I've been eating there for 20 years!", comments[0].text)
def test_activity_photos(self):
"""
Test photos on activity
"""
activity = self.client.get_activity(152668627)
self.assertTrue(activity.total_photo_count > 0)
photos = list(activity.full_photos)
self.assertEqual(len(photos), 1)
self.assertEqual(len(photos), activity.total_photo_count)
self.assertIsInstance(photos[0], model.ActivityPhoto)
def test_activity_kudos(self):
"""
Test kudos on activity
"""
activity = self.client.get_activity(152668627)
self.assertTrue(activity.kudos_count > 0)
kudos = list(activity.kudos)
self.assertGreater(len(kudos), 6)
self.assertEqual(len(kudos), activity.kudos_count)
self.assertIsInstance(kudos[0], model.ActivityKudos )
def test_activity_streams(self):
"""
Test activity streams
"""
stypes = ['time', 'latlng', 'distance','altitude', 'velocity_smooth',
'heartrate', 'cadence', 'watts', 'temp', 'moving',
'grade_smooth']
streams = self.client.get_activity_streams(152668627, stypes, 'low')
self.assertGreater(len(streams.keys()), 3)
for k in streams.keys():
self.assertIn(k, stypes)
# time stream
self.assertIsInstance(streams['time'].data[0], int)
self.assertGreater(streams['time'].original_size, 100)
self.assertEqual(streams['time'].resolution, 'low')
self.assertEqual(len(streams['time'].data), 100)
# latlng stream
self.assertIsInstance(streams['latlng'].data, list)
self.assertIsInstance(streams['latlng'].data[0][0], float)
def test_related_activities(self):
"""
Test get_related_activities on an activity and related property of Activity
"""
activity_id = 152668627
activity = self.client.get_activity(activity_id)
related_activities = list(self.client.get_related_activities(activity_id))
# Check the number of related_activities matches what activity would expect
self.assertEqual(len(related_activities), activity.athlete_count-1)
# Check the related property gives the same result
related_activities_from_property = list(activity.related)
self.assertEqual(related_activities, related_activities_from_property)
def test_effort_streams(self):
"""
Test effort streams
"""
stypes = ['distance']
activity = self.client.get_activity(165479860) #152668627)
streams = self.client.get_effort_streams(activity.segment_efforts[0].id,
stypes, 'medium')
self.assertIn('distance', streams.keys())
# distance stream
self.assertIsInstance(streams['distance'].data[0], float) #xxx
self.assertEqual(streams['distance'].resolution, 'medium')
self.assertEqual(len(streams['distance'].data),
min(1000, streams['distance'].original_size))
def test_get_curr_athlete(self):
athlete = self.client.get_athlete()
# Just some basic sanity checks here
self.assertTrue(len(athlete.firstname) > 0)
self.assertTrue(athlete.athlete_type in ["runner", "cyclist"])
def test_get_athlete_clubs(self):
clubs = self.client.get_athlete_clubs()
self.assertEquals(3, len(clubs))
self.assertEquals('Team Roaring Mouse', clubs[0].name)
self.assertEquals('Team Strava Cycling', clubs[1].name)
self.assertEquals('Team Strava Cyclocross', clubs[2].name)
clubs_indirect = self.client.get_athlete().clubs
self.assertEquals(3, len(clubs_indirect))
self.assertEquals(clubs[0].name, clubs_indirect[0].name)
self.assertEquals(clubs[1].name, clubs_indirect[1].name)
self.assertEquals(clubs[2].name, clubs_indirect[2].name)
def test_get_gear(self):
g = self.client.get_gear("g69911")
self.assertTrue(float(g.distance) >= 3264.67)
self.assertEquals('Salomon XT Wings 2', g.name)
self.assertEquals('Salomon', g.brand_name)
self.assertTrue(g.primary)
self.assertEquals(model.DETAILED, g.resource_state)
self.assertEquals('g69911', g.id)
self.assertEquals('XT Wings 2', g.model_name)
self.assertEquals('', g.description)
def test_get_segment_leaderboard(self):
lb = self.client.get_segment_leaderboard(229781)
print(lb.effort_count)
print(lb.entry_count)
for i,e in enumerate(lb):
print '{0}: {1}'.format(i, e)
self.assertEquals(10, len(lb.entries)) # 10 top results
self.assertIsInstance(lb.entries[0], model.SegmentLeaderboardEntry)
self.assertEquals(1, lb.entries[0].rank)
self.assertTrue(lb.effort_count > 8000) # At time of writing 8206
# Check the relationships
athlete = lb[0].athlete
print(athlete)
self.assertEquals(lb[0].athlete_name, "{0} {1}".format(athlete.firstname, athlete.lastname))
effort = lb[0].effort
print effort
self.assertIsInstance(effort, model.SegmentEffort)
self.assertEquals('Hawk Hill', effort.name)
activity = lb[0].activity
self.assertIsInstance(activity, model.Activity)
# Can't assert much since #1 ranked activity will likely change in the future.
def test_get_segment(self):
segment = self.client.get_segment(229781)
self.assertIsInstance(segment, model.Segment)
print segment
self.assertEquals('Hawk Hill', segment.name)
self.assertAlmostEqual(2.68, float(uh.kilometers(segment.distance)), places=2)
# Fetch leaderboard
lb = segment.leaderboard
self.assertEquals(10, len(lb)) # 10 top results, 5 bottom results
def test_get_segment_efforts(self):
# test with string
efforts = self.client.get_segment_efforts(4357415,
start_date_local = "2012-12-23T00:00:00Z",
end_date_local = "2012-12-23T11:00:00Z",)
print efforts
i = 0
for effort in efforts:
print effort
self.assertEqual(4357415, effort.segment.id)
self.assertIsInstance(effort, model.BaseEffort)
effort_date = effort.start_date_local
self.assertEqual(effort_date.strftime("%Y-%m-%d"), "2012-12-23")
i+=1
print i
self.assertGreater(i, 2)
# also test with datetime object
start_date = datetime.datetime(2012, 12, 31, 6, 0)
end_date = start_date + datetime.timedelta(hours=12)
efforts = self.client.get_segment_efforts(4357415,
start_date_local = start_date,
end_date_local = end_date,)
print efforts
i = 0
for effort in efforts:
print effort
self.assertEqual(4357415, effort.segment.id)
self.assertIsInstance(effort, model.BaseEffort)
effort_date = effort.start_date_local
self.assertEqual(effort_date.strftime("%Y-%m-%d"), "2012-12-31")
i+=1
print i
self.assertGreater(i, 2)
def test_segment_explorer(self):
bounds = (37.821362,-122.505373,37.842038,-122.465977)
results = self.client.explore_segments(bounds)
# This might be brittle
self.assertEquals('Hawk Hill', results[0].name)
# Fetch full segment
segment = results[0].segment
self.assertEquals(results[0].name, segment.name)
# For some reason these don't follow the simple math rules one might expect (so we round to int)
self.assertAlmostEqual(results[0].elev_difference, segment.elevation_high - segment.elevation_low, places=0)
class AuthenticatedAthleteTest(FunctionalTestBase):
"""
Tests the function is_authenticated_athlete in model.Athlete
"""
def test_caching(self):
a = model.Athlete()
a._is_authenticated = "Not None"
self.assertEqual(a.is_authenticated_athlete(), "Not None")
def test_correct_athlete_returns_true(self):
a = self.client.get_athlete()
self.assertTrue(a.is_authenticated_athlete())
def test_detailed_resource_state_means_true(self):
a = model.Athlete()
a.resource_state = attributes.DETAILED
self.assertTrue(a.is_authenticated_athlete())
def test_correct_athlete_not_detailed_returns_true(self):
a = self.client.get_athlete()
a.resource_state = attributes.SUMMARY
# Now will have to do a look up for the authenticated athlete and check the ids match
self.assertTrue(a.is_authenticated_athlete())
def test_not_authenticated_athlete_is_false(self):
CAV_ID = 1353775
a = self.client.get_athlete(CAV_ID)
self.assertEqual(a.resource_state, attributes.SUMMARY)
self.assertFalse(a.is_authenticated_athlete())
class AthleteStatsTest(FunctionalTestBase):
"""
Tests the functionality for collecting athlete statistics
http://strava.github.io/api/v3/athlete/#stats
"""
def test_basic_get_from_client(self):
stats = self.client.get_athlete_stats()
self.assertIsInstance(stats, model.AthleteStats)
self.assertIsInstance(stats.recent_ride_totals, model.ActivityTotals)
# Check biggest_climb_elevation_gain has been set
self.assertTrue(uh.meters(stats.biggest_climb_elevation_gain) >= uh.meters(0))
def test_get_from_client_with_authenticated_id(self):
athlete_id = self.client.get_athlete().id
stats = self.client.get_athlete_stats(athlete_id)
self.assertIsInstance(stats, model.AthleteStats)
# Check same as before
self.assertEqual(stats.biggest_climb_elevation_gain, self.client.get_athlete_stats().biggest_climb_elevation_gain)
def test_get_from_client_with_wrong_id(self):
CAV_ID = 1353775
# Currently raises a requests.exceptions.HTTPError, TODO: better error handling
self.assertRaises(requests.exceptions.HTTPError, self.client.get_athlete_stats, CAV_ID)
def test_athlete_stats_property_option(self):
a = self.client.get_athlete()
stats = a.stats
self.assertIsInstance(stats, model.AthleteStats)
def test_athlete_stats_cached(self):
a = self.client.get_athlete()
a._stats = "Not None"
stats = a.stats
self.assertEqual(stats, "Not None")
def test_athlete_property_not_authenticated(self):
cav = self.client.get_athlete(1353775)
with self.assertRaises(exc.NotAuthenticatedAthlete):
cav.stats
|
matt-leach/stravalib
|
stravalib/tests/functional/test_client.py
|
Python
|
apache-2.0
| 14,147
|
# Copyright 2022 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for t5.evaluation.qa_utils."""
from absl.testing import absltest
from t5.evaluation import qa_utils
class QaUtilsTest(absltest.TestCase):
def test_normalize_trivia_qa(self):
self.assertEqual(
qa_utils.normalize_trivia_qa(
"`Needs\tA_LOT of the 'normalization'.\"‘"),
"needs lot of normalization",
)
self.assertEqual(
qa_utils.normalize_trivia_qa("needs no normalization"),
"needs no normalization",
)
def test_normalize_squad(self):
self.assertEqual(
qa_utils.normalize_squad("`Needs\tA_LOT of the 'normalization'.\"‘"),
"needs alot of normalization‘",
)
self.assertEqual(
qa_utils.normalize_squad("needs no normalization"),
"needs no normalization",
)
def test_qa_metrics(self):
with self.assertRaisesRegex(
ValueError, "Number of targets and predictions must match."):
qa_utils.qa_metrics([["answer"]] * 6, ["answer"] * 5)
self.assertDictEqual(
qa_utils.qa_metrics([["answer"]] * 5, ["answer"] * 5),
{"em": 100.0, "f1": 100.0}
)
self.assertDictEqual(
qa_utils.qa_metrics(
[
["big moose", "hippo"],
["correct1"],
["correct2.1", "correct2.2"],
["a", "b"],
],
[
"a big moose‘",
"wrong",
"correct2.2",
"c",
],
),
{"em": 25., "f1": 35.},
)
if __name__ == "__main__":
absltest.main()
|
google-research/text-to-text-transfer-transformer
|
t5/evaluation/qa_utils_test.py
|
Python
|
apache-2.0
| 2,172
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import contextlib
import mock
from stoqlib.gui.dialogs.productiondetails import ProductionDetailsDialog
from stoqlib.gui.dialogs.productionquotedialog import ProductionQuoteDialog
from stoqlib.gui.dialogs.startproduction import StartProductionDialog
from stoqlib.gui.search.productionsearch import (ProductionProductSearch,
ProductionItemsSearch,
ProductionHistorySearch)
from stoqlib.gui.search.servicesearch import ServiceSearch
from stoqlib.gui.wizards.productionwizard import ProductionWizard
from stoq.gui.production import ProductionApp
from stoq.gui.test.baseguitest import BaseGUITest
class TestProduction(BaseGUITest):
def _check_run_dialog(self, action, dialog, other_args, other_kwargs):
with contextlib.nested(
mock.patch('stoq.gui.production.ProductionApp.run_dialog'),
mock.patch('stoq.gui.production.api.new_store'),
mock.patch.object(self.store, 'commit'),
mock.patch.object(self.store, 'close')) as ctx:
new_store = ctx[1]
new_store.return_value = self.store
self.activate(action)
expected_args = [dialog, self.store]
if other_args:
expected_args.extend(other_args)
run_dialog = ctx[0]
run_dialog.assert_called_once_with(*expected_args, **other_kwargs)
def test_initial(self):
app = self.create_app(ProductionApp, u'production')
self.check_app(app, u'production')
def test_select(self):
self.create_production_order()
app = self.create_app(ProductionApp, u'production')
results = app.results
results.select(results[0])
def test_run_dialogs(self):
self.create_production_order()
app = self.create_app(ProductionApp, u'production')
results = app.results
results.select(results[0])
self._check_run_dialog(app.EditProduction,
ProductionWizard, [results[0]], {})
results.select(results[0])
self._check_run_dialog(app.StartProduction,
StartProductionDialog, [results[0]], {})
results.select(results[0])
self._check_run_dialog(app.ProductionDetails,
ProductionDetailsDialog, [results[0]], {})
self._check_run_dialog(app.ProductionPurchaseQuote,
ProductionQuoteDialog, [], {})
self._check_run_dialog(app.SearchProduct,
ProductionProductSearch, [], {})
self._check_run_dialog(app.SearchService,
ServiceSearch, [], {u'hide_price_column': True})
self._check_run_dialog(app.SearchProductionHistory,
ProductionHistorySearch, [], {})
self._check_run_dialog(app.SearchProductionItem,
ProductionItemsSearch, [], {})
|
andrebellafronte/stoq
|
stoq/gui/test/test_production.py
|
Python
|
gpl-2.0
| 3,913
|
from direct.distributed.MsgTypes import *
OTP_DO_ID_SERVER_ROOT = 4007
OTP_DO_ID_FRIEND_MANAGER = 4501
OTP_DO_ID_LEADERBOARD_MANAGER = 4502
OTP_DO_ID_SERVER = 4600
OTP_DO_ID_UBER_DOG = 4601
OTP_CHANNEL_AI_AND_UD_BROADCAST = 4602
OTP_CHANNEL_UD_BROADCAST = 4603
OTP_CHANNEL_AI_BROADCAST = 4604
OTP_NET_MSGR_CHANNEL_ID_ALL_AI = 4605
OTP_NET_MSGR_CHANNEL_ID_UBER_DOG = 4606
OTP_NET_MSGR_CHANNEL_ID_AI_ONLY = 4607
OTP_DO_ID_COMMON = 4615
OTP_DO_ID_GATEWAY = 4616
OTP_DO_ID_PIRATES = 4617
OTP_DO_ID_TOONTOWN = 4618
OTP_DO_ID_FAIRIES = 4619
OTP_DO_ID_CARS = 4620
OTP_DO_ID_AVATARS = 4630
OTP_DO_ID_FRIENDS = 4640
OTP_DO_ID_GUILDS = 4650
OTP_DO_ID_ESCROW = 4660
OTP_DO_ID_PIRATES_AVATAR_MANAGER = 4674
OTP_DO_ID_PIRATES_CREW_MANAGER = 4675
OTP_DO_ID_PIRATES_INVENTORY_MANAGER = 4677
OTP_DO_ID_PIRATES_SPEEDCHAT_RELAY = 4711
OTP_DO_ID_PIRATES_SHIP_MANAGER = 4678
OTP_DO_ID_PIRATES_TRAVEL_AGENT = 4679
OTP_DO_ID_PIRATES_FRIENDS_MANAGER = 4680
OTP_DO_ID_CHAT_MANAGER = 4681
OTP_DO_ID_TOONTOWN_AVATAR_MANAGER = 4682
OTP_DO_ID_TOONTOWN_DELIVERY_MANAGER = 4683
OTP_DO_ID_TOONTOWN_TEMP_STORE_MANAGER = 4684
OTP_DO_ID_TOONTOWN_SPEEDCHAT_RELAY = 4712
OTP_DO_ID_SWITCHBOARD_MANAGER = 4685
OTP_DO_ID_AVATAR_FRIENDS_MANAGER = 4686
OTP_DO_ID_PLAYER_FRIENDS_MANAGER = 4687
OTP_DO_ID_CENTRAL_LOGGER = 4688
OTP_DO_ID_CARS_AVATAR_MANAGER = 4689
OTP_DO_ID_TOONTOWN_MAIL_MANAGER = 4690
OTP_DO_ID_TOONTOWN_PARTY_MANAGER = 4691
OTP_DO_ID_TOONTOWN_RAT_MANAGER = 4692
OTP_DO_ID_STATUS_DATABASE = 4693
OTP_DO_ID_TOONTOWN_AWARD_MANAGER = 4694
OTP_DO_ID_TOONTOWN_CODE_REDEMPTION_MANAGER = 4695
OTP_DO_ID_TOONTOWN_IN_GAME_NEWS_MANAGER = 4696
OTP_DO_ID_TOONTOWN_NON_REPEATABLE_RANDOM_SOURCE = 4697
OTP_DO_ID_AI_TRADE_AVATAR = 4698
OTP_DO_ID_TOONTOWN_WHITELIST_MANAGER = 4699
OTP_DO_ID_PIRATES_MATCH_MAKER = 4700
OTP_DO_ID_PIRATES_GUILD_MANAGER = 4701
OTP_DO_ID_PIRATES_AWARD_MAKER = 4702
OTP_DO_ID_PIRATES_CODE_REDEMPTION = 4703
OTP_DO_ID_PIRATES_SETTINGS_MANAGER = 4704
OTP_DO_ID_PIRATES_HOLIDAY_MANAGER = 4705
OTP_DO_ID_PIRATES_CREW_MATCH_MANAGER = 4706
OTP_DO_ID_PIRATES_AVATAR_ACCESSORIES_MANAGER = 4710
OTP_DO_ID_TOONTOWN_CPU_INFO_MANAGER = 4713
OTP_DO_ID_TOONTOWN_SECURITY_MANAGER = 4714
OTP_DO_ID_SNAPSHOT_DISPATCHER = 4800
OTP_DO_ID_SNAPSHOT_RENDERER = 4801
OTP_DO_ID_SNAPSHOT_RENDERER_01 = 4801
OTP_DO_ID_SNAPSHOT_RENDERER_02 = 4802
OTP_DO_ID_SNAPSHOT_RENDERER_03 = 4803
OTP_DO_ID_SNAPSHOT_RENDERER_04 = 4804
OTP_DO_ID_SNAPSHOT_RENDERER_05 = 4805
OTP_DO_ID_SNAPSHOT_RENDERER_06 = 4806
OTP_DO_ID_SNAPSHOT_RENDERER_07 = 4807
OTP_DO_ID_SNAPSHOT_RENDERER_08 = 4808
OTP_DO_ID_SNAPSHOT_RENDERER_09 = 4809
OTP_DO_ID_SNAPSHOT_RENDERER_10 = 4810
OTP_DO_ID_SNAPSHOT_RENDERER_11 = 4811
OTP_DO_ID_SNAPSHOT_RENDERER_12 = 4812
OTP_DO_ID_SNAPSHOT_RENDERER_13 = 4813
OTP_DO_ID_SNAPSHOT_RENDERER_14 = 4814
OTP_DO_ID_SNAPSHOT_RENDERER_15 = 4815
OTP_DO_ID_SNAPSHOT_RENDERER_16 = 4816
OTP_DO_ID_SNAPSHOT_RENDERER_17 = 4817
OTP_DO_ID_SNAPSHOT_RENDERER_18 = 4818
OTP_DO_ID_SNAPSHOT_RENDERER_19 = 4819
OTP_DO_ID_SNAPSHOT_RENDERER_20 = 4820
OTP_DO_ID_PIRATES_INVENTORY_MANAGER_BASE = 5001
OTP_ZONE_ID_INVALID = 0
OTP_ZONE_ID_OLD_QUIET_ZONE = 1
OTP_ZONE_ID_MANAGEMENT = 2
OTP_ZONE_ID_DISTRICTS = 3
OTP_ZONE_ID_DISTRICTS_STATS = 4
OTP_ZONE_ID_ELEMENTS = 5
OTP_NET_MESSENGER_CHANNEL = (OTP_DO_ID_UBER_DOG << 32) + OTP_ZONE_ID_MANAGEMENT
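# Descriptive note (added for clarity): the messenger channel packs a distributed
# object id in the upper 32 bits and a zone id in the lower 32 bits, so the value
# above is (4601 << 32) + 2, i.e. the UberDOG doId scoped to the management zone.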
|
ksmit799/Toontown-Source
|
otp/distributed/OtpDoGlobals.py
|
Python
|
mit
| 3,284
|
#!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
VERSION_FILENAME = 'CORE_VERSION'
CORE_NOT_COMPATIBLE_MESSAGE = (
'ycmd can\'t run: ycm_core lib too old, PLEASE RECOMPILE'
)
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def SetUpPythonPath():
sys.path.insert( 0, os.path.join( DirectoryOfThisScript(), '..' ) )
from ycmd import utils
utils.AddNearestThirdPartyFoldersToSysPath( __file__ )
def ExpectedCoreVersion():
return int( open( os.path.join( DirectoryOfThisScript(), '..',
VERSION_FILENAME ) ).read() )
def CompatibleWithCurrentCoreVersion():
import ycm_core
try:
current_core_version = ycm_core.YcmCoreVersion()
except AttributeError:
return False
return ExpectedCoreVersion() == current_core_version
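# Illustrative start-up sketch (commented out so importing this module stays
# side-effect free; the exact call site in ycmd may differ):
#
#   SetUpPythonPath()
#   if not CompatibleWithCurrentCoreVersion():
#       sys.exit( CORE_NOT_COMPATIBLE_MESSAGE )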
|
wow2006/ycmd
|
ycmd/server_utils.py
|
Python
|
gpl-3.0
| 1,528
|
# -*- coding: utf-8 -*-
#
# This file is part of the OpenHandWrite project software.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
from collections import OrderedDict
from psychopy import gui
from psychopy.iohub.client import launchHubServer
from constants import *
#
### Misc. utility functions used by the python experiment template.
#
def getImageFilePath(file_name):
'''
Returns the full, absolute, path to the image named file_name. file_name
    must be an image file located in the resources/image folder of the project.
If the file can not be found, None is returned.
:param file_name: image file name
:return: full path to image file in project, or None if file not found.
'''
pth = os.path.join(IMAGE_FOLDER,file_name)
if os.path.exists(pth):
return pth
return None
def getAudioFilePath(file_name):
'''
Returns the full, absolute, path to the audio file named file_name.
    file_name must be an audio file located in the resources/audio
folder of the project. If the file can not be found, None is returned.
:param file_name: audio file name
:return: full path to audio file in project, or None if file not found.
'''
pth = os.path.join(AUDIO_FOLDER,file_name)
if os.path.exists(pth):
return pth
return None
def getAvailableConditionsFileNames():
'''
Return a list of all .xlsx experiment condition file names that are in the
projects conditions subfolder.
:return: list of condition file name str
'''
if os.path.exists(CONDITIONS_FOLDER):
import glob
        cvfile_paths = glob.glob(CONDITIONS_FOLDER+os.path.sep+'*.xlsx')
        return [os.path.split(fpath)[1] for fpath in cvfile_paths]
return []
def isImageFileCandidate(file_name):
'''
Returns True if the file_name str should be considered an image file name
for use by an image stim graphic in the experiment. Otherwise returns False.
:param file_name: candidate image name string
:return: boolean
'''
try:
        # split on the last '.' only so names containing several dots are handled
        fname, fext = file_name.rsplit('.', 1)
if fext in ACCEPTED_IMAGE_FORMATS:
return True
return False
except:
return False
def showSessionInfoDialog():
'''
Display a dialog to collect session or participant level information
at the start of an experiment.
If the dialog OK button is pressed, a dictionary with the values entered
    for each dialog input is returned. If the dialog's Cancel button is pressed,
None is returned.
:return: dict of session info, or None if dialog was cancelled
'''
info = OrderedDict()
info['Session Code'] = DEFAULT_SESSION_CODE
info['Conditions File'] = getAvailableConditionsFileNames()
# info['ExpName'] =EXP_NAME
# info['ExpVersion'] = EXP_VERSION
infoDlg = gui.DlgFromDict(dictionary=info,
title='{} (v{})'.format(EXP_NAME, EXP_VERSION),
order = info.keys(),
)
# fixed=['ExpName','ExpVersion'])
if infoDlg.OK:
return info
return None
def start_iohub(sess_info):
'''
Starts the iohub server process, using data from the dict returned by
showSessionInfoDialog() to create the hdf5 file name. If the file
already exists, the existing file is renamed so that it is not
overwritten by the current sessions data.
iohub device configuration information is read from an
'iohub_config.yaml' file which must be in the same folder as this file.
The created ioHubConnection object is returned after the iohub
server has started and is ready for interaction with the experiment
runtime.
:param sess_info: dict returned from showSessionInfoDialog()
:return: ioHubConnection object
'''
import os, shutil
save_to = os.path.join(os.path.dirname(__file__),u'results',
sess_info['Session Code'])
save_to = os.path.normpath(save_to)
if not save_to.endswith('.hdf5'):
save_to = save_to+u'.hdf5'
fdir, sess_code = os.path.split(save_to)
if not os.path.exists(fdir):
os.mkdir(fdir)
#TODO: Ask if file should be overwritten, or new session code entered.
si = 1
save_dest = save_to
while os.path.exists(save_dest):
sname, sext = sess_code.rsplit(u'.',1)
save_dest = os.path.join(fdir, u"{}_{}.{}".format(sname,si,sext))
si+=1
    if save_dest != save_to:
shutil.move(save_to,save_dest)
sess_code=sess_code[0:min(len(sess_code),24)]
if sess_code.endswith(u'.hdf5'):
sess_code = sess_code[:-5]
if save_to.endswith(u'.hdf5'):
save_to = save_to[:-5]
kwargs={'experiment_code':EXP_NAME,
'session_code':sess_code,
'datastore_name':save_to,
'iohub_config_name': 'iohub_config.yaml'
}
return launchHubServer(**kwargs)
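# Hypothetical experiment start-up sketch (commented out; only the functions defined
# in this module are real, the surrounding names are illustrative):
#
#   sess_info = showSessionInfoDialog()
#   if sess_info is None:
#       sys.exit(0)          # participant pressed Cancel
#   io = start_iohub(sess_info)
#   saveWintabDeviceHardwareInfo(io)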
def saveWintabDeviceHardwareInfo(io):
'''
Save all available wintab device hardware information to the sessions .hdf5
file as a series of experiment message events. This function is called at
the start of the experiment, after the start_iohub() function has returned
the created iohub connection object.
The following areas of information are saved:
* wintab device hardware model information
* the availability, data range, etc, for each axis of the wintab device
* wintab context values read from the C CONTEXT struct at device init
:param io: ioHubConnection instance
:return: None
'''
wtdev = io.devices.tablet
io.sendMessageEvent(text="START WINTAB HW MODEL INFO")
for k, v in wtdev.model.items():
io.sendMessageEvent(text="{}: {}".format(k,v))
io.sendMessageEvent(text="STOP WINTAB HW MODEL INFO")
io.sendMessageEvent(text="START WINTAB AXIS INFO")
for axname, axinfo in wtdev.axis.items():
io.sendMessageEvent(text="{} Axis:".format(axname))
for k, v in axinfo.items():
io.sendMessageEvent(text="{}: {}".format(k,v))
io.sendMessageEvent(text="END WINTAB AXIS INFO")
io.sendMessageEvent(text="START WINTAB CONTEXT INFO")
for k, v in wtdev.context.items():
io.sendMessageEvent(text="{}: {}".format(k,v))
io.sendMessageEvent(text="END WINTAB CONTEXT INFO")
|
isolver/OpenHandWrite
|
distribution/getwrite/experiments/ExperimentTemplate/util.py
|
Python
|
gpl-3.0
| 7,017
|
def add_native_methods(clazz):
def getOGLIdString____():
raise NotImplementedError()
clazz.getOGLIdString____ = staticmethod(getOGLIdString____)
|
laffra/pava
|
pava/implementation/natives/sun/java2d/opengl/OGLContext.py
|
Python
|
mit
| 163
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('io', parent_package, top_path)
config.add_subpackage('tests')
config.add_subpackage('imageformats')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bthirion/nipy
|
nipy/io/setup.py
|
Python
|
bsd-3-clause
| 499
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "helgapp.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
burk/helgapp
|
manage.py
|
Python
|
mit
| 250
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
# Load extensions
(
sh % "cat"
<< r"""
[extensions]
arcconfig=$TESTDIR/../edenscm/hgext/extlib/phabricator/arcconfig.py
arcdiff=
"""
>> "$HGRCPATH"
)
# Diff with no revision
sh % "hg init repo"
sh % "cd repo"
sh % "touch foo"
sh % "hg add foo"
sh % "hg ci -qm 'No rev'"
sh % "hg diff --since-last-submit" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
# Fake a diff
sh % "echo bleet" > "foo"
sh % "hg ci -qm 'Differential Revision: https://phabricator.fb.com/D1'"
sh % "hg diff --since-last-submit" == r"""
abort: no .arcconfig found
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: no .arcconfig found
[255]"""
# Prep configuration
sh % "echo '{}'" > ".arcrc"
sh % 'echo \'{"config" : {"default" : "https://a.com/api"}, "hosts" : {"https://a.com/api/" : { "user" : "testuser", "oauth" : "garbage_cert"}}}\'' > ".arcconfig"
# Now progressively test the response handling for variations of missing data
sh % "cat" << r"""
[{}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"differential_diffs": {"count": 3},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
# This is the case when the diff is up to date with the current commit;
# there is no diff since what was landed.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"2e6531b7dada2a3e5638e136de05f51e94a427f4\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "2e6531b7dada2a3e5638e136de05f51e94a427f4 Differential Revision: https://phabricator.fb.com/D1"
# This is the case when the diff points at our parent commit, we expect to
# see the bleet text show up. There's a fake hash that I've injected into
# the commit list returned from our mocked phabricator; it is present to
# assert that we order the commits consistently based on the time field.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"88dd5a13bf28b99853a24bddfc93d4c44e07c6bd\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit-2o" == r"""
Phabricator rev: 88dd5a13bf28b99853a24bddfc93d4c44e07c6bd
Local rev: 2e6531b7dada2a3e5638e136de05f51e94a427f4 (.)
Changed: foo
| ...
| +bleet"""
# Make a new commit on top, and then use -r to look at the previous commit
sh % "echo other" > "foo"
sh % "hg commit -m 'Other commmit'"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates -r 2e6531b" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(2e6531b)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
|
facebookexperimental/eden
|
eden/scm/tests/test-fb-hgext-diff-since-last-submit-t.py
|
Python
|
gpl-2.0
| 6,148
|
#!/usr/bin/python
try:
import autotest.common as common
except ImportError:
import common
import unittest
# This has to be done very early.
from autotest.client.shared.settings import settings
settings.override_value('HOSTS', 'default_protection', 'NO_PROTECTION')
from autotest.frontend import setup_django_environment
from autotest.frontend import setup_test_environment
from autotest.frontend.shared import resource_test_utils
from autotest.frontend.afe import control_file, models, model_attributes
class AfeResourceTestCase(resource_test_utils.ResourceTestCase):
URI_PREFIX = 'http://testserver/afe/server/resources'
CONTROL_FILE_CONTENTS = 'my control file contents'
def setUp(self):
super(AfeResourceTestCase, self).setUp()
self._add_additional_data()
def _add_additional_data(self):
models.Test.objects.create(name='mytest',
test_type=model_attributes.TestTypes.SERVER,
path='/path/to/mytest')
class FilteringPagingTest(AfeResourceTestCase):
# we'll arbitrarily choose to use hosts for this
def setUp(self):
super(FilteringPagingTest, self).setUp()
self.labels[0].host_set = [self.hosts[0], self.hosts[1]]
for host in self.hosts[:3]:
host.locked = True
host.save()
def test_simple_filtering(self):
response = self.request('get', 'hosts?locked=true&has_label=label1')
self.check_collection(response, 'hostname', ['host1', 'host2'])
def test_in_filtering(self):
response = self.request('get', 'hosts?hostname:in=host1,host2')
self.check_collection(response, 'hostname', ['host1', 'host2'])
def test_paging(self):
response = self.request('get', 'hosts?start_index=1&items_per_page=2')
self.check_collection(response, 'hostname', ['host2', 'host3'])
self.assertEquals(response['total_results'], 9)
self.assertEquals(response['items_per_page'], 2)
self.assertEquals(response['start_index'], 1)
def test_full_representations(self):
response = self.request(
'get', 'hosts?hostname=host1&full_representations=true')
self.check_collection(response, 'hostname', ['host1'])
host = response['members'][0]
# invalid only included in full representation
self.assertEquals(host['invalid'], False)
class MiscellaneousTest(AfeResourceTestCase):
def test_trailing_slash(self):
response = self.request('get', 'hosts/host1/')
self.assertEquals(response['hostname'], 'host1')
class AtomicGroupClassTest(AfeResourceTestCase):
def test_collection(self):
response = self.request('get', 'atomic_group_classes')
self.check_collection(response, 'name', ['atomic1', 'atomic2'],
length=2)
def test_entry(self):
response = self.request('get', 'atomic_group_classes/atomic1')
self.assertEquals(response['name'], 'atomic1')
self.assertEquals(response['max_number_of_machines'], 2)
def test_labels(self):
self.check_relationship('atomic_group_classes/atomic1', 'labels',
'label', 'name', ['label4', 'label5'])
class LabelTest(AfeResourceTestCase):
def test_collection(self):
response = self.request('get', 'labels')
self.check_collection(response, 'name', ['label1', 'label2'], length=9,
check_number=2)
label1 = self.sorted_by(response['members'], 'name')[0]
self.assertEquals(label1['is_platform'], False)
def test_entry(self):
response = self.request('get', 'labels/label1')
self.assertEquals(response['name'], 'label1')
self.assertEquals(response['is_platform'], False)
self.assertEquals(response['atomic_group_class'], None)
def test_hosts(self):
self.check_relationship('labels/label1', 'hosts', 'host', 'hostname',
['host1'])
class UserTest(AfeResourceTestCase):
def test_collection(self):
response = self.request('get', 'users')
self.check_collection(response, 'username',
['autotest_system', 'debug_user'])
def test_entry(self):
response = self.request('get', 'users/debug_user')
self.assertEquals(response['username'], 'debug_user')
me_response = self.request('get', 'users/@me')
self.assertEquals(response, me_response)
def test_acls(self):
self.check_relationship('users/debug_user', 'acls', 'acl', 'name',
['Everyone', 'my_acl'])
def test_accessible_hosts(self):
group = models.AclGroup.objects.create(name='mygroup')
models.User.objects.get(login='debug_user').aclgroup_set = [group]
self.hosts[0].aclgroup_set = [group]
user = self.request('get', 'users/debug_user')
response = self.request('get', user['accessible_hosts']['href'])
self.check_collection(response, 'hostname', ['host1'])
class AclTest(AfeResourceTestCase):
def test_collection(self):
response = self.request('get', 'acls')
self.check_collection(response, 'name', ['Everyone', 'my_acl'])
def test_entry(self):
response = self.request('get', 'acls/my_acl')
self.assertEquals(response['name'], 'my_acl')
def test_users(self):
self.check_relationship('acls/my_acl', 'users', 'user', 'username',
['autotest_system', 'debug_user'])
def test_hosts(self):
self.check_relationship('acls/my_acl', 'hosts', 'host', 'hostname',
['host1', 'host2'], length=9, check_number=2)
class HostTest(AfeResourceTestCase):
def test_collection(self):
response = self.request('get', 'hosts')
self.check_collection(response, 'hostname', ['host1', 'host2'],
length=9, check_number=2)
host1 = self.sorted_by(response['members'], 'hostname')[0]
self.assertEquals(host1['platform']['name'], 'myplatform')
self.assertEquals(host1['locked'], False)
self.assertEquals(host1['status'], 'Ready')
def test_entry(self):
response = self.request('get', 'hosts/host1')
self.assertEquals(response['protection_level'], 'No protection')
def test_labels(self):
self.check_relationship('hosts/host1', 'labels', 'label', 'name',
['label1', 'myplatform'])
def test_acls(self):
self.check_relationship('hosts/host1', 'acls', 'acl', 'name',
['my_acl'])
def test_queue_entries(self):
self._create_job(hosts=[1])
host = self.request('get', 'hosts/host1')
entries = self.request('get', host['queue_entries']['href'])
self.check_collection(entries, ['job', 'id'], [1])
def test_health_tasks(self):
models.SpecialTask.schedule_special_task(
host=self.hosts[0], task=models.SpecialTask.Task.VERIFY)
host = self.request('get', 'hosts/host1')
tasks = self.request('get', host['health_tasks']['href'])
self.check_collection(tasks, 'task_type', ['Verify'])
def test_put(self):
response = self.request('put', 'hosts/host1', data={'locked': True})
self.assertEquals(response['locked'], True)
response = self.request('get', 'hosts/host1')
self.assertEquals(response['locked'], True)
self.assertEquals(response['locked_by']['username'], 'debug_user')
def test_post(self):
data = {'hostname': 'newhost',
'platform': {'href': self.URI_PREFIX + '/labels/myplatform'},
'protection_level': 'Do not verify'}
response = self.request('post', 'hosts', data=data)
self.assertEquals(response, self.URI_PREFIX + '/hosts/newhost')
host = models.Host.objects.get(hostname='newhost')
self.assertEquals(host.platform().name, 'myplatform')
self.assertEquals(host.protection, models.Host.Protection.DO_NOT_VERIFY)
def _check_labels(self, host, expected_labels):
label_names = sorted(label.name for label in host.labels.all())
self.assertEquals(label_names, sorted(expected_labels))
def test_add_label(self):
labels_href = self.request('get', 'hosts/host1')['labels']['href']
data = {'label': self.URI_PREFIX + '/labels/label2'}
response = self.request('post', labels_href, data=data)
self._check_labels(self.hosts[0], ['label1', 'label2', 'myplatform'])
def test_remove_label(self):
labels_href = self.request('get', 'hosts/host1')['labels']['href']
labels_href += '&label=label1'
labelings = self.request('get', labels_href)['members']
self.assertEquals(len(labelings), 1)
self.request('delete', labelings[0]['href'])
self._check_labels(self.hosts[0], ['myplatform'])
def test_delete(self):
self.request('delete', 'hosts/host1')
hosts = models.Host.valid_objects.filter(hostname='host1')
self.assertEquals(len(hosts), 0)
class TestTest(AfeResourceTestCase): # yes, we're testing the "tests" resource
def test_collection(self):
response = self.request('get', 'tests')
self.check_collection(response, 'name', ['mytest'])
def test_entry(self):
response = self.request('get', 'tests/mytest')
self.assertEquals(response['name'], 'mytest')
self.assertEquals(response['control_file_type'], 'Server')
self.assertEquals(response['control_file_path'], '/path/to/mytest')
def test_dependencies(self):
models.Test.objects.get(name='mytest').dependency_labels = [self.label3]
self.check_relationship('tests/mytest', 'dependencies', 'label', 'name',
['label3'])
class ExecutionInfoTest(AfeResourceTestCase):
def setUp(self):
super(ExecutionInfoTest, self).setUp()
def mock_read_control_file(test):
return self.CONTROL_FILE_CONTENTS
self.god.stub_with(control_file, 'read_control_file',
mock_read_control_file)
def test_get(self):
response = self.request('get', 'execution_info?tests=mytest')
info = response['execution_info']
self.assert_(self.CONTROL_FILE_CONTENTS in info['control_file'])
self.assertEquals(info['is_server'], True)
self.assertEquals(info['machines_per_execution'], 1)
class QueueEntriesRequestTest(AfeResourceTestCase):
def test_get(self):
response = self.request(
'get',
'queue_entries_request?hosts=host1,host2&meta_hosts=label1')
# choose an arbitrary but consistent ordering to ease checking
def entry_href(entry):
if 'host' in entry:
return entry['host']['href']
return entry['meta_host']['href']
entries = sorted(response['queue_entries'], key=entry_href)
expected = [
{'host': {'href': self.URI_PREFIX + '/hosts/host1'}},
{'host': {'href': self.URI_PREFIX + '/hosts/host2'}},
{'meta_host': {'href': self.URI_PREFIX + '/labels/label1'}}]
self.assertEquals(entries, expected)
class JobTest(AfeResourceTestCase):
def setUp(self):
super(JobTest, self).setUp()
for _ in xrange(2):
self._create_job(hosts=[1, 2])
job = models.Job.objects.get(id=1)
job.control_file = self.CONTROL_FILE_CONTENTS
job.save()
models.JobKeyval.objects.create(job=job, key='mykey', value='myvalue')
def test_collection(self):
response = self.request('get', 'jobs')
self.check_collection(response, 'id', [1, 2])
def test_keyval_filtering(self):
response = self.request('get', 'jobs?has_keyval=mykey=myvalue')
self.check_collection(response, 'id', [1])
def test_entry(self):
response = self.request('get', 'jobs/1')
self.assertEquals(response['id'], 1)
self.assertEquals(response['name'], 'test')
self.assertEquals(response['keyvals'], {'mykey': 'myvalue'})
info = response['execution_info']
self.assertEquals(info['control_file'], self.CONTROL_FILE_CONTENTS)
self.assertEquals(info['is_server'], False)
self.assertEquals(info['cleanup_before_job'], 'Never')
self.assertEquals(info['cleanup_after_job'], 'Always')
self.assertEquals(info['machines_per_execution'], 1)
self.assertEquals(info['run_verify'], True)
def test_queue_entries(self):
job = self.request('get', 'jobs/1')
entries = self.request('get', job['queue_entries']['href'])
self.check_collection(entries, ['host', 'hostname'], ['host1', 'host2'])
def _test_post_helper(self, owner):
data = {'name': 'myjob',
'execution_info': {'control_file': self.CONTROL_FILE_CONTENTS,
'is_server': True},
'owner': owner,
'drone_set': models.DroneSet.default_drone_set_name(),
'queue_entries':
[{'host': {'href': self.URI_PREFIX + '/hosts/host1'}},
{'host': {'href': self.URI_PREFIX + '/hosts/host2'}}]}
response = self.request('post', 'jobs', data=data)
self.assertEquals(response, self.URI_PREFIX + '/jobs/3')
job = models.Job.objects.get(id=3)
self.assertEquals(job.name, 'myjob')
self.assertEquals(job.control_file, self.CONTROL_FILE_CONTENTS)
self.assertEquals(job.control_type, models.Job.ControlType.SERVER)
entries = job.hostqueueentry_set.order_by('host__hostname')
self.assertEquals(entries[0].host.hostname, 'host1')
self.assertEquals(entries[1].host.hostname, 'host2')
owner_test = owner
if not owner_test:
owner_test = models.User.current_user().login
self.assertEquals(job.owner, owner_test)
def test_post_no_owner(self):
self._test_post_helper(None)
def test_post_with_owner(self):
self._test_post_helper('job_owner')
class DirectoryTest(AfeResourceTestCase):
def test_get(self):
response = self.request('get', '')
for key in ('atomic_group_classes', 'labels', 'users', 'acl_groups',
'hosts', 'tests', 'jobs', 'execution_info',
'queue_entries_request'):
self.assert_(key in response)
if __name__ == '__main__':
unittest.main()
|
joyxu/autotest
|
frontend/afe/resources_unittest.py
|
Python
|
gpl-2.0
| 14,636
|
from .voxel_dir import task_dir, storage_dir, image_dir
|
andyneff/voxel-globe
|
voxel_globe/tools/__init__.py
|
Python
|
mit
| 55
|
from defaults import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test_db',
}
}
|
AlexandreProenca/backend-morandofloripa
|
morandofloripa/settings/tests.py
|
Python
|
mit
| 157
|
#!/usr/bin/env python2
# Copyright (c) 2014 The FacileCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework import FacileCoinTestFramework
from util import *
import json
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def http_get_call(host, port, path, response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (FacileCoinTestFramework):
FORMAT_SEPARATOR = "."
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
bb_hash = self.nodes[0].getbestblockhash()
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
        tx_hash = json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
        # use the response object returned for this call, not the earlier block response
        hex_response = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
        assert_equal(hex_response.status, 200)
        assert_greater_than(int(hex_response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].setgenerate(True, 1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
if __name__ == '__main__':
RESTTest ().main ()
|
facilecoin/facilecoin-core
|
qa/rpc-tests/rest.py
|
Python
|
mit
| 3,267
|
# XXX: This file is copied directly from https://github.com/fuhrysteve/marshmallow-jsonschema
# If changes are made here please make sure that they are also available upstream.
import datetime
import uuid
import decimal
from marshmallow import fields, missing
from marshmallow.compat import text_type, binary_type
__all__ = ['dump_schema']
_RECURSIVE_NESTED = 'self'
TYPE_MAP = {
dict: {
'type': 'object',
},
list: {
'type': 'array',
},
datetime.time: {
'type': 'string',
'format': 'time',
},
datetime.timedelta: {
# TODO explore using 'range'?
'type': 'string',
},
datetime.datetime: {
'type': 'string',
'format': 'date-time',
},
datetime.date: {
'type': 'string',
'format': 'date',
},
uuid.UUID: {
'type': 'string',
'format': 'uuid',
},
text_type: {
'type': 'string',
},
binary_type: {
'type': 'string',
},
decimal.Decimal: {
'type': 'number',
'format': 'decimal',
},
set: {
'type': 'array',
},
tuple: {
'type': 'array',
},
float: {
'type': 'number',
'format': 'float',
},
int: {
'type': 'number',
'format': 'integer',
},
bool: {
'type': 'boolean',
},
}
def dump_schema(schema_obj):
json_schema = {
"type": "object",
"properties": {},
"required": [],
}
mapping = {v: k for k, v in schema_obj.TYPE_MAPPING.items()}
mapping[fields.Email] = text_type
mapping[fields.Dict] = dict
mapping[fields.List] = list
mapping[fields.Url] = text_type
mapping[fields.LocalDateTime] = datetime.datetime
for field_name, field in sorted(schema_obj.fields.items()):
schema = None
if field.__class__ in mapping:
pytype = mapping[field.__class__]
schema = _from_python_type(field, pytype)
elif isinstance(field, fields.Nested):
schema = _from_nested_schema(field)
elif issubclass(field.__class__, fields.Field):
for cls in mapping.keys():
if issubclass(field.__class__, cls):
pytype = mapping[cls]
schema = _from_python_type(field, pytype)
break
if schema is None:
raise ValueError('unsupported field type %s' % field)
field_name = field.dump_to or field.name
json_schema['properties'][field_name] = schema
if field.required:
json_schema['required'].append(field.name)
return json_schema
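# Minimal usage sketch (commented out; UserSchema is a hypothetical schema, not part
# of this module, and the output shown is approximate):
#
#   from marshmallow import Schema, fields
#
#   class UserSchema(Schema):
#       name = fields.String(required=True)
#       age = fields.Integer()
#
#   dump_schema(UserSchema())
#   # -> {'type': 'object',
#   #     'properties': {'age': {'title': 'age', 'type': 'number', 'format': 'integer'},
#   #                    'name': {'title': 'name', 'type': 'string'}},
#   #     'required': ['name']}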
def _from_python_type(field, pytype):
json_schema = {
'title': field.attribute or field.name,
}
for key, val in TYPE_MAP[pytype].items():
json_schema[key] = val
if field.default is not missing:
json_schema['default'] = field.default
return json_schema
def _from_nested_schema(field):
if field.nested == _RECURSIVE_NESTED:
parent_class = field.parent.__class__
nested = parent_class(many=field.many, only=field.only, exclude=field.exclude)
else:
nested = field.nested()
schema = dump_schema(nested)
if field.many:
schema = {
'type': ["array"] if field.required else ['array', 'null'],
'items': schema
}
return schema
|
deliveryhero/lymph-schema
|
lymph/schema/_jsonschema.py
|
Python
|
apache-2.0
| 3,409
|
import logging
from time import sleep
from celery.result import AsyncResult
from django.core.management import BaseCommand
from delft3dcontainermanager.tasks import do_docker_remove, get_docker_ps
from delft3dworker.models import Container, Scene
"""
Synchronization command that's called periodically.
- Run docker ps (celery task)
- Loop over container models and compare with the output of docker ps
- Missing container model (orphan) -> Error, stop container
- For the other container run container.update_state(docker_ps)
- Finally loop over the scene models and call update_state()
"""
class Command(BaseCommand):
help = "sync containers with container and scene model"
def handle(self, *args, **options):
# STEP I : Loop over non empty celery_task_ids in containers
# Sets task_uuid to None except for when a task is queued
        # Queued for log, no start? Use expire
self._update_container_tasks()
# STEP II : Get latest container statuses
if self._get_latest_docker_status():
# STEP III : Update Scenes and their Phases
# Controls container desired states
self._update_scene_phases()
# STEP IV : Synchronise Container Models with docker containers
self._fix_container_state_mismatches_or_log()
def _update_container_tasks(self):
"""
Update Containers with results from finished tasks.
"""
celery_set = set(Container.objects.exclude(task_uuid__exact=None))
for container in celery_set:
container.update_task_result()
def _get_latest_docker_status(self):
"""
Synchronise local Django Container models with remote Docker containers
"""
ps = get_docker_ps.apply_async(queue="priority")
# Wait until the task finished successfully
# or return if waiting too long
checked = 0
while not ps.successful():
sleep(1)
# if things take too long, revoke the task and return
checked += 1
if checked >= 30:
ps.revoke()
return False
        # task is successful, so we get the result and create a set
containers_docker = ps.result
docker_dict = {x["Id"]: x for x in containers_docker}
docker_set = set(docker_dict.keys())
# retrieve container from database
container_set = set(
Container.objects.exclude(docker_id__exact="").values_list(
"docker_id", flat=True
)
)
        # Work out the matching matrix between Container models and docker containers:
        #
        #                     docker: yes   docker: no
        #   model: yes            1_1          1_0
        #   model: no             0_1          0_0
        #
m_1_1 = container_set & docker_set
m_1_0 = container_set - docker_set
m_0_1 = docker_set - container_set
m_0_0 = (
(docker_set | container_set)
- (docker_set ^ container_set)
- (docker_set & container_set)
)
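        # Illustrative example (hypothetical ids): with docker_set = {"a", "b"} and
        # container_set = {"b", "c"} the sets become m_1_1 = {"b"}, m_1_0 = {"c"},
        # m_0_1 = {"a"} and m_0_0 = set() (m_0_0 is empty by construction), so "b"
        # and "c" get their model state updated below while the orphan docker
        # container "a" is reported and removed.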
# Update state of all matching containers
container_match = m_1_1 | m_1_0
for con_id in container_match:
snapshot = docker_dict[con_id] if con_id in docker_dict else None
for c in Container.objects.filter(docker_id=con_id):
c.update_from_docker_snapshot(snapshot)
# Call error for mismatch
container_mismatch = m_0_1 | m_0_0
for container in container_mismatch:
info = docker_dict[container]
if (
"Config" in info
and "Labels" in info["Config"]
and "type" in info["Config"]["Labels"]
):
type = info["Config"]["Labels"]["type"]
choices = Container.CONTAINER_TYPE_CHOICES
if type in [choice[0] for choice in choices]:
msg = "Docker container {} not found in database!".format(container)
self.stderr.write(msg)
do_docker_remove.delay(container, force=True)
else:
logging.info("Found non-delft3dgt docker container, ignoring.")
return True # successful
def _update_scene_phases(self):
"""
Update Scenes with latest status of their Containers, and possibly
shift Scene phase
"""
# ordering is done on start date (first, and id second):
# if a simulation slot is available, we want simulations to start
# in order of their date_started
for scene in Scene.objects.all().order_by("date_started", "id"):
scene.update_and_phase_shift()
def _fix_container_state_mismatches_or_log(self):
for container in Container.objects.all():
container.fix_mismatch_or_log()
|
openearth/delft3d-gt-server
|
delft3dworker/management/commands/containersync_sceneupdate.py
|
Python
|
gpl-3.0
| 4,813
|
from lnst.Common.Utils import bool_it
from lnst.Controller.Task import ctl
from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
from lnst.Controller.PerfRepoUtils import netperf_result_template
from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
from lnst.RecipeCommon.IRQ import pin_dev_irqs
from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
# ------
# SETUP
# ------
mapping_file = ctl.get_alias("mapping_file")
perf_api = ctl.connect_PerfRepo(mapping_file)
product_name = ctl.get_alias("product_name")
m1 = ctl.get_host("testmachine1")
m2 = ctl.get_host("testmachine2")
m1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
m2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
# ------
# TESTS
# ------
ipv = ctl.get_alias("ipv")
mtu = ctl.get_alias("mtu")
netperf_duration = int(ctl.get_alias("netperf_duration"))
nperf_reserve = int(ctl.get_alias("nperf_reserve"))
nperf_confidence = ctl.get_alias("nperf_confidence")
nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
nperf_cpupin = ctl.get_alias("nperf_cpupin")
nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
nperf_msg_size = ctl.get_alias("nperf_msg_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
nperf_protocols = ctl.get_alias("nperf_protocols")
official_result = bool_it(ctl.get_alias("official_result"))
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
test_if1 = m1.get_interface("test_if")
test_if1.set_mtu(mtu)
test_if2 = m2.get_interface("test_if")
test_if2.set_mtu(mtu)
if nperf_cpupin:
m1.run("service irqbalance stop")
m2.run("service irqbalance stop")
m1_phy1 = m1.get_interface("eth")
m2_phy1 = m2.get_interface("eth")
dev_list = [(m1, m1_phy1), (m2, m2_phy1)]
# this will pin devices irqs to cpu #0
for m, d in dev_list:
pin_dev_irqs(m, d, 0)
nperf_opts = ""
if nperf_cpupin and nperf_num_parallel == 1:
nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
ctl.wait(15)
ping_opts = {"count": 100, "interval": 0.1}
client_opts = {"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"num_parallel" : nperf_num_parallel,
"cpu_util" : nperf_cpu_util,
"runs": nperf_max_runs,
"netperf_opts": nperf_opts,
"debug": nperf_debug,
"max_deviation": nperf_max_dev}
if nperf_msg_size is not None:
client_opts["msg_size"] = nperf_msg_size
if ipv in [ 'ipv4', 'both' ]:
ping((m1, test_if1, 0, {"scope": 0}),
(m2, test_if2, 0, {"scope": 0}),
options=ping_opts)
ctl.wait(2)
# prepare PerfRepo result for tcp
if nperf_protocols.find("tcp") > -1:
result_tcp = perf_api.new_result("tcp_ipv4_id",
"tcp_ipv4_result",
hash_ignore=[
r'kernel_release',
r'redhat_release',
r'test_if\.hwaddr'])
result_tcp.add_tag(product_name)
if nperf_num_parallel > 1:
result_tcp.add_tag("multithreaded")
result_tcp.set_parameter('num_parallel', nperf_num_parallel)
if nperf_msg_size is not None:
result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_tcp)
baseline = perfrepo_baseline_to_dict(baseline)
client_opts["testname"] = "TCP_STREAM"
client_opts["netperf_opts"] = nperf_opts
tcp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
(m2, test_if2, 0, {"scope": 0}),
client_opts = client_opts, baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
netperf_result_template(result_tcp, tcp_res_data)
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp, official_result)
if nperf_protocols.find("udp") > -1:
# prepare PerfRepo result for udp
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
hash_ignore=[
r'kernel_release',
r'redhat_release',
r'test_if\.hwaddr'])
result_udp.add_tag(product_name)
if nperf_num_parallel > 1:
result_udp.add_tag("multithreaded")
result_udp.set_parameter('num_parallel', nperf_num_parallel)
if nperf_msg_size is not None:
result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_udp)
baseline = perfrepo_baseline_to_dict(baseline)
client_opts["testname"] = "UDP_STREAM"
client_opts["netperf_opts"] = nperf_opts
udp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
(m2, test_if2, 0, {"scope": 0}),
client_opts = client_opts, baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
netperf_result_template(result_udp, udp_res_data)
result_udp.set_comment(pr_comment)
perf_api.save_result(result_udp, official_result)
if ipv in [ 'ipv6', 'both' ]:
ping6((m1, test_if1, 1, {"scope": 0}),
(m2, test_if2, 1, {"scope": 0}),
options=ping_opts)
if nperf_protocols.find("tcp") > -1:
# prepare PerfRepo result for tcp ipv6
result_tcp = perf_api.new_result("tcp_ipv6_id",
"tcp_ipv6_result",
hash_ignore=[
r'kernel_release',
r'redhat_release',
r'test_if\.hwaddr'])
result_tcp.add_tag(product_name)
if nperf_num_parallel > 1:
result_tcp.add_tag("multithreaded")
result_tcp.set_parameter('num_parallel', nperf_num_parallel)
if nperf_msg_size is not None:
result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_tcp)
baseline = perfrepo_baseline_to_dict(baseline)
client_opts["testname"] = "TCP_STREAM"
client_opts["netperf_opts"] = nperf_opts + " -6"
tcp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
(m2, test_if2, 1, {"scope": 0}),
client_opts = client_opts, baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
netperf_result_template(result_tcp, tcp_res_data)
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp, official_result)
if nperf_protocols.find("udp") > -1:
# prepare PerfRepo result for udp ipv6
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=[
r'kernel_release',
r'redhat_release',
r'test_if\.hwaddr'])
result_udp.add_tag(product_name)
if nperf_num_parallel > 1:
result_udp.add_tag("multithreaded")
result_udp.set_parameter('num_parallel', nperf_num_parallel)
if nperf_msg_size is not None:
result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
baseline = perf_api.get_baseline_of_result(result_udp)
baseline = perfrepo_baseline_to_dict(baseline)
client_opts["testname"] = "UDP_STREAM"
client_opts["netperf_opts"] = nperf_opts + " -6"
udp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
(m2, test_if2, 1, {"scope": 0}),
client_opts = client_opts, baseline = baseline,
timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
netperf_result_template(result_udp, udp_res_data)
result_udp.set_comment(pr_comment)
perf_api.save_result(result_udp, official_result)
if nperf_cpupin:
m1.run("service irqbalance start")
m2.run("service irqbalance start")
|
jiriprochazka/lnst
|
recipes/regression_tests/phase3/vxlan_remote.py
|
Python
|
gpl-2.0
| 8,824
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class APIError(Model):
"""Error information returned by the API.
:param code: The error code.
:type code: object
:param message: A message explaining the error reported by the service.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'object'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, code=None, message: str=None, **kwargs) -> None:
super(APIError, self).__init__(**kwargs)
self.code = code
self.message = message
class APIErrorException(HttpOperationError):
"""Server responded with exception of type: 'APIError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(APIErrorException, self).__init__(deserialize, response, 'APIError', *args)
class ChangePointDetectRequest(Model):
"""ChangePointDetectRequest.
All required parameters must be populated in order to send to Azure.
:param series: Required. Time series data points. Points should be sorted
by timestamp in ascending order to match the change point detection
result.
:type series: list[~azure.cognitiveservices.anomalydetector.models.Point]
:param granularity: Required. Can only be one of yearly, monthly, weekly,
     daily, hourly, minutely or secondly. Granularity is used to verify
     whether the input series is valid. Possible values include: 'yearly',
'monthly', 'weekly', 'daily', 'hourly', 'minutely', 'secondly'
:type granularity: str or
~azure.cognitiveservices.anomalydetector.models.Granularity
:param custom_interval: Custom Interval is used to set non-standard time
interval, for example, if the series is 5 minutes, request can be set as
{"granularity":"minutely", "customInterval":5}.
:type custom_interval: int
:param period: Optional argument, periodic value of a time series. If the
     value is null or not present, the API will determine the period
automatically.
:type period: int
:param stable_trend_window: Optional argument, advanced model parameter, a
default stableTrendWindow will be used in detection.
:type stable_trend_window: int
:param threshold: Optional argument, advanced model parameter, between
0.0-1.0, the lower the value is, the larger the trend error will be which
     means fewer change points will be accepted.
:type threshold: float
"""
_validation = {
'series': {'required': True},
'granularity': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[Point]'},
'granularity': {'key': 'granularity', 'type': 'Granularity'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'stable_trend_window': {'key': 'stableTrendWindow', 'type': 'int'},
'threshold': {'key': 'threshold', 'type': 'float'},
}
def __init__(self, *, series, granularity, custom_interval: int=None, period: int=None, stable_trend_window: int=None, threshold: float=None, **kwargs) -> None:
super(ChangePointDetectRequest, self).__init__(**kwargs)
self.series = series
self.granularity = granularity
self.custom_interval = custom_interval
self.period = period
self.stable_trend_window = stable_trend_window
self.threshold = threshold
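# Hypothetical construction sketch (commented out; timestamps and values are
# illustrative only and assume `from datetime import datetime`):
#
#   series = [Point(timestamp=datetime(2019, 1, 1, 0, m), value=float(m))
#             for m in range(0, 60, 5)]
#   request = ChangePointDetectRequest(series=series, granularity='minutely',
#                                      custom_interval=5)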
class ChangePointDetectResponse(Model):
"""ChangePointDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means
no recurrent pattern has been found.
:type period: int
:param is_change_point: Required. isChangePoint contains change point
properties for each input point. True means an anomaly either negative or
positive has been detected. The index of the array is consistent with the
input series.
:type is_change_point: list[bool]
:param confidence_scores: Required. the change point confidence of each
point
:type confidence_scores: list[float]
"""
_validation = {
'period': {'required': True},
'is_change_point': {'required': True},
'confidence_scores': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'is_change_point': {'key': 'isChangePoint', 'type': '[bool]'},
'confidence_scores': {'key': 'confidenceScores', 'type': '[float]'},
}
def __init__(self, *, period: int, is_change_point, confidence_scores, **kwargs) -> None:
super(ChangePointDetectResponse, self).__init__(**kwargs)
self.period = period
self.is_change_point = is_change_point
self.confidence_scores = confidence_scores
class EntireDetectResponse(Model):
"""EntireDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means
no recurrent pattern has been found.
:type period: int
:param expected_values: Required. ExpectedValues contain expected value
for each input point. The index of the array is consistent with the input
series.
:type expected_values: list[float]
:param upper_margins: Required. UpperMargins contain upper margin of each
input point. UpperMargin is used to calculate upperBoundary, which equals
to expectedValue + (100 - marginScale)*upperMargin. Anomalies in response
can be filtered by upperBoundary and lowerBoundary. By adjusting
marginScale value, less significant anomalies can be filtered in client
side. The index of the array is consistent with the input series.
:type upper_margins: list[float]
:param lower_margins: Required. LowerMargins contain lower margin of each
input point. LowerMargin is used to calculate lowerBoundary, which equals
to expectedValue - (100 - marginScale)*lowerMargin. Points between the
boundary can be marked as normal ones in client side. The index of the
array is consistent with the input series.
:type lower_margins: list[float]
:param is_anomaly: Required. IsAnomaly contains anomaly properties for
each input point. True means an anomaly either negative or positive has
been detected. The index of the array is consistent with the input series.
:type is_anomaly: list[bool]
:param is_negative_anomaly: Required. IsNegativeAnomaly contains anomaly
status in negative direction for each input point. True means a negative
anomaly has been detected. A negative anomaly means the point is detected
as an anomaly and its real value is smaller than the expected one. The
index of the array is consistent with the input series.
:type is_negative_anomaly: list[bool]
:param is_positive_anomaly: Required. IsPositiveAnomaly contain anomaly
status in positive direction for each input point. True means a positive
anomaly has been detected. A positive anomaly means the point is detected
as an anomaly and its real value is larger than the expected one. The
index of the array is consistent with the input series.
:type is_positive_anomaly: list[bool]
"""
_validation = {
'period': {'required': True},
'expected_values': {'required': True},
'upper_margins': {'required': True},
'lower_margins': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'expected_values': {'key': 'expectedValues', 'type': '[float]'},
'upper_margins': {'key': 'upperMargins', 'type': '[float]'},
'lower_margins': {'key': 'lowerMargins', 'type': '[float]'},
'is_anomaly': {'key': 'isAnomaly', 'type': '[bool]'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': '[bool]'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': '[bool]'},
}
def __init__(self, *, period: int, expected_values, upper_margins, lower_margins, is_anomaly, is_negative_anomaly, is_positive_anomaly, **kwargs) -> None:
super(EntireDetectResponse, self).__init__(**kwargs)
self.period = period
self.expected_values = expected_values
self.upper_margins = upper_margins
self.lower_margins = lower_margins
self.is_anomaly = is_anomaly
self.is_negative_anomaly = is_negative_anomaly
self.is_positive_anomaly = is_positive_anomaly
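# Illustrative sketch, not part of the generated client code: per the docstring
# above, each boundary is expectedValue +/- (100 - marginScale) * margin.  The
# 'margin_scale' default below is an assumption standing in for the value the
# service derives from the request's sensitivity setting.
def _example_entire_boundaries(response, margin_scale=95):
    # Pair each expected value with its margins and build the band point by point.
    upper = [e + (100 - margin_scale) * m
             for e, m in zip(response.expected_values, response.upper_margins)]
    lower = [e - (100 - margin_scale) * m
             for e, m in zip(response.expected_values, response.lower_margins)]
    return upper, lower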
class LastDetectResponse(Model):
"""LastDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means
no recurrent pattern has been found.
:type period: int
:param suggested_window: Required. Suggested input series points needed
for detecting the latest point.
:type suggested_window: int
:param expected_value: Required. Expected value of the latest point.
:type expected_value: float
:param upper_margin: Required. Upper margin of the latest point.
UpperMargin is used to calculate upperBoundary, which equals to
    expectedValue + (100 - marginScale)*upperMargin. If the value of the latest
    point is between upperBoundary and lowerBoundary, it should be treated as a
    normal value. By adjusting the marginScale value, the anomaly status of the
    latest point can be changed.
:type upper_margin: float
:param lower_margin: Required. Lower margin of the latest point.
LowerMargin is used to calculate lowerBoundary, which equals to
expectedValue - (100 - marginScale)*lowerMargin.
:type lower_margin: float
:param is_anomaly: Required. Anomaly status of the latest point, true
means the latest point is an anomaly either in negative direction or
positive direction.
:type is_anomaly: bool
:param is_negative_anomaly: Required. Anomaly status in negative direction
of the latest point. True means the latest point is an anomaly and its
real value is smaller than the expected one.
:type is_negative_anomaly: bool
:param is_positive_anomaly: Required. Anomaly status in positive direction
of the latest point. True means the latest point is an anomaly and its
real value is larger than the expected one.
:type is_positive_anomaly: bool
"""
_validation = {
'period': {'required': True},
'suggested_window': {'required': True},
'expected_value': {'required': True},
'upper_margin': {'required': True},
'lower_margin': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'suggested_window': {'key': 'suggestedWindow', 'type': 'int'},
'expected_value': {'key': 'expectedValue', 'type': 'float'},
'upper_margin': {'key': 'upperMargin', 'type': 'float'},
'lower_margin': {'key': 'lowerMargin', 'type': 'float'},
'is_anomaly': {'key': 'isAnomaly', 'type': 'bool'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': 'bool'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': 'bool'},
}
def __init__(self, *, period: int, suggested_window: int, expected_value: float, upper_margin: float, lower_margin: float, is_anomaly: bool, is_negative_anomaly: bool, is_positive_anomaly: bool, **kwargs) -> None:
super(LastDetectResponse, self).__init__(**kwargs)
self.period = period
self.suggested_window = suggested_window
self.expected_value = expected_value
self.upper_margin = upper_margin
self.lower_margin = lower_margin
self.is_anomaly = is_anomaly
self.is_negative_anomaly = is_negative_anomaly
self.is_positive_anomaly = is_positive_anomaly
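# Same caveat as the sketch above: a hypothetical check of whether the latest
# point sits inside the band implied by the margins of a LastDetectResponse.
def _example_latest_within_band(latest_value, response, margin_scale=95):
    upper = response.expected_value + (100 - margin_scale) * response.upper_margin
    lower = response.expected_value - (100 - margin_scale) * response.lower_margin
    return lower <= latest_value <= upper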
class Point(Model):
"""Point.
All required parameters must be populated in order to send to Azure.
:param timestamp: Required. Timestamp of a data point (ISO8601 format).
:type timestamp: datetime
:param value: Required. The measurement of that point, should be float.
:type value: float
"""
_validation = {
'timestamp': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(self, *, timestamp, value: float, **kwargs) -> None:
super(Point, self).__init__(**kwargs)
self.timestamp = timestamp
self.value = value
class Request(Model):
"""Request.
All required parameters must be populated in order to send to Azure.
:param series: Required. Time series data points. Points should be sorted
by timestamp in ascending order to match the anomaly detection result. If
    the data is not sorted correctly or there are duplicate timestamps, the API
    will not work. In that case, an error message will be returned.
:type series: list[~azure.cognitiveservices.anomalydetector.models.Point]
:param granularity: Required. Possible values include: 'yearly',
'monthly', 'weekly', 'daily', 'hourly', 'minutely', 'secondly'
:type granularity: str or
~azure.cognitiveservices.anomalydetector.models.Granularity
:param custom_interval: Custom Interval is used to set non-standard time
interval, for example, if the series is 5 minutes, request can be set as
{"granularity":"minutely", "customInterval":5}.
:type custom_interval: int
:param period: Optional argument, periodic value of a time series. If the
    value is null or is not present, the API will determine the period
automatically.
:type period: int
:param max_anomaly_ratio: Optional argument, advanced model parameter, max
anomaly ratio in a time series.
:type max_anomaly_ratio: float
:param sensitivity: Optional argument, advanced model parameter, between
    0-99; the lower the value, the larger the margin value will be, which
    means fewer anomalies will be accepted.
:type sensitivity: int
"""
_validation = {
'series': {'required': True},
'granularity': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[Point]'},
'granularity': {'key': 'granularity', 'type': 'Granularity'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'max_anomaly_ratio': {'key': 'maxAnomalyRatio', 'type': 'float'},
'sensitivity': {'key': 'sensitivity', 'type': 'int'},
}
def __init__(self, *, series, granularity, custom_interval: int=None, period: int=None, max_anomaly_ratio: float=None, sensitivity: int=None, **kwargs) -> None:
super(Request, self).__init__(**kwargs)
self.series = series
self.granularity = granularity
self.custom_interval = custom_interval
self.period = period
self.max_anomaly_ratio = max_anomaly_ratio
self.sensitivity = sensitivity
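# Illustrative sketch only: building a request for a made-up 5-minute series,
# following the docstring above ({"granularity": "minutely", "customInterval": 5}).
# The timestamps and values are invented for the example.
import datetime
_example_request = Request(
    series=[Point(timestamp=datetime.datetime(2019, 1, 1, 0, 0) +
                  datetime.timedelta(minutes=5 * i),
                  value=float(i % 7))
            for i in range(24)],
    granularity='minutely',
    custom_interval=5,
    sensitivity=95)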
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-anomalydetector/azure/cognitiveservices/anomalydetector/models/_models_py3.py
|
Python
|
mit
| 16,053
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Audio.filetype'
db.add_column('filer_audio', 'filetype',
self.gf('django.db.models.fields.CharField')(max_length=12, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Audio.filetype'
db.delete_column('filer_audio', 'filetype')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.audio': {
'Meta': {'object_name': 'Audio', '_ormbases': ['filer.File']},
'bits_per_sample': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'sample_rate': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'seconds_length': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_frames': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'filer.clipboard': {
'Meta': {'object_name': 'Clipboard'},
'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'in_clipboards'", 'symmetrical': 'False', 'through': "orm['filer.ClipboardItem']", 'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filer_clipboards'", 'to': "orm['auth.User']"})
},
'filer.clipboarditem': {
'Meta': {'object_name': 'ClipboardItem'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folderpermission': {
'Meta': {'object_name': 'FolderPermission'},
'can_add_children': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.User']"})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['filer']
|
hzlf/openbroadcast
|
website/__filer/migrations/0014_auto__add_field_audio_filetype.py
|
Python
|
gpl-3.0
| 11,114
|
import sys, os, glob, zipfile
ROOT = 'apps/Tasks/Release/'
FILES = [
'tasks.exe',
'locale/Tasks.*'
]
def paths():
for FILE in FILES:
for name in glob.iglob(os.path.join(ROOT, FILE)):
yield (name, os.path.join('bin', os.path.relpath(name, ROOT)))
with zipfile.ZipFile(sys.argv[1], 'w') as zip:
for src, dst in paths(): zip.write(src, dst)
zip.write('msbuild.log')
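# Usage sketch (the archive name below is a made-up example): the target zip
# path is taken from the first CLI argument, e.g.
#   python scripts/jenkins_win32.py tasks-win32.zip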
|
mbits-os/JiraDesktop
|
scripts/jenkins_win32.py
|
Python
|
mit
| 394
|
#!/usr/bin/env python
"""AFF4 RDFValue implementations.
This module contains the various RDFValue implementations.
"""
|
darrenbilby/grr
|
lib/rdfvalues/__init__.py
|
Python
|
apache-2.0
| 121
|
from django.contrib import admin
from manoseimas.scrapy import services
from manoseimas.scrapy.models import Question
from manoseimas.scrapy.models import Person
from manoseimas.scrapy.models import Voting
from manoseimas.scrapy.models import PersonVote
class VotingAdmin(admin.ModelAdmin):
list_display = ('title', 'name', 'timestamp', 'documents', 'votes', 'source')
search_fields = ('source',)
def title(self, obj):
return obj.get_title()
def documents(self, obj):
return len(obj.value['documents'])
def votes(self, obj):
return len(obj.value['votes']) if 'votes' in obj.value else None
def get_form(self, request, obj=None, **kwargs):
if obj is None:
kwargs['fields'] = ('source',)
return super(VotingAdmin, self).get_form(request, obj, **kwargs)
def save_form(self, request, form, change):
if change:
return super(VotingAdmin, self).save_form(request, form, change)
source = form.cleaned_data['source']
return services.crawl_voting(source)
def save_model(self, request, obj, form, change):
if change:
return super(VotingAdmin, self).save_model(request, obj, form, change)
def save_related(self, request, form, formsets, change):
if change:
return super(VotingAdmin, self).save_related(request, form, formsets, change)
class PersonVoteAdmin(admin.ModelAdmin):
list_display = ('p_asm_id', 'name', 'vote', 'value', 'fraction')
list_filter = ('name',)
admin.site.register(Question)
admin.site.register(Person)
admin.site.register(Voting, VotingAdmin)
admin.site.register(PersonVote, PersonVoteAdmin)
|
ManoSeimas/manoseimas.lt
|
manoseimas/scrapy/admin.py
|
Python
|
agpl-3.0
| 1,687
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class DigitekaIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:www\.)?(?:digiteka\.net|ultimedia\.com)/
(?:
deliver/
(?P<embed_type>
generic|
musique
)
(?:/[^/]+)*/
(?:
src|
article
)|
default/index/video
(?P<site_type>
generic|
music
)
/id
)/(?P<id>[\d+a-z]+)'''
_TESTS = [{
# news
'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r',
'md5': '276a0e49de58c7e85d32b057837952a2',
'info_dict': {
'id': 's8uk0r',
'ext': 'mp4',
'title': 'Loi sur la fin de vie: le texte prévoit un renforcement des directives anticipées',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 74,
'upload_date': '20150317',
'timestamp': 1426604939,
'uploader_id': '3fszv',
},
}, {
# music
'url': 'https://www.ultimedia.com/default/index/videomusic/id/xvpfp8',
'md5': '2ea3513813cf230605c7e2ffe7eca61c',
'info_dict': {
'id': 'xvpfp8',
'ext': 'mp4',
'title': 'Two - C\'est La Vie (clip)',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 233,
'upload_date': '20150224',
'timestamp': 1424760500,
'uploader_id': '3rfzk',
},
}, {
'url': 'https://www.digiteka.net/deliver/generic/iframe/mdtk/01637594/src/lqm3kl/zone/1/showtitle/1/autoplay/yes',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<(?:iframe|script)[^>]+src=["\'](?P<url>(?:https?:)?//(?:www\.)?ultimedia\.com/deliver/(?:generic|musique)(?:/[^/]+)*/(?:src|article)/[\d+a-z]+)',
webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_type = mobj.group('embed_type') or mobj.group('site_type')
if video_type == 'music':
video_type = 'musique'
deliver_info = self._download_json(
'http://www.ultimedia.com/deliver/video?video=%s&topic=%s' % (video_id, video_type),
video_id)
yt_id = deliver_info.get('yt_id')
if yt_id:
return self.url_result(yt_id, 'Youtube')
jwconf = deliver_info['jwconf']
formats = []
for source in jwconf['playlist'][0]['sources']:
formats.append({
'url': source['file'],
'format_id': source.get('label'),
})
self._sort_formats(formats)
title = deliver_info['title']
thumbnail = jwconf.get('image')
duration = int_or_none(deliver_info.get('duration'))
timestamp = int_or_none(deliver_info.get('release_time'))
uploader_id = deliver_info.get('owner_id')
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'uploader_id': uploader_id,
'formats': formats,
}
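# Illustrative sketch with a hypothetical host page: _extract_url() pulls the
# Ultimedia embed URL out of surrounding HTML so that other extractors can
# delegate to this one.
if __name__ == '__main__':
    _sample_page = ('<iframe src="//www.ultimedia.com/deliver/generic'
                    '/iframe/mdtk/01637594/src/lqm3kl"></iframe>')
    print(DigitekaIE._extract_url(_sample_page))
    # -> //www.ultimedia.com/deliver/generic/iframe/mdtk/01637594/src/lqm3kl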
|
mxamin/youtube-dl
|
youtube_dl/extractor/digiteka.py
|
Python
|
unlicense
| 3,507
|
class Command(object):
def __init__(self, args=None):
self.args = args
def __str__(self):
return '{0}'.format(self.__class__.__name__)
class GetState(Command):
pass
class Stop(Command):
pass
class GetProperty(Command):
def __init__(self, prop=None):
self.prop = prop
def __str__(self):
return '{0}:{1}'.format(self.__class__.__name__, self.prop)
class SetProperty(Command):
def __init__(self, prop, value):
self.prop = prop
self.value = value
def __str__(self):
return '{0}:{1}={2}'.format(self.__class__.__name__, self.prop, self.value)
class CallCommand(Command):
def __init__(self, cmd, args):
self.command = cmd
self.args = args
def __str__(self):
return '{0}:{1} with args={2}'.format(self.__class__.__name__, self.command, self.args)
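# Illustrative sketch: the commands above are plain value objects whose __str__
# gives a readable trace line, e.g. when logging what was dispatched.
if __name__ == '__main__':
    print(SetProperty('volume', 80))           # SetProperty:volume=80
    print(GetProperty('volume'))               # GetProperty:volume
    print(CallCommand('play', ['track.mp3']))  # CallCommand:play with args=['track.mp3']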
|
unix-beard/gloria
|
service/command.py
|
Python
|
mit
| 874
|
from __future__ import unicode_literals
from prompt_toolkit.completion import Completer, Completion
import os
__all__ = (
'PathCompleter',
'ExecutableCompleter',
)
class PathCompleter(Completer):
"""
Complete for Path variables.
:param get_paths: Callable which returns a list of directories to look into
when the user enters a relative path.
:param file_filter: Callable which takes a filename and returns whether
this file should show up in the completion. ``None``
when no filtering has to be done.
:param min_input_len: Don't do autocompletion when the input string is shorter.
"""
def __init__(self, only_directories=False, get_paths=None, file_filter=None,
min_input_len=0, expanduser=False):
assert get_paths is None or callable(get_paths)
assert file_filter is None or callable(file_filter)
assert isinstance(min_input_len, int)
assert isinstance(expanduser, bool)
self.only_directories = only_directories
self.get_paths = get_paths or (lambda: ['.'])
self.file_filter = file_filter or (lambda _: True)
self.min_input_len = min_input_len
self.expanduser = expanduser
def get_completions(self, document, complete_event):
text = document.text_before_cursor
# Complete only when we have at least the minimal input length,
        # otherwise we can get too many results and autocompletion will become too
# heavy.
if len(text) < self.min_input_len:
return
try:
# Do tilde expansion.
if self.expanduser:
text = os.path.expanduser(text)
# Directories where to look.
dirname = os.path.dirname(text)
if dirname:
directories = [os.path.dirname(os.path.join(p, text))
for p in self.get_paths()]
else:
directories = self.get_paths()
# Start of current file.
prefix = os.path.basename(text)
# Get all filenames.
filenames = []
for directory in directories:
# Look for matches in this directory.
if os.path.isdir(directory):
for filename in os.listdir(directory):
if filename.startswith(prefix):
filenames.append((directory, filename))
# Sort
filenames = sorted(filenames, key=lambda k: k[1])
# Yield them.
for directory, filename in filenames:
completion = filename[len(prefix):]
full_name = os.path.join(directory, filename)
if os.path.isdir(full_name):
# For directories, add a slash to the filename.
# (We don't add them to the `completion`. Users can type it
                    # to trigger the autocompletion themselves.)
filename += '/'
else:
if self.only_directories or not self.file_filter(full_name):
continue
yield Completion(completion, 0, display=filename)
except OSError:
pass
class ExecutableCompleter(PathCompleter):
"""
    Complete only executable files in the current path.
"""
def __init__(self):
PathCompleter.__init__(
self,
only_directories=False,
min_input_len=1,
get_paths=lambda: os.environ.get('PATH', '').split(os.pathsep),
file_filter=lambda name: os.access(name, os.X_OK),
            expanduser=True)
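# Usage sketch (assumption: the completer is handed to a prompt_toolkit input
# loop elsewhere).  A PathCompleter restricted to Python source files:
_py_completer = PathCompleter(
    file_filter=lambda name: name.endswith('.py'),
    min_input_len=1,
    expanduser=True)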
|
niklasf/python-prompt-toolkit
|
prompt_toolkit/contrib/completers/filesystem.py
|
Python
|
bsd-3-clause
| 3,740
|