repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
multipath-rtp/cerbero | test/test_cerbero_build_build.py | 23 | 3397 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import os
from test.test_common import DummyConfig
from cerbero.build import build
class MakefilesBase(build.MakefilesBase):
srcdir = ''
build_dir = ''
def __init__(self, config):
self.config = config
build.MakefilesBase.__init__(self)
@build.modify_environment
def get_env_var(self, var):
if var not in os.environ:
return None
return os.environ[var]
@build.modify_environment
def get_env_var_nested(self, var):
return self.get_env_var(var)
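# For orientation, a minimal sketch of what a decorator like
# ``build.modify_environment`` is expected to do, inferred from the tests
# below (hypothetical -- the real implementation lives in cerbero.build.build
# and also handles use_system_libs, which this sketch omits):
def _modify_environment_sketch(func):
    def wrapped(self, *args, **kwargs):
        if getattr(self, '_env_modified', False):
            # Re-entrancy guard so nested decorated calls do not apply
            # append_env twice (see testNestedModif below).
            return func(self, *args, **kwargs)
        old_env = os.environ.copy()
        self._env_modified = True
        # append_env values are appended after a space, which is why a
        # previously unset variable ends up as ' value'.
        for var, val in getattr(self, 'append_env', {}).items():
            os.environ[var] = os.environ.get(var, '') + ' ' + val
        # new_env values replace whatever is there and take precedence.
        for var, val in getattr(self, 'new_env', {}).items():
            os.environ[var] = val
        try:
            return func(self, *args, **kwargs)
        finally:
            self._env_modified = False
            os.environ.clear()
            os.environ.update(old_env)
    return wrapped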
class ModifyEnvTest(unittest.TestCase):
def setUp(self):
self.var = 'TEST_VAR'
self.val1 = 'test'
self.val2 = 'test2'
self.mk = MakefilesBase(DummyConfig())
def testAppendEnv(self):
os.environ[self.var] = self.val1
self.mk.append_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, "%s %s" % (self.val1, self.val2))
def testAppendNonExistentEnv(self):
if self.var in os.environ:
del os.environ[self.var]
self.mk.append_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, ' %s' % self.val2)
def testNewEnv(self):
os.environ[self.var] = self.val1
self.mk.new_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, self.val2)
def testAppendAndNewEnv(self):
os.environ[self.var] = ''
self.mk.append_env = {self.var: self.val1}
self.mk.new_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, self.val2)
def testSystemLibs(self):
os.environ['PKG_CONFIG_PATH'] = '/path/1'
os.environ['PKG_CONFIG_LIBDIR'] = '/path/2'
self.mk.config.allow_system_libs = True
self.mk.use_system_libs = True
val = self.mk.get_env_var('PKG_CONFIG_PATH')
self.assertEquals(val,'/path/2:/usr/lib/pkgconfig:'
'/usr/share/pkgconfig:/usr/lib/i386-linux-gnu/pkgconfig')
val = self.mk.get_env_var('PKG_CONFIG_LIBDIR')
self.assertEquals(val,'/path/2')
def testNestedModif(self):
os.environ[self.var] = self.val1
self.mk.append_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, "%s %s" % (self.val1, self.val2))
val = self.mk.get_env_var_nested(self.var)
self.assertEquals(val, "%s %s" % (self.val1, self.val2))
| lgpl-2.1 |
rezoo/chainer | tests/chainer_tests/utils_tests/test_conv_nd_kernel.py | 14 | 1324 | import unittest
import mock
import chainer
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv_nd_kernel
@testing.parameterize(*testing.product({
'ndim': [2, 3, 4],
}))
@attr.gpu
class TestIm2colNDKernelMemo(unittest.TestCase):
def setUp(self):
chainer.backends.cuda.clear_memo()
def test_im2col_nd_kernel_memo(self):
ndim = self.ndim
with mock.patch(
'chainer.utils.conv_nd_kernel.Im2colNDKernel._generate') as m:
conv_nd_kernel.Im2colNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
conv_nd_kernel.Im2colNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
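    # The assertion above relies on memoization: generate() runs twice, yet
    # the mocked _generate must still have been called only once.  A minimal
    # sketch of such a cache (hypothetical -- chainer's real cuda memoization
    # also scopes entries per device, and clear_memo() in setUp resets it):
    #
    #     _memo = {}
    #     def generate(ndim):
    #         if ndim not in _memo:
    #             _memo[ndim] = _generate(ndim)
    #         return _memo[ndim]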
@testing.parameterize(*testing.product({
'ndim': [2, 3, 4],
}))
@attr.gpu
class TestCol2imNDKernelMemo(unittest.TestCase):
def setUp(self):
chainer.backends.cuda.clear_memo()
def test_col2im_nd_kernel_memo(self):
ndim = self.ndim
with mock.patch(
'chainer.utils.conv_nd_kernel.Col2imNDKernel._generate') as m:
conv_nd_kernel.Col2imNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
conv_nd_kernel.Col2imNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
testing.run_module(__name__, __file__)
| mit |
psiwczak/quantum | quantum/plugins/ryu/nova/linux_net.py | 5 | 2822 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# <yamahata at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ryu.app.client import OFPClient
from nova import flags
from nova.network import linux_net
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
ryu_linux_net_opt = cfg.StrOpt('linuxnet_ovs_ryu_api_host',
default='127.0.0.1:8080',
                               help='OpenFlow Ryu REST API host:port')
FLAGS = flags.FLAGS
FLAGS.register_opt(ryu_linux_net_opt)
def _get_datapath_id(bridge_name):
out, _err = utils.execute('ovs-vsctl', 'get', 'Bridge',
bridge_name, 'datapath_id', run_as_root=True)
return out.strip().strip('"')
def _get_port_no(dev):
out, _err = utils.execute('ovs-vsctl', 'get', 'Interface', dev,
'ofport', run_as_root=True)
return int(out.strip())
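# Illustrative values (hypothetical): ``ovs-vsctl get Bridge br-int
# datapath_id`` prints the id wrapped in quotes, e.g. '"0000deadbeef0001"\n',
# which _get_datapath_id() reduces to '0000deadbeef0001'; ``ovs-vsctl get
# Interface tap0 ofport`` prints e.g. '2\n', which _get_port_no() turns into
# the integer 2.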
class LinuxOVSRyuInterfaceDriver(linux_net.LinuxOVSInterfaceDriver):
def __init__(self):
super(LinuxOVSRyuInterfaceDriver, self).__init__()
LOG.debug('ryu rest host %s', FLAGS.linuxnet_ovs_ryu_api_host)
self.ryu_client = OFPClient(FLAGS.linuxnet_ovs_ryu_api_host)
self.datapath_id = _get_datapath_id(
FLAGS.linuxnet_ovs_integration_bridge)
if linux_net.binary_name == 'nova-network':
for tables in [linux_net.iptables_manager.ipv4,
linux_net.iptables_manager.ipv6]:
tables['filter'].add_rule(
'FORWARD',
'--in-interface gw-+ --out-interface gw-+ -j DROP')
linux_net.iptables_manager.apply()
def plug(self, network, mac_address, gateway=True):
LOG.debug("network %s mac_adress %s gateway %s",
network, mac_address, gateway)
ret = super(LinuxOVSRyuInterfaceDriver, self).plug(
network, mac_address, gateway)
port_no = _get_port_no(self.get_dev(network))
self.ryu_client.create_port(network['uuid'], self.datapath_id, port_no)
return ret
| apache-2.0 |
jvehent/mig | tools/client.py | 16 | 1597 | #!/usr/bin/env python
import os
import sys
import gnupg
from time import gmtime, strftime
import random
import requests
import json
def makeToken(gpghome, keyid):
gpg = gnupg.GPG(gnupghome=gpghome)
version = "1"
timestamp = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
nonce = str(random.randint(10000, 18446744073709551616))
token = version + ";" + timestamp + ";" + nonce
sig = gpg.sign(token + "\n",
keyid=keyid,
detach=True, clearsign=True)
token += ";"
    linectr = 0
    # Flatten the ASCII-armored signature onto one line: skip the first
    # three lines (armor header), any dashed BEGIN/END lines, and blanks,
    # keeping only the base64 payload.
    for line in str(sig).splitlines():
        linectr += 1
        if linectr < 4 or line.startswith('-') or not line:
continue
token += line
return token
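# The token built above has four ';'-separated fields: version, timestamp,
# nonce, and the flattened PGP signature.  A small helper to split one back
# apart (illustrative only -- the MIG API itself verifies the signature
# server-side, which is not reproduced here):
def parse_token(token):
    version, timestamp, nonce, sig = token.split(";", 3)
    return {"version": version, "timestamp": timestamp,
            "nonce": nonce, "signature": sig}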
if __name__ == '__main__':
token = makeToken("/home/ulfr/.gnupg", "E60892BB9BD89A69F759A1A0A3D652173B763E8F")
r = requests.get(sys.argv[1],
headers={'X-PGPAUTHORIZATION': token},
verify=True)
if r.status_code == 200:
print json.dumps(r.json(), sort_keys=True, indent=4, separators=(',', ': '))
elif r.status_code == 500:
print r.json()
# api returns a 500 with an error body on failures
migjson=r.json()
raise Exception("API returned HTTP code %s and error '%s:%s'" %
(r.status_code,
migjson['collection']['error']['code'],
migjson['collection']['error']['message'])
)
else:
# another type of failure that's unlikely to have an error body
raise Exception("Failed with HTTP code %s" % r.status_code)
| mpl-2.0 |
remitamine/youtube-dl | youtube_dl/extractor/nhl.py | 18 | 5004 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
parse_duration,
)
class NHLBaseIE(InfoExtractor):
def _real_extract(self, url):
site, tmp_id = re.match(self._VALID_URL, url).groups()
video_data = self._download_json(
'https://%s/%s/%sid/v1/%s/details/web-v1.json'
% (self._CONTENT_DOMAIN, site[:3], 'item/' if site == 'mlb' else '', tmp_id), tmp_id)
if video_data.get('type') != 'video':
video_data = video_data['media']
video = video_data.get('video')
if video:
video_data = video
else:
videos = video_data.get('videos')
if videos:
video_data = videos[0]
video_id = compat_str(video_data['id'])
title = video_data['title']
formats = []
for playback in video_data.get('playbacks', []):
playback_url = playback.get('url')
if not playback_url:
continue
ext = determine_ext(playback_url)
if ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
playback_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=playback.get('name', 'hls'), fatal=False)
self._check_formats(m3u8_formats, video_id)
formats.extend(m3u8_formats)
else:
height = int_or_none(playback.get('height'))
formats.append({
'format_id': playback.get('name', 'http' + ('-%dp' % height if height else '')),
'url': playback_url,
'width': int_or_none(playback.get('width')),
'height': height,
'tbr': int_or_none(self._search_regex(r'_(\d+)[kK]', playback_url, 'bitrate', default=None)),
})
self._sort_formats(formats)
thumbnails = []
cuts = video_data.get('image', {}).get('cuts') or []
if isinstance(cuts, dict):
cuts = cuts.values()
for thumbnail_data in cuts:
thumbnail_url = thumbnail_data.get('src')
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(thumbnail_data.get('width')),
'height': int_or_none(thumbnail_data.get('height')),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description'),
'timestamp': parse_iso8601(video_data.get('date')),
'duration': parse_duration(video_data.get('duration')),
'thumbnails': thumbnails,
'formats': formats,
}
class NHLIE(NHLBaseIE):
IE_NAME = 'nhl.com'
_VALID_URL = r'https?://(?:www\.)?(?P<site>nhl|wch2016)\.com/(?:[^/]+/)*c-(?P<id>\d+)'
_CONTENT_DOMAIN = 'nhl.bamcontent.com'
_TESTS = [{
# type=video
'url': 'https://www.nhl.com/video/anisimov-cleans-up-mess/t-277752844/c-43663503',
'md5': '0f7b9a8f986fb4b4eeeece9a56416eaf',
'info_dict': {
'id': '43663503',
'ext': 'mp4',
'title': 'Anisimov cleans up mess',
'description': 'md5:a02354acdfe900e940ce40706939ca63',
'timestamp': 1461288600,
'upload_date': '20160422',
},
}, {
# type=article
'url': 'https://www.nhl.com/news/dennis-wideman-suspended/c-278258934',
'md5': '1f39f4ea74c1394dea110699a25b366c',
'info_dict': {
'id': '40784403',
'ext': 'mp4',
'title': 'Wideman suspended by NHL',
'description': 'Flames defenseman Dennis Wideman was banned 20 games for violation of Rule 40 (Physical Abuse of Officials)',
'upload_date': '20160204',
'timestamp': 1454544904,
},
}, {
# Some m3u8 URLs are invalid (https://github.com/ytdl-org/youtube-dl/issues/10713)
'url': 'https://www.nhl.com/predators/video/poile-laviolette-on-subban-trade/t-277437416/c-44315003',
'md5': '50b2bb47f405121484dda3ccbea25459',
'info_dict': {
'id': '44315003',
'ext': 'mp4',
'title': 'Poile, Laviolette on Subban trade',
'description': 'General manager David Poile and head coach Peter Laviolette share their thoughts on acquiring P.K. Subban from Montreal (06/29/16)',
'timestamp': 1467242866,
'upload_date': '20160629',
},
}, {
'url': 'https://www.wch2016.com/video/caneur-best-of-game-2-micd-up/t-281230378/c-44983703',
'only_matching': True,
}, {
'url': 'https://www.wch2016.com/news/3-stars-team-europe-vs-team-canada/c-282195068',
'only_matching': True,
}]
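    # Illustrative check against the first test URL above: _VALID_URL
    # captures the site name and the numeric clip id, so
    #
    #   re.match(NHLIE._VALID_URL,
    #            'https://www.nhl.com/video/anisimov-cleans-up-mess'
    #            '/t-277752844/c-43663503').groups() == ('nhl', '43663503')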
| unlicense |
jonparrott/google-cloud-python | trace/google/cloud/trace_v2/proto/tracing_pb2_grpc.py | 3 | 3436 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.trace_v2.proto import trace_pb2 as google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2
from google.cloud.trace_v2.proto import tracing_pb2 as google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_tracing__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class TraceServiceStub(object):
"""This file describes an API for collecting and viewing traces and spans
within a trace. A Trace is a collection of spans corresponding to a single
operation or set of operations for an application. A span is an individual
timed event which forms a node of the trace tree. A single trace may
contain span(s) from multiple services.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.BatchWriteSpans = channel.unary_unary(
'/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans',
request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_tracing__pb2.BatchWriteSpansRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CreateSpan = channel.unary_unary(
'/google.devtools.cloudtrace.v2.TraceService/CreateSpan',
request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.FromString,
)
class TraceServiceServicer(object):
"""This file describes an API for collecting and viewing traces and spans
within a trace. A Trace is a collection of spans corresponding to a single
operation or set of operations for an application. A span is an individual
timed event which forms a node of the trace tree. A single trace may
contain span(s) from multiple services.
"""
def BatchWriteSpans(self, request, context):
"""Sends new spans to new or existing traces. You cannot update
existing spans.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateSpan(self, request, context):
"""Creates a new span.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TraceServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'BatchWriteSpans': grpc.unary_unary_rpc_method_handler(
servicer.BatchWriteSpans,
request_deserializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_tracing__pb2.BatchWriteSpansRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'CreateSpan': grpc.unary_unary_rpc_method_handler(
servicer.CreateSpan,
request_deserializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.FromString,
response_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.devtools.cloudtrace.v2.TraceService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
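# Typical wiring sketch (not part of the generated file; the servicer name is
# assumed): a concrete TraceServiceServicer subclass is attached to a server
# via the helper above using the standard grpc API.
#
#     from concurrent import futures
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#     add_TraceServiceServicer_to_server(MyTraceServicer(), server)
#     server.add_insecure_port('[::]:50051')
#     server.start()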
| apache-2.0 |
Apoyhtari/Irc-Bot | beautifulsoup4-4.1.2/build/lib/bs4/builder/_html5lib.py | 119 | 7746 | __all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
return markup, None, None, False
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
    def fragmentClass(self):
        # Imported here rather than at module top to avoid a circular
        # import (the bs4 package itself imports this builder module).
        from bs4 import BeautifulSoup
        self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
"set attr", name, value
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# Concatenate new text onto old text node
# XXX This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + node.element)
old_element.replace_with(new_element)
else:
self.element.append(node.element)
node.parent = self
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
text = TextNode(self.soup.new_string(data), self.soup)
if insertBefore:
self.insertBefore(text, insertBefore)
else:
self.appendChild(text)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, newParent):
while self.element.contents:
child = self.element.contents[0]
child.extract()
if isinstance(child, Tag):
newParent.appendChild(
Element(child, self.soup, namespaces["html"]))
else:
newParent.appendChild(
TextNode(child, self.soup))
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
        if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
| mit |
hoogamaphone/chromathicity | chromathicity/rgbspec.py | 1 | 3461 | import numpy as np
import chromathicity.defaults
from chromathicity.illuminant import D
from chromathicity.interfaces import RgbSpecification, Compander
from chromathicity.observer import Standard
from chromathicity.util import SetGet
class Custom(RgbSpecification, SetGet):
def __init__(self, **kwargs):
super().__init__()
self._name = ""
self._illuminant = chromathicity.defaults.get_default_illuminant()
self._observer = chromathicity.defaults.get_default_observer()
self._xyy = np.array([[0.6, 0.3, .200],
[0.3, 0.6, .800],
[0.2, 0.1, .100]])
self.set(**kwargs)
def __repr__(self):
args = ['name', 'illuminant', 'observer', 'xyy']
kwargs_repr = ', '.join(f'{key}={getattr(self, key)!r}' for key in args)
return f'Custom({kwargs_repr!s})'
@property
def name(self):
return self._name
@name.setter
def name(self, n):
self._name = n
@property
def illuminant(self):
return self._illuminant
@illuminant.setter
def illuminant(self, ill):
self._illuminant = ill
@property
def observer(self):
return self._observer
@observer.setter
def observer(self, obs):
self._observer = obs
@property
def xyy(self):
return self._xyy
@xyy.setter
def xyy(self, x):
self._xyy = x
class Srgb(RgbSpecification):
def __init__(self):
super().__init__()
self.compander = SrgbCompander()
def __repr__(self):
return 'Srgb()'
@property
def name(self):
return 'sRGB'
@property
def illuminant(self):
return D('D_65')
@property
def observer(self):
return Standard(2)
@property
def xyy(self):
return np.array([[0.64, 0.33, .212656],
[0.30, 0.60, .715158],
[0.15, 0.06, .072186]])
class SrgbCompander(Compander):
_EPS = 0.0031308
_DELTA = 12.92
_ALPHA = 1.055
_GAMMA = 2.4
_BETA = 0.055
def __repr__(self):
return 'SrgbCompander()'
def compand(self, linear_rgb: np.ndarray) -> np.ndarray:
is_small = linear_rgb <= self._EPS
is_big = np.logical_not(is_small)
companded_rgb = np.zeros(linear_rgb.shape)
companded_rgb[is_small] = self._DELTA * linear_rgb[is_small]
a = self._ALPHA
g = self._GAMMA
b = self._BETA
companded_rgb[is_big] = a*linear_rgb[is_big] ** (1.0/g) - b
return companded_rgb
def inverse_compand(self, companded_rgb: np.ndarray) -> np.ndarray:
is_small = companded_rgb <= self._DELTA*self._EPS
is_big = np.logical_not(is_small)
linear_rgb = np.zeros(companded_rgb.shape)
linear_rgb[is_small] = companded_rgb[is_small] / self._DELTA
a = self._ALPHA
g = self._GAMMA
b = self._BETA
linear_rgb[is_big] = ((companded_rgb[is_big] + b) / a) ** g
return linear_rgb
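# Sanity-check sketch (illustrative values): companding then inverse
# companding should round-trip to within floating-point error, exercising
# both the linear (<= _EPS) and the power-law branches:
#
#   c = SrgbCompander()
#   rgb = np.array([0.0, 0.002, 0.18, 1.0])
#   np.allclose(c.inverse_compand(c.compand(rgb)), rgb)  # -> True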
class GammaCompander(Compander):
def __init__(self, gamma=1):
self.gamma = gamma
def __repr__(self):
return f'GammaCompander({self.gamma!r})'
def compand(self, linear_rgb: np.ndarray) -> np.ndarray:
return linear_rgb ** (1.0 / self.gamma)
def inverse_compand(self, companded_rgb: np.ndarray) -> np.ndarray:
return companded_rgb ** self.gamma
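# The same round-trip property holds here for non-negative input, e.g.
# (illustrative) GammaCompander(2.2).inverse_compand(
#     GammaCompander(2.2).compand(x)) recovers x up to floating-point error.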
| bsd-3-clause |
maxdeliso/elevatorSim | Lib/test/test_concurrent_futures.py | 5 | 23223 | import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.script_helper import assert_python_ok
import sys
import threading
import time
import unittest
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest(unittest.TestCase):
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests(unittest.TestCase):
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
pass
class AsCompletedTests(unittest.TestCase):
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
pass
class ExecutorTest(unittest.TestCase):
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(ProcessPoolExecutorTest,
ThreadPoolExecutorTest,
ProcessPoolWaitTests,
ThreadPoolWaitTests,
ProcessPoolAsCompletedTests,
ThreadPoolAsCompletedTests,
FutureTests,
ProcessPoolShutdownTest,
ThreadPoolShutdownTest,
)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
| bsd-2-clause |
avinson/ansible-modules-core | cloud/openstack/os_nova_flavor.py | 93 | 6844 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_nova_flavor
short_description: Manage OpenStack compute flavors
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Add or remove flavors from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(ram), I(vcpus), and I(disk) are all required. There are no
default values for those parameters.
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Flavor name.
required: true
ram:
description:
- Amount of memory, in MB.
required: false
default: null
vcpus:
description:
- Number of virtual CPUs.
required: false
default: null
disk:
description:
- Size of local disk, in GB.
required: false
default: null
ephemeral:
description:
- Ephemeral space size, in GB.
required: false
default: 0
swap:
description:
- Swap space size, in MB.
required: false
default: 0
rxtx_factor:
description:
- RX/TX factor.
required: false
default: 1.0
is_public:
description:
- Make flavor accessible to the public.
required: false
default: true
flavorid:
description:
- ID for the flavor. This is optional as a unique UUID will be
assigned if a value is not specified.
required: false
default: "auto"
requirements: ["shade"]
'''
EXAMPLES = '''
# Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of
# local disk, and 10GB of ephemeral.
- os_nova_flavor:
cloud=mycloud
state=present
name=tiny
ram=1024
vcpus=1
disk=10
ephemeral=10
# Delete 'tiny' flavor
- os_nova_flavor:
cloud=mycloud
state=absent
name=tiny
'''
RETURN = '''
flavor:
description: Dictionary describing the flavor.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
'''
def _system_state_change(module, flavor):
state = module.params['state']
if state == 'present' and not flavor:
return True
if state == 'absent' and flavor:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
state = dict(required=False, default='present',
choices=['absent', 'present']),
name = dict(required=False),
# required when state is 'present'
ram = dict(required=False, type='int'),
vcpus = dict(required=False, type='int'),
disk = dict(required=False, type='int'),
ephemeral = dict(required=False, default=0, type='int'),
swap = dict(required=False, default=0, type='int'),
rxtx_factor = dict(required=False, default=1.0, type='float'),
is_public = dict(required=False, default=True, type='bool'),
flavorid = dict(required=False, default="auto"),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
('state', 'present', ['ram', 'vcpus', 'disk'])
],
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
try:
cloud = shade.operator_cloud(**module.params)
flavor = cloud.get_flavor(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, flavor))
if state == 'present':
if not flavor:
flavor = cloud.create_flavor(
name=name,
ram=module.params['ram'],
vcpus=module.params['vcpus'],
disk=module.params['disk'],
flavorid=module.params['flavorid'],
ephemeral=module.params['ephemeral'],
swap=module.params['swap'],
rxtx_factor=module.params['rxtx_factor'],
is_public=module.params['is_public']
)
module.exit_json(changed=True, flavor=flavor)
module.exit_json(changed=False, flavor=flavor)
elif state == 'absent':
if flavor:
cloud.delete_flavor(name)
module.exit_json(changed=True)
module.exit_json(changed=False)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
simbuerg/benchbuild | benchbuild/projects/gentoo/crafty.py | 1 | 3254 | """
Crafty experiment within a Gentoo chroot.
"""
from benchbuild.utils.wrapping import wrap_in_uchroot as wrap
from benchbuild.projects.gentoo.gentoo import GentooGroup
from benchbuild.utils.downloader import Wget
from benchbuild.utils.run import run, uchroot
from benchbuild.utils.cmd import cat # pylint: disable=E0401
class Crafty(GentooGroup):
"""
games-board/crafty
"""
NAME = "gentoo-crafty"
DOMAIN = "games-board"
def download(self):
super(Crafty, self).download()
book_file = "book.bin"
book_bin = "http://www.craftychess.com/" + book_file
Wget(book_bin, book_file)
def build(self):
emerge_in_chroot = uchroot()["/usr/bin/emerge"]
run(emerge_in_chroot["games-board/crafty"])
def run_tests(self, experiment, run):
crafty_path = "/usr/games/bin/crafty"
wrap(crafty_path.lstrip("/"), experiment, self.builddir)
crafty = uchroot()[crafty_path]
with open("test1.sh", 'w') as test1:
lines = '''
st=10
ponder=off
display nomoves
setboard rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq
move
book off
setboard rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq
move
setboard 1k1r4/pp1b1R2/3q2pp/4p3/2B5/4Q3/PPP2B2/2K5 b
move
setboard 3r1k2/4npp1/1ppr3p/p6P/P2PPPP1/1NR5/5K2/2R5 w
move
setboard 2q1rr1k/3bbnnp/p2p1pp1/2pPp3/PpP1P1P1/1P2BNNP/2BQ1PRK/7R b
move
setboard rnbqkb1r/p3pppp/1p6/2ppP3/3N4/2P5/PPP1QPPP/R1B1KB1R w KQkq
move
setboard r1b2rk1/2q1b1pp/p2ppn2/1p6/3QP3/1BN1B3/PPP3PP/R4RK1 w
move
setboard 2r3k1/pppR1pp1/4p3/4P1P1/5P2/1P4K1/P1P5/8 w
move
setboard 1nk1r1r1/pp2n1pp/4p3/q2pPp1N/b1pP1P2/B1P2R2/2P1B1PP/R2Q2K1 w
move
setboard 4b3/p3kp2/6p1/3pP2p/2pP1P2/4K1P1/P3N2P/8 w
move
setboard 2kr1bnr/pbpq4/2n1pp2/3p3p/3P1P1B/2N2N1Q/PPP3PP/2KR1B1R w
move
setboard 3rr1k1/pp3pp1/1qn2np1/8/3p4/PP1R1P2/2P1NQPP/R1B3K1 b
move
setboard 2r1nrk1/p2q1ppp/bp1p4/n1pPp3/P1P1P3/2PBB1N1/4QPPP/R4RK1 w
move
setboard r3r1k1/ppqb1ppp/8/4p1NQ/8/2P5/PP3PPP/R3R1K1 b
move
setboard r2q1rk1/4bppp/p2p4/2pP4/3pP3/3Q4/PP1B1PPP/R3R1K1 w
move
setboard rnb2r1k/pp2p2p/2pp2p1/q2P1p2/8/1Pb2NP1/PB2PPBP/R2Q1RK1 w
move
setboard 2r3k1/1p2q1pp/2b1pr2/p1pp4/6Q1/1P1PP1R1/P1PN2PP/5RK1 w
move
setboard r1bqkb1r/4npp1/p1p4p/1p1pP1B1/8/1B6/PPPN1PPP/R2Q1RK1 w kq
move
setboard r2q1rk1/1ppnbppp/p2p1nb1/3Pp3/2P1P1P1/2N2N1P/PPB1QP2/R1B2RK1 b
move
setboard r1bq1rk1/pp2ppbp/2np2p1/2n5/P3PP2/N1P2N2/1PB3PP/R1B1QRK1 b
move
setboard 3rr3/2pq2pk/p2p1pnp/8/2QBPP2/1P6/P5PP/4RRK1 b
move
setboard r4k2/pb2bp1r/1p1qp2p/3pNp2/3P1P2/2N3P1/PPP1Q2P/2KRR3 w
move
setboard 3rn2k/ppb2rpp/2ppqp2/5N2/2P1P3/1P5Q/PB3PPP/3RR1K1 w
move
setboard 2r2rk1/1bqnbpp1/1p1ppn1p/pP6/N1P1P3/P2B1N1P/1B2QPP1/R2R2K1 b
move
setboard r1bqk2r/pp2bppp/2p5/3pP3/P2Q1P2/2N1B3/1PP3PP/R4RK1 b kq
move
setboard r2qnrnk/p2b2b1/1p1p2pp/2pPpp2/1PP1P3/PRNBB3/3QNPPP/5RK1 w
move
setboard /k/3p/p2P1p/P2P1P///K/ w
move
setboard /k/rnn////5RBB/K/ w
move
mt=0
quit
EOF
'''
test1.write(lines)
with open("test2.sh", 'w') as test2:
lines = '''
st=10
ponder=off
mt=2
setboard 2r2rk1/1bqnbpp1/1p1ppn1p/pP6/N1P1P3/P2B1N1P/1B2QPP1/R2R2K1 b
move
mt=0
quit
'''
test2.write(lines)
run((cat["test1.sh"] | crafty))
run((cat["test2.sh"] | crafty))
| mit |
evangeline97/localwiki-backend-server | localwiki/maps/fields.py | 3 | 10795 | from django.utils.translation import ugettext as _
from django.contrib.gis.db import models
from django.contrib.gis.geos import *
from .validators import validate_geometry
def flatten_collection(geoms):
"""
Args:
geoms: A GeometryCollection.
Returns:
A GeometryCollection where overlapping polygons are merged,
and points/lines fully contained in polygons are removed.
"""
# Iterate through all contained geometries, collecting all
# polygons.
polys = []
other_geom = []
for geom in geoms:
if type(geom) == Polygon:
polys.append(geom)
else:
other_geom.append(geom)
# TODO: Maybe look into collapsing only overlapping polygons.
# If we collapse only overlapping then we preserve the polygons'
# "independence" in the editor -- when clicked on they will
# appear as separate polygons. I couldn't think of a way to do
# this that wasn't a reimplementation of the cascading union
# algorithm and it didn't seem worth it given that folks might
# not care about this very minor detail.
if polys:
# Smash all polygons using a cascaded union.
cascaded_poly = MultiPolygon(polys, srid=geoms.srid).cascaded_union
# Skip points and lines that are fully contained in the flattened
# polygon.
flat_geoms = [cascaded_poly]
for geom in other_geom:
if not cascaded_poly.contains(geom):
flat_geoms.append(geom)
else:
flat_geoms = other_geom
return GeometryCollection(flat_geoms, srid=geoms.srid)
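# Quick illustrative use (hypothetical coordinates): two overlapping squares
# collapse into one polygon via the cascaded union, and a point lying inside
# them is dropped, so the flattened collection holds a single geometry:
#
#   sq1 = Polygon(((0, 0), (0, 2), (2, 2), (2, 0), (0, 0)), srid=4326)
#   sq2 = Polygon(((1, 1), (1, 3), (3, 3), (3, 1), (1, 1)), srid=4326)
#   flat = flatten_collection(
#       GeometryCollection(sq1, sq2, Point(0.5, 0.5, srid=4326), srid=4326))
#   len(flat)  # -> 1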
class CollectionFrom(models.GeometryCollectionField):
"""
Creates a GeometryCollection pseudo-field from the provided
component fields. When accessed, the CollectionFrom field will
return a GeometryCollection from the provided component fields.
When set to a value (upon model save) the geometries contained in
the CollectionFrom field are broken out and placed into their
relevant component fields.
Example::
class MyModel(models.Model):
points = models.MultiPointField()
lines = models.MultiLineStringField()
polys = models.MultiPolygonField()
geom = CollectionFrom(points='points', lines='lines',
polys='polys')
Then when you access the 'geom' attribute on instances of MyModel
you'll get a GeometryCollection with your points, lines and polys.
When you set the 'geom' attribute to a GeometryCollection and save
an instance of MyModel the GeometryCollection is broken into points,
lines and polygons and placed into the provided fields.
This field is useful when you want to deal with GeometryCollections
but still must maintain separate geometry fields on the model. For
instance, GeoDjango does not currently allow you to filter (with
geometry operations) based on GeometryCollections due to issues
with the underlying libraries. Someday this may be fixed. But
until then, we've got this Field.
NOTES: This field will add a column to the db, but it won't ever
store anything there except null. There's probably a way
around this. TODO.
"""
def __init__(self, *args, **kwargs):
self.points_name = kwargs.pop('points') if 'points' in kwargs else None
self.lines_name = kwargs.pop('lines') if 'lines' in kwargs else None
self.polys_name = kwargs.pop('polys') if 'polys' in kwargs else None
super(CollectionFrom, self).__init__(*args, **kwargs)
self.null = True
def contribute_to_class(self, cls, name):
models.signals.class_prepared.connect(self.finalize, sender=cls)
# Control the geometrycollection-like attribute via a special
# descriptor.
setattr(cls, name, CollectionDescriptor(self))
# Back up the points, lines, polys attributes and then point them
# to descriptors that, when set, clear out the
# geometrycollection field.
if self.points_name:
setattr(cls, '_explicit_%s' % self.points_name,
getattr(cls, self.points_name))
setattr(cls, self.points_name,
ClearCollectionOnSet(self, self.points_name))
if self.lines_name:
setattr(cls, '_explicit_%s' % self.lines_name,
getattr(cls, self.lines_name))
setattr(cls, self.lines_name,
ClearCollectionOnSet(self, self.lines_name))
if self.polys_name:
setattr(cls, '_explicit_%s' % self.polys_name,
getattr(cls, self.polys_name))
setattr(cls, self.polys_name,
ClearCollectionOnSet(self, self.polys_name))
super(models.GeometryField, self).contribute_to_class(cls, name)
def finalize(self, sender, **kws):
self._connected_to = sender
models.signals.pre_save.connect(self.pre_model_save, sender=sender,
weak=False)
def pre_model_save(self, instance, raw, **kws):
if not 'sender' in kws:
return
geom_collection = instance.__dict__.get(
'_explicit_set_%s' % self.attname, None)
if geom_collection is None:
# They didn't set an explicit GeometryCollection.
return
points, lines, polys = [], [], []
points_geom, lines_geom, polys_geom = None, None, None
for geom in geom_collection:
if type(geom) is Point:
points.append(geom)
if type(geom) is MultiPoint:
points += [g for g in geom]
if type(geom) is LineString or type(geom) is LinearRing:
lines.append(geom)
if type(geom) is MultiLineString:
lines += [g for g in geom]
if type(geom) is Polygon:
polys.append(geom)
if type(geom) is MultiPolygon:
polys += [g for g in geom]
if points:
points_geom = MultiPoint(points, srid=points[0].srid)
if lines:
lines_geom = MultiLineString(lines, srid=lines[0].srid)
if polys:
polys_geom = MultiPolygon(polys, srid=polys[0].srid)
setattr(instance, self.points_name, points_geom)
setattr(instance, self.lines_name, lines_geom)
setattr(instance, self.polys_name, polys_geom)
# Set ourself to None to avoid saving any data in our column.
setattr(instance, self.name, None)
instance.__dict__[self.name] = None
class CollectionDescriptor(object):
def __init__(self, field):
self._field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
_("The '%(fieldname)s' attribute can only be accessed from %(ownername)s instances.")
% {'fieldname':self._field.name, 'ownername':owner.__name__})
set_field_value = instance.__dict__.get(
'_explicit_set_%s' % self._field.attname, None)
if set_field_value:
# Return the value they set for the field rather than our
# constructed GeometryCollection.
return set_field_value
enum_points, enum_lines, enum_polys = [], [], []
points = getattr(instance, self._field.points_name)
if points:
enum_points = [p for p in points]
lines = getattr(instance, self._field.lines_name)
if lines:
enum_lines = [l for l in lines]
polys = getattr(instance, self._field.polys_name)
if polys:
enum_polys = [p for p in polys]
geoms = enum_points + enum_lines + enum_polys
collection = GeometryCollection(geoms, srid=self._field.srid)
collection._from_get_on_owner = owner
return collection
def __set__(self, obj, value):
# The OGC Geometry type of the field.
gtype = self._field.geom_type
# The geometry type must match that of the field -- unless the
# general GeometryField is used.
if (isinstance(value, GeometryCollection) and
(str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY')):
# Assigning the SRID to the geometry.
if value.srid is None:
value.srid = self._field.srid
elif value is None:
pass
elif isinstance(value, (basestring, buffer)):
# Set with WKT, HEX, or WKB
value = GEOSGeometry(value, srid=self._field.srid)
else:
raise TypeError(
_('cannot set %(cname)s CollectionFrom with value of type: %(vtype)s') %
{'cname':obj.__class__.__name__, 'vtype': type(value)})
obj.__dict__['_explicit_set_%s' % self._field.attname] = value
return value
class ClearCollectionOnSet(object):
"""
A simple descriptor that, when set, clears out the stored
geometry collection. If we don't clear out the stored geometry
collection when, say, the 'points' are set then we will end up
with a stale geometry collection.
"""
def __init__(self, field, attrname):
self._field = field
self._attrname = attrname
def __get__(self, obj=None, owner=None):
return getattr(obj, '_explicit_%s' % self._attrname)
def __set__(self, obj, value):
# If the GeometryCollection was explicitly set then let's clear it out,
# as we've now set one of the component fields directly.
if ('_explicit_set_%s' % self._field.attname) in obj.__dict__:
del obj.__dict__['_explicit_set_%s' % self._field.attname]
return setattr(obj, '_explicit_%s' % self._attrname, value)
class FlatCollectionFrom(CollectionFrom):
"""
A CollectionFrom field that "flattens" overlapping polygons
together. Additionally, we validate that the geometry provided to
the field is valid.
Raises:
ValidationError: If the provided geometry is not valid.
"""
def __init__(self, *args, **kws):
# Ensure the geometry provided is valid.
validators = kws.get('validators', [])
if validate_geometry not in validators:
validators.append(validate_geometry)
kws['validators'] = validators
return super(FlatCollectionFrom, self).__init__(*args, **kws)
def pre_model_save(self, instance, raw, **kws):
geom = getattr(instance, self.attname)
setattr(instance, self.attname, flatten_collection(geom))
super(FlatCollectionFrom, self).pre_model_save(instance, raw, **kws)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^maps\.fields"])
except ImportError:
pass
| gpl-2.0 |
monkut/deso | deso/deso/layers/vector/management/commands/load_geojson_layers.py | 1 | 1694 | """
Load GEOJSON text files from a given directory to individual vector.GeoJsonLayer models
"""
import os
from django.core.management.base import BaseCommand, CommandError
from ...models import GeoJsonLayer
WGS84_SRID = 4326
def load_geojson_layer(geojson_filepath):
with open(geojson_filepath, "rt", encoding="utf8") as in_f:
geojson_text = in_f.read()
geojson_layer = GeoJsonLayer(name=geojson_filepath,
data=geojson_text)
bounds_polygon = geojson_layer.get_data_bounds_polygon()
geojson_layer.bounds_polygon = bounds_polygon
geojson_layer.clean()
geojson_layer.save()
geojson_layer.create_map_layer()
return geojson_layer
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument("-d", "--directory",
default=None,
required=True,
help="Direcotry containing GEOJSON text files to load to individual vector.GeoJsonLayer object.")
def handle(self, *args, **options):
directory = options["directory"]
found_geojson_filepaths = []
for f in os.listdir(directory):
if f.endswith(".geojson"):
filepath = os.path.join(directory, f)
found_geojson_filepaths.append(filepath)
if not found_geojson_filepaths:
raise CommandError("No '.geojson' files found in given directory: {}".format(directory))
for filepath in found_geojson_filepaths:
self.stdout.write("Loading ({})...".format(filepath))
load_geojson_layer(filepath)
self.stdout.write("Done!")
| mit |
smesdaghi/geonode | geonode/layers/populate_layers_data.py | 35 | 6647 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.layers.models import Style, Attribute, Layer
styles = [{"name": "test_style_1",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" \
version=\"1.0.0\"><sld:NamedLayer><sld:Name>test_style_1</sld:Name><sld:UserStyle>\
<sld:Name>test_style_1</sld:Name><sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name>\
<sld:Rule><sld:PolygonSymbolizer><sld:Fill><sld:CssParameter name=\"fill\">#888800</sld:CssParameter>\
</sld:Fill><sld:Stroke><sld:CssParameter name=\"stroke\">#ffffbb</sld:CssParameter>\
<sld:CssParameter name=\"stroke-width\">0.7</sld:CssParameter></sld:Stroke>\
</sld:PolygonSymbolizer></sld:Rule></sld:FeatureTypeStyle></sld:UserStyle>\
</sld:NamedLayer></sld:StyledLayerDescriptor>",
},
{"name": "test_style_2",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" \
version=\"1.0.0\"><sld:NamedLayer><sld:Name>test_style_2</sld:Name><sld:UserStyle>\
<sld:Name>test_style_2</sld:Name><sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name>\
<sld:Rule><sld:PolygonSymbolizer><sld:Fill><sld:CssParameter name=\"fill\">#888800</sld:CssParameter>\
</sld:Fill><sld:Stroke><sld:CssParameter name=\"stroke\">#ffffbb</sld:CssParameter>\
<sld:CssParameter name=\"stroke-width\">0.7</sld:CssParameter></sld:Stroke></sld:PolygonSymbolizer>\
</sld:Rule></sld:FeatureTypeStyle></sld:UserStyle></sld:NamedLayer></sld:StyledLayerDescriptor>",
},
{"name": "test_style_3",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" \
version=\"1.0.0\"><sld:NamedLayer><sld:Name>test_style_3</sld:Name><sld:UserStyle>\
<sld:Name>test_style_3</sld:Name><sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name>\
<sld:Rule><sld:PolygonSymbolizer><sld:Fill><sld:CssParameter name=\"fill\">#888800</sld:CssParameter>\
</sld:Fill><sld:Stroke><sld:CssParameter name=\"stroke\">#ffffbb</sld:CssParameter><sld:CssParameter \
name=\"stroke-width\">0.7</sld:CssParameter></sld:Stroke></sld:PolygonSymbolizer></sld:Rule>\
</sld:FeatureTypeStyle></sld:UserStyle></sld:NamedLayer></sld:StyledLayerDescriptor>",
},
{"name": "Evaluación",
"sld_url": "http://localhost:8080/geoserver/rest/styles/test_style.sld",
"sld_body": "<?xml version=\"1.0\" encoding=\"UTF-8\"?><sld:StyledLayerDescriptor \
xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" \
xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\
<sld:NamedLayer><sld:Name>test_style_3</sld:Name><sld:UserStyle><sld:Name>test_style_3</sld:Name>\
<sld:Title/><sld:FeatureTypeStyle><sld:Name>name</sld:Name><sld:Rule><sld:PolygonSymbolizer><sld:Fill>\
<sld:CssParameter name=\"fill\">#888800</sld:CssParameter></sld:Fill><sld:Stroke><sld:CssParameter \
name=\"stroke\">#ffffbb</sld:CssParameter><sld:CssParameter name=\"stroke-width\">0.7</sld:CssParameter>\
</sld:Stroke></sld:PolygonSymbolizer></sld:Rule></sld:FeatureTypeStyle></sld:UserStyle></sld:NamedLayer>\
</sld:StyledLayerDescriptor>",
}]
attributes = [
{
"attribute": u'N\xfamero_De_M\xe9dicos',
"attribute_label": u'N\xfamero_De_M\xe9dicos',
"attribute_type": "xsd:string",
"visible": True,
"display_order": 4
},
{
"attribute": "the_geom",
"attribute_label": "Shape",
"attribute_type": "gml:Geometry",
"visible": False,
"display_order": 3
},
{
"attribute": "description",
"attribute_label": "Description",
"attribute_type": "xsd:string",
"visible": True,
"display_order": 2
},
{
"attribute": "place_name",
"attribute_label": "Place Name",
"attribute_type": "xsd:string",
"visible": True,
"display_order": 1
}
]
def create_layer_data():
layer = Layer.objects.get(pk=1)
for style in styles:
new_style = Style.objects.create(
name=style['name'],
sld_url=style['sld_url'],
sld_body=style['sld_body'])
layer.styles.add(new_style)
layer.default_style = new_style
layer.save()
for attr in attributes:
Attribute.objects.create(layer=layer,
attribute=attr['attribute'],
attribute_label=attr['attribute_label'],
attribute_type=attr['attribute_type'],
visible=attr['visible'],
display_order=attr['display_order']
)
| gpl-3.0 |
rlbabyuk/integration_tests | utils/perf_message_stats.py | 8 | 50825 | # -*- coding: utf-8 -*-
"""Functions for performance analysis/charting of the backend messages and top_output from an
appliance.
"""
from utils.log import logger
from utils.path import log_path
from utils.perf import convert_top_mem_to_mib
from utils.perf import generate_statistics
from datetime import datetime
import dateutil.parser as du_parser
from datetime import timedelta
from time import time
import csv
import numpy
import os
import pygal
import subprocess
import re
# Regular Expressions to capture relevant information from each log line:
# [----] I, [2014-03-04T08:11:14.320377 #3450:b15814] INFO -- : ....
log_stamp = re.compile(r'\[----\]\s[IWE],\s\[([0-9\-]+)T([0-9\:\.]+)\s#([0-9]+):[0-9a-z]+\]')
# [----] .* MIQ( * )
miqmsg = re.compile(r'\[----\].*MIQ\(([a-zA-Z0-9\._]*)\)')
# Command: [ * ]
miqmsg_cmd = re.compile(r'Command:\s\[([a-zA-Z0-9\._\:]*)\]')
# Message id: [ * ]
miqmsg_id = re.compile(r'Message\sid:\s\[([0-9]*)\]')
# Args: [ *]
miqmsg_args = re.compile(
r'Args:\s\[([A-Za-z0-9\{\}\(\)\[\]\s\\\-\:\"\'\,\=\<\>\_\/\.\@\?\%\&\#]*)\]')
# Dequeued in: [ * ] seconds
miqmsg_deq = re.compile(r'Dequeued\sin:\s\[([0-9\.]*)\]\sseconds')
# Delivered in [ * ] seconds
miqmsg_del = re.compile(r'Delivered\sin\s\[([0-9\.]*)\]\sseconds')
# Worker related regular expressions:
# MIQ(PriorityWorker) ID [15], PID [6461]
miqwkr = re.compile(r'MIQ\(([A-Za-z]*)\)\sID\s\[([0-9]*)\],\sPID\s\[([0-9]*)\]')
# with ID: [21]
miqwkr_id = re.compile(r'with\sID:\s\[([0-9]*)\]')
# For use with workers exiting, such as authentication failures:
miqwkr_id_2 = re.compile(r'ID\s\[([0-9]*)\]')
# top regular expressions
# Cpu(s): 13.7%us, 1.2%sy, 2.1%ni, 80.0%id, 1.7%wa, 0.0%hi, 0.1%si, 1.3%st
miq_cpu = re.compile(r'Cpu\(s\)\:\s+([0-9\.]*)%us,\s+([0-9\.]*)%sy,\s+([0-9\.]*)%ni,\s+'
r'([0-9\.]*)%id,\s+([0-9\.]*)%wa,\s+([0-9\.]*)%hi,\s+([0-9\.]*)%si,\s+([0-9\.]*)%st')
# Mem: 5990952k total, 4864016k used, 1126936k free, 441444k buffers
miq_mem = re.compile(r'Mem:\s+([0-9]*)k\stotal,\s+([0-9]*)k\sused,\s+([0-9]*)k\sfree,\s+'
r'([0-9]*)k\sbuffers')
# Swap: 9957368k total, 0k used, 9957368k free, 1153156k cached
miq_swap = re.compile(r'Swap:\s+([0-9]*)k\stotal,\s+([0-9]*)k\sused,\s+([0-9]*)k\sfree,\s+'
r'([0-9]*)k\scached')
# PID PPID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
# 17526 2320 root 30 10 324m 9.8m 2444 S 0.0 0.2 0:09.38 /var/www/miq/vmdb/lib/workers/bin/worker.rb
miq_top = re.compile(r'([0-9]+)\s+[0-9]+\s+[A-Za-z0-9]+\s+[0-9]+\s+[0-9\-]+\s+([0-9\.mg]+)\s+'
r'([0-9\.mg]+)\s+([0-9\.mg]+)\s+[SRDZ]\s+([0-9\.]+)\s+([0-9\.]+)')
def evm_to_messages(evm_file, filters):
test_start = ''
test_end = ''
line_count = 0
messages = {}
msg_cmds = {}
runningtime = time()
evmlogfile = open(evm_file, 'r')
evm_log_line = evmlogfile.readline()
while evm_log_line:
line_count += 1
evm_log_line = evm_log_line.strip()
miqmsg_result = miqmsg.search(evm_log_line)
if miqmsg_result:
# Obtains the first timestamp in the log file
if test_start == '':
ts, pid = get_msg_timestamp_pid(evm_log_line)
test_start = ts
# A message was first put on the queue, this starts its queuing time
if (miqmsg_result.group(1) == 'MiqQueue.put'):
msg_cmd = get_msg_cmd(evm_log_line)
msg_id = get_msg_id(evm_log_line)
if msg_id:
ts, pid = get_msg_timestamp_pid(evm_log_line)
test_end = ts
messages[msg_id] = MiqMsgStat()
messages[msg_id].msg_id = '\'' + msg_id + '\''
messages[msg_id].msg_cmd = msg_cmd
messages[msg_id].pid_put = pid
messages[msg_id].puttime = ts
msg_args = get_msg_args(evm_log_line)
if msg_args is False:
logger.debug('Could not obtain message args line #: %s', line_count)
else:
messages[msg_id].msg_args = msg_args
else:
logger.error('Could not obtain message id, line #: %s', line_count)
elif (miqmsg_result.group(1) == 'MiqQueue.get_via_drb'):
msg_id = get_msg_id(evm_log_line)
if msg_id:
if msg_id in messages:
ts, pid = get_msg_timestamp_pid(evm_log_line)
test_end = ts
messages[msg_id].pid_get = pid
messages[msg_id].gettime = ts
messages[msg_id].deq_time = get_msg_deq(evm_log_line)
else:
logger.error('Message ID not in dictionary: %s', msg_id)
else:
logger.error('Could not obtain message id, line #: %s', line_count)
elif (miqmsg_result.group(1) == 'MiqQueue.delivered'):
msg_id = get_msg_id(evm_log_line)
if msg_id:
ts, pid = get_msg_timestamp_pid(evm_log_line)
test_end = ts
if msg_id in messages:
messages[msg_id].del_time = get_msg_del(evm_log_line)
messages[msg_id].total_time = messages[msg_id].deq_time + \
messages[msg_id].del_time
else:
logger.error('Message ID not in dictionary: %s', msg_id)
else:
logger.error('Could not obtain message id, line #: %s', line_count)
if (line_count % 100000) == 0:
timediff = time() - runningtime
runningtime = time()
logger.info('Count %s : Parsed 100000 lines in %s', line_count, timediff)
evm_log_line = evmlogfile.readline()
# I tried to avoid two loops but this reduced the complexity of filtering on messages.
# By filtering over messages, we can better display what is occurring under the covers, as a
# daily rollup is picked up off the queue differently than an hourly rollup, etc.
for msg in sorted(messages.keys()):
msg_args = messages[msg].msg_args
# Determine if the pattern matches and append to the command if it does
for p_filter in filters:
results = filters[p_filter].search(msg_args.strip())
if results:
messages[msg].msg_cmd = '{}{}'.format(messages[msg].msg_cmd, p_filter)
break
msg_cmd = messages[msg].msg_cmd
if msg_cmd not in msg_cmds:
msg_cmds[msg_cmd] = {}
msg_cmds[msg_cmd]['total'] = []
msg_cmds[msg_cmd]['queue'] = []
msg_cmds[msg_cmd]['execute'] = []
if messages[msg].total_time != 0:
msg_cmds[msg_cmd]['total'].append(round(messages[msg].total_time, 2))
msg_cmds[msg_cmd]['queue'].append(round(messages[msg].deq_time, 2))
msg_cmds[msg_cmd]['execute'].append(round(messages[msg].del_time, 2))
return messages, msg_cmds, test_start, test_end, line_count
def evm_to_workers(evm_file):
# Use grep to reduce # of lines to sort through
p = subprocess.Popen(['grep', 'Interrupt\\|MIQ([A-Za-z]*) ID\\|"evm_worker_uptime_exceeded\\|'
'"evm_worker_memory_exceeded\\|"evm_worker_stop\\|Worker exiting.', evm_file],
stdout=subprocess.PIPE)
greppedevmlog, err = p.communicate()
greppedevmlog = greppedevmlog.strip()
evmlines = greppedevmlog.split('\n')
workers = {}
wkr_upt_exc = 0
wkr_mem_exc = 0
wkr_stp = 0
wkr_int = 0
wkr_ext = 0
for evm_log_line in evmlines:
ts, pid = get_msg_timestamp_pid(evm_log_line)
miqwkr_result = miqwkr.search(evm_log_line)
if miqwkr_result:
workerid = int(miqwkr_result.group(2))
if workerid not in workers:
workers[workerid] = MiqWorker()
workers[workerid].worker_type = miqwkr_result.group(1)
workers[workerid].pid = miqwkr_result.group(3)
workers[workerid].worker_id = int(workerid)
workers[workerid].start_ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')
elif 'evm_worker_uptime_exceeded' in evm_log_line:
miqwkr_id_result = miqwkr_id.search(evm_log_line)
if miqwkr_id_result:
workerid = int(miqwkr_id_result.group(1))
if workerid in workers:
if not workers[workerid].terminated:
wkr_upt_exc += 1
workers[workerid].terminated = 'evm_worker_uptime_exceeded'
workers[workerid].end_ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')
elif 'evm_worker_memory_exceeded' in evm_log_line:
miqwkr_id_result = miqwkr_id.search(evm_log_line)
if miqwkr_id_result:
workerid = int(miqwkr_id_result.group(1))
if workerid in workers:
if not workers[workerid].terminated:
wkr_mem_exc += 1
workers[workerid].terminated = 'evm_worker_memory_exceeded'
workers[workerid].end_ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')
elif 'evm_worker_stop' in evm_log_line:
miqwkr_id_result = miqwkr_id.search(evm_log_line)
if miqwkr_id_result:
workerid = int(miqwkr_id_result.group(1))
if workerid in workers:
if not workers[workerid].terminated:
wkr_stp += 1
workers[workerid].terminated = 'evm_worker_stop'
workers[workerid].end_ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')
elif 'Interrupt' in evm_log_line:
for workerid in workers:
if not workers[workerid].end_ts:
wkr_int += 1
workers[workerid].terminated = 'Interrupted'
workers[workerid].end_ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')
elif 'Worker exiting.' in evm_log_line:
miqwkr_id_2_result = miqwkr_id_2.search(evm_log_line)
if miqwkr_id_2_result:
workerid = int(miqwkr_id_2_result.group(1))
if workerid in workers:
if not workers[workerid].terminated:
wkr_ext += 1
workers[workerid].terminated = 'Worker Exited'
workers[workerid].end_ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f')
return workers, wkr_mem_exc, wkr_upt_exc, wkr_stp, wkr_int, wkr_ext, len(evmlines)
def split_appliance_charts(top_appliance, charts_dir):
# Automatically split top_output data roughly per day
minutes_in_a_day = 24 * 60
size_data = len(top_appliance['datetimes'])
start_hour = top_appliance['datetimes'][0][11:13]
start_minute = top_appliance['datetimes'][0][14:16]
bracket_end = minutes_in_a_day - ((int(start_hour) * 60) + int(start_minute))
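# top samples are assumed to arrive roughly once per minute, so bracket_end
# is the number of samples remaining before the first midnight boundary.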
if size_data > minutes_in_a_day:
# Greater than one day worth of data, split
file_names = [generate_appliance_charts(top_appliance, charts_dir, 0, bracket_end)]
for start_bracket in range(bracket_end, len(top_appliance['datetimes']), minutes_in_a_day):
if (start_bracket + minutes_in_a_day) > size_data:
end_index = size_data - 1
else:
end_index = start_bracket + minutes_in_a_day
file_names.append(generate_appliance_charts(top_appliance, charts_dir, start_bracket,
end_index))
return file_names
else:
# Less than one day worth of data, do not split
return [generate_appliance_charts(top_appliance, charts_dir, 0, size_data - 1)]
def generate_appliance_charts(top_appliance, charts_dir, start_index, end_index):
cpu_chart_file = '/{}-app-cpu.svg'.format(top_appliance['datetimes'][start_index])
mem_chart_file = '/{}-app-mem.svg'.format(top_appliance['datetimes'][start_index])
lines = {}
lines['Idle'] = top_appliance['cpuid'][start_index:end_index]
lines['User'] = top_appliance['cpuus'][start_index:end_index]
lines['System'] = top_appliance['cpusy'][start_index:end_index]
lines['Nice'] = top_appliance['cpuni'][start_index:end_index]
lines['Wait'] = top_appliance['cpuwa'][start_index:end_index]
# lines['Hi'] = top_appliance['cpuhi'][start_index:end_index] # IRQs %
# lines['Si'] = top_appliance['cpusi'][start_index:end_index] # Soft IRQs %
# lines['St'] = top_appliance['cpust'][start_index:end_index] # Steal CPU %
line_chart_render('CPU Usage', 'Date Time', 'Percent',
top_appliance['datetimes'][start_index:end_index], lines, charts_dir.join(cpu_chart_file),
True)
lines = {}
lines['Memory Total'] = top_appliance['memtot'][start_index:end_index]
lines['Memory Free'] = top_appliance['memfre'][start_index:end_index]
lines['Memory Used'] = top_appliance['memuse'][start_index:end_index]
lines['Swap Used'] = top_appliance['swause'][start_index:end_index]
lines['cached'] = top_appliance['cached'][start_index:end_index]
line_chart_render('Memory Usage', 'Date Time', 'MiB',
top_appliance['datetimes'][start_index:end_index], lines, charts_dir.join(mem_chart_file))
return cpu_chart_file, mem_chart_file
def generate_hourly_charts_and_csvs(hourly_buckets, charts_dir):
for cmd in sorted(hourly_buckets):
current_csv = 'hourly_' + cmd + '.csv'
csv_rawdata_path = log_path.join('csv_output', current_csv)
logger.info('Writing %s csvs/charts', cmd)
output_file = csv_rawdata_path.open('w', ensure=True)
csvwriter = csv.DictWriter(output_file, fieldnames=MiqMsgBucket().headers,
delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
csvwriter.writeheader()
for dt in sorted(hourly_buckets[cmd].keys()):
linechartxaxis = []
avgdeqtimings = []
mindeqtimings = []
maxdeqtimings = []
avgdeltimings = []
mindeltimings = []
maxdeltimings = []
cmd_put = []
cmd_get = []
sortedhr = sorted(hourly_buckets[cmd][dt].keys())
for hr in sortedhr:
linechartxaxis.append(str(hr))
bk = hourly_buckets[cmd][dt][hr]
avgdeqtimings.append(round(bk.avg_deq, 2))
mindeqtimings.append(round(bk.min_deq, 2))
maxdeqtimings.append(round(bk.max_deq, 2))
avgdeltimings.append(round(bk.avg_del, 2))
mindeltimings.append(round(bk.min_del, 2))
maxdeltimings.append(round(bk.max_del, 2))
cmd_put.append(bk.total_put)
cmd_get.append(bk.total_get)
bk.date = dt
bk.hour = hr
csvwriter.writerow(dict(bk))
lines = {}
lines['Put ' + cmd] = cmd_put
lines['Get ' + cmd] = cmd_get
line_chart_render(cmd + ' Command Put/Get Count', 'Hour during ' + dt,
'# Count of Commands', linechartxaxis, lines,
charts_dir.join('/{}-{}-cmdcnt.svg'.format(cmd, dt)))
lines = {}
lines['Average Dequeue Timing'] = avgdeqtimings
lines['Min Dequeue Timing'] = mindeqtimings
lines['Max Dequeue Timing'] = maxdeqtimings
line_chart_render(cmd + ' Dequeue Timings', 'Hour during ' + dt, 'Time (s)',
linechartxaxis, lines, charts_dir.join('/{}-{}-dequeue.svg'.format(cmd, dt)))
lines = {}
lines['Average Deliver Timing'] = avgdeltimings
lines['Min Deliver Timing'] = mindeltimings
lines['Max Deliver Timing'] = maxdeltimings
line_chart_render(cmd + ' Deliver Timings', 'Hour during ' + dt, 'Time (s)',
linechartxaxis, lines, charts_dir.join('/{}-{}-deliver.svg'.format(cmd, dt)))
output_file.close()
def generate_raw_data_csv(rawdata_dict, csv_file_name):
csv_rawdata_path = log_path.join('csv_output', csv_file_name)
output_file = csv_rawdata_path.open('w', ensure=True)
csvwriter = csv.DictWriter(output_file, fieldnames=rawdata_dict[rawdata_dict.keys()[0]].headers,
delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
csvwriter.writeheader()
sorted_rd_keys = sorted(rawdata_dict.keys())
for key in sorted_rd_keys:
csvwriter.writerow(dict(rawdata_dict[key]))
def generate_total_time_charts(msg_cmds, charts_dir):
for cmd in sorted(msg_cmds):
logger.info('Generating Total Time Chart for %s', cmd)
lines = {}
lines['Total Time'] = msg_cmds[cmd]['total']
lines['Queue'] = msg_cmds[cmd]['queue']
lines['Execute'] = msg_cmds[cmd]['execute']
line_chart_render(cmd + ' Total Time', 'Message #', 'Time (s)', [], lines,
charts_dir.join('/{}-total.svg'.format(cmd)))
def generate_worker_charts(workers, top_workers, charts_dir):
for worker in top_workers:
logger.info('Generating Charts for Worker: %s Type: %s',
worker, workers[worker].worker_type)
worker_name = '{}-{}'.format(worker, workers[worker].worker_type)
lines = {}
lines['Virt Mem'] = top_workers[worker]['virt']
lines['Res Mem'] = top_workers[worker]['res']
lines['Shared Mem'] = top_workers[worker]['share']
line_chart_render(worker_name, 'Date Time', 'Memory in MiB',
top_workers[worker]['datetimes'], lines,
charts_dir.join('/{}-Memory.svg'.format(worker_name)))
lines = {}
lines['CPU %'] = top_workers[worker]['cpu_per']
line_chart_render(worker_name, 'Date Time', 'CPU Usage', top_workers[worker]['datetimes'],
lines, charts_dir.join('/{}-CPU.svg'.format(worker_name)))
def get_first_miqtop(top_log_file):
# Find first miqtop log line
p = subprocess.Popen(['grep', '-m', '1', '^miqtop\:', top_log_file], stdout=subprocess.PIPE)
greppedtop, err = p.communicate()
str_start = greppedtop.index('is->')
miqtop_time = du_parser.parse(greppedtop[str_start:], fuzzy=True, ignoretz=True)
timezone_offset = int(greppedtop[str_start + 34:str_start + 37])
miqtop_time = miqtop_time - timedelta(hours=timezone_offset)
return miqtop_time, timezone_offset
def get_msg_args(log_line):
miqmsg_args_result = miqmsg_args.search(log_line)
if miqmsg_args_result:
return miqmsg_args_result.group(1)
else:
return False
def get_msg_cmd(log_line):
miqmsg_cmd_result = miqmsg_cmd.search(log_line)
if miqmsg_cmd_result:
return miqmsg_cmd_result.group(1)
else:
return False
def get_msg_del(log_line):
miqmsg_del_result = miqmsg_del.search(log_line)
if miqmsg_del_result:
return float(miqmsg_del_result.group(1))
else:
return False
def get_msg_deq(log_line):
miqmsg_deq_result = miqmsg_deq.search(log_line)
if miqmsg_deq_result:
return float(miqmsg_deq_result.group(1))
else:
return False
def get_msg_id(log_line):
miqmsg_id_result = miqmsg_id.search(log_line)
if miqmsg_id_result:
return miqmsg_id_result.group(1)
else:
return False
def get_msg_timestamp_pid(log_line):
# Obtains the timestamp and pid
ts_result = log_stamp.search(log_line)
if ts_result:
dt_evm = '{} {}'.format(ts_result.group(1), ts_result.group(2))
return dt_evm, ts_result.group(3)
else:
return False, 0
def hour_bucket_init(init):
if init:
return MiqMsgBucket()
else:
return {}
def line_chart_render(title, xtitle, ytitle, x_labels, lines, fname, stacked=False):
if stacked:
line_chart = pygal.StackedLine()
else:
line_chart = pygal.Line()
line_chart.title = title
line_chart.x_title = xtitle
line_chart.y_title = ytitle
line_chart.title_font_size = 8
line_chart.legend_font_size = 8
line_chart.truncate_legend = 26
line_chart.x_labels = x_labels
sortedlines = sorted(lines.keys())
for line in sortedlines:
line_chart.add(line, lines[line])
line_chart.render_to_file(str(fname))
def messages_to_hourly_buckets(messages, test_start, test_end):
hr_bkt = {}
# Hour buckets look like: hr_bkt[msg_cmd][msg_date][msg_hour] = MiqMsgBucket()
for msg in messages:
# put on queue, deals with queuing:
msg_cmd = messages[msg].msg_cmd
putdate = messages[msg].puttime[:10]
puthour = messages[msg].puttime[11:13]
if msg_cmd not in hr_bkt:
hr_bkt[msg_cmd] = provision_hour_buckets(test_start, test_end)
hr_bkt[msg_cmd][putdate][puthour].total_put += 1
hr_bkt[msg_cmd][putdate][puthour].sum_deq += messages[msg].deq_time
if (hr_bkt[msg_cmd][putdate][puthour].min_deq == 0 or
hr_bkt[msg_cmd][putdate][puthour].min_deq > messages[msg].deq_time):
hr_bkt[msg_cmd][putdate][puthour].min_deq = messages[msg].deq_time
if (hr_bkt[msg_cmd][putdate][puthour].max_deq == 0 or
hr_bkt[msg_cmd][putdate][puthour].max_deq < messages[msg].deq_time):
hr_bkt[msg_cmd][putdate][puthour].max_deq = messages[msg].deq_time
hr_bkt[msg_cmd][putdate][puthour].avg_deq = \
hr_bkt[msg_cmd][putdate][puthour].sum_deq / hr_bkt[msg_cmd][putdate][puthour].total_put
# Get time is when the message is delivered
getdate = messages[msg].gettime[:10]
gethour = messages[msg].gettime[11:13]
hr_bkt[msg_cmd][getdate][gethour].total_get += 1
hr_bkt[msg_cmd][getdate][gethour].sum_del += messages[msg].del_time
if (hr_bkt[msg_cmd][getdate][gethour].min_del == 0 or
hr_bkt[msg_cmd][getdate][gethour].min_del > messages[msg].del_time):
hr_bkt[msg_cmd][getdate][gethour].min_del = messages[msg].del_time
if (hr_bkt[msg_cmd][getdate][gethour].max_del == 0 or
hr_bkt[msg_cmd][getdate][gethour].max_del < messages[msg].del_time):
hr_bkt[msg_cmd][getdate][gethour].max_del = messages[msg].del_time
hr_bkt[msg_cmd][getdate][gethour].avg_del = \
hr_bkt[msg_cmd][getdate][gethour].sum_del / hr_bkt[msg_cmd][getdate][gethour].total_get
return hr_bkt
def messages_to_statistics_csv(messages, statistics_file_name):
all_statistics = []
for msg_id in messages:
msg = messages[msg_id]
added = False
if len(all_statistics) > 0:
for msg_statistics in all_statistics:
if msg_statistics.cmd == msg.msg_cmd:
if msg.del_time > 0:
msg_statistics.delivertimes.append(float(msg.del_time))
msg_statistics.gets += 1
msg_statistics.dequeuetimes.append(float(msg.deq_time))
msg_statistics.totaltimes.append(float(msg.total_time))
msg_statistics.puts += 1
added = True
break
if not added:
msg_statistics = MiqMsgLists()
msg_statistics.cmd = msg.msg_cmd
if msg.del_time > 0:
msg_statistics.delivertimes.append(float(msg.del_time))
msg_statistics.gets = 1
msg_statistics.dequeuetimes.append(float(msg.deq_time))
msg_statistics.totaltimes.append(float(msg.total_time))
msg_statistics.puts = 1
all_statistics.append(msg_statistics)
csvdata_path = log_path.join('csv_output', statistics_file_name)
outputfile = csvdata_path.open('w', ensure=True)
try:
csvfile = csv.writer(outputfile)
metrics = ['samples', 'min', 'avg', 'median', 'max', 'std', '90', '99']
measurements = ['deq_time', 'del_time', 'total_time']
headers = ['cmd', 'puts', 'gets']
for measurement in measurements:
for metric in metrics:
headers.append('{}_{}'.format(measurement, metric))
csvfile.writerow(headers)
# Contents of CSV
for msg_statistics in sorted(all_statistics, key=lambda x: x.cmd):
if msg_statistics.gets > 1:
logger.debug('Samples/Avg/90th/Std: %s: %s : %s : %s,Cmd: %s',
str(len(msg_statistics.totaltimes)).rjust(7),
str(round(numpy.average(msg_statistics.totaltimes), 3)).rjust(7),
str(round(numpy.percentile(msg_statistics.totaltimes, 90), 3)).rjust(7),
str(round(numpy.std(msg_statistics.totaltimes), 3)).rjust(7),
msg_statistics.cmd)
stats = [msg_statistics.cmd, msg_statistics.puts, msg_statistics.gets]
stats.extend(generate_statistics(msg_statistics.dequeuetimes, 3))
stats.extend(generate_statistics(msg_statistics.delivertimes, 3))
stats.extend(generate_statistics(msg_statistics.totaltimes, 3))
csvfile.writerow(stats)
finally:
outputfile.close()
def provision_hour_buckets(test_start, test_end, init=True):
buckets = {}
start_date = datetime.strptime(test_start[:10], '%Y-%m-%d')
end_date = datetime.strptime(test_end[:10], '%Y-%m-%d')
start_hr = int(test_start[11:13])
end_hr = int(test_end[11:13]) + 1
delta_date = end_date - start_date
for dates in range(delta_date.days + 1):
new_date = start_date + timedelta(days=dates)
buckets[new_date.strftime('%Y-%m-%d')] = {}
sorteddt = sorted(buckets.keys())
for date in sorteddt:
if date == test_start[:10]:
if date == test_end[:10]:
for hr in range(start_hr, end_hr):
buckets[date][str(hr).zfill(2)] = hour_bucket_init(init)
else:
for hr in range(start_hr, 24):
buckets[date][str(hr).zfill(2)] = hour_bucket_init(init)
elif date == test_end[:10]:
for hr in range(end_hr):
buckets[date][str(hr).zfill(2)] = hour_bucket_init(init)
else:
for hr in range(24):
buckets[date][str(hr).zfill(2)] = hour_bucket_init(init)
if init:
buckets[''] = {}
buckets[''][''] = MiqMsgBucket()
return buckets
def top_to_appliance(top_file):
# Find first miqtop log line
miqtop_time, timezone_offset = get_first_miqtop(top_file)
runningtime = time()
grep_pattern = '^top\s\-\s\\|^miqtop\:\\|^Cpu(s)\:\\|^Mem\:\\|^Swap\:'
# Use grep to reduce # of lines to sort through
p = subprocess.Popen(['grep', grep_pattern, top_file], stdout=subprocess.PIPE)
greppedtop, err = p.communicate()
timediff = time() - runningtime
logger.info('Grepped top_output for CPU/Mem/Swap & time data in %s', timediff)
top_lines = greppedtop.strip().split('\n')
line_count = 0
top_keys = ['datetimes', 'cpuus', 'cpusy', 'cpuni', 'cpuid', 'cpuwa', 'cpuhi', 'cpusi', 'cpust',
'memtot', 'memuse', 'memfre', 'buffer', 'swatot', 'swause', 'swafre', 'cached']
top_app = dict((key, []) for key in top_keys)
cur_time = None
miqtop_ahead = True
runningtime = time()
for top_line in top_lines:
line_count += 1
if 'top - ' in top_line:
# top - 11:00:43
cur_hour = int(top_line[6:8])
cur_min = int(top_line[9:11])
cur_sec = int(top_line[12:14])
if miqtop_ahead:
# Have not found miqtop date/time yet so we must rely on miqtop date/time "ahead"
if cur_hour <= miqtop_time.hour:
cur_time = miqtop_time.replace(hour=cur_hour, minute=cur_min, second=cur_sec) \
- timedelta(hours=timezone_offset)
else:
# miqtop_time is ahead by date
logger.info('miqtop_time is ahead by one day')
cur_time = miqtop_time - timedelta(days=1)
cur_time = cur_time.replace(hour=cur_hour, minute=cur_min, second=cur_sec) \
- timedelta(hours=timezone_offset)
else:
cur_time = miqtop_time.replace(hour=cur_hour, minute=cur_min, second=cur_sec) \
- timedelta(hours=timezone_offset)
elif 'miqtop: ' in top_line:
miqtop_ahead = False
# miqtop: .* is-> Mon Jan 26 08:57:39 EST 2015 -0500
str_start = top_line.index('is->')
miqtop_time = du_parser.parse(top_line[str_start:], fuzzy=True, ignoretz=True)
# Time logged in top is the system's time which is ahead/behind by the timezone offset
timezone_offset = int(top_line[str_start + 34:str_start + 37])
miqtop_time = miqtop_time - timedelta(hours=timezone_offset)
elif 'Cpu(s): ' in top_line:
miq_cpu_result = miq_cpu.search(top_line)
if miq_cpu_result:
top_app['datetimes'].append(str(cur_time))
top_app['cpuus'].append(float(miq_cpu_result.group(1).strip()))
top_app['cpusy'].append(float(miq_cpu_result.group(2).strip()))
top_app['cpuni'].append(float(miq_cpu_result.group(3).strip()))
top_app['cpuid'].append(float(miq_cpu_result.group(4).strip()))
top_app['cpuwa'].append(float(miq_cpu_result.group(5).strip()))
top_app['cpuhi'].append(float(miq_cpu_result.group(6).strip()))
top_app['cpusi'].append(float(miq_cpu_result.group(7).strip()))
top_app['cpust'].append(float(miq_cpu_result.group(8).strip()))
else:
logger.error('Issue with miq_cpu regex: %s', top_line)
elif 'Mem: ' in top_line:
miq_mem_result = miq_mem.search(top_line)
if miq_mem_result:
top_app['memtot'].append(round(float(miq_mem_result.group(1).strip()) / 1024, 2))
top_app['memuse'].append(round(float(miq_mem_result.group(2).strip()) / 1024, 2))
top_app['memfre'].append(round(float(miq_mem_result.group(3).strip()) / 1024, 2))
top_app['buffer'].append(round(float(miq_mem_result.group(4).strip()) / 1024, 2))
else:
logger.error('Issue with miq_mem regex: %s', top_line)
elif 'Swap: ' in top_line:
miq_swap_result = miq_swap.search(top_line)
if miq_swap_result:
top_app['swatot'].append(round(float(miq_swap_result.group(1).strip()) / 1024, 2))
top_app['swause'].append(round(float(miq_swap_result.group(2).strip()) / 1024, 2))
top_app['swafre'].append(round(float(miq_swap_result.group(3).strip()) / 1024, 2))
top_app['cached'].append(round(float(miq_swap_result.group(4).strip()) / 1024, 2))
else:
logger.error('Issue with miq_swap regex: %s', top_line)
else:
logger.error('Issue with grepping of top file:%s', top_line)
if (line_count % 20000) == 0:
timediff = time() - runningtime
runningtime = time()
logger.info('Count %s : Parsed 20000 lines in %s', line_count, timediff)
return top_app, len(top_lines)
def top_to_workers(workers, top_file):
# Find first miqtop log line
miqtop_time, timezone_offset = get_first_miqtop(top_file)
runningtime = time()
grep_pids = ''
for wkr in workers:
grep_pids = '{}^{}\s\\|'.format(grep_pids, workers[wkr].pid)
grep_pattern = '{}^top\s\-\s\\|^miqtop\:'.format(grep_pids)
# Use grep to reduce # of lines to sort through
p = subprocess.Popen(['grep', grep_pattern, top_file], stdout=subprocess.PIPE)
greppedtop, err = p.communicate()
timediff = time() - runningtime
logger.info('Grepped top_output for pids & time data in %s', timediff)
# This is very ugly because miqtop does include the date but top does not
# Also, pids can be reused, so we pay careful attention to when each pid starts and ends
top_lines = greppedtop.strip().split('\n')
line_count = 0
top_workers = {}
cur_time = None
miqtop_ahead = True
runningtime = time()
for top_line in top_lines:
line_count += 1
if 'top - ' in top_line:
# top - 11:00:43
cur_hour = int(top_line[6:8])
cur_min = int(top_line[9:11])
cur_sec = int(top_line[12:14])
if miqtop_ahead:
# Have not found miqtop time yet so we must rely on miqtop time "ahead"
if cur_hour <= miqtop_time.hour:
cur_time = miqtop_time.replace(hour=cur_hour, minute=cur_min, second=cur_sec) \
- timedelta(hours=timezone_offset)
else:
# miqtop_time is ahead by date
logger.info('miqtop_time is ahead by one day')
cur_time = miqtop_time - timedelta(days=1)
cur_time = cur_time.replace(hour=cur_hour, minute=cur_min, second=cur_sec) \
- timedelta(hours=timezone_offset)
else:
cur_time = miqtop_time.replace(hour=cur_hour, minute=cur_min, second=cur_sec) \
- timedelta(hours=timezone_offset)
elif 'miqtop: ' in top_line:
miqtop_ahead = False
# miqtop: .* is-> Mon Jan 26 08:57:39 EST 2015 -0500
str_start = top_line.index('is->')
miqtop_time = du_parser.parse(top_line[str_start:], fuzzy=True, ignoretz=True)
# Time logged in top is the system's time which is ahead/behind by the timezone offset
timezone_offset = int(top_line[str_start + 34:str_start + 37])
miqtop_time = miqtop_time - timedelta(hours=timezone_offset)
else:
top_results = miq_top.search(top_line)
if top_results:
top_pid = top_results.group(1)
top_virt = convert_top_mem_to_mib(top_results.group(2))
top_res = convert_top_mem_to_mib(top_results.group(3))
top_share = convert_top_mem_to_mib(top_results.group(4))
top_cpu_per = float(top_results.group(5))
top_mem_per = float(top_results.group(6))
for worker in workers:
if workers[worker].pid == top_pid:
if cur_time > workers[worker].start_ts and \
(workers[worker].end_ts == '' or cur_time < workers[worker].end_ts):
w_id = workers[worker].worker_id
if w_id not in top_workers:
top_workers[w_id] = {}
top_workers[w_id]['datetimes'] = []
top_workers[w_id]['virt'] = []
top_workers[w_id]['res'] = []
top_workers[w_id]['share'] = []
top_workers[w_id]['cpu_per'] = []
top_workers[w_id]['mem_per'] = []
top_workers[w_id]['datetimes'].append(str(cur_time))
top_workers[w_id]['virt'].append(top_virt)
top_workers[w_id]['res'].append(top_res)
top_workers[w_id]['share'].append(top_share)
top_workers[w_id]['cpu_per'].append(top_cpu_per)
top_workers[w_id]['mem_per'].append(top_mem_per)
break
else:
logger.error('Issue with miq_top regex or grepping of top file:%s', top_line)
if (line_count % 20000) == 0:
timediff = time() - runningtime
runningtime = time()
logger.info('Count %s : Parsed 20000 lines in %s', line_count, timediff)
return top_workers, len(top_lines)
def perf_process_evm(evm_file, top_file):
msg_filters = {
'-hourly': re.compile(r'\"[0-9\-]*T[0-9\:]*Z\",\s\"hourly\"'),
'-daily': re.compile(r'\"[0-9\-]*T[0-9\:]*Z\",\s\"daily\"'),
'-EmsRedhat': re.compile(r'\[\[\"EmsRedhat\"\,\s[0-9]*\]\]'),
'-EmsVmware': re.compile(r'\[\[\"EmsVmware\"\,\s[0-9]*\]\]'),
'-EmsAmazon': re.compile(r'\[\[\"EmsAmazon\"\,\s[0-9]*\]\]'),
'-EmsOpenstack': re.compile(r'\[\[\"EmsOpenstack\"\,\s[0-9]*\]\]')
}
starttime = time()
initialtime = starttime
logger.info('----------- Parsing evm log file for messages -----------')
messages, msg_cmds, test_start, test_end, msg_lc = evm_to_messages(evm_file, msg_filters)
timediff = time() - starttime
logger.info('----------- Completed Parsing evm log file -----------')
logger.info('Parsed %s lines of evm log file for messages in %s', msg_lc, timediff)
logger.info('Total # of Messages: %d', len(messages))
logger.info('Total # of Commands: %d', len(msg_cmds))
logger.info('Start Time: %s', test_start)
logger.info('End Time: %s', test_end)
logger.info('----------- Parsing evm log file for workers -----------')
starttime = time()
workers, wkr_mem_exc, wkr_upt_exc, wkr_stp, wkr_int, wkr_ext, wkr_lc = evm_to_workers(evm_file)
timediff = time() - starttime
logger.info('----------- Completed Parsing evm log for workers -----------')
logger.info('Parsed %s lines of evm log file for workers in %s', wkr_lc, timediff)
logger.info('Total # of Workers: %d', len(workers))
logger.info('# Workers Memory Exceeded: %s', wkr_mem_exc)
logger.info('# Workers Uptime Exceeded: %s', wkr_upt_exc)
logger.info('# Workers Exited: %s', wkr_ext)
logger.info('# Workers Stopped: %s', wkr_stp)
logger.info('# Workers Interrupted: %s', wkr_int)
logger.info('----------- Parsing top_output log file for Appliance Metrics -----------')
starttime = time()
top_appliance, tp_lc = top_to_appliance(top_file)
timediff = time() - starttime
logger.info('----------- Completed Parsing top_output log -----------')
logger.info('Parsed %s lines of top_output file for Appliance Metrics in %s', tp_lc,
timediff)
logger.info('----------- Parsing top_output log file for worker CPU/Mem -----------')
starttime = time()
top_workers, tp_lc = top_to_workers(workers, top_file)
timediff = time() - starttime
logger.info('----------- Completed Parsing top_output log -----------')
logger.info('Parsed %s lines of top_output file for workers in %s', tp_lc, timediff)
charts_dir = log_path.join('charts')
if not os.path.exists(str(charts_dir)):
os.mkdir(str(charts_dir))
logger.info('----------- Generating Raw Data csv files -----------')
starttime = time()
generate_raw_data_csv(messages, 'queue-rawdata.csv')
generate_raw_data_csv(workers, 'workers-rawdata.csv')
timediff = time() - starttime
logger.info('Generated Raw Data csv files in: %s', timediff)
logger.info('----------- Generating Hourly Buckets -----------')
starttime = time()
hr_bkt = messages_to_hourly_buckets(messages, test_start, test_end)
timediff = time() - starttime
logger.info('Generated Hourly Buckets in: %s', timediff)
logger.info('----------- Generating Hourly Charts and csvs -----------')
starttime = time()
generate_hourly_charts_and_csvs(hr_bkt, charts_dir)
timediff = time() - starttime
logger.info('Generated Hourly Charts and csvs in: %s', timediff)
logger.info('----------- Generating Total Time Charts -----------')
starttime = time()
generate_total_time_charts(msg_cmds, charts_dir)
timediff = time() - starttime
logger.info('Generated Total Time Charts in: %s', timediff)
logger.info('----------- Generating Appliance Charts -----------')
starttime = time()
app_chart_files = split_appliance_charts(top_appliance, charts_dir)
timediff = time() - starttime
logger.info('Generated Appliance Charts in: %s', timediff)
logger.info('----------- Generating Worker Charts -----------')
starttime = time()
generate_worker_charts(workers, top_workers, charts_dir)
timediff = time() - starttime
logger.info('Generated Worker Charts in: %s', timediff)
logger.info('----------- Generating Message Statistics -----------')
starttime = time()
messages_to_statistics_csv(messages, 'queue-statistics.csv')
timediff = time() - starttime
logger.info('Generated Message Statistics in: %s', timediff)
logger.info('----------- Writing html files for report -----------')
# Write an index.html file for fast switching between graphs:
html_index = log_path.join('index.html').open('w', ensure=True)
cmd = hr_bkt.keys()[0]
html_index.write(
'<html>\n'
'<title>Performance Worker/Message Metrics</title>\n'
'<frameset cols="17%,83%">\n'
' <frame src="msg_menu.html" name="menu"/>\n'
' <frame src="charts/{}-{}-dequeue.svg" name="showframe" />\n'
'</frameset>\n'
'</html>'.format(cmd, sorted(hr_bkt[cmd].keys())[-1]))
html_index.close()
# Write the side bar menu html file
html_menu = log_path.join('msg_menu.html').open('w', ensure=True)
html_menu.write('<html>\n')
html_menu.write('<font size="2">')
html_menu.write('Appliance:<BR>')
for cpu_mem_charts in app_chart_files:
html_menu.write('{} <a href="charts{}" target="showframe">CPU</a> | '.format(
cpu_mem_charts[0][1:11], cpu_mem_charts[0]))
html_menu.write('<a href="charts{}" target="showframe">Memory</a><br>'.format(
cpu_mem_charts[1]))
html_menu.write('<a href="worker_menu.html" target="menu">Worker CPU/Memory</a><br>')
html_menu.write('Parsed {} lines for messages<br>'.format(msg_lc))
html_menu.write('Start Time: {}<br>'.format(test_start))
html_menu.write('End Time: {}<br>'.format(test_end))
html_menu.write('Message Count: {}<br>'.format(len(messages)))
html_menu.write('Command Count: {}<br>'.format(len(msg_cmds)))
html_menu.write('Parsed {} lines for workers<br>'.format(wkr_lc))
html_menu.write('Total Workers: {}<br>'.format(len(workers)))
html_menu.write('Workers Memory Exceeded: {}<br>'.format(wkr_mem_exc))
html_menu.write('Workers Uptime Exceeded: {}<br>'.format(wkr_upt_exc))
html_menu.write('Workers Exited: {}<br>'.format(wkr_ext))
html_menu.write('Workers Stopped: {}<br>'.format(wkr_stp))
html_menu.write('Workers Interrupted: {}<br>'.format(wkr_int))
html_menu.write('<a href="csv_output/messages-rawdata.csv">messages-rawdata.csv</a><br>')
html_menu.write('<a href="csv_output/messages-statistics.csv">messages-statistics.csv</a><br>')
html_menu.write('<a href="csv_output/workers-rawdata.csv">workers-rawdata.csv</a><br><br>')
# Sort by the commands which have the most messages, descending
for cmd in sorted(msg_cmds, key=lambda x: len(msg_cmds[x]['total']), reverse=True):
html_menu.write('<a href="csv_output/hourly_{}.csv"'
'target="showframe">{}</a><br>'.format(cmd, cmd))
html_menu.write('<a href="charts/{}-total.svg" target="showframe">'
'Total Messages: {} </a><br>'.format(cmd, len(msg_cmds[cmd]['total'])))
for dt in sorted(hr_bkt[cmd].keys()):
if dt == '':
html_menu.write('Queued: ')
else:
html_menu.write('{}: '.format(dt))
html_menu.write('<a href="charts/{}-{}-cmdcnt.svg" target="showframe">'
'cnt</a> | '.format(cmd, dt))
html_menu.write('<a href="charts/{}-{}-dequeue.svg" target="showframe">'
'deq</a> | '.format(cmd, dt))
html_menu.write('<a href="charts/{}-{}-deliver.svg" target="showframe">'
'del</a><br>'.format(cmd, dt))
html_menu.write('<br>')
html_menu.write('</font>')
html_menu.write('</html>')
html_menu.close()
html_wkr_menu = log_path.join('worker_menu.html').open('w', ensure=True)
html_wkr_menu.write('<html>\n')
html_wkr_menu.write('<font size="2">')
html_wkr_menu.write('Appliance:<BR>')
for cpu_mem_charts in app_chart_files:
html_wkr_menu.write('{}-<a href="charts{}" target="showframe">CPU</a> | '.format(
cpu_mem_charts[0][1:11], cpu_mem_charts[0]))
html_wkr_menu.write('<a href="charts{}" target="showframe">Memory</a><br>'.format(
cpu_mem_charts[1]))
html_wkr_menu.write('<a href="msg_menu.html" target="menu">Message Latencies</a><br>')
html_wkr_menu.write('Parsed {} lines for messages<br>'.format(msg_lc))
html_wkr_menu.write('Start Time: {}<br>'.format(test_start))
html_wkr_menu.write('End Time: {}<br>'.format(test_end))
html_wkr_menu.write('Message Count: {}<br>'.format(len(messages)))
html_wkr_menu.write('Command Count: {}<br>'.format(len(msg_cmds)))
html_wkr_menu.write('Parsed {} lines for workers<br>'.format(wkr_lc))
html_wkr_menu.write('Total Workers: {}<br>'.format(len(workers)))
html_wkr_menu.write('Workers Memory Exceeded: {}<br>'.format(wkr_mem_exc))
html_wkr_menu.write('Workers Uptime Exceeded: {}<br>'.format(wkr_upt_exc))
html_wkr_menu.write('Workers Exited: {}<br>'.format(wkr_ext))
html_wkr_menu.write('Workers Stopped: {}<br>'.format(wkr_stp))
html_wkr_menu.write('Workers Interrupted: {}<br>'.format(wkr_int))
html_wkr_menu.write('<a href="csv_output/messages-rawdata.csv">messages-rawdata.csv</a><br>')
html_wkr_menu.write('<a href="csv_output/messages-statistics.csv">'
'messages-statistics.csv</a><br>')
html_wkr_menu.write('<a href="csv_output/workers-rawdata.csv">workers-rawdata.csv</a><br><br>')
html_wkr_menu.write('Running Workers:<br>')
w_type = ''
for worker_id in sorted(workers, key=lambda x: workers[x].worker_type):
if workers[worker_id].terminated == '':
if not w_type == workers[worker_id].worker_type:
w_type = workers[worker_id].worker_type
html_wkr_menu.write('{}<br>'.format(w_type))
worker_name = '{}-{}'.format(worker_id, workers[worker_id].worker_type)
html_wkr_menu.write('{} - '.format(worker_id))
html_wkr_menu.write('<a href="charts/{}-CPU.svg" target="showframe">CPU</a>'
' | '.format(worker_name))
html_wkr_menu.write('<a href="charts/{}-Memory.svg" target="showframe">Memory</a><br>'
''.format(worker_name))
html_wkr_menu.write('<br>Terminated Workers:<br>')
w_type = ''
for worker_id in sorted(workers, key=lambda x: workers[x].worker_type):
if not workers[worker_id].terminated == '':
if not w_type == workers[worker_id].worker_type:
w_type = workers[worker_id].worker_type
html_wkr_menu.write('<br>{}<br>'.format(w_type))
worker_name = '{}-{}'.format(worker_id, workers[worker_id].worker_type)
html_wkr_menu.write('{} - '.format(worker_id))
html_wkr_menu.write('<a href="charts/{}-CPU.svg" target="showframe">CPU</a>'
' | '.format(worker_name))
html_wkr_menu.write('<a href="charts/{}-Memory.svg" target="showframe">Memory</a><br>'
''.format(worker_name))
html_wkr_menu.write('{}<br>'.format(workers[worker_id].terminated))
html_wkr_menu.write('</font>')
html_wkr_menu.write('</html>')
html_wkr_menu.close()
timediff = time() - initialtime
logger.info('----------- Finished -----------')
logger.info('Total time processing evm log file and generating report: %s', timediff)
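# Typical usage (log paths are illustrative); the report -- index.html plus
# the charts/ and csv_output/ directories -- is written under log_path:
#   perf_process_evm('/path/to/evm.log', '/path/to/top_output.log')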
class MiqMsgStat(object):
def __init__(self):
self.headers = ['msg_id', 'msg_cmd', 'msg_args', 'pid_put', 'pid_get', 'puttime', 'gettime',
'deq_time', 'del_time', 'total_time']
self.msg_id = ''
self.msg_cmd = ''
self.msg_args = ''
self.pid_put = ''
self.pid_get = ''
self.puttime = ''
self.gettime = ''
self.deq_time = 0.0
self.del_time = 0.0
self.total_time = 0.0
def __iter__(self):
for header in self.headers:
yield header, getattr(self, header)
def __str__(self):
return self.msg_cmd + ' : ' + self.msg_args + ' : ' + self.pid_put + ' : ' + self.pid_get \
+ ' : ' + self.puttime + ' : ' + self.gettime + ' : ' + str(self.deq_time) + ' : ' + \
str(self.del_time) + ' : ' + str(self.total_time)
class MiqMsgLists(object):
def __init__(self):
self.cmd = ''
self.puts = 0
self.gets = 0
self.dequeuetimes = []
self.delivertimes = []
self.totaltimes = []
class MiqMsgBucket(object):
def __init__(self):
self.headers = ['date', 'hour', 'total_put', 'total_get', 'sum_deq', 'min_deq', 'max_deq',
'avg_deq', 'sum_del', 'min_del', 'max_del', 'avg_del']
self.date = ''
self.hour = ''
self.total_put = 0
self.total_get = 0
self.sum_deq = 0.0
self.min_deq = 0.0
self.max_deq = 0.0
self.avg_deq = 0.0
self.sum_del = 0.0
self.min_del = 0.0
self.max_del = 0.0
self.avg_del = 0.0
def __iter__(self):
for header in self.headers:
yield header, getattr(self, header)
def __str__(self):
return self.date + ' : ' + self.hour + ' : ' + str(self.total_put) \
+ ' : ' + str(self.total_get) + ' : ' + str(self.sum_deq) + ' : ' + str(self.min_deq) \
+ ' : ' + str(self.max_deq) + ' : ' + str(self.avg_deq) + ' : ' + str(self.sum_del) \
+ ' : ' + str(self.min_del) + ' : ' + str(self.max_del) + ' : ' + str(self.avg_del)
class MiqWorker(object):
def __init__(self):
self.headers = ['worker_id', 'worker_type', 'pid', 'start_ts', 'end_ts', 'terminated']
self.worker_id = 0
self.worker_type = ''
self.pid = ''
self.start_ts = ''
self.end_ts = ''
self.terminated = ''
def __iter__(self):
for header in self.headers:
yield header, getattr(self, header)
def __str__(self):
return str(self.worker_id) + ' : ' + self.worker_type + ' : ' + self.pid + ' : ' + \
str(self.start_ts) + ' : ' + str(self.end_ts) + ' : ' + self.terminated
| gpl-2.0 |
rhelmer/socorro | socorro/unittest/cron/jobs/test_weekly_reports_partitions.py | 11 | 1478 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.plugins.attrib import attr
from crontabber.app import CronTabber
from socorro.unittest.cron.jobs.base import IntegrationTestBase
from socorro.unittest.cron.setup_configman import (
get_config_manager_for_crontabber,
)
#==============================================================================
@attr(integration='postgres')
class TestWeeklyReportsPartitions(IntegrationTestBase):
def get_standard_config(self):
return get_config_manager_for_crontabber().get_config()
def _setup_config_manager(self):
_super = super(TestWeeklyReportsPartitions, self)._setup_config_manager
return get_config_manager_for_crontabber(
jobs='socorro.cron.jobs.weekly_reports_partitions.'
'WeeklyReportsPartitionsCronApp|1d',
)
def test_run_weekly_reports_partitions(self):
config_manager = self._setup_config_manager()
with config_manager.context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
assert information['weekly-reports-partitions']
assert not information['weekly-reports-partitions']['last_error']
assert information['weekly-reports-partitions']['last_success']
| mpl-2.0 |
hgarrereyn/Th3g3ntl3man-CTF-Writeups | 2017/picoCTF_2017/problems/cryptography/weirderRSA/solution.py | 1 | 1411 | import binascii
import string
e = 65537
N = 211767290324398254371868231687152625271180645754760945403088952020394972457469805823582174761387551992017650132806887143281743839388543576204324782920306260516024555364515883886110655807724459040458316068890447499547881914042520229001396317762404169572753359966034696955079260396682467936073461651616640916909
dp = 10169576291048744120030378521334130877372575619400406464442859732399651284965479823750811638854185900836535026290910663113961810650660236370395359445734425
c = 42601238135749247354464266278794915846396919141313024662374579479712190675096500801203662531952565488623964806890491567595603873371264777262418933107257283084704170577649264745811855833366655322107229755242767948773320530979935167331115009578064779877494691384747024161661024803331738931358534779829183671004
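# Extended Euclidean algorithm: returns (g, x, y) with a*x + b*y == g, where
# g = gcd(a, b); modinv() below uses it to invert e modulo phi(N).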
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise ValueError('Modular inverse does not exist.')
else:
return x % m
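# dp is the CRT exponent d mod (p - 1), so e*dp == 1 (mod p - 1). Hence
# e*dp - 1 = k*(p - 1) for some integer 1 <= k < e, which rearranges to
# p = (e*dp - 1 + k) // k; trying each k until the candidate divides N
# recovers p.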
def factorize():
for k in range(2,e):
p = (e * dp - 1 + k) // k #Solves for p
if N % p == 0:
return p
return -1
p = factorize()
q = N // p
phi = (p - 1) * (q - 1)
d = modinv(e,phi)
print(binascii.unhexlify(hex(pow(c,d,N))[2:]))
| gpl-3.0 |
hanak/artshow-keeper | artshowkeeper/model/currency.py | 1 | 4797 | # Artshow Keeper: A support tool for keeping an Artshow running.
# Copyright (C) 2014 Ivo Hanak
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import logging
import decimal
from . currency_field import CurrencyField
from . dataset import Dataset
from artshowkeeper.common.convert import *
class Currency:
def __init__(self, logger, dataset, currencyCodes):
self.__logger = logger
self.__dataset = dataset
self.__currencyCodes = currencyCodes
self.__amountDecimalPlaces = self.__dataset.getCurrencyInfo(self.__currencyCodes)[0][CurrencyField.DECIMAL_PLACES]
def getDecimalPlaces(self):
"""Valid decimal places in primary currency."""
return self.__amountDecimalPlaces
def getInfo(self):
""" Get currency info.
Returns:
List of dict(CurrencyField)
"""
return self.__dataset.getCurrencyInfo(self.__currencyCodes)
def updateInfo(self, currencyInfoList):
""" Update currency info with amount in primary.
Args:
currencyInfoList(list of dict(CurrencyField))
Returns:
Result
"""
if currencyInfoList is not None and len(currencyInfoList) > 0:
primaryAmountInPrimary = toDecimal(currencyInfoList[0].get(CurrencyField.AMOUNT_IN_PRIMARY, None))
if primaryAmountInPrimary is not None and primaryAmountInPrimary != Decimal(1):
return Result.PRIMARY_AMOUNT_IN_PRIMARY_INVALID
return self.__dataset.updateCurrencyInfo(currencyInfoList)
def __convertAmountToCurrencies(self, amount, currencyInfoList):
""" Convert amount to given currencies.
Args:
amount(Decimal)
currencyInfoList(list of dict[CurrencyField])
Returns:
Array of amount in various currencies including formatting info (CurrencyField).
Primary currency is at index 0.
"""
if amount is None:
return []
currencyInfoList = [currencyInfo.copy() for currencyInfo in currencyInfoList]
for currencyInfo in currencyInfoList:
if currencyInfo[CurrencyField.AMOUNT_IN_PRIMARY] > 0:
try:
oneInFixedPoint = Decimal(10) ** currencyInfo[CurrencyField.DECIMAL_PLACES]
convertedAmountFixedPoint = (amount * oneInFixedPoint) / currencyInfo[CurrencyField.AMOUNT_IN_PRIMARY]
currencyInfo[CurrencyField.AMOUNT] = convertedAmountFixedPoint.quantize(1, rounding=decimal.ROUND_HALF_UP) / oneInFixedPoint
except decimal.InvalidOperation:
self.__logger.exception(
'__convertAmountToCurrencies: Amount "{0}" and currency "{1}" caused invalid operation. Returning zeros.'.format(
str(amount), str(currencyInfo[CurrencyField.CODE])))
currencyInfo[CurrencyField.AMOUNT] = Decimal(0)
else:
currencyInfo[CurrencyField.AMOUNT] = Decimal(0)
return currencyInfoList
def __updateAmountWithCurrency(self, element, fields, currencyInfoList):
for sourceKey, targetKey in fields.items():
amount = element.get(sourceKey, None)
element[targetKey] = self.__convertAmountToCurrencies(amount, currencyInfoList)
def updateAmountWithAllCurrencies(self, entity, fields):
if entity is None:
return
currencyInfoList = self.getInfo()
if isinstance(entity, list):
for element in entity:
self.__updateAmountWithCurrency(element, fields, currencyInfoList)
else:
self.__updateAmountWithCurrency(entity, fields, currencyInfoList)
def convertToAllCurrencies(self, amount):
return self.__convertAmountToCurrencies(
amount,
self.getInfo())
def roundInPrimary(self, value):
value = toDecimal(value)
if value is not None:
value = value.quantize(
Decimal(10) ** -self.__amountDecimalPlaces,
rounding=decimal.ROUND_HALF_UP)
return value
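# A minimal usage sketch (assumes a primary currency with two decimal places
# and a logger/dataset wired up elsewhere; values are illustrative):
#
#   currency = Currency(logger, dataset, ['CZK', 'EUR'])
#   currency.roundInPrimary('1.005')   # -> Decimal('1.01') (ROUND_HALF_UP)
#   currency.convertToAllCurrencies(Decimal(100))
#   # -> list of currency-info dicts with CurrencyField.AMOUNT filled in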
| gpl-3.0 |
nlloyd/SubliminalCollaborator | libs/twisted/internet/test/test_process.py | 5 | 25327 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorProcess}.
"""
__metaclass__ = type
import os, sys, signal, threading
from twisted.trial.unittest import TestCase, SkipTest
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.compat import set
from twisted.python.log import msg, err
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.internet import utils
from twisted.internet.interfaces import IReactorProcess, IProcessTransport
from twisted.internet.defer import Deferred, succeed
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.internet import _signals
class _ShutdownCallbackProcessProtocol(ProcessProtocol):
"""
An L{IProcessProtocol} which fires a Deferred when the process it is
associated with ends.
@ivar received: A C{dict} mapping file descriptors to lists of bytes
received from the child process on those file descriptors.
"""
def __init__(self, whenFinished):
self.whenFinished = whenFinished
self.received = {}
def childDataReceived(self, fd, bytes):
self.received.setdefault(fd, []).append(bytes)
def processEnded(self, reason):
self.whenFinished.callback(None)
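# A minimal usage sketch for the protocol above (illustrative; assumes a
# running reactor):
#
#   done = Deferred()
#   proto = _ShutdownCallbackProcessProtocol(done)
#   reactor.spawnProcess(proto, sys.executable, [sys.executable, "-c", ""])
#   done.addCallback(lambda ignored: proto.received)  # fd -> list of chunks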
class ProcessTestsBuilderBase(ReactorBuilder):
"""
Base class for L{IReactorProcess} tests which defines some tests which
can be applied to PTY or non-PTY uses of C{spawnProcess}.
Subclasses are expected to set the C{usePTY} attribute to C{True} or
C{False}.
"""
requiredInterfaces = [IReactorProcess]
def test_processTransportInterface(self):
"""
L{IReactorProcess.spawnProcess} connects the protocol passed to it
to a transport which provides L{IProcessTransport}.
"""
ended = Deferred()
protocol = _ShutdownCallbackProcessProtocol(ended)
reactor = self.buildReactor()
transport = reactor.spawnProcess(
protocol, sys.executable, [sys.executable, "-c", ""],
usePTY=self.usePTY)
# The transport is available synchronously, so we can check it right
# away (unlike many transport-based tests). This is convenient even
# though it's probably not how the spawnProcess interface should really
# work.
# We're not using verifyObject here because part of
# IProcessTransport is a lie - there are no getHost or getPeer
# methods. See #1124.
self.assertTrue(IProcessTransport.providedBy(transport))
# Let the process run and exit so we don't leave a zombie around.
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
def _writeTest(self, write):
"""
Helper for testing L{IProcessTransport} write functionality. This
method spawns a child process and gives C{write} a chance to write some
bytes to it. It then verifies that the bytes were actually written to
it (by relying on the child process to echo them back).
@param write: A two-argument callable. This is invoked with a process
transport and some bytes to write to it.
"""
reactor = self.buildReactor()
ended = Deferred()
protocol = _ShutdownCallbackProcessProtocol(ended)
bytes = "hello, world" + os.linesep
program = (
"import sys\n"
"sys.stdout.write(sys.stdin.readline())\n"
)
def startup():
transport = reactor.spawnProcess(
protocol, sys.executable, [sys.executable, "-c", program])
try:
write(transport, bytes)
except:
err(None, "Unhandled exception while writing")
transport.signalProcess('KILL')
reactor.callWhenRunning(startup)
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
self.assertEqual(bytes, "".join(protocol.received[1]))
def test_write(self):
"""
L{IProcessTransport.write} writes the specified C{str} to the standard
input of the child process.
"""
def write(transport, bytes):
transport.write(bytes)
self._writeTest(write)
def test_writeSequence(self):
"""
L{IProcessTransport.writeSequence} writes the specified C{list} of
C{str} to the standard input of the child process.
"""
def write(transport, bytes):
transport.writeSequence(list(bytes))
self._writeTest(write)
def test_writeToChild(self):
"""
L{IProcessTransport.writeToChild} writes the specified C{str} to the
specified file descriptor of the child process.
"""
def write(transport, bytes):
transport.writeToChild(0, bytes)
self._writeTest(write)
def test_writeToChildBadFileDescriptor(self):
"""
L{IProcessTransport.writeToChild} raises L{KeyError} if passed a file
descriptor which was not set up by L{IReactorProcess.spawnProcess}.
"""
def write(transport, bytes):
try:
self.assertRaises(KeyError, transport.writeToChild, 13, bytes)
finally:
# Just get the process to exit so the test can complete
transport.write(bytes)
self._writeTest(write)
def test_spawnProcessEarlyIsReaped(self):
"""
If, before the reactor is started with L{IReactorCore.run}, a
process is started with L{IReactorProcess.spawnProcess} and
terminates, the process is reaped once the reactor is started.
"""
reactor = self.buildReactor()
# Create the process with no shared file descriptors, so that there
# are no other events for the reactor to notice and "cheat" with.
# We want to be sure it's really dealing with the process exiting,
# not some associated event.
if self.usePTY:
childFDs = None
else:
childFDs = {}
# Arrange to notice the SIGCHLD.
signaled = threading.Event()
def handler(*args):
signaled.set()
signal.signal(signal.SIGCHLD, handler)
# Start a process - before starting the reactor!
ended = Deferred()
reactor.spawnProcess(
_ShutdownCallbackProcessProtocol(ended), sys.executable,
[sys.executable, "-c", ""], usePTY=self.usePTY, childFDs=childFDs)
# Wait for the SIGCHLD (which might have been delivered before we got
# here, but that's okay because the signal handler was installed above,
# before we could have gotten it).
signaled.wait(120)
if not signaled.isSet():
self.fail("Timed out waiting for child process to exit.")
# Capture the processEnded callback.
result = []
ended.addCallback(result.append)
if result:
# The synchronous path through spawnProcess / Process.__init__ /
# registerReapProcessHandler was encountered. There's no reason to
# start the reactor, because everything is done already.
return
# Otherwise, though, start the reactor so it can tell us the process
# exited.
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
# Make sure the reactor stopped because the Deferred fired.
self.assertTrue(result)
if getattr(signal, 'SIGCHLD', None) is None:
test_spawnProcessEarlyIsReaped.skip = (
"Platform lacks SIGCHLD, early-spawnProcess test can't work.")
def test_processExitedWithSignal(self):
"""
The C{reason} argument passed to L{IProcessProtocol.processExited} is a
L{ProcessTerminated} instance if the child process exits with a signal.
"""
sigName = 'TERM'
sigNum = getattr(signal, 'SIG' + sigName)
exited = Deferred()
source = (
"import sys\n"
# Talk so the parent process knows the process is running. This is
# necessary because ProcessProtocol.makeConnection may be called
# before this process is exec'd. It would be unfortunate if we
# SIGTERM'd the Twisted process while it was on its way to doing
# the exec.
"sys.stdout.write('x')\n"
"sys.stdout.flush()\n"
"sys.stdin.read()\n")
class Exiter(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
self.transport.signalProcess(sigName)
def childConnectionLost(self, fd):
msg('childConnectionLost(%d)' % (fd,))
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
# Protect the Deferred from the failure so that it follows
# the callback chain. This doesn't use the errback chain
# because it wants to make sure reason is a Failure. An
# Exception would also make an errback-based test pass, and
# that would be wrong.
exited.callback([reason])
def processEnded(self, reason):
msg('processEnded(%r)' % (reason,))
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Exiter(), sys.executable,
[sys.executable, "-c", source], usePTY=self.usePTY)
def cbExited((failure,)):
# Trapping implicitly verifies that it's a Failure (rather than
# an exception) and explicitly makes sure it's the right type.
failure.trap(ProcessTerminated)
err = failure.value
if platform.isWindows():
# Windows can't really /have/ signals, so it certainly can't
# report them as the reason for termination. Maybe there's
# something better we could be doing here, anyway? Hard to
# say. Anyway, this inconsistency between different platforms
# is extremely unfortunate and I would remove it if I
# could. -exarkun
self.assertIdentical(err.signal, None)
self.assertEqual(err.exitCode, 1)
else:
self.assertEqual(err.signal, sigNum)
self.assertIdentical(err.exitCode, None)
exited.addCallback(cbExited)
exited.addErrback(err)
exited.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
def test_systemCallUninterruptedByChildExit(self):
"""
If a child process exits while a system call is in progress, the system
call should not be interfered with. In particular, it should not fail
with EINTR.
Older versions of Twisted installed a SIGCHLD handler on POSIX without
using the feature exposed by the SA_RESTART flag to sigaction(2). The
most noticeable problem this caused was for blocking reads and writes to
sometimes fail with EINTR.
"""
reactor = self.buildReactor()
# XXX Since pygobject/pygtk wants to use signal.set_wakeup_fd,
# we aren't actually providing this functionality on the glib2
# or gtk2 reactors yet. See #4286 for the possibility of
# improving this.
skippedReactors = ["Glib2Reactor", "Gtk2Reactor", "PortableGtkReactor"]
hasSigInterrupt = getattr(signal, "siginterrupt", None) is not None
reactorClassName = reactor.__class__.__name__
if reactorClassName in skippedReactors and not hasSigInterrupt:
raise SkipTest(
"%s is not supported without siginterrupt" % reactorClassName)
if _signals.installHandler.__name__ == "_installHandlerUsingSignal":
raise SkipTest("_signals._installHandlerUsingSignal doesn't support this feature")
result = []
def f():
try:
f1 = os.popen('%s -c "import time; time.sleep(0.1)"' %
(sys.executable,))
f2 = os.popen('%s -c "import time; time.sleep(0.5); print \'Foo\'"' %
(sys.executable,))
# The read call below will blow up with an EINTR from the
# SIGCHLD from the first process exiting if we install a
# SIGCHLD handler without SA_RESTART. (which we used to do)
result.append(f2.read())
finally:
reactor.stop()
reactor.callWhenRunning(f)
self.runReactor(reactor)
self.assertEqual(result, ["Foo\n"])
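# Rough sketch of the mechanism under test (illustrative; the reactor's real
# handler installation lives in twisted.internet._signals):
#
#   import signal
#   signal.signal(signal.SIGCHLD, handler)
#   signal.siginterrupt(signal.SIGCHLD, False)  # request SA_RESTART semantics
#
# With SA_RESTART set, slow system calls such as read() are restarted after
# the handler runs instead of failing with EINTR.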
def test_openFileDescriptors(self):
"""
A spawned process has only stdin, stdout and stderr open
(file descriptor 3 is also reported as open, because of the call to
'os.listdir()').
"""
from twisted.python.runtime import platformType
if platformType != "posix":
raise SkipTest("Test only applies to POSIX platforms")
here = FilePath(__file__)
top = here.parent().parent().parent().parent()
source = (
"import sys",
"sys.path.insert(0, '%s')" % (top.path,),
"from twisted.internet import process",
"sys.stdout.write(str(process._listOpenFDs()))",
"sys.stdout.flush()")
def checkOutput(output):
self.assertEqual('[0, 1, 2, 3]', output)
reactor = self.buildReactor()
class Protocol(ProcessProtocol):
def __init__(self):
self.output = []
def outReceived(self, data):
self.output.append(data)
def processEnded(self, reason):
try:
checkOutput("".join(self.output))
finally:
reactor.stop()
proto = Protocol()
reactor.callWhenRunning(
reactor.spawnProcess, proto, sys.executable,
[sys.executable, "-Wignore", "-c", "\n".join(source)],
usePTY=self.usePTY)
self.runReactor(reactor)
def test_timelyProcessExited(self):
"""
If a spawned process exits, C{processExited} will be called in a
timely manner.
"""
reactor = self.buildReactor()
class ExitingProtocol(ProcessProtocol):
exited = False
def processExited(protoSelf, reason):
protoSelf.exited = True
reactor.stop()
self.assertEqual(reason.value.exitCode, 0)
protocol = ExitingProtocol()
reactor.callWhenRunning(
reactor.spawnProcess, protocol, sys.executable,
[sys.executable, "-c", "raise SystemExit(0)"],
usePTY=self.usePTY)
# This will timeout if processExited isn't called:
self.runReactor(reactor, timeout=30)
self.assertEqual(protocol.exited, True)
class ProcessTestsBuilder(ProcessTestsBuilderBase):
"""
Builder defining tests relating to L{IReactorProcess} for child processes
which do not have a PTY.
"""
usePTY = False
keepStdioOpenProgram = FilePath(__file__).sibling('process_helper.py').path
if platform.isWindows():
keepStdioOpenArg = "windows"
else:
# Just a value that doesn't equal "windows"
keepStdioOpenArg = ""
# Define this test here because PTY-using processes only have stdin and
# stdout and the test would need to be different for that to work.
def test_childConnectionLost(self):
"""
L{IProcessProtocol.childConnectionLost} is called each time a file
descriptor associated with a child process is closed.
"""
connected = Deferred()
lost = {0: Deferred(), 1: Deferred(), 2: Deferred()}
class Closer(ProcessProtocol):
def makeConnection(self, transport):
connected.callback(transport)
def childConnectionLost(self, childFD):
lost[childFD].callback(None)
source = (
"import os, sys\n"
"while 1:\n"
" line = sys.stdin.readline().strip()\n"
" if not line:\n"
" break\n"
" os.close(int(line))\n")
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Closer(), sys.executable,
[sys.executable, "-c", source], usePTY=self.usePTY)
def cbConnected(transport):
transport.write('2\n')
return lost[2].addCallback(lambda ign: transport)
connected.addCallback(cbConnected)
def lostSecond(transport):
transport.write('1\n')
return lost[1].addCallback(lambda ign: transport)
connected.addCallback(lostSecond)
def lostFirst(transport):
transport.write('\n')
connected.addCallback(lostFirst)
connected.addErrback(err)
def cbEnded(ignored):
reactor.stop()
connected.addCallback(cbEnded)
self.runReactor(reactor)
# This test is here because PTYProcess never delivers childConnectionLost.
def test_processEnded(self):
"""
L{IProcessProtocol.processEnded} is called after the child process
exits and L{IProcessProtocol.childConnectionLost} is called for each of
its file descriptors.
"""
ended = Deferred()
lost = []
class Ender(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
self.transport.loseConnection()
def childConnectionLost(self, childFD):
msg('childConnectionLost(%d)' % (childFD,))
lost.append(childFD)
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
def processEnded(self, reason):
msg('processEnded(%r)' % (reason,))
ended.callback([reason])
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Ender(), sys.executable,
[sys.executable, self.keepStdioOpenProgram, "child",
self.keepStdioOpenArg],
usePTY=self.usePTY)
def cbEnded((failure,)):
failure.trap(ProcessDone)
self.assertEqual(set(lost), set([0, 1, 2]))
ended.addCallback(cbEnded)
ended.addErrback(err)
ended.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
# This test is here because PTYProcess.loseConnection does not actually
# close the file descriptors to the child process. This test needs to be
# written fairly differently for PTYProcess.
def test_processExited(self):
"""
L{IProcessProtocol.processExited} is called when the child process
exits, even if file descriptors associated with the child are still
open.
"""
exited = Deferred()
allLost = Deferred()
lost = []
class Waiter(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
def childConnectionLost(self, childFD):
msg('childConnectionLost(%d)' % (childFD,))
lost.append(childFD)
if len(lost) == 3:
allLost.callback(None)
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
# See test_processExitedWithSignal
exited.callback([reason])
self.transport.loseConnection()
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Waiter(), sys.executable,
[sys.executable, self.keepStdioOpenProgram, "child",
self.keepStdioOpenArg],
usePTY=self.usePTY)
def cbExited((failure,)):
failure.trap(ProcessDone)
msg('cbExited; lost = %s' % (lost,))
self.assertEqual(lost, [])
return allLost
exited.addCallback(cbExited)
def cbAllLost(ignored):
self.assertEqual(set(lost), set([0, 1, 2]))
exited.addCallback(cbAllLost)
exited.addErrback(err)
exited.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
def makeSourceFile(self, sourceLines):
"""
Write the given list of lines to a text file and return the absolute
path to it.
"""
script = self.mktemp()
scriptFile = file(script, 'wt')
scriptFile.write(os.linesep.join(sourceLines) + os.linesep)
scriptFile.close()
return os.path.abspath(script)
def test_shebang(self):
"""
Spawning a process with an executable which is a script starting
with an interpreter definition line (#!) uses that interpreter to
evaluate the script.
"""
SHEBANG_OUTPUT = 'this is the shebang output'
scriptFile = self.makeSourceFile([
"#!%s" % (sys.executable,),
"import sys",
"sys.stdout.write('%s')" % (SHEBANG_OUTPUT,),
"sys.stdout.flush()"])
os.chmod(scriptFile, 0700)
reactor = self.buildReactor()
def cbProcessExited((out, err, code)):
msg("cbProcessExited((%r, %r, %d))" % (out, err, code))
self.assertEqual(out, SHEBANG_OUTPUT)
self.assertEqual(err, "")
self.assertEqual(code, 0)
def shutdown(passthrough):
reactor.stop()
return passthrough
def start():
d = utils.getProcessOutputAndValue(scriptFile, reactor=reactor)
d.addBoth(shutdown)
d.addCallback(cbProcessExited)
d.addErrback(err)
reactor.callWhenRunning(start)
self.runReactor(reactor)
def test_processCommandLineArguments(self):
"""
Arguments given to spawnProcess are passed to the child process as
originally intended.
"""
source = (
# On Windows, stdout is not opened in binary mode by default,
# so newline characters are munged on writing, interfering with
# the tests.
'import sys, os\n'
'try:\n'
' import msvcrt\n'
' msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n'
'except ImportError:\n'
' pass\n'
'for arg in sys.argv[1:]:\n'
' sys.stdout.write(arg + chr(0))\n'
' sys.stdout.flush()')
args = ['hello', '"', ' \t|<>^&', r'"\\"hello\\"', r'"foo\ bar baz\""']
# Ensure that all non-NUL characters can be passed too.
args.append(''.join(map(chr, xrange(1, 256))))
reactor = self.buildReactor()
def processFinished(output):
output = output.split('\0')
# Drop the trailing \0.
output.pop()
self.assertEqual(args, output)
def shutdown(result):
reactor.stop()
return result
def spawnChild():
d = succeed(None)
d.addCallback(lambda dummy: utils.getProcessOutput(
sys.executable, ['-c', source] + args, reactor=reactor))
d.addCallback(processFinished)
d.addBoth(shutdown)
reactor.callWhenRunning(spawnChild)
self.runReactor(reactor)
globals().update(ProcessTestsBuilder.makeTestCaseClasses())
class PTYProcessTestsBuilder(ProcessTestsBuilderBase):
"""
Builder defining tests relating to L{IReactorProcess} for child processes
which have a PTY.
"""
usePTY = True
if platform.isWindows():
skip = "PTYs are not supported on Windows."
elif platform.isMacOSX():
skippedReactors = {
"twisted.internet.pollreactor.PollReactor":
"OS X's poll() does not support PTYs"}
globals().update(PTYProcessTestsBuilder.makeTestCaseClasses())
class PotentialZombieWarningTests(TestCase):
"""
Tests for L{twisted.internet.error.PotentialZombieWarning}.
"""
def test_deprecated(self):
"""
Accessing L{PotentialZombieWarning} via the
I{PotentialZombieWarning} attribute of L{twisted.internet.error}
results in a deprecation warning being emitted.
"""
from twisted.internet import error
error.PotentialZombieWarning
warnings = self.flushWarnings([self.test_deprecated])
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"twisted.internet.error.PotentialZombieWarning was deprecated in "
"Twisted 10.0.0: There is no longer any potential for zombie "
"process.")
self.assertEqual(len(warnings), 1)
| apache-2.0 |
melersh/Yasta | twilio/rest/resources/notifications.py | 14 | 1299 | from twilio.rest.resources.util import normalize_dates
from twilio.rest.resources import InstanceResource, ListResource
class Notification(InstanceResource):
def delete(self):
"""
Delete this notification
"""
return self.delete_instance()
class Notifications(ListResource):
name = "Notifications"
instance = Notification
@normalize_dates
def list(self, before=None, after=None, **kwargs):
"""
Returns a page of :class:`Notification` resources as a list.
For paging information see :class:`ListResource`.
**NOTE**: Due to the potentially voluminous amount of data in a
notification, the full HTTP request and response data is only returned
in the Notification instance resource representation.
:param date after: Only list notifications logged after this datetime
:param date before: Only list notifications logged before this datetime
:param log_level: If 1, only show errors. If 0, only show warnings
"""
kwargs["MessageDate<"] = before
kwargs["MessageDate>"] = after
return self.get_instances(kwargs)
def delete(self, sid):
"""
Delete a given Notification
"""
return self.delete_instance(sid)
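# A hedged usage sketch (constructor arguments are assumed from the base
# ListResource; normally this resource is reached via the REST client):
#
#   notifications = Notifications(base_uri, auth)
#   for notification in notifications.list(log_level=1):
#       notifications.delete(notification.sid)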
| mit |
frankiecjunle/yunblog | venv/lib/python2.7/site-packages/wheel/wininst2wheel.py | 62 | 7772 | #!/usr/bin/env python
import distutils.dist
import os.path
import re
import sys
import tempfile
import zipfile
from argparse import ArgumentParser
from glob import iglob
from shutil import rmtree
import wheel.bdist_wheel
from wheel.archive import archive_wheelfile
egg_info_re = re.compile(r'''(^|/)(?P<name>[^/]+?)-(?P<ver>.+?)
(-(?P<pyver>.+?))?(-(?P<arch>.+?))?.egg-info(/|$)''', re.VERBOSE)
def parse_info(wininfo_name, egginfo_name):
"""Extract metadata from filenames.
Extracts the 4 metadataitems needed (name, version, pyversion, arch) from
the installer filename and the name of the egg-info directory embedded in
the zipfile (if any).
The egginfo filename has the format::
name-ver(-pyver)(-arch).egg-info
The installer filename has the format::
name-ver.arch(-pyver).exe
Some things to note:
1. The installer filename is not definitive. An installer can be renamed
and work perfectly well as an installer. So more reliable data should
be used whenever possible.
2. The egg-info data should be preferred for the name and version, because
these come straight from the distutils metadata, and are mandatory.
3. The pyver from the egg-info data should be ignored, as it is
constructed from the version of Python used to build the installer,
which is irrelevant - the installer filename is correct here (even to
the point that when it's not there, any version is implied).
4. The architecture must be taken from the installer filename, as it is
not included in the egg-info data.
5. Architecture-neutral installers still have an architecture because the
installer format itself (being executable) is architecture-specific. We
should therefore ignore the architecture if the content is pure-python.
"""
egginfo = None
if egginfo_name:
egginfo = egg_info_re.search(egginfo_name)
if not egginfo:
raise ValueError("Egg info filename %s is not valid" % (egginfo_name,))
# Parse the wininst filename
# 1. Distribution name (up to the first '-')
w_name, sep, rest = wininfo_name.partition('-')
if not sep:
raise ValueError("Installer filename %s is not valid" % (wininfo_name,))
# Strip '.exe'
rest = rest[:-4]
# 2. Python version (from the last '-', must start with 'py')
rest2, sep, w_pyver = rest.rpartition('-')
if sep and w_pyver.startswith('py'):
rest = rest2
w_pyver = w_pyver.replace('.', '')
else:
# Not version specific - use py2.py3. While it is possible that
# pure-Python code is not compatible with both Python 2 and 3, there
# is no way of knowing from the wininst format, so we assume the best
# here (the user can always manually rename the wheel to be more
# restrictive if needed).
w_pyver = 'py2.py3'
# 3. Version and architecture
w_ver, sep, w_arch = rest.rpartition('.')
if not sep:
raise ValueError("Installer filename %s is not valid" % (wininfo_name,))
if egginfo:
w_name = egginfo.group('name')
w_ver = egginfo.group('ver')
return dict(name=w_name, ver=w_ver, arch=w_arch, pyver=w_pyver)
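# Illustrative behaviour of parse_info (derived from the rules above):
#
#   parse_info('foo-1.0.win32-py2.7.exe', None)
#   # -> {'name': 'foo', 'ver': '1.0', 'arch': 'win32', 'pyver': 'py27'}
#   parse_info('foo-1.0.win-amd64.exe', None)
#   # -> {'name': 'foo', 'ver': '1.0', 'arch': 'win-amd64', 'pyver': 'py2.py3'}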
def bdist_wininst2wheel(path, dest_dir=os.path.curdir):
bdw = zipfile.ZipFile(path)
# Search for egg-info in the archive
egginfo_name = None
for filename in bdw.namelist():
if '.egg-info' in filename:
egginfo_name = filename
break
info = parse_info(os.path.basename(path), egginfo_name)
root_is_purelib = True
for zipinfo in bdw.infolist():
if zipinfo.filename.startswith('PLATLIB'):
root_is_purelib = False
break
if root_is_purelib:
paths = {'purelib': ''}
else:
paths = {'platlib': ''}
dist_info = "%(name)s-%(ver)s" % info
datadir = "%s.data/" % dist_info
# rewrite paths to trick ZipFile into extracting an egg
# XXX grab wininst .ini - between .exe, padding, and first zip file.
members = []
egginfo_name = ''
for zipinfo in bdw.infolist():
key, basename = zipinfo.filename.split('/', 1)
key = key.lower()
basepath = paths.get(key, None)
if basepath is None:
basepath = datadir + key.lower() + '/'
oldname = zipinfo.filename
newname = basepath + basename
zipinfo.filename = newname
del bdw.NameToInfo[oldname]
bdw.NameToInfo[newname] = zipinfo
# Collect member names, but omit '' (from an entry like "PLATLIB/")
if newname:
members.append(newname)
# Remember egg-info name for the egg2dist call below
if not egginfo_name:
if newname.endswith('.egg-info'):
egginfo_name = newname
elif '.egg-info/' in newname:
egginfo_name, sep, _ = newname.rpartition('/')
dir = tempfile.mkdtemp(suffix="_b2w")
bdw.extractall(dir, members)
# egg2wheel
abi = 'none'
pyver = info['pyver']
arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_')
# Wininst installers always have arch even if they are not
# architecture-specific (because the format itself is).
# So, assume the content is architecture-neutral if root is purelib.
if root_is_purelib:
arch = 'any'
# If the installer is architecture-specific, it's almost certainly also
# CPython-specific.
if arch != 'any':
pyver = pyver.replace('py', 'cp')
wheel_name = '-'.join((
dist_info,
pyver,
abi,
arch
))
if root_is_purelib:
bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution())
else:
bw = _bdist_wheel_tag(distutils.dist.Distribution())
bw.root_is_pure = root_is_purelib
bw.python_tag = pyver
bw.plat_name_supplied = True
bw.plat_name = info['arch'] or 'any'
if not root_is_purelib:
bw.full_tag_supplied = True
bw.full_tag = (pyver, abi, arch)
dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir)
bw.write_wheelfile(dist_info_dir, generator='wininst2wheel')
bw.write_record(dir, dist_info_dir)
archive_wheelfile(os.path.join(dest_dir, wheel_name), dir)
rmtree(dir)
class _bdist_wheel_tag(wheel.bdist_wheel.bdist_wheel):
# allow the client to override the default generated wheel tag
# The default bdist_wheel implementation uses python and abi tags
# of the running python process. This is not suitable for
# generating/repackaging prebuild binaries.
full_tag_supplied = False
full_tag = None # None or a (pytag, soabitag, plattag) triple
def get_tag(self):
if self.full_tag_supplied and self.full_tag is not None:
return self.full_tag
else:
return super(_bdist_wheel_tag, self).get_tag()
def main():
parser = ArgumentParser()
parser.add_argument('installers', nargs='*', help="Installers to convert")
parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
for pat in args.installers:
for installer in iglob(pat):
if args.verbose:
sys.stdout.write("{0}... ".format(installer))
bdist_wininst2wheel(installer, args.dest_dir)
if args.verbose:
sys.stdout.write("OK\n")
if __name__ == "__main__":
main()
| mit |
jimi-c/ansible | lib/ansible/modules/network/vyos/vyos_system.py | 68 | 6307 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: "vyos_system"
version_added: "2.3"
author: "Nathaniel Case (@qalthos)"
short_description: Run `set system` commands on VyOS devices
description:
- Runs one or more commands on remote devices running VyOS.
This module can also be introspected to validate key parameters before
returning successfully.
extends_documentation_fragment: vyos
notes:
- Tested against VYOS 1.1.7
options:
host_name:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- The new domain name to apply to the device.
name_servers:
description:
- A list of name servers to use with the device. Mutually exclusive with
I(domain_search)
aliases: ['name_server']
domain_search:
description:
- A list of domain names to search. Mutually exclusive with
I(name_server)
state:
description:
- Whether to apply (C(present)) or remove (C(absent)) the settings.
default: present
choices: ['present', 'absent']
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system hostname vyos01
- set system domain-name foo.example.com
"""
EXAMPLES = """
- name: configure hostname and domain-name
vyos_system:
host_name: vyos01
domain_name: test.example.com
- name: remove all configuration
vyos_system:
state: absent
- name: configure name servers
vyos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
- name: configure domain search suffixes
vyos_system:
domain_search:
- sub1.example.com
- sub2.example.com
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def spec_key_to_device_key(key):
device_key = key.replace('_', '-')
# domain-search is longer than just its key
if device_key == 'domain-search':
device_key += ' domain'
return device_key
def config_to_dict(module):
data = get_config(module)
config = {'domain_search': [], 'name_server': []}
for line in data.split('\n'):
if line.startswith('set system host-name'):
config['host_name'] = line[22:-1]
elif line.startswith('set system domain-name'):
config['domain_name'] = line[24:-1]
elif line.startswith('set system domain-search domain'):
config['domain_search'].append(line[33:-1])
elif line.startswith('set system name-server'):
config['name_server'].append(line[24:-1])
return config
def spec_to_commands(want, have):
commands = []
state = want.pop('state')
# state='absent' by itself has special meaning
if state == 'absent' and all(v is None for v in want.values()):
# Clear everything
for key in have:
commands.append('delete system %s' % spec_key_to_device_key(key))
for key in want:
if want[key] is None:
continue
current = have.get(key)
proposed = want[key]
device_key = spec_key_to_device_key(key)
# These keys are lists which may need to be reconciled with the device
if key in ['domain_search', 'name_server']:
if not proposed:
# Empty list was passed, delete all values
commands.append("delete system %s" % device_key)
for config in proposed:
if state == 'absent' and config in current:
commands.append("delete system %s '%s'" % (device_key, config))
elif state == 'present' and config not in current:
commands.append("set system %s '%s'" % (device_key, config))
else:
if state == 'absent' and current and proposed:
commands.append('delete system %s' % device_key)
elif state == 'present' and proposed and proposed != current:
commands.append("set system %s '%s'" % (device_key, proposed))
return commands
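# Illustrative expectation (hypothetical want/have dicts):
#
#   want = {'host_name': 'vyos01', 'domain_name': None, 'domain_search': None,
#           'name_server': None, 'state': 'present'}
#   have = {'host_name': 'vyos00', 'domain_search': [], 'name_server': []}
#   spec_to_commands(want, have)  # -> ["set system host-name 'vyos01'"]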
def map_param_to_obj(module):
return {
'host_name': module.params['host_name'],
'domain_name': module.params['domain_name'],
'domain_search': module.params['domain_search'],
'name_server': module.params['name_server'],
'state': module.params['state']
}
def main():
argument_spec = dict(
host_name=dict(type='str'),
domain_name=dict(type='str'),
domain_search=dict(type='list'),
name_server=dict(type='list', aliases=['name_servers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
argument_spec.update(vyos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[('domain_name', 'domain_search')],
)
warnings = list()
result = {'changed': False, 'warnings': warnings}
want = map_param_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands(want, have)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
vinicius-alves/InternetBanking | env/lib/python3.4/site-packages/django/conf/__init__.py | 59 | 6888 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import importlib
import os
import time
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return sorted(
s for s in list(self.__dict__) + dir(self.default_settings)
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return (deleted or set_locally or set_on_default)
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
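# A minimal standalone-configuration sketch (values are illustrative):
#
#   from django.conf import settings
#   settings.configure(DEBUG=True)   # unspecified values fall back to global_settings
#   settings.DEBUG                   # -> True, resolved lazily via __getattr__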
| gpl-3.0 |
willhardy/django | django/db/migrations/operations/fields.py | 64 | 12272 | from __future__ import unicode_literals
from django.db.models.fields import NOT_PROVIDED
from django.utils.functional import cached_property
from .base import Operation
class FieldOperation(Operation):
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
@cached_property
def model_name_lower(self):
return self.model_name.lower()
@cached_property
def name_lower(self):
return self.name.lower()
def is_same_model_operation(self, operation):
return self.model_name_lower == operation.model_name_lower
def is_same_field_operation(self, operation):
return self.is_same_model_operation(operation) and self.name_lower == operation.name_lower
def references_model(self, name, app_label=None):
return name.lower() == self.model_name_lower
def references_field(self, model_name, name, app_label=None):
return self.references_model(model_name) and name.lower() == self.name_lower
def reduce(self, operation, in_between, app_label=None):
return (
super(FieldOperation, self).reduce(operation, in_between, app_label=app_label) or
not operation.references_field(self.model_name, self.name, app_label)
)
class AddField(FieldOperation):
"""
Adds a field to a model.
"""
def __init__(self, model_name, name, field, preserve_default=True):
self.field = field
self.preserve_default = preserve_default
super(AddField, self).__init__(model_name, name)
def deconstruct(self):
kwargs = {
'model_name': self.model_name,
'name': self.name,
'field': self.field,
}
if self.preserve_default is not True:
kwargs['preserve_default'] = self.preserve_default
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
# If preserve default is off, don't use the default for future state
if not self.preserve_default:
field = self.field.clone()
field.default = NOT_PROVIDED
else:
field = self.field
state.models[app_label, self.model_name_lower].fields.append((self.name, field))
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
field = to_model._meta.get_field(self.name)
if not self.preserve_default:
field.default = self.field.default
schema_editor.add_field(
from_model,
field,
)
if not self.preserve_default:
field.default = NOT_PROVIDED
def database_backwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, from_model):
schema_editor.remove_field(from_model, from_model._meta.get_field(self.name))
def describe(self):
return "Add field %s to %s" % (self.name, self.model_name)
def reduce(self, operation, in_between, app_label=None):
if isinstance(operation, FieldOperation) and self.is_same_field_operation(operation):
if isinstance(operation, AlterField):
return [
AddField(
model_name=self.model_name,
name=operation.name,
field=operation.field,
),
]
elif isinstance(operation, RemoveField):
return []
elif isinstance(operation, RenameField):
return [
AddField(
model_name=self.model_name,
name=operation.new_name,
field=self.field,
),
]
return super(AddField, self).reduce(operation, in_between, app_label=app_label)
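# Illustrative optimizer behaviour (hypothetical CharField instances):
#
#   add = AddField('book', 'title', CharField(max_length=50))
#   alter = AlterField('book', 'title', CharField(max_length=100))
#   add.reduce(alter, in_between=[])
#   # -> [AddField('book', 'title', CharField(max_length=100))]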
class RemoveField(FieldOperation):
"""
Removes a field from a model.
"""
def deconstruct(self):
kwargs = {
'model_name': self.model_name,
'name': self.name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
new_fields = []
for name, instance in state.models[app_label, self.model_name_lower].fields:
if name != self.name:
new_fields.append((name, instance))
state.models[app_label, self.model_name_lower].fields = new_fields
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, from_model):
schema_editor.remove_field(from_model, from_model._meta.get_field(self.name))
def database_backwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.add_field(from_model, to_model._meta.get_field(self.name))
def describe(self):
return "Remove field %s from %s" % (self.name, self.model_name)
class AlterField(FieldOperation):
"""
Alters a field's database column (e.g. null, max_length) to the provided new field
"""
def __init__(self, model_name, name, field, preserve_default=True):
self.field = field
self.preserve_default = preserve_default
super(AlterField, self).__init__(model_name, name)
def deconstruct(self):
kwargs = {
'model_name': self.model_name,
'name': self.name,
'field': self.field,
}
if self.preserve_default is not True:
kwargs['preserve_default'] = self.preserve_default
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
if not self.preserve_default:
field = self.field.clone()
field.default = NOT_PROVIDED
else:
field = self.field
state.models[app_label, self.model_name_lower].fields = [
(n, field if n == self.name else f)
for n, f in
state.models[app_label, self.model_name_lower].fields
]
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
from_field = from_model._meta.get_field(self.name)
to_field = to_model._meta.get_field(self.name)
if not self.preserve_default:
to_field.default = self.field.default
schema_editor.alter_field(from_model, from_field, to_field)
if not self.preserve_default:
to_field.default = NOT_PROVIDED
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Alter field %s on %s" % (self.name, self.model_name)
def reduce(self, operation, in_between, app_label=None):
if isinstance(operation, RemoveField) and self.is_same_field_operation(operation):
return [operation]
elif isinstance(operation, RenameField) and self.is_same_field_operation(operation):
return [
operation,
AlterField(
model_name=self.model_name,
name=operation.new_name,
field=self.field,
),
]
return super(AlterField, self).reduce(operation, in_between, app_label=app_label)
class RenameField(FieldOperation):
"""
Renames a field on the model. Might affect db_column too.
"""
def __init__(self, model_name, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
super(RenameField, self).__init__(model_name, old_name)
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
'model_name': self.model_name,
'old_name': self.old_name,
'new_name': self.new_name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
# Rename the field
state.models[app_label, self.model_name_lower].fields = [
(self.new_name if n == self.old_name else n, f)
for n, f in state.models[app_label, self.model_name_lower].fields
]
# Fix index/unique_together to refer to the new field
options = state.models[app_label, self.model_name_lower].options
for option in ('index_together', 'unique_together'):
if option in options:
options[option] = [
[self.new_name if n == self.old_name else n for n in together]
for together in options[option]
]
state.reload_model(app_label, self.model_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field(self.old_name),
to_model._meta.get_field(self.new_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field(self.new_name),
to_model._meta.get_field(self.old_name),
)
def describe(self):
return "Rename field %s on %s to %s" % (self.old_name, self.model_name, self.new_name)
def references_field(self, model_name, name, app_label=None):
return self.references_model(model_name) and (
name.lower() == self.old_name_lower or
name.lower() == self.new_name_lower
)
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, RenameField) and
self.is_same_model_operation(operation) and
self.new_name_lower == operation.old_name_lower):
return [
RenameField(
self.model_name,
self.old_name,
operation.new_name,
),
]
# Skip `FieldOperation.reduce` as we want to run `references_field`
# against self.new_name.
return (
super(FieldOperation, self).reduce(operation, in_between, app_label=app_label) or
not operation.references_field(self.model_name, self.new_name, app_label)
)
| bsd-3-clause |
yuxiaorui/torngas | torngas/cache/__init__.py | 3 | 3657 | """
Caching framework.
"""
from threading import local
from torngas.settings_manager import settings
from torngas.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from tornado.util import import_object
from torngas.exception import ConfigError
from tornado.ioloop import PeriodicCallback
BACKENDS = {
'memcached': 'memcached',
'localcache': 'localcache',
'dummy': 'dummy',
'redis': 'rediscache'
}
DEFAULT_CACHE_ALIAS = 'default'
DEFAULT_REDIS_ALIAS = 'default_redis'
DEFAULT_MEMCACHED_ALIAS = 'default_memcache'
DEFAULT_DUMMY_ALIAS = 'dummy'
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
raise ConfigError("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def _create_cache(backend, **kwargs):
try:
# Try to get the CACHES entry for the given backend name first
try:
conf = settings.CACHES[backend]
except KeyError:
try:
# Trying to import the given backend, in case it's a dotted path
import_object(backend)
except ImportError as e:
raise InvalidCacheBackendError("Could not find backend '%s': %s" % (
backend, e))
location = kwargs.pop('LOCATION', '')
params = kwargs
else:
params = conf.copy()
params.update(kwargs)
backend = params.pop('BACKEND')
location = params.pop('LOCATION', '')
backend_cls = import_object(backend)
except ImportError as e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
return backend_cls(location, params)
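# Illustrative settings.CACHES shape consumed above (the backend path and
# class name are assumptions; only 'BACKEND' and the optional 'LOCATION' are
# special-cased, remaining keys are passed through as params):
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'torngas.cache.backends.localcache.LocalCache',
#           'LOCATION': '',
#       },
#   }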
class CacheHandler(object):
"""
A Cache Handler to manage access to Cache instances.
Ensures only one instance of each alias exists per thread.
"""
def __init__(self):
self._caches = local()
def __getitem__(self, alias):
try:
return self._caches.caches[alias]
except AttributeError:
self._caches.caches = {}
except KeyError:
pass
if alias not in settings.CACHES:
raise InvalidCacheBackendError(
"Could not find config for '%s' in settings.CACHES" % alias
)
cache = _create_cache(alias)
if hasattr(cache, 'clear_expires'):
PeriodicCallback(cache.clear_expires, 1000 * 1800).start()
self._caches.caches[alias] = cache
return cache
def all(self):
return getattr(self._caches, 'caches', {}).values()
caches = CacheHandler()
class DefaultCacheProxy(object):
"""
Proxy access to the default Cache object's attributes.
This allows the legacy `cache` object to be thread-safe using the new
``caches`` API.
"""
def __getattr__(self, name):
return getattr(caches[DEFAULT_CACHE_ALIAS], name)
def __setattr__(self, name, value):
return setattr(caches[DEFAULT_CACHE_ALIAS], name, value)
def __delattr__(self, name):
return delattr(caches[DEFAULT_CACHE_ALIAS], name)
def __contains__(self, key):
return key in caches[DEFAULT_CACHE_ALIAS]
def __eq__(self, other):
return caches[DEFAULT_CACHE_ALIAS] == other
def __ne__(self, other):
return caches[DEFAULT_CACHE_ALIAS] != other
cache = DefaultCacheProxy()
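# A hedged usage sketch (cache API methods assumed from BaseCache):
#
#   from torngas.cache import cache, caches
#   cache.set('key', 'value', 60)        # default backend via the proxy
#   caches['default_redis'].get('key')   # named backend from settings.CACHES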
def close_caches(**kwargs):
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If not implemented in a particular backend,
# cache.close is a no-op.
for cache in caches.all():
cache.close()
| bsd-3-clause |
SRabbelier/Melange | app/soc/logic/helper/notifications.py | 1 | 13100 | #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for sending out notifications.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import logging
import time
from django.template import loader
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext
# We cannot import soc.logic.models notification nor user here
# due to cyclic imports
from soc.logic import accounts
from soc.logic import dicts
from soc.logic import mail_dispatcher
from soc.logic import system
from soc.views.helper import redirects
DEF_NEW_NOTIFICATION_MSG_SUBJECT_FMT = ugettext(
'New Notification: %s')
DEF_INVITATION_MSG_FMT = ugettext(
'Invitation to become a %(role_verbose)s for %(group)s.')
DEF_NEW_REQUEST_MSG_FMT = ugettext(
'New Request Received from %(requester)s to become a %(role_verbose)s '
'for %(group)s')
DEF_NEW_ORG_MSG_FMT = ugettext(
'Your Organization Application for %(group_name)s has been accepted.')
DEF_NEW_REVIEW_SUBJECT_FMT = ugettext(
'New %s Review on %s')
DEF_REJECTED_REQUEST_SUBJECT_FMT = ugettext(
'Request to become a %(role_verbose)s for %(group)s has been rejected')
DEF_WITHDRAWN_INVITE_SUBJECT_FMT = ugettext(
'Invite to become a %(role_verbose)s for %(group)s has been withdrawn')
DEF_WELCOME_MSG_FMT = ugettext('Welcome to %(site_name)s, %(name)s,')
DEF_GROUP_INVITE_NOTIFICATION_TEMPLATE = 'soc/notification/messages/' \
'invitation.html'
DEF_NEW_REQUEST_NOTIFICATION_TEMPLATE = 'soc/notification/messages/' \
'new_request.html'
DEF_NEW_REVIEW_NOTIFICATION_TEMPLATE = 'soc/notification/messages/' \
'new_review.html'
DEF_NEW_ORG_TEMPLATE = 'soc/organization/messages/accepted.html'
DEF_REJECTED_REQUEST_NOTIFICATION_TEMPLATE = 'soc/notification/messages/' \
'rejected_request.html'
DEF_WITHDRAWN_INVITE_NOTIFICATION_TEMPLATE = 'soc/notification/messages/' \
'withdrawn_invite.html'
def sendInviteNotification(entity):
"""Sends out an invite notification to the user the request is for.
Args:
entity : A request containing the information needed to create the message
"""
from soc.logic.models.user import logic as user_logic
from soc.views.models.role import ROLE_VIEWS
invitation_url = 'http://%(host)s%(index)s' % {
'host' : system.getHostname(),
'index': redirects.getInviteProcessRedirect(entity, None),
}
role_params = ROLE_VIEWS[entity.role].getParams()
message_properties = {
'role_verbose' : role_params['name'],
'group': entity.group.name,
'invitation_url': invitation_url,
}
subject = DEF_INVITATION_MSG_FMT % {
'role_verbose' : role_params['name'],
'group' : entity.group.name
}
template = DEF_GROUP_INVITE_NOTIFICATION_TEMPLATE
from_user = user_logic.getCurrentUser()
sendNotification(entity.user, from_user, message_properties, subject, template)
def sendNewRequestNotification(request_entity):
"""Sends out a notification to the persons who can process this Request.
Args:
request_entity: an instance of Request model
"""
from soc.logic.helper import notifications
from soc.logic.models.role import ROLE_LOGICS
from soc.views.models.role import ROLE_VIEWS
# get the users who should get the notification
to_users = []
# retrieve the Role Logics which we should query on
role_logic = ROLE_LOGICS[request_entity.role]
role_logics_to_notify = role_logic.getRoleLogicsToNotifyUponNewRequest()
# the scope of the roles is the same as the scope of the Request entity
fields = {'scope': request_entity.group,
'status': 'active'}
for role_logic in role_logics_to_notify:
roles = role_logic.getForFields(fields)
for role_entity in roles:
# TODO: this might lead to double notifications
to_users.append(role_entity.user)
# get the user the request is from
user_entity = request_entity.user
role_params = ROLE_VIEWS[request_entity.role].getParams()
request_url = 'http://%(host)s%(redirect)s' % {
'host': system.getHostname(),
'redirect': redirects.getProcessRequestRedirect(request_entity, None),
}
message_properties = {
'requester': user_entity.name,
'role_verbose': role_params['name'],
'group': request_entity.group.name,
'request_url': request_url
}
subject = DEF_NEW_REQUEST_MSG_FMT % {
'requester': user_entity.name,
'role_verbose' : role_params['name'],
'group' : request_entity.group.name
}
template = DEF_NEW_REQUEST_NOTIFICATION_TEMPLATE
for to_user in to_users:
notifications.sendNotification(to_user, None, message_properties,
subject, template)
def sendRejectedRequestNotification(entity):
"""Sends a message that the request to get a role has been rejected.
Args:
entity : A request containing the information needed to create the message
"""
from soc.views.models.role import ROLE_VIEWS
role_params = ROLE_VIEWS[entity.role].getParams()
message_properties = {
'role_verbose' : role_params['name'],
'group': entity.group.name,
}
subject = DEF_REJECTED_REQUEST_SUBJECT_FMT % {
'role_verbose' : role_params['name'],
'group' : entity.group.name
}
template = DEF_REJECTED_REQUEST_NOTIFICATION_TEMPLATE
# from user set to None to not leak who rejected it.
sendNotification(entity.user, None, message_properties, subject, template)
def sendWithdrawnInviteNotification(entity):
"""Sends a message that the invite to obtain a role has been withdrawn.
Args:
entity : A request containing the information needed to create the message
"""
from soc.views.models.role import ROLE_VIEWS
role_params = ROLE_VIEWS[entity.role].getParams()
message_properties = {
'role_verbose' : role_params['name'],
'group': entity.group.name,
}
subject = DEF_WITHDRAWN_INVITE_SUBJECT_FMT % {
'role_verbose' : role_params['name'],
'group' : entity.group.name
}
template = DEF_WITHDRAWN_INVITE_NOTIFICATION_TEMPLATE
  # from user set to None to not leak who withdrew it.
sendNotification(entity.user, None, message_properties, subject, template)
def sendNewOrganizationNotification(entity, module_name):
"""Sends out an invite notification to the applicant of the Organization.
Args:
entity : An accepted OrgAppRecord
"""
program_entity = entity.survey.scope
url = 'http://%(host)s%(redirect)s' % {
'redirect': redirects.getApplicantRedirect(entity,
{'url_name': '%s/org' % module_name,
'program': program_entity}),
'host': system.getHostname(),
}
message_properties = {
'org_name': entity.name,
'program_name': program_entity.name,
'url': url
}
subject = DEF_NEW_ORG_MSG_FMT % {
'group_name': entity.name,
}
template = DEF_NEW_ORG_TEMPLATE
for to in [entity.main_admin, entity.backup_admin]:
if not to:
continue
sendNotification(to, None, message_properties, subject, template)
def sendNewReviewNotification(to_user, review, reviewed_name, redirect_url):
"""Sends out a notification to alert the user of a new Review.
Args:
to_user: The user who should receive a notification
review: The review which triggers this notification
reviewed_name: Name of the entity reviewed
redirect_url: URL to which the follower should be sent for more information
"""
review_notification_url = 'http://%(host)s%(redirect_url)s' % {
'host' : system.getHostname(),
'redirect_url': redirect_url}
message_properties = {'review_notification_url': review_notification_url,
'reviewer_name': review.author_name(),
'reviewed_name': reviewed_name,
'review_content': review.content,
'review_visibility': 'public' if review.is_public else 'private',
}
# determine the subject
review_type = 'public' if review.is_public else 'private'
subject = DEF_NEW_REVIEW_SUBJECT_FMT % (review_type, reviewed_name)
template = DEF_NEW_REVIEW_NOTIFICATION_TEMPLATE
# send the notification from the system
# TODO(srabbelier): do this in a task instead
sendNotification(to_user, None, message_properties, subject, template)
def sendNotification(to_user, from_user, message_properties, subject, template):
"""Sends out a notification to the specified user.
Args:
    to_user : user to which the notification will be sent
    from_user: user who sends the notification (None iff sent by the site)
message_properties : message properties
subject : subject of notification email
template : template used for generating notification
"""
from soc.logic.models.notification import logic as notification_logic
from soc.logic.models.site import logic as site_logic
if from_user:
sender_name = from_user.name
else:
site_entity = site_logic.getSingleton()
sender_name = 'The %s Team' % (site_entity.site_name)
new_message_properties = {
'sender_name': sender_name,
'to_name': to_user.name,
}
message_properties = dicts.merge(message_properties, new_message_properties)
message = loader.render_to_string(template, dictionary=message_properties)
fields = {
'from_user': from_user,
'subject': subject,
'message': message,
'scope': to_user,
'link_id': 't%i' % (int(time.time()*100)),
'scope_path': to_user.link_id
}
key_name = notification_logic.getKeyNameFromFields(fields)
# create and put a new notification in the datastore
notification_logic.updateOrCreateFromKeyName(fields, key_name)
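# Illustrative sketch (not part of the original module): the expected call
# shape for sendNotification. The property values below are hypothetical;
# real callers pass the template-specific properties built by the senders
# above.
def _exampleSendInviteNotification(to_user):
  """Example only: sends a site-originated invite notification."""
  sendNotification(to_user, None,
                   {'role_verbose': 'Mentor', 'group': 'Example Org',
                    'invitation_url': 'http://example.com/invite'},
                   'Example subject',
                   DEF_GROUP_INVITE_NOTIFICATION_TEMPLATE)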
def sendNewNotificationMessage(notification_entity):
"""Sends an email to a user about a new notification.
Args:
notification_entity: Notification about which the message should be sent
"""
from soc.logic.models.site import logic as site_logic
from soc.views.models.notification import view as notification_view
# create the url to show this notification
notification_url = 'http://%(host)s%(index)s' % {
'host' : system.getHostname(),
'index': redirects.getPublicRedirect(notification_entity,
notification_view.getParams())}
site_entity = site_logic.getSingleton()
site_name = site_entity.site_name
# get the default mail sender
default_sender = mail_dispatcher.getDefaultMailSender()
if not default_sender:
# no valid sender found, abort
logging.error('No default sender')
return
else:
(sender_name, sender) = default_sender
to = accounts.denormalizeAccount(notification_entity.scope.account).email()
subject = DEF_NEW_NOTIFICATION_MSG_SUBJECT_FMT % notification_entity.subject
# create the message contents
messageProperties = {
'to_name': notification_entity.scope.name,
'sender_name': sender_name,
'to': to,
'sender': sender,
'site_name': site_name,
'subject': force_unicode(subject),
'notification' : notification_entity,
'notification_url' : notification_url
}
# send out the message using the default new notification template
mail_dispatcher.sendMailFromTemplate('soc/mail/new_notification.html',
messageProperties)
def sendWelcomeMessage(user_entity):
"""Sends out a welcome message to a user.
Args:
    user_entity: User entity which the message should be sent to
"""
from soc.logic.models.site import logic as site_logic
# get site name
site_entity = site_logic.getSingleton()
site_name = site_entity.site_name
# get the default mail sender
default_sender = mail_dispatcher.getDefaultMailSender()
if not default_sender:
# no valid sender found, should not happen but abort anyway
logging.error('No default sender')
return
else:
sender_name, sender = default_sender
to = accounts.denormalizeAccount(user_entity.account).email()
# create the message contents
messageProperties = {
'to_name': user_entity.name,
'sender_name': sender_name,
'to': to,
'sender': sender,
'subject': DEF_WELCOME_MSG_FMT % {
'site_name': site_name,
'name': user_entity.name
},
'site_name': site_name,
'site_location': 'http://%s' % system.getHostname(),
}
# send out the message using the default welcome template
mail_dispatcher.sendMailFromTemplate('soc/mail/welcome.html',
messageProperties)
| apache-2.0 |
sudheeshap8/LetsTango-Newsletter-Generator | lib/werkzeug/posixemulation.py | 319 | 3543 | # -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
    follows POSIX semantics, e.g. if the target file already exists it
    will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
if not isinstance(src, unicode):
src = unicode(src, sys.getfilesystemencoding())
if not isinstance(dst, unicode):
dst = unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
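# Minimal usage sketch (not part of the original module; assumes a writable
# current directory): demonstrates the POSIX overwrite semantics of rename()
# on any platform.
if __name__ == '__main__':
    with open('a.txt', 'w') as f:
        f.write('first')
    with open('b.txt', 'w') as f:
        f.write('second')
    rename('a.txt', 'b.txt')  # b.txt already exists and is silently replaced
    with open('b.txt') as f:
        assert f.read() == 'first'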
| apache-2.0 |
Catherine-Chu/DeepQA | chatbot/corpus/scotusdata.py | 10 | 1602 | # Copyright 2015 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
"""
Load transcripts from the Supreme Court of the USA.
Available from here:
https://github.com/pender/chatbot-rnn
"""
class ScotusData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.lines = self.loadLines(os.path.join(dirName, "scotus"))
self.conversations = [{"lines": self.lines}]
def loadLines(self, fileName):
"""
Args:
fileName (str): file to load
Return:
list<dict<str>>: the extracted fields for each line
"""
lines = []
with open(fileName, 'r') as f:
for line in f:
l = line[line.index(":")+1:].strip() # Strip name of speaker.
lines.append({"text": l})
return lines
def getConversations(self):
return self.conversations
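# Usage sketch (the corpus directory below is hypothetical): load the
# transcripts and print the first extracted line of dialogue.
if __name__ == '__main__':
    data = ScotusData('/path/to/corpus')  # must contain a file named "scotus"
    print(data.getConversations()[0]['lines'][0]['text'])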
| apache-2.0 |
spring-week-topos/horizon-week | openstack_dashboard/test/integration_tests/helpers.py | 7 | 1492 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import selenium
from selenium.webdriver.support import ui
import testtools
from openstack_dashboard.test.integration_tests import config
class BaseTestCase(testtools.TestCase):
def setUp(self):
if os.environ.get('INTEGRATION_TESTS', False):
self.driver = selenium.webdriver.Firefox()
self.conf = config.get_config()
else:
msg = "The INTEGRATION_TESTS env variable is not set."
raise self.skipException(msg)
super(BaseTestCase, self).setUp()
def tearDown(self):
if os.environ.get('INTEGRATION_TESTS', False):
self.driver.close()
super(BaseTestCase, self).tearDown()
def wait_for_title(self):
timeout = self.conf.dashboard.page_timeout
ui.WebDriverWait(self.driver, timeout).until(lambda d:
self.driver.title)
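# Illustrative sketch (not part of the original module): a concrete test case
# built on BaseTestCase. The dashboard_url option is an assumption about the
# loaded config; adjust to the real option name.
class ExampleDashboardTest(BaseTestCase):
    def test_page_has_title(self):
        self.driver.get(self.conf.dashboard.dashboard_url)  # assumed option
        self.wait_for_title()
        self.assertTrue(self.driver.title)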
| apache-2.0 |
drawks/ansible | lib/ansible/modules/windows/win_wait_for_process.py | 17 | 4029 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_wait_for_process
version_added: '2.7'
short_description: Waits for a process to exist or not exist before continuing.
description:
- Waiting for a process to start or stop.
- This is useful when Windows services behave poorly and do not enumerate external dependencies in their manifest.
options:
process_name_exact:
description:
- The name of the process(es) for which to wait.
type: list
process_name_pattern:
description:
- RegEx pattern matching desired process(es).
type: str
sleep:
description:
- Number of seconds to sleep between checks.
- Only applies when waiting for a process to start. Waiting for a process to start
does not have a native non-polling mechanism. Waiting for a stop uses native PowerShell
and does not require polling.
type: int
default: 1
process_min_count:
description:
    - Minimum number of processes matching the supplied pattern to satisfy the C(present) condition.
- Only applies to C(present).
type: int
default: 1
pid:
description:
- The PID of the process.
type: int
owner:
description:
- The owner of the process.
- Requires PowerShell version 4.0 or newer.
type: str
pre_wait_delay:
description:
- Seconds to wait before checking processes.
type: int
default: 0
post_wait_delay:
description:
- Seconds to wait after checking for processes.
type: int
default: 0
state:
description:
- When checking for a running process C(present) will block execution
until the process exists, or until the timeout has been reached.
      C(absent) will block execution until the process no longer exists,
or until the timeout has been reached.
- When waiting for C(present), the module will return changed only if
the process was not present on the initial check but became present on
subsequent checks.
- If, while waiting for C(absent), new processes matching the supplied
pattern are started, these new processes will not be included in the
action.
type: str
default: present
choices: [ absent, present ]
timeout:
description:
    - The maximum number of seconds to wait for a process to start or stop
before erroring out.
type: int
default: 300
seealso:
- module: wait_for
- module: win_wait_for
author:
- Charles Crossan (@crossan007)
'''
EXAMPLES = r'''
- name: Wait 500 seconds for all Oracle VirtualBox processes to stop. (VBoxHeadless, VirtualBox, VBoxSVC)
win_wait_for_process:
process_name_pattern: 'v(irtual)?box(headless|svc)?'
state: absent
timeout: 500
- name: Wait 500 seconds for 3 instances of cmd to start, waiting 5 seconds between each check
win_wait_for_process:
process_name_exact: cmd
state: present
timeout: 500
sleep: 5
process_min_count: 3
'''
RETURN = r'''
elapsed:
description: The elapsed seconds between the start of poll and the end of the module.
returned: always
type: float
sample: 3.14159265
matched_processes:
description: List of matched processes (either stopped or started).
returned: always
type: complex
contains:
name:
description: The name of the matched process.
returned: always
type: str
sample: svchost
owner:
description: The owner of the matched process.
returned: when supported by PowerShell
type: str
sample: NT AUTHORITY\SYSTEM
pid:
description: The PID of the matched process.
returned: always
type: int
sample: 7908
'''
| gpl-3.0 |
pkats15/hdt_analyzer | django_test/django_venv/Lib/site-packages/pip/_vendor/distlib/index.py | 571 | 20976 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
self.rpc_proxy = None
with open(os.devnull, 'w') as sink:
for s in ('gpg2', 'gpg'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
        Read the PyPI access configuration as supported by distutils, getting
        distutils to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
        Thread runner for reading lines from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically be a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
def run_command(self, cmd, input_data=None):
"""
        Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
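            # Note: the misspelled 'protcol_version' key is intentional; it
            # matches the legacy PyPI upload API (the spelling distutils sends).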
'protcol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from an URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computation to be
done during download and checking that the downloaded data
matched any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
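    # Usage sketch (URL, destination path and digest are hypothetical):
    #
    #   index.download_file('https://example.com/pkg-1.0.tar.gz',
    #                       '/tmp/pkg-1.0.tar.gz',
    #                       digest=('sha256', expected_hexdigest))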
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
if self.rpc_proxy is None:
self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
return self.rpc_proxy.search(terms, operator or 'and')
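# Usage sketch (not part of distlib; credentials, names and paths are
# hypothetical, and real metadata needs the full set of required fields):
# register a distribution and upload an sdist to a PyPI-compatible index.
if __name__ == '__main__':
    from distlib.metadata import Metadata
    index = PackageIndex()  # uses DEFAULT_INDEX unless a url is given
    index.username = 'example-user'
    index.password = 'example-password'
    md = Metadata()
    md.name = 'example-package'
    md.version = '0.1.0'
    index.register(md)
    index.upload_file(md, '/path/to/example-package-0.1.0.tar.gz')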
| mit |
louyihua/edx-platform | lms/djangoapps/courseware/field_overrides.py | 31 | 11383 | """
This module provides a :class:`~xblock.field_data.FieldData` implementation
which wraps another `FieldData` object and provides overrides based on the
user. The use of providers allows for overrides that are arbitrarily
extensible. One provider is found in `courseware.student_field_overrides`
which allows for fields to be overridden for individual students. One can
envision other providers being written that allow for fields to be overridden
base on membership of a student in a cohort, or similar. The use of an
extensible, modular architecture allows for overrides being done in ways not
envisioned by the authors.
Currently, this module is used in the `module_render` module in this same
package and is used to wrap the `authored_data` when constructing an
`LmsFieldData`. This means overrides will be in effect for all scopes covered
by `authored_data`, e.g. course content and settings stored in Mongo.
"""
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import threading
from django.conf import settings
from xblock.field_data import FieldData
from request_cache.middleware import RequestCache
from xmodule.modulestore.inheritance import InheritanceMixin
NOTSET = object()
ENABLED_OVERRIDE_PROVIDERS_KEY = u'courseware.field_overrides.enabled_providers.{course_id}'
ENABLED_MODULESTORE_OVERRIDE_PROVIDERS_KEY = u'courseware.modulestore_field_overrides.enabled_providers.{course_id}'
def resolve_dotted(name):
"""
Given the dotted name for a Python object, performs any necessary imports
and returns the object.
"""
names = name.split('.')
path = names.pop(0)
target = __import__(path)
while names:
segment = names.pop(0)
path += '.' + segment
try:
target = getattr(target, segment)
except AttributeError:
__import__(path)
target = getattr(target, segment)
return target
def _lineage(block):
"""
Returns an iterator over all ancestors of the given block, starting with
its immediate parent and ending at the root of the block tree.
"""
parent = block.get_parent()
while parent:
yield parent
parent = parent.get_parent()
class _OverridesDisabled(threading.local):
"""
A thread local used to manage state of overrides being disabled or not.
"""
disabled = ()
_OVERRIDES_DISABLED = _OverridesDisabled()
@contextmanager
def disable_overrides():
"""
A context manager which disables field overrides inside the context of a
`with` statement, allowing code to get at the `original` value of a field.
"""
prev = _OVERRIDES_DISABLED.disabled
_OVERRIDES_DISABLED.disabled += (True,)
yield
_OVERRIDES_DISABLED.disabled = prev
def overrides_disabled():
"""
Checks to see whether overrides are disabled in the current context.
Returns a boolean value. See `disable_overrides`.
"""
return bool(_OVERRIDES_DISABLED.disabled)
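# Usage sketch: code that needs the un-overridden value of a field wraps the
# read in the context manager above, e.g.
#
#   with disable_overrides():
#       original_due = block.due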
class FieldOverrideProvider(object):
"""
Abstract class which defines the interface that a `FieldOverrideProvider`
must provide. In general, providers should derive from this class, but
it's not strictly necessary as long as they correctly implement this
interface.
A `FieldOverrideProvider` implementation is only responsible for looking up
field overrides. To set overrides, there will be a domain specific API for
the concrete override implementation being used.
"""
__metaclass__ = ABCMeta
def __init__(self, user):
self.user = user
@abstractmethod
def get(self, block, name, default): # pragma no cover
"""
Look for an override value for the field named `name` in `block`.
Returns the overridden value or `default` if no override is found.
"""
raise NotImplementedError
@abstractmethod
def enabled_for(self, course): # pragma no cover
"""
Return True if this provider should be enabled for a given course,
and False otherwise.
Concrete implementations are responsible for implementing this method.
Arguments:
course (CourseModule or None)
Returns:
bool
"""
return False
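# A minimal concrete provider sketch (hypothetical; real providers such as the
# one in courseware.student_field_overrides look overrides up in storage):
class ExampleFieldOverrideProvider(FieldOverrideProvider):
    """Example only: clears the `due` field for every block when enabled."""
    def get(self, block, name, default):
        if name == 'due':
            return None  # pretend an override removed the due date
        return default
    @classmethod
    def enabled_for(cls, course):
        # Called on the class by OverrideFieldData._providers_for_course.
        return course is not None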
class OverrideFieldData(FieldData):
"""
A :class:`~xblock.field_data.FieldData` which wraps another `FieldData`
object and allows for fields handled by the wrapped `FieldData` to be
overriden by arbitrary providers.
Providers are configured by use of the Django setting,
`FIELD_OVERRIDE_PROVIDERS` which should be a tuple of dotted names of
:class:`FieldOverrideProvider` concrete implementations. Note that order
is important for this setting. Override providers will tried in the order
configured in the setting. The first provider to find an override 'wins'
for a particular field lookup.
"""
provider_classes = None
@classmethod
def wrap(cls, user, course, wrapped):
"""
Will return a :class:`OverrideFieldData` which wraps the field data
        given in `wrapped` for the given `user`, if override providers are
        configured. If no override providers are configured via the Django
        setting `FIELD_OVERRIDE_PROVIDERS`, returns `wrapped`, eliminating
any performance impact of this feature if no override providers are
configured.
"""
if cls.provider_classes is None:
cls.provider_classes = tuple(
(resolve_dotted(name) for name in
settings.FIELD_OVERRIDE_PROVIDERS))
enabled_providers = cls._providers_for_course(course)
if enabled_providers:
# TODO: we might not actually want to return here. Might be better
# to check for instance.providers after the instance is built. This
# would allow for the case where we have registered providers but
# none are enabled for the provided course
return cls(user, wrapped, enabled_providers)
return wrapped
@classmethod
def _providers_for_course(cls, course):
"""
Return a filtered list of enabled providers based
on the course passed in. Cache this result per request to avoid
needing to call the provider filter api hundreds of times.
Arguments:
course: The course XBlock
"""
request_cache = RequestCache.get_request_cache()
if course is None:
cache_key = ENABLED_OVERRIDE_PROVIDERS_KEY.format(course_id='None')
else:
cache_key = ENABLED_OVERRIDE_PROVIDERS_KEY.format(course_id=unicode(course.id))
enabled_providers = request_cache.data.get(cache_key, NOTSET)
if enabled_providers == NOTSET:
enabled_providers = tuple(
(provider_class for provider_class in cls.provider_classes if provider_class.enabled_for(course))
)
request_cache.data[cache_key] = enabled_providers
return enabled_providers
def __init__(self, user, fallback, providers):
self.fallback = fallback
self.providers = tuple(provider(user) for provider in providers)
def get_override(self, block, name):
"""
Checks for an override for the field identified by `name` in `block`.
Returns the overridden value or `NOTSET` if no override is found.
"""
if not overrides_disabled():
for provider in self.providers:
value = provider.get(block, name, NOTSET)
if value is not NOTSET:
return value
return NOTSET
def get(self, block, name):
value = self.get_override(block, name)
if value is not NOTSET:
return value
return self.fallback.get(block, name)
def set(self, block, name, value):
self.fallback.set(block, name, value)
def delete(self, block, name):
self.fallback.delete(block, name)
def has(self, block, name):
if not self.providers:
return self.fallback.has(block, name)
has = self.get_override(block, name)
if has is NOTSET:
# If this is an inheritable field and an override is set above,
# then we want to return False here, so the field_data uses the
# override and not the original value for this block.
inheritable = InheritanceMixin.fields.keys()
if name in inheritable:
for ancestor in _lineage(block):
if self.get_override(ancestor, name) is not NOTSET:
return False
return has is not NOTSET or self.fallback.has(block, name)
def set_many(self, block, update_dict):
return self.fallback.set_many(block, update_dict)
def default(self, block, name):
# The `default` method is overloaded by the field storage system to
# also handle inheritance.
if self.providers and not overrides_disabled():
inheritable = InheritanceMixin.fields.keys()
if name in inheritable:
for ancestor in _lineage(block):
value = self.get_override(ancestor, name)
if value is not NOTSET:
return value
return self.fallback.default(block, name)
class OverrideModulestoreFieldData(OverrideFieldData):
"""Apply field data overrides at the modulestore level. No student context required."""
@classmethod
def wrap(cls, block, field_data): # pylint: disable=arguments-differ
"""
Returns an instance of FieldData wrapped by FieldOverrideProviders which
extend read-only functionality. If no MODULESTORE_FIELD_OVERRIDE_PROVIDERS
are configured, an unwrapped FieldData instance is returned.
Arguments:
block: An XBlock
field_data: An instance of FieldData to be wrapped
"""
if cls.provider_classes is None:
cls.provider_classes = [
resolve_dotted(name) for name in settings.MODULESTORE_FIELD_OVERRIDE_PROVIDERS
]
enabled_providers = cls._providers_for_block(block)
if enabled_providers:
return cls(field_data, enabled_providers)
return field_data
@classmethod
def _providers_for_block(cls, block):
"""
Computes a list of enabled providers based on the given XBlock.
The result is cached per request to avoid the overhead incurred
by filtering override providers hundreds of times.
Arguments:
block: An XBlock
"""
course_id = unicode(block.location.course_key)
cache_key = ENABLED_MODULESTORE_OVERRIDE_PROVIDERS_KEY.format(course_id=course_id)
request_cache = RequestCache.get_request_cache()
enabled_providers = request_cache.data.get(cache_key)
if enabled_providers is None:
enabled_providers = [
provider_class for provider_class in cls.provider_classes if provider_class.enabled_for(block)
]
request_cache.data[cache_key] = enabled_providers
return enabled_providers
def __init__(self, fallback, providers):
super(OverrideModulestoreFieldData, self).__init__(None, fallback, providers)
| agpl-3.0 |
openaire/iis | iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/lib/chardet/sjisprober.py | 190 | 3549 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import SJISDistributionAnalysis
from jpcntx import SJISContextAnalysis
from mbcssm import SJISSMModel
import constants, sys
from constants import eStart, eError, eItsMe
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen :], charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen : i + 3 - charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mContextAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| apache-2.0 |
powerlim2/project_free_insight | data_api/venv/lib/python2.7/site-packages/requests/packages/urllib3/contrib/socks.py | 360 | 5668 | # -*- coding: utf-8 -*-
"""
SOCKS support for urllib3
~~~~~~~~~~~~~~~~~~~~~~~~~
This contrib module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
Known Limitations:
- Currently PySocks does not support contacting remote websites via literal
IPv6 addresses. Any such connection attempt will fail.
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
such connection attempt will fail.
"""
from __future__ import absolute_import
try:
import socks
except ImportError:
import warnings
from ..exceptions import DependencyWarning
warnings.warn((
'SOCKS support in urllib3 requires the installation of optional '
'dependencies: specifically, PySocks. For more information, see '
'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
),
DependencyWarning
)
raise
from socket import error as SocketError, timeout as SocketTimeout
from ..connection import (
HTTPConnection, HTTPSConnection
)
from ..connectionpool import (
HTTPConnectionPool, HTTPSConnectionPool
)
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
"""
A plain-text HTTP connection that connects via a SOCKS proxy.
"""
def __init__(self, *args, **kwargs):
self._socks_options = kwargs.pop('_socks_options')
super(SOCKSConnection, self).__init__(*args, **kwargs)
def _new_conn(self):
"""
Establish a new connection via the SOCKS proxy.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = socks.create_connection(
(self.host, self.port),
proxy_type=self._socks_options['socks_version'],
proxy_addr=self._socks_options['proxy_host'],
proxy_port=self._socks_options['proxy_port'],
proxy_username=self._socks_options['username'],
proxy_password=self._socks_options['password'],
timeout=self.timeout,
**extra_kw
)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except socks.ProxyError as e:
# This is fragile as hell, but it seems to be the only way to raise
# useful errors here.
if e.socket_err:
error = e.socket_err
if isinstance(error, SocketTimeout):
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout)
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % error
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % e
)
except SocketError as e: # Defensive: PySocks should catch all these.
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
"""
A version of the urllib3 ProxyManager that routes connections via the
defined SOCKS proxy.
"""
pool_classes_by_scheme = {
'http': SOCKSHTTPConnectionPool,
'https': SOCKSHTTPSConnectionPool,
}
def __init__(self, proxy_url, username=None, password=None,
num_pools=10, headers=None, **connection_pool_kw):
parsed = parse_url(proxy_url)
if parsed.scheme == 'socks5':
socks_version = socks.PROXY_TYPE_SOCKS5
elif parsed.scheme == 'socks4':
socks_version = socks.PROXY_TYPE_SOCKS4
else:
raise ValueError(
"Unable to determine SOCKS version from %s" % proxy_url
)
self.proxy_url = proxy_url
socks_options = {
'socks_version': socks_version,
'proxy_host': parsed.host,
'proxy_port': parsed.port,
'username': username,
'password': password,
}
connection_pool_kw['_socks_options'] = socks_options
super(SOCKSProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
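# Usage sketch (the proxy address is hypothetical): route all requests through
# a local SOCKS5 proxy.
if __name__ == '__main__':
    proxy = SOCKSProxyManager('socks5://localhost:1080/')
    response = proxy.request('GET', 'http://example.com/')
    print(response.status)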
| bsd-3-clause |
daspots/dasapp | lib/requests_oauthlib/oauth2_session.py | 21 | 17215 | from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
log = logging.getLogger(__name__)
class TokenUpdated(Warning):
def __init__(self, token):
super(TokenUpdated, self).__init__()
self.token = token
class OAuth2Session(requests.Session):
"""Versatile OAuth 2 extension to :class:`requests.Session`.
Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
including the four core OAuth 2 grants.
Can be used to create authorization urls, fetch tokens and access protected
resources using the :class:`requests.Session` interface you are used to.
- :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
- :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
- :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
- :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
Note that the only time you will be using Implicit Grant from python is if
you are driving a user agent able to obtain URL fragments.
"""
def __init__(self, client_id=None, client=None, auto_refresh_url=None,
auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
state=None, token_updater=None, **kwargs):
"""Construct a new OAuth 2 client session.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param scope: List of scopes you wish to request access to
:param redirect_uri: Redirect URI you registered as callback
:param token: Token dictionary, must include access_token
and token_type.
:param state: State string used to prevent CSRF. This will be given
when creating the authorization url and must be supplied
when parsing the authorization response.
Can be either a string or a no argument callable.
:auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
this if you wish the client to automatically refresh
your access tokens.
:auto_refresh_kwargs: Extra arguments to pass to the refresh token
endpoint.
:token_updater: Method with one argument, token, to be used to update
                        your token database on automatic token refresh. If not
set a TokenUpdated warning will be raised when a token
has been refreshed. This warning will carry the token
in its token argument.
:param kwargs: Arguments to pass to the Session constructor.
"""
super(OAuth2Session, self).__init__(**kwargs)
self._client = client or WebApplicationClient(client_id, token=token)
self.token = token or {}
self.scope = scope
self.redirect_uri = redirect_uri
self.state = state or generate_token
self._state = state
self.auto_refresh_url = auto_refresh_url
self.auto_refresh_kwargs = auto_refresh_kwargs or {}
self.token_updater = token_updater
# Allow customizations for non compliant providers through various
# hooks to adjust requests and responses.
self.compliance_hook = {
'access_token_response': set([]),
'refresh_token_response': set([]),
'protected_request': set([]),
}
def new_state(self):
"""Generates a state string to be used in authorizations."""
try:
self._state = self.state()
log.debug('Generated new state %s.', self._state)
except TypeError:
self._state = self.state
log.debug('Re-using previously supplied state %s.', self._state)
return self._state
@property
def client_id(self):
return getattr(self._client, "client_id", None)
@client_id.setter
def client_id(self, value):
self._client.client_id = value
@client_id.deleter
def client_id(self):
del self._client.client_id
@property
def token(self):
return getattr(self._client, "token", None)
@token.setter
def token(self, value):
self._client.token = value
self._client._populate_attributes(value)
@property
def access_token(self):
return getattr(self._client, "access_token", None)
@access_token.setter
def access_token(self, value):
self._client.access_token = value
@access_token.deleter
def access_token(self):
del self._client.access_token
@property
def authorized(self):
"""Boolean that indicates whether this session has an OAuth token
or not. If `self.authorized` is True, you can reasonably expect
OAuth-protected requests to the resource to succeed. If
`self.authorized` is False, you need the user to go through the OAuth
authentication dance before OAuth-protected requests to the resource
will succeed.
"""
return bool(self.access_token)
def authorization_url(self, url, state=None, **kwargs):
"""Form an authorization URL.
:param url: Authorization endpoint url, must be HTTPS.
:param state: An optional state string for CSRF protection. If not
given it will be generated for you.
:param kwargs: Extra parameters to include.
:return: authorization_url, state
"""
state = state or self.new_state()
return self._client.prepare_request_uri(url,
redirect_uri=self.redirect_uri,
scope=self.scope,
state=state,
**kwargs), state
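    # Flow sketch (client id, secret and endpoint URLs are hypothetical): the
    # usual Authorization Code Grant with this class looks like
    #
    #   session = OAuth2Session('client-id', redirect_uri='https://app/cb')
    #   url, state = session.authorization_url('https://provider/authorize')
    #   # ... the user authorizes; the provider redirects back to the app ...
    #   token = session.fetch_token('https://provider/token',
    #                               client_secret='client-secret',
    #                               authorization_response=callback_url)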
def fetch_token(self, token_url, code=None, authorization_response=None,
body='', auth=None, username=None, password=None, method='POST',
timeout=None, headers=None, verify=True, proxies=None, **kwargs):
"""Generic method for fetching an access token from the token endpoint.
If you are using the MobileApplicationClient you will want to use
token_from_fragment instead of fetch_token.
:param token_url: Token endpoint URL, must use HTTPS.
:param code: Authorization code (used by WebApplicationClients).
:param authorization_response: Authorization response URL, the callback
URL of the request back to you. Used by
WebApplicationClients instead of code.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param username: Username used by LegacyApplicationClients.
:param password: Password used by LegacyApplicationClients.
:param method: The HTTP method used to make the request. Defaults
to POST, but may also be GET. Other methods should
be added as needed.
:param headers: Dict to default request headers with.
:param timeout: Timeout of the request in seconds.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
if not code and authorization_response:
self._client.parse_request_uri_response(authorization_response,
state=self._state)
code = self._client.code
elif not code and isinstance(self._client, WebApplicationClient):
code = self._client.code
if not code:
raise ValueError('Please supply either code or '
'authorization_response parameters.')
body = self._client.prepare_request_body(code=code, body=body,
redirect_uri=self.redirect_uri, username=username,
password=password, **kwargs)
client_id = kwargs.get('client_id', '')
if auth is None:
if client_id:
log.debug('Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id)
client_secret = kwargs.get('client_secret', '')
client_secret = client_secret if client_secret is not None else ''
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
elif username:
if password is None:
raise ValueError('Username was supplied, but not password.')
log.debug('Encoding username, password as Basic auth credentials.')
auth = requests.auth.HTTPBasicAuth(username, password)
headers = headers or {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
self.token = {}
if method.upper() == 'POST':
r = self.post(token_url, data=dict(urldecode(body)),
timeout=timeout, headers=headers, auth=auth,
verify=verify, proxies=proxies)
log.debug('Prepared fetch token request body %s', body)
elif method.upper() == 'GET':
# if method is not 'POST', switch body to querystring and GET
r = self.get(token_url, params=dict(urldecode(body)),
timeout=timeout, headers=headers, auth=auth,
verify=verify, proxies=proxies)
log.debug('Prepared fetch token request querystring %s', body)
else:
raise ValueError('The method kwarg must be POST or GET.')
log.debug('Request to fetch token completed with status %s.',
r.status_code)
log.debug('Request headers were %s', r.request.headers)
log.debug('Request body was %s', r.request.body)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['access_token_response']))
for hook in self.compliance_hook['access_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
self._client.parse_request_body_response(r.text, scope=self.scope)
self.token = self._client.token
log.debug('Obtained token %s.', self.token)
return self.token
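    # Illustrative sketch (not part of the library): exchanging the
    # authorization response for a token. All URLs and credentials below are
    # hypothetical; client_secret is passed through kwargs and, as the code
    # above shows, ends up in the HTTP Basic auth header.
    #
    #     token = oauth.fetch_token(
    #         'https://provider.example.com/oauth/token',
    #         authorization_response='https://example.com/callback?code=...&state=...',
    #         client_id='my_client_id',
    #         client_secret='my_client_secret')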
def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(authorization_response,
state=self._state)
self.token = self._client.token
return self.token
def refresh_token(self, token_url, refresh_token=None, body='', auth=None,
timeout=None, headers=None, verify=True, proxies=None, **kwargs):
"""Fetch a new access token using a refresh token.
:param token_url: The token endpoint, must be HTTPS.
:param refresh_token: The refresh_token to use.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param timeout: Timeout of the request in seconds.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not token_url:
raise ValueError('No token endpoint set for auto_refresh.')
if not is_secure_transport(token_url):
raise InsecureTransportError()
refresh_token = refresh_token or self.token.get('refresh_token')
log.debug('Adding auto refresh key word arguments %s.',
self.auto_refresh_kwargs)
kwargs.update(self.auto_refresh_kwargs)
body = self._client.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
log.debug('Prepared refresh token request body %s', body)
if headers is None:
headers = {
'Accept': 'application/json',
'Content-Type': (
'application/x-www-form-urlencoded;charset=UTF-8'
),
}
r = self.post(token_url, data=dict(urldecode(body)), auth=auth,
timeout=timeout, headers=headers, verify=verify, withhold_token=True, proxies=proxies)
log.debug('Request to refresh token completed with status %s.',
r.status_code)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['refresh_token_response']))
for hook in self.compliance_hook['refresh_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
        if 'refresh_token' not in self.token:
log.debug('No new refresh token given. Re-using old.')
self.token['refresh_token'] = refresh_token
return self.token
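    # Illustrative sketch (not part of the library): configuring automatic
    # refresh so expired tokens are renewed transparently by request().
    # The names and URLs are hypothetical placeholders.
    #
    #     def token_saver(token):
    #         db.save(token)  # persist however your application prefers
    #
    #     oauth = OAuth2Session(
    #         'my_client_id', token=token,
    #         auto_refresh_url='https://provider.example.com/oauth/token',
    #         auto_refresh_kwargs={'client_id': 'my_client_id',
    #                              'client_secret': 'my_client_secret'},
    #         token_updater=token_saver)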
def request(self, method, url, data=None, headers=None, withhold_token=False,
client_id=None, client_secret=None, **kwargs):
"""Intercept all requests and add the OAuth 2 token if present."""
if not is_secure_transport(url):
raise InsecureTransportError()
if self.token and not withhold_token:
log.debug('Invoking %d protected resource request hooks.',
len(self.compliance_hook['protected_request']))
for hook in self.compliance_hook['protected_request']:
log.debug('Invoking hook %s.', hook)
url, headers, data = hook(url, headers, data)
log.debug('Adding token %s to request.', self.token)
try:
url, headers, data = self._client.add_token(url,
http_method=method, body=data, headers=headers)
# Attempt to retrieve and save new access token if expired
except TokenExpiredError:
if self.auto_refresh_url:
log.debug('Auto refresh is set, attempting to refresh at %s.',
self.auto_refresh_url)
# We mustn't pass auth twice.
auth = kwargs.pop('auth', None)
if client_id and client_secret and (auth is None):
log.debug('Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id)
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
token = self.refresh_token(
self.auto_refresh_url, auth=auth, **kwargs
)
if self.token_updater:
log.debug('Updating token to %s using %s.',
token, self.token_updater)
self.token_updater(token)
url, headers, data = self._client.add_token(url,
http_method=method, body=data, headers=headers)
else:
raise TokenUpdated(token)
else:
raise
log.debug('Requesting url %s using method %s.', url, method)
log.debug('Supplying headers %s and data %s', headers, data)
log.debug('Passing through key word arguments %s.', kwargs)
return super(OAuth2Session, self).request(method, url,
headers=headers, data=data, **kwargs)
def register_compliance_hook(self, hook_type, hook):
"""Register a hook for request/response tweaking.
Available hooks are:
access_token_response invoked before token parsing.
refresh_token_response invoked before refresh token parsing.
protected_request invoked before making a request.
        If you find a new hook is needed, please send a GitHub pull request
        or open an issue.
"""
if hook_type not in self.compliance_hook:
            raise ValueError('Hook type %s is not in %s.' %
                             (hook_type, self.compliance_hook))
self.compliance_hook[hook_type].add(hook)
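    # Illustrative sketch (not part of the library): registering a hook that
    # patches a non-compliant token response before parsing. The provider
    # quirk shown (missing token_type) is hypothetical; response._content is
    # the requests internal holding the raw body.
    #
    #     import json
    #
    #     def fix_token_type(response):
    #         token = json.loads(response.text)
    #         token.setdefault('token_type', 'Bearer')
    #         response._content = json.dumps(token).encode('utf-8')
    #         return response
    #
    #     oauth.register_compliance_hook('access_token_response', fix_token_type)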
| mit |
DHI-GRAS/processing_gpf | SNAPAlgorithm.py | 2 | 4198 | """
***************************************************************************
 SNAPAlgorithm.py
-------------------------------------
Copyright (C) 2014 TIGER-NET (www.tiger-net.org)
***************************************************************************
* This plugin is part of the Water Observation Information System (WOIS) *
* developed under the TIGER-NET project funded by the European Space *
* Agency as part of the long-term TIGER initiative aiming at promoting *
* the use of Earth Observation (EO) for improved Integrated Water *
* Resources Management (IWRM) in Africa. *
* *
* WOIS is free software, i.e. you can redistribute it and/or modify  *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* WOIS is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
* for more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see <http://www.gnu.org/licenses/>. *
***************************************************************************
"""
import os
from PyQt4.QtGui import *
from qgis.core import *
from xml.etree.ElementTree import Element, SubElement
from processing_gpf.GPFUtils import GPFUtils
from processing_gpf.GPFAlgorithm import GPFAlgorithm
try:
from processing.parameters.ParameterRaster import ParameterRaster
except ImportError:
from processing.core.parameters import ParameterRaster
# General SNAP algorithms (e.g. from Raster or Input-Output menus)
class SNAPAlgorithm(GPFAlgorithm):
def __init__(self, descriptionfile):
GPFAlgorithm.__init__(self, descriptionfile)
self.programKey = GPFUtils.snapKey()
def processAlgorithm(self, progress):
GPFAlgorithm.processAlgorithm(self, GPFUtils.snapKey(), progress)
def addGPFNode(self, graph):
graph = GPFAlgorithm.addGPFNode(self, graph)
# split band element with multiple bands into multiple elements
for parent in graph.findall(".//band/.."):
for element in parent.findall("band"):
bands = element.text.split(',')
parent.remove(element)
for band in bands:
if len(band) > 0:
newElement = SubElement(parent, "band")
newElement.text = band
for parent in graph.findall(".//mapProjection/.."):
for element in parent.findall("mapProjection"):
crs = element.text
try:
projection = QgsCoordinateReferenceSystem(int(crs), 2)
wkt = projection.toWkt()
element.text = str(wkt)
                except Exception:
parent.remove(element)
return graph
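    # Illustrative sketch (not part of this module): the band-splitting loop
    # above turns one comma-separated element into one element per band, e.g.
    #
    #     <band>B1,B2</band>   becomes   <band>B1</band><band>B2</band>
    #
    # so that each band reaches GPF as a separate XML node.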
def defineCharacteristicsFromFile(self):
GPFAlgorithm.defineCharacteristicsFromFile(self)
# check if there are multiple raster inputs
inputsCount = 0
for param in self.parameters:
if isinstance(param, ParameterRaster):
                inputsCount += 1
if inputsCount > 1:
self.multipleRasterInput = True
def helpFile(self):
GPFAlgorithm.helpFile(self, GPFUtils.snapKey())
def getIcon(self):
return QIcon(os.path.dirname(__file__) + "/images/snap.png")
def getCopy(self):
newone = SNAPAlgorithm(self.descriptionFile)
newone.provider = self.provider
return newone
| gpl-3.0 |
Bismarrck/pymatgen | pymatgen/command_line/tests/test_critic2_caller.py | 6 | 3331 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
from pymatgen import Structure
from pymatgen.command_line.critic2_caller import *
from monty.os.path import which
__author__ = "Matthew Horton"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Production"
__date__ = "July 2017"
@unittest.skipIf(not which('critic2'), "critic2 executable not present")
class Critic2CallerTest(unittest.TestCase):
def test_from_path(self):
# uses chgcars
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/bader')
c2c = Critic2Caller.from_path(test_dir)
# check we have some results!
self.assertGreaterEqual(len(c2c._stdout), 500)
def test_from_structure(self):
# uses promolecular density
structure = Structure.from_file(os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/critic2/MoS2.cif'))
c2c = Critic2Caller(structure)
# check we have some results!
self.assertGreaterEqual(len(c2c._stdout), 500)
class Critic2OutputTest(unittest.TestCase):
def setUp(self):
stdout_file = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/critic2/MoS2_critic2_stdout.txt')
with open(stdout_file, 'r') as f:
reference_stdout = f.read()
structure = Structure.from_file(os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/critic2/MoS2.cif'))
self.c2o = Critic2Output(structure, reference_stdout)
def test_properties_to_from_dict(self):
self.assertEqual(len(self.c2o.critical_points), 6)
self.assertEqual(len(self.c2o.nodes), 14)
self.assertEqual(len(self.c2o.edges), 10)
# reference dictionary for c2o.critical_points[0].as_dict()
# {'@class': 'CriticalPoint',
# '@module': 'pymatgen.command_line.critic2_caller',
# 'coords': None,
# 'field': 93848.0413,
# 'field_gradient': 0.0,
# 'field_hessian': [[-2593274446000.0, -3.873587547e-19, -1.704530713e-08],
# [-3.873587547e-19, -2593274446000.0, 1.386877485e-18],
# [-1.704530713e-08, 1.386877485e-18, -2593274446000.0]],
# 'frac_coords': [0.333333, 0.666667, 0.213295],
# 'index': 0,
# 'multiplicity': 1.0,
# 'point_group': 'D3h',
# 'type': < CriticalPointType.nucleus: 'nucleus' >}
self.assertEqual(str(self.c2o.critical_points[0].type), "CriticalPointType.nucleus")
# test connectivity
self.assertDictEqual(self.c2o.edges[3], {'from_idx': 1, 'from_lvec': (0, 0, 0),
'to_idx': 0, 'to_lvec': (1, 0, 0)})
# test as/from dict
d = self.c2o.as_dict()
self.assertEqual(set(d.keys()), {'@module', '@class',
'structure', 'critic2_stdout'})
self.c2o.from_dict(d)
if __name__ == '__main__':
unittest.main()
| mit |
rjeschmi/easybuild-easyblocks | easybuild/easyblocks/a/aladin.py | 3 | 14593 | ##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ALADIN, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.filetools import run_cmd, run_cmd_qa
from easybuild.tools.modules import get_software_root
from easybuild.tools.ordereddict import OrderedDict
class EB_ALADIN(EasyBlock):
"""Support for building/installing ALADIN."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for ALADIN."""
super(EB_ALADIN, self).__init__(*args, **kwargs)
self.conf_file = None
self.conf_filepath = None
self.rootpack_dir = None
self.orig_library_path = None
@staticmethod
def extra_options():
"""Custom easyconfig parameters for ALADIN."""
extra_vars = {
'optional_extra_param': ['default value', "short description", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Custom configuration procedure for ALADIN."""
# unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking
if 'LIBRARY_PATH' in os.environ:
self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH'])
self.orig_library_path = os.environ.pop('LIBRARY_PATH')
# build auxiliary libraries
auxlibs_dir = None
my_gnu = None
if self.toolchain.comp_family() == toolchain.GCC:
my_gnu = 'y' # gfortran
for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']:
flags = os.getenv(var)
env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags)
self.log.info("Updated %s to '%s'" % (var, os.getenv(var)))
elif self.toolchain.comp_family() == toolchain.INTELCOMP:
my_gnu = 'i' # icc/ifort
else:
self.log.error("Don't know how to set 'my_gnu' variable in auxlibs build script.")
self.log.info("my_gnu set to '%s'" % my_gnu)
tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_')
try:
cwd = os.getcwd()
os.chdir(self.builddir)
builddirs = os.listdir(self.builddir)
auxlibs_dir = [x for x in builddirs if x.startswith('auxlibs_installer')][0]
os.chdir(auxlibs_dir)
auto_driver = 'driver_automatic'
for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line)
line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line) # always 64-bit real precision
line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line) # make libs read-only after build
line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line)
sys.stdout.write(line)
run_cmd("./%s" % auto_driver)
os.chdir(cwd)
except OSError, err:
self.log.error("Failed to build ALADIN: %s" % err)
# build gmkpack, update PATH and set GMKROOT
        # we build gmkpack here because a config file is generated in the gmkpack install path
try:
gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0]
os.chdir(os.path.join(self.builddir, gmkpack_dir))
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n',
}
run_cmd_qa("./build_gmkpack", qa)
os.chdir(cwd)
paths = os.getenv('PATH').split(':')
paths.append(os.path.join(self.builddir, gmkpack_dir, 'util'))
env.setvar('PATH', ':'.join(paths))
env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir))
except OSError, err:
self.log.error("Failed to build gmkpack: %s" % err)
# generate gmkpack configuration file
self.conf_file = 'ALADIN_%s' % self.version
self.conf_filepath = os.path.join(self.builddir, 'arch', '%s.x' % self.conf_file)
try:
if os.path.exists(self.conf_filepath):
os.remove(self.conf_filepath)
self.log.info("Removed existing gmpack config file %s" % self.conf_filepath)
archdir = os.path.join(self.builddir, 'arch')
if not os.path.exists(archdir):
os.makedirs(archdir)
except OSError, err:
self.log.error("Failed to remove existing file %s: %s" % (self.conf_filepath, err))
mpich = 'n'
known_mpi_libs = [toolchain.MPICH2, toolchain.INTELMPI]
if self.toolchain.options.get('usempi', None) and self.toolchain.mpi_family() in known_mpi_libs:
mpich = 'y'
qpref = 'Please type the ABSOLUTE name of '
qsuff = ', or ignore (environment variables allowed) :'
qsuff2 = ', or ignore : (environment variables allowed) :'
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.GCC:
gribdir = 'GNU'
elif comp_fam == toolchain.INTELCOMP:
gribdir = 'INTEL'
else:
self.log.error("Don't know which grib lib dir to use for compiler %s" % comp_fam)
aux_lib_gribex = os.path.join(tmp_installroot, gribdir, 'lib', 'libgribex.a')
aux_lib_ibm = os.path.join(tmp_installroot, gribdir, 'lib', 'libibmdummy.a')
grib_api_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api.a')
grib_api_f90_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api_f90.a')
grib_api_inc = os.path.join(get_software_root('grib_api'), 'include')
jasperlib = os.path.join(get_software_root('JasPer'), 'lib', 'libjasper.a')
netcdflib = os.path.join(get_software_root('netCDF'), 'lib', 'libnetcdff.a')
netcdfinc = os.path.join(get_software_root('netCDF'), 'include')
mpilib = os.path.join(os.getenv('MPI_LIB_DIR'), os.getenv('MPI_LIB_SHARED'))
ldpaths = [ldflag[2:] for ldflag in os.getenv('LDFLAGS').split(' ')] # LDFLAGS have form '-L/path/to'
lapacklibs = []
for lib in os.getenv('LAPACK_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
lapacklibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
lapacklib = ' '.join(lapacklibs)
blaslibs = []
for lib in os.getenv('BLAS_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
blaslibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
blaslib = ' '.join(blaslibs)
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'y',
'Do you want to setup your configuration file for MPICH (y/n) [n] ?': mpich,
'Please type the directory name where to find a dummy file mpif.h or ignore :': os.getenv('MPI_INC_DIR'),
'%sthe library gribex or emos%s' % (qpref, qsuff2): aux_lib_gribex,
'%sthe library ibm%s' % (qpref, qsuff): aux_lib_ibm,
'%sthe library grib_api%s' % (qpref, qsuff): grib_api_lib,
'%sthe library grib_api_f90%s' % (qpref, qsuff): grib_api_f90_lib,
'%sthe JPEG auxilary library if enabled by Grib_api%s' % (qpref, qsuff2): jasperlib,
'%sthe library netcdf%s' % (qpref, qsuff): netcdflib,
'%sthe library lapack%s' % (qpref, qsuff): lapacklib,
'%sthe library blas%s' % (qpref, qsuff): blaslib,
'%sthe library mpi%s' % (qpref, qsuff): mpilib,
'%sa MPI dummy library for serial executions, or ignore :' % qpref: '',
'Please type the directory name where to find grib_api headers, or ignore :': grib_api_inc,
'Please type the directory name where to find fortint.h or ignore :': '',
'Please type the directory name where to find netcdf headers, or ignore :': netcdfinc,
'Do you want to define CANARI (y/n) [y] ?': 'y',
'Please type the name of the script file used to generate a preprocessed blacklist file, or ignore :': '',
'Please type the name of the script file used to recover local libraries (gget), or ignore :': '',
'Please type the options to tune the gnu compilers, or ignore :': os.getenv('F90FLAGS'),
}
f90_seq = os.getenv('F90_SEQ')
if not f90_seq:
# F90_SEQ is only defined when usempi is enabled
f90_seq = os.getenv('F90')
stdqa = OrderedDict([
(r'Confirm library .* is .*', 'y'), # this one needs to be tried first!
(r'.*fortran 90 compiler name .*\s*:\n\(suggestions\s*: .*\)', os.getenv('F90')),
(r'.*fortran 90 compiler interfaced with .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
(r'Please type the ABSOLUTE name of .*library.*, or ignore\s*[:]*\s*[\n]*.*', ''),
(r'Please .* to save this draft configuration file :\n.*', '%s.x' % self.conf_file),
])
env.setvar('GMKTMP', self.builddir)
env.setvar('GMKFILE', self.conf_file)
run_cmd_qa("gmkfilemaker", qa, std_qa=stdqa)
# set environment variables for installation dirs
env.setvar('ROOTPACK', os.path.join(self.installdir, 'rootpack'))
env.setvar('ROOTBIN', os.path.join(self.installdir, 'rootpack'))
env.setvar('HOMEPACK', os.path.join(self.installdir, 'pack'))
env.setvar('HOMEBIN', os.path.join(self.installdir, 'pack'))
def build_step(self):
"""No separate build procedure for ALADIN (see install_step)."""
pass
def test_step(self):
"""Custom built-in test procedure for ALADIN."""
if self.cfg['runtest']:
cmd = "test-command"
run_cmd(cmd, simple=True, log_all=True, log_output=True)
def install_step(self):
"""Custom install procedure for ALADIN."""
try:
os.mkdir(os.getenv('ROOTPACK'))
os.mkdir(os.getenv('HOMEPACK'))
except OSError, err:
self.log.error("Failed to create rootpack dir in %s: %s" % err)
# create rootpack
[v1, v2] = self.version.split('_')
(out, _) = run_cmd("source $GMKROOT/util/berootpack && gmkpack -p master -a -r %s -b %s" % (v1, v2), simple=False)
        packdir_regexp = re.compile(r"Creating main pack (.*) \.\.\.")
res = packdir_regexp.search(out)
if res:
self.rootpack_dir = os.path.join('rootpack', res.group(1))
else:
self.log.error("Failed to determine rootpack dir.")
# copy ALADIN sources to right directory
try:
src_dirs = [d for d in os.listdir(self.builddir) if not (d.startswith('auxlib') or d.startswith('gmk'))]
target = os.path.join(self.installdir, self.rootpack_dir, 'src', 'local')
self.log.info("Copying sources from %s to %s" % (self.builddir, target))
for srcdir in src_dirs:
shutil.copytree(os.path.join(self.builddir, srcdir), os.path.join(target, srcdir))
self.log.info("Copied %s" % srcdir)
except OSError, err:
self.log.error("Failed to copy ALADIN sources: %s" % err)
if self.cfg['parallel']:
env.setvar('GMK_THREADS', str(self.cfg['parallel']))
# build rootpack
run_cmd(os.path.join(self.installdir, self.rootpack_dir, 'ics_master'))
# restore original $LIBRARY_PATH
if self.orig_library_path is not None:
os.environ['LIBRARY_PATH'] = self.orig_library_path
def sanity_check_step(self):
"""Custom sanity check for ALADIN."""
bindir = os.path.join(self.rootpack_dir, 'bin')
libdir = os.path.join(self.rootpack_dir, 'lib')
custom_paths = {
'files': [os.path.join(bindir, x) for x in ['MASTER']] +
[os.path.join(libdir, 'lib%s.local.a' % x) for x in ['aeo', 'ald', 'arp', 'bip',
'bla', 'mpa', 'mse', 'obt',
'odb', 'sat', 'scr', 'sct',
'sur', 'surfex', 'tal', 'tfl',
'uti', 'xla', 'xrd']],
'dirs': [],
}
super(EB_ALADIN, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for environment variables (PATH, ...) for ALADIN."""
guesses = super(EB_ALADIN, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.join(self.rootpack_dir, 'bin')],
})
return guesses
| gpl-2.0 |
walty8/trac | trac/wiki/web_ui.py | 1 | 35037 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import pkg_resources
import re
from genshi.builder import tag
from trac.attachment import AttachmentModule, Attachment
from trac.config import IntOption
from trac.core import *
from trac.mimeview.api import IContentConverter, Mimeview
from trac.perm import IPermissionPolicy, IPermissionRequestor
from trac.resource import *
from trac.search import ISearchSource, search_to_sql, shorten_result
from trac.timeline.api import ITimelineEventProvider
from trac.util import as_int, get_reporter_id
from trac.util.datefmt import from_utimestamp, to_utimestamp
from trac.util.text import shorten_line
from trac.util.translation import _, tag_
from trac.versioncontrol.diff import get_diff_options, diff_blocks
from trac.web.api import HTTPBadRequest, IRequestHandler
from trac.web.chrome import (Chrome, INavigationContributor,
ITemplateProvider, add_ctxtnav, add_link,
add_notice, add_script, add_stylesheet,
add_warning, prevnext_nav, web_context)
from trac.wiki.api import IWikiPageManipulator, WikiSystem, validate_page_name
from trac.wiki.formatter import format_to, OneLinerFormatter
from trac.wiki.model import WikiPage
class WikiModule(Component):
implements(IContentConverter, INavigationContributor,
IPermissionRequestor, IRequestHandler, ITimelineEventProvider,
ISearchSource, ITemplateProvider)
page_manipulators = ExtensionPoint(IWikiPageManipulator)
realm = WikiSystem.realm
max_size = IntOption('wiki', 'max_size', 262144,
"""Maximum allowed wiki page size in characters.""")
default_edit_area_height = IntOption('wiki', 'default_edit_area_height',
20,
"""Default height of the textarea on the wiki edit page.
(//Since 1.1.5//)""")
PAGE_TEMPLATES_PREFIX = 'PageTemplates/'
DEFAULT_PAGE_TEMPLATE = 'DefaultPage'
# IContentConverter methods
def get_supported_conversions(self):
yield ('txt', _("Plain Text"), 'txt', 'text/x-trac-wiki',
'text/plain', 9)
def convert_content(self, req, mimetype, content, key):
return content, 'text/plain;charset=utf-8'
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'wiki'
def get_navigation_items(self, req):
if 'WIKI_VIEW' in req.perm(self.realm, 'WikiStart'):
yield ('mainnav', 'wiki',
tag.a(_("Wiki"), href=req.href.wiki(), accesskey=1))
if 'WIKI_VIEW' in req.perm(self.realm, 'TracGuide'):
yield ('metanav', 'help',
tag.a(_("Help/Guide"), href=req.href.wiki('TracGuide'),
accesskey=6))
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['WIKI_CREATE', 'WIKI_DELETE', 'WIKI_MODIFY', 'WIKI_RENAME',
'WIKI_VIEW']
return actions + [('WIKI_ADMIN', actions)]
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/wiki(?:/(.+))?$', req.path_info)
if match:
if match.group(1):
req.args['page'] = match.group(1)
return 1
def process_request(self, req):
action = req.args.get('action', 'view')
pagename = req.args.get('page', 'WikiStart')
version = req.args.get('version')
old_version = req.args.get('old_version')
if pagename.startswith('/') or pagename.endswith('/') or \
'//' in pagename:
pagename = re.sub(r'/{2,}', '/', pagename.strip('/'))
req.redirect(req.href.wiki(pagename))
if not validate_page_name(pagename):
raise TracError(_("Invalid Wiki page name '%(name)s'",
name=pagename))
if version is not None:
version_as_int = as_int(version, None)
if version_as_int is None:
raise ResourceNotFound(
_('No version "%(num)s" for Wiki page "%(name)s"',
num=version, name=pagename))
version = version_as_int
page = WikiPage(self.env, pagename)
versioned_page = WikiPage(self.env, pagename, version)
req.perm(versioned_page.resource).require('WIKI_VIEW')
if version and versioned_page.version != int(version):
raise ResourceNotFound(
_('No version "%(num)s" for Wiki page "%(name)s"',
num=version, name=page.name))
add_stylesheet(req, 'common/css/wiki.css')
if req.method == 'POST':
if action == 'edit':
if 'cancel' in req.args:
req.redirect(req.href.wiki(page.name))
has_collision = int(version) != page.version
for a in ('preview', 'diff', 'merge'):
if a in req.args:
action = a
break
versioned_page.text = req.args.get('text')
valid = self._validate(req, versioned_page)
if action == 'edit' and not has_collision and valid:
return self._do_save(req, versioned_page)
else:
return self._render_editor(req, page, action,
has_collision)
elif action == 'edit_comment':
self._do_edit_comment(req, versioned_page)
elif action == 'delete':
self._do_delete(req, versioned_page)
elif action == 'rename':
return self._do_rename(req, page)
elif action == 'diff':
style, options, diff_data = get_diff_options(req)
contextall = diff_data['options']['contextall']
req.redirect(req.href.wiki(versioned_page.name, action='diff',
old_version=old_version,
version=version,
contextall=contextall or None))
else:
raise HTTPBadRequest(_("Invalid request arguments."))
elif action == 'delete':
return self._render_confirm_delete(req, page)
elif action == 'rename':
return self._render_confirm_rename(req, page)
elif action == 'edit':
return self._render_editor(req, page)
elif action == 'edit_comment':
return self._render_edit_comment(req, versioned_page)
elif action == 'diff':
return self._render_diff(req, versioned_page)
elif action == 'history':
return self._render_history(req, versioned_page)
else:
format = req.args.get('format')
if format:
Mimeview(self.env).send_converted(req, 'text/x-trac-wiki',
versioned_page.text,
format, versioned_page.name)
return self._render_view(req, versioned_page)
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('trac.wiki', 'templates')]
# Internal methods
def _validate(self, req, page):
valid = True
# Validate page size
if len(req.args.get('text', '')) > self.max_size:
add_warning(req, _("The wiki page is too long (must be less "
"than %(num)s characters)",
num=self.max_size))
valid = False
# Give the manipulators a pass at post-processing the page
for manipulator in self.page_manipulators:
for field, message in manipulator.validate_wiki_page(req, page):
valid = False
if field:
add_warning(req, tag_("The Wiki page field %(field)s"
" is invalid: %(message)s",
field=tag.strong(field),
message=message))
else:
add_warning(req, tag_("Invalid Wiki page: %(message)s",
message=message))
return valid
def _page_data(self, req, page, action=''):
title = get_resource_summary(self.env, page.resource)
if action:
title += ' (%s)' % action
return {'page': page, 'action': action, 'title': title}
def _prepare_diff(self, req, page, old_text, new_text,
old_version, new_version):
diff_style, diff_options, diff_data = get_diff_options(req)
diff_context = 3
for option in diff_options:
if option.startswith('-U'):
diff_context = int(option[2:])
break
if diff_context < 0:
diff_context = None
diffs = diff_blocks(old_text, new_text, context=diff_context,
ignore_blank_lines='-B' in diff_options,
ignore_case='-i' in diff_options,
ignore_space_changes='-b' in diff_options)
def version_info(v, last=0):
return {'path': get_resource_name(self.env, page.resource),
# TRANSLATOR: wiki page
'rev': v or _("currently edited"),
'shortrev': v or last + 1,
'href': req.href.wiki(page.name, version=v)
if v else None}
changes = [{'diffs': diffs, 'props': [],
'new': version_info(new_version, old_version),
'old': version_info(old_version)}]
add_stylesheet(req, 'common/css/diff.css')
add_script(req, 'common/js/diff.js')
return diff_data, changes
def _do_edit_comment(self, req, page):
req.perm(page.resource).require('WIKI_ADMIN')
if 'cancel' in req.args:
req.redirect(req.href.wiki(page.name, action='history'))
new_comment = req.args.get('new_comment')
page.edit_comment(new_comment)
add_notice(req, _("The comment of version %(version)s of the page "
"%(name)s has been updated.",
version=page.version, name=page.name))
req.redirect(req.href.wiki(page.name, action='history'))
def _do_delete(self, req, page):
req.perm(page.resource).require('WIKI_DELETE')
if 'cancel' in req.args:
req.redirect(get_resource_url(self.env, page.resource, req.href))
version = int(req.args.get('version', 0)) or None
old_version = int(req.args.get('old_version', 0)) or version
with self.env.db_transaction as db:
if version and old_version and version > old_version:
# delete from `old_version` exclusive to `version` inclusive:
for v in range(old_version, version):
page.delete(v + 1)
else:
# only delete that `version`, or the whole page if `None`
page.delete(version)
if not page.exists:
add_notice(req, _("The page %(name)s has been deleted.",
name=page.name))
req.redirect(req.href.wiki())
else:
if version and old_version and version > old_version + 1:
add_notice(req, _("The versions %(from_)d to %(to)d of the "
"page %(name)s have been deleted.",
from_=old_version + 1, to=version, name=page.name))
else:
add_notice(req, _("The version %(version)d of the page "
"%(name)s has been deleted.",
version=version, name=page.name))
req.redirect(req.href.wiki(page.name))
def _do_rename(self, req, page):
req.perm(page.resource).require('WIKI_RENAME')
if 'cancel' in req.args:
req.redirect(get_resource_url(self.env, page.resource, req.href))
old_name, old_version = page.name, page.version
new_name = req.args.get('new_name', '')
new_name = re.sub(r'/{2,}', '/', new_name.strip('/'))
redirect = req.args.get('redirect')
# verify input parameters
warn = None
if not new_name:
warn = _("A new name is mandatory for a rename.")
elif not validate_page_name(new_name):
warn = _("The new name is invalid (a name which is separated "
"with slashes cannot be '.' or '..').")
elif new_name == old_name:
warn = _("The new name must be different from the old name.")
elif WikiPage(self.env, new_name).exists:
warn = _("The page %(name)s already exists.", name=new_name)
if warn:
add_warning(req, warn)
return self._render_confirm_rename(req, page, new_name)
with self.env.db_transaction as db:
page.rename(new_name)
if redirect:
redirection = WikiPage(self.env, old_name)
redirection.text = _('See [wiki:"%(name)s"].', name=new_name)
author = get_reporter_id(req)
comment = u'[wiki:"%s@%d" %s] \u2192 [wiki:"%s"].' % (
new_name, old_version, old_name, new_name)
redirection.save(author, comment, req.remote_addr)
add_notice(req, _("The page %(old_name)s has been renamed to "
"%(new_name)s.", old_name=old_name,
new_name=new_name))
if redirect:
add_notice(req, _("The page %(old_name)s has been recreated "
"with a redirect to %(new_name)s.",
old_name=old_name, new_name=new_name))
req.redirect(req.href.wiki(old_name if redirect else new_name))
def _do_save(self, req, page):
if not page.exists:
req.perm(page.resource).require('WIKI_CREATE')
else:
req.perm(page.resource).require('WIKI_MODIFY')
if 'WIKI_ADMIN' in req.perm(page.resource):
# Modify the read-only flag if it has been changed and the user is
# WIKI_ADMIN
page.readonly = int('readonly' in req.args)
try:
page.save(get_reporter_id(req, 'author'), req.args.get('comment'),
req.remote_addr)
href = req.href.wiki(page.name, action='diff',
version=page.version)
add_notice(req, tag_("Your changes have been saved in version "
"%(version)s (%(diff)s).",
version=page.version,
diff=tag.a(_("diff"), href=href)))
req.redirect(get_resource_url(self.env, page.resource, req.href,
version=None))
except TracError:
add_warning(req, _("Page not modified, showing latest version."))
return self._render_view(req, page)
def _render_confirm_delete(self, req, page):
req.perm(page.resource).require('WIKI_DELETE')
version = None
if 'delete_version' in req.args:
version = int(req.args.get('version', 0))
old_version = req.args.getint('old_version', version)
what = 'multiple' if version and old_version \
and version - old_version > 1 \
else 'single' if version else 'page'
num_versions = 0
new_date = None
old_date = None
for v, t, author, comment, ipnr in page.get_history():
if (v <= version or what == 'page') and new_date is None:
new_date = t
if (v <= old_version and what == 'multiple' or
num_versions > 1 and what == 'single'):
break
num_versions += 1
old_date = t
data = self._page_data(req, page, 'delete')
attachments = Attachment.select(self.env, self.realm, page.name)
data.update({
'what': what, 'new_version': None, 'old_version': None,
'num_versions': num_versions, 'new_date': new_date,
'old_date': old_date, 'attachments': list(attachments),
})
if version is not None:
data.update({'new_version': version, 'old_version': old_version})
self._wiki_ctxtnav(req, page)
return 'wiki_delete.html', data, None
def _render_confirm_rename(self, req, page, new_name=None):
req.perm(page.resource).require('WIKI_RENAME')
data = self._page_data(req, page, 'rename')
data['new_name'] = new_name if new_name is not None else page.name
self._wiki_ctxtnav(req, page)
return 'wiki_rename.html', data, None
def _render_diff(self, req, page):
if not page.exists:
raise TracError(_("Version %(num)s of page \"%(name)s\" does not "
"exist",
num=req.args.get('version'), name=page.name))
old_version = req.args.get('old_version')
if old_version:
old_version = int(old_version)
if old_version == page.version:
old_version = None
elif old_version > page.version:
# FIXME: what about reverse diffs?
old_version = page.resource.version
page = WikiPage(self.env, page.name, old_version)
req.perm(page.resource).require('WIKI_VIEW')
latest_page = WikiPage(self.env, page.name)
req.perm(latest_page.resource).require('WIKI_VIEW')
new_version = int(page.version)
date = author = comment = ipnr = None
num_changes = 0
prev_version = next_version = None
for version, t, a, c, i in latest_page.get_history():
if version == new_version:
date = t
author = a or 'anonymous'
comment = c or '--'
ipnr = i or ''
else:
if version < new_version:
num_changes += 1
if not prev_version:
prev_version = version
if old_version is None or version == old_version:
old_version = version
break
else:
next_version = version
if not old_version:
old_version = 0
old_page = WikiPage(self.env, page.name, old_version)
req.perm(old_page.resource).require('WIKI_VIEW')
# -- text diffs
old_text = old_page.text.splitlines()
new_text = page.text.splitlines()
diff_data, changes = self._prepare_diff(req, page, old_text, new_text,
old_version, new_version)
# -- prev/up/next links
if prev_version:
add_link(req, 'prev', req.href.wiki(page.name, action='diff',
version=prev_version),
_("Version %(num)s", num=prev_version))
add_link(req, 'up', req.href.wiki(page.name, action='history'),
_('Page history'))
if next_version:
add_link(req, 'next', req.href.wiki(page.name, action='diff',
version=next_version),
_("Version %(num)s", num=next_version))
data = self._page_data(req, page, 'diff')
data.update({
'change': {'date': date, 'author': author, 'ipnr': ipnr,
'comment': comment},
'new_version': new_version, 'old_version': old_version,
'latest_version': latest_page.version,
'num_changes': num_changes,
'longcol': 'Version', 'shortcol': 'v',
'changes': changes,
'diff': diff_data,
})
prevnext_nav(req, _("Previous Change"), _("Next Change"),
_("Wiki History"))
return 'wiki_diff.html', data, None
def _render_editor(self, req, page, action='edit', has_collision=False):
if has_collision:
if action == 'merge':
page = WikiPage(self.env, page.name)
req.perm(page.resource).require('WIKI_VIEW')
else:
action = 'collision'
if not page.exists:
req.perm(page.resource).require('WIKI_CREATE')
else:
req.perm(page.resource).require('WIKI_MODIFY')
original_text = page.text
comment = req.args.get('comment', '')
if 'text' in req.args:
page.text = req.args.get('text')
elif 'template' in req.args:
template = self.PAGE_TEMPLATES_PREFIX + req.args.get('template')
template_page = WikiPage(self.env, template)
if template_page and template_page.exists and \
'WIKI_VIEW' in req.perm(template_page.resource):
page.text = template_page.text
elif 'version' in req.args:
old_page = WikiPage(self.env, page.name, int(req.args['version']))
req.perm(page.resource).require('WIKI_VIEW')
page.text = old_page.text
comment = _("Reverted to version %(version)s.",
version=req.args['version'])
if action in ('preview', 'diff'):
page.readonly = 'readonly' in req.args
author = get_reporter_id(req, 'author')
defaults = {'editrows': str(self.default_edit_area_height)}
prefs = dict((key, req.session.get('wiki_%s' % key, defaults.get(key)))
for key in ('editrows', 'sidebyside'))
if 'from_editor' in req.args:
sidebyside = req.args.get('sidebyside') or None
if sidebyside != prefs['sidebyside']:
req.session.set('wiki_sidebyside', int(bool(sidebyside)), 0)
else:
sidebyside = prefs['sidebyside']
if sidebyside:
editrows = max(int(prefs['editrows']),
len(page.text.splitlines()) + 1)
else:
editrows = req.args.get('editrows')
if editrows:
if editrows != prefs['editrows']:
req.session.set('wiki_editrows', editrows,
defaults['editrows'])
else:
editrows = prefs['editrows']
data = self._page_data(req, page, action)
context = web_context(req, page.resource)
data.update({
'context': context,
'author': author,
'comment': comment,
'edit_rows': editrows,
'sidebyside': sidebyside,
'scroll_bar_pos': req.args.get('scroll_bar_pos', ''),
'diff': None,
'attachments': AttachmentModule(self.env).attachment_data(context),
'show_readonly_checkbox': ReadonlyWikiPolicy.__name__ in
self.config.get('trac',
'permission_policies')
})
if action in ('diff', 'merge'):
old_text = original_text.splitlines() if original_text else []
new_text = page.text.splitlines() if page.text else []
diff_data, changes = self._prepare_diff(
req, page, old_text, new_text, page.version, '')
data.update({'diff': diff_data, 'changes': changes,
'action': 'preview', 'merge': action == 'merge',
'longcol': 'Version', 'shortcol': 'v'})
elif sidebyside and action != 'collision':
data['action'] = 'preview'
self._wiki_ctxtnav(req, page)
Chrome(self.env).add_wiki_toolbars(req)
Chrome(self.env).add_auto_preview(req)
add_script(req, 'common/js/folding.js')
return 'wiki_edit.html', data, None
def _render_edit_comment(self, req, page):
req.perm(page.resource).require('WIKI_ADMIN')
data = self._page_data(req, page, 'edit_comment')
self._wiki_ctxtnav(req, page)
return 'wiki_edit_comment.html', data, None
def _render_history(self, req, page):
"""Extract the complete history for a given page.
This information is used to present a changelog/history for a given
page.
"""
if not page.exists:
raise TracError(_("Page %(name)s does not exist", name=page.name))
data = self._page_data(req, page, 'history')
history = []
for version, date, author, comment, ipnr in page.get_history():
history.append({
'version': version,
'date': date,
'author': author,
'comment': comment,
'ipnr': ipnr
})
data.update({
'history': history,
'resource': page.resource,
'can_edit_comment': 'WIKI_ADMIN' in req.perm(page.resource)
})
add_ctxtnav(req, _("Back to %(wikipage)s", wikipage=page.name),
req.href.wiki(page.name))
return 'history_view.html', data, None
def _render_view(self, req, page):
version = page.resource.version
# Add registered converters
if page.exists:
for conversion in Mimeview(self.env) \
.get_supported_conversions('text/x-trac-wiki'):
conversion_href = req.href.wiki(page.name, version=version,
format=conversion.key)
add_link(req, 'alternate', conversion_href, conversion.name,
conversion.in_mimetype)
data = self._page_data(req, page)
if page.name == 'WikiStart':
data['title'] = ''
ws = WikiSystem(self.env)
context = web_context(req, page.resource)
higher, related = [], []
if not page.exists:
if 'WIKI_CREATE' not in req.perm(page.resource):
raise ResourceNotFound(_("Page %(name)s not found",
name=page.name))
formatter = OneLinerFormatter(self.env, context)
if '/' in page.name:
parts = page.name.split('/')
for i in range(len(parts) - 2, -1, -1):
name = '/'.join(parts[:i] + [parts[-1]])
if not ws.has_page(name):
higher.append(ws._format_link(formatter, 'wiki',
'/' + name, name, False))
else:
name = page.name
name = name.lower()
related = [each for each in ws.pages
if name in each.lower()
and 'WIKI_VIEW' in req.perm(self.realm, each)]
related.sort()
related = [ws._format_link(formatter, 'wiki', '/' + each, each,
False)
for each in related]
latest_page = WikiPage(self.env, page.name)
prev_version = next_version = None
if version:
try:
version = int(version)
for hist in latest_page.get_history():
v = hist[0]
if v != version:
if v < version:
if not prev_version:
prev_version = v
break
else:
next_version = v
except ValueError:
version = None
prefix = self.PAGE_TEMPLATES_PREFIX
templates = [template[len(prefix):]
for template in ws.get_pages(prefix)
if 'WIKI_VIEW' in req.perm(self.realm, template)]
# -- prev/up/next links
if prev_version:
add_link(req, 'prev',
req.href.wiki(page.name, version=prev_version),
_("Version %(num)s", num=prev_version))
parent = None
if version:
add_link(req, 'up', req.href.wiki(page.name, version=None),
_("View latest version"))
elif '/' in page.name:
parent = page.name[:page.name.rindex('/')]
add_link(req, 'up', req.href.wiki(parent, version=None),
_("View parent page"))
if next_version:
add_link(req, 'next',
req.href.wiki(page.name, version=next_version),
_('Version %(num)s', num=next_version))
# Add ctxtnav entries
if version:
prevnext_nav(req, _("Previous Version"), _("Next Version"),
_("View Latest Version"))
else:
if parent:
add_ctxtnav(req, _('Up'), req.href.wiki(parent))
self._wiki_ctxtnav(req, page)
# Plugin content validation
fields = {'text': page.text}
for manipulator in self.page_manipulators:
manipulator.prepare_wiki_page(req, page, fields)
text = fields.get('text', '')
data.update({
'context': context,
'text': text,
'latest_version': latest_page.version,
'attachments': AttachmentModule(self.env).attachment_data(context),
'default_template': self.DEFAULT_PAGE_TEMPLATE,
'templates': templates,
'version': version,
'higher': higher, 'related': related,
'resourcepath_template': 'wiki_page_path.html',
})
add_script(req, 'common/js/folding.js')
return 'wiki_view.html', data, None
def _wiki_ctxtnav(self, req, page):
"""Add the normal wiki ctxtnav entries."""
add_ctxtnav(req, _("Start Page"), req.href.wiki('WikiStart'))
add_ctxtnav(req, _("Index"), req.href.wiki('TitleIndex'))
if page.exists:
add_ctxtnav(req, _("History"), req.href.wiki(page.name,
action='history'))
# ITimelineEventProvider methods
def get_timeline_filters(self, req):
if 'WIKI_VIEW' in req.perm:
yield ('wiki', _('Wiki changes'))
def get_timeline_events(self, req, start, stop, filters):
if 'wiki' in filters:
wiki_realm = Resource(self.realm)
for ts, name, comment, author, version in self.env.db_query("""
SELECT time, name, comment, author, version FROM wiki
WHERE time>=%s AND time<=%s
""", (to_utimestamp(start), to_utimestamp(stop))):
wiki_page = wiki_realm(id=name, version=version)
if 'WIKI_VIEW' not in req.perm(wiki_page):
continue
yield ('wiki', from_utimestamp(ts), author,
(wiki_page, comment))
# Attachments
for event in AttachmentModule(self.env).get_timeline_events(
req, wiki_realm, start, stop):
yield event
def render_timeline_event(self, context, field, event):
wiki_page, comment = event[3]
if field == 'url':
return context.href.wiki(wiki_page.id, version=wiki_page.version)
elif field == 'title':
name = tag.em(get_resource_name(self.env, wiki_page))
if wiki_page.version > 1:
return tag_("%(page)s edited", page=name)
else:
return tag_("%(page)s created", page=name)
elif field == 'description':
markup = format_to(self.env, None,
context.child(resource=wiki_page), comment)
if wiki_page.version > 1:
diff_href = context.href.wiki(
wiki_page.id, version=wiki_page.version, action='diff')
markup = tag(markup,
" (", tag.a(_("diff"), href=diff_href), ")")
return markup
# ISearchSource methods
def get_search_filters(self, req):
if 'WIKI_VIEW' in req.perm:
yield ('wiki', _('Wiki'))
def get_search_results(self, req, terms, filters):
        if 'wiki' not in filters:
return
with self.env.db_query as db:
sql_query, args = search_to_sql(db, ['w1.name', 'w1.author',
'w1.text'], terms)
wiki_realm = Resource(self.realm)
for name, ts, author, text in db("""
SELECT w1.name, w1.time, w1.author, w1.text
FROM wiki w1,(SELECT name, max(version) AS ver
FROM wiki GROUP BY name) w2
WHERE w1.version = w2.ver AND w1.name = w2.name
AND """ + sql_query, args):
page = wiki_realm(id=name)
if 'WIKI_VIEW' in req.perm(page):
yield (get_resource_url(self.env, page, req.href),
'%s: %s' % (name, shorten_line(text)),
from_utimestamp(ts), author,
shorten_result(text, terms))
# Attachments
for result in AttachmentModule(self.env).get_search_results(
req, wiki_realm, terms):
yield result
class ReadonlyWikiPolicy(Component):
"""Permission policy for the wiki that enforces the read-only attribute
for wiki pages."""
implements(IPermissionPolicy)
realm = WikiSystem.realm
# IPermissionPolicy methods
def check_permission(self, action, username, resource, perm):
if resource and resource.realm == self.realm and \
action in ('WIKI_DELETE', 'WIKI_MODIFY', 'WIKI_RENAME'):
page = WikiPage(self.env, resource)
if page.readonly and 'WIKI_ADMIN' not in perm(resource):
return False
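# Illustrative sketch (not part of this module): enabling the policy in
# trac.ini so the read-only flag is enforced. The other policy names shown
# are the usual Trac defaults, included here only as an example.
#
#     [trac]
#     permission_policies = ReadonlyWikiPolicy,
#                           DefaultPermissionPolicy,
#                           LegacyAttachmentPolicy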
| bsd-3-clause |
map222/spark | examples/src/main/python/ml/one_vs_rest_example.py | 71 | 2237 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.classification import LogisticRegression, OneVsRest
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
"""
An example of Multiclass to Binary Reduction with One Vs Rest,
using Logistic Regression as the base classifier.
Run with:
bin/spark-submit examples/src/main/python/ml/one_vs_rest_example.py
"""
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("OneVsRestExample") \
.getOrCreate()
# $example on$
# load data file.
inputData = spark.read.format("libsvm") \
.load("data/mllib/sample_multiclass_classification_data.txt")
# generate the train/test split.
(train, test) = inputData.randomSplit([0.8, 0.2])
# instantiate the base classifier.
lr = LogisticRegression(maxIter=10, tol=1E-6, fitIntercept=True)
# instantiate the One Vs Rest Classifier.
ovr = OneVsRest(classifier=lr)
# train the multiclass model.
ovrModel = ovr.fit(train)
# score the model on test data.
predictions = ovrModel.transform(test)
# obtain evaluator.
evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
# compute the classification error on test data.
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g" % (1.0 - accuracy))
# $example off$
spark.stop()
| apache-2.0 |
labcodes/django | django/contrib/gis/sitemaps/views.py | 101 | 2313 | from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.functions import AsKML, Transform
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.core.exceptions import FieldDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connections
from django.http import Http404
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The field name must be that of a geographic field.
"""
placemarks = []
try:
klass = apps.get_model(label, model)
except LookupError:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
field = klass._meta.get_field(field_name)
if not isinstance(field, GeometryField):
raise FieldDoesNotExist
except FieldDoesNotExist:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.features.has_AsKML_function:
# Database will take care of transformation.
placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name))
else:
# If the database offers no KML method, we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.features.has_Transform_function:
qs = klass._default_manager.using(using).annotate(
**{'%s_4326' % field_name: Transform(field_name, 4326)})
field_name += '_4326'
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
mod.kml = getattr(mod, field_name).kml
placemarks.append(mod)
# Getting the render function and rendering to the correct.
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places': placemarks})
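# Illustrative sketch (not part of this module): wiring these views into a
# URLconf. The app label, model name, geometry field and URL pattern are
# hypothetical placeholders.
#
#     from django.conf.urls import url
#     from django.contrib.gis.sitemaps import views as gis_views
#
#     urlpatterns = [
#         url(r'^locations\.kml$', gis_views.kml,
#             {'label': 'world', 'model': 'city', 'field_name': 'point'}),
#         url(r'^locations\.kmz$', gis_views.kmz,
#             {'label': 'world', 'model': 'city', 'field_name': 'point'}),
#     ]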
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
Return KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
| bsd-3-clause |
neerajvashistha/pa-dude | lib/python2.7/site-packages/django/db/migrations/optimizer.py | 339 | 15773 | from __future__ import unicode_literals
from django.db.migrations import (
AddField, AlterField, AlterIndexTogether, AlterModelTable,
AlterOrderWithRespectTo, AlterUniqueTogether, CreateModel, DeleteModel,
RemoveField, RenameField, RenameModel,
)
from django.utils import six
class MigrationOptimizer(object):
"""
Powers the optimization process, where you provide a list of Operations
    and you are returned a list of equal or shorter length; operations
are merged into one if possible.
For example, a CreateModel and an AddField can be optimized into a
new CreateModel, and CreateModel and DeleteModel can be optimized into
nothing.
"""
def __init__(self):
self.model_level_operations = (
CreateModel,
AlterModelTable,
AlterUniqueTogether,
AlterIndexTogether,
AlterOrderWithRespectTo,
)
self.field_level_operations = (
AddField,
AlterField,
)
self.reduce_methods = {
# (model operation, model operation)
(CreateModel, DeleteModel): self.reduce_create_model_delete_model,
(CreateModel, RenameModel): self.reduce_create_model_rename_model,
(RenameModel, RenameModel): self.reduce_rename_model_rename_model,
(AlterIndexTogether, AlterIndexTogether): self.reduce_alter_model_alter_model,
(AlterModelTable, AlterModelTable): self.reduce_alter_model_alter_model,
(AlterOrderWithRespectTo, AlterOrderWithRespectTo): self.reduce_alter_model_alter_model,
(AlterUniqueTogether, AlterUniqueTogether): self.reduce_alter_model_alter_model,
(AlterIndexTogether, DeleteModel): self.reduce_alter_model_delete_model,
(AlterModelTable, DeleteModel): self.reduce_alter_model_delete_model,
(AlterOrderWithRespectTo, DeleteModel): self.reduce_alter_model_delete_model,
(AlterUniqueTogether, DeleteModel): self.reduce_alter_model_delete_model,
# (model operation, field operation)
(CreateModel, AddField): self.reduce_create_model_add_field,
(CreateModel, AlterField): self.reduce_create_model_alter_field,
(CreateModel, RemoveField): self.reduce_create_model_remove_field,
(CreateModel, RenameField): self.reduce_create_model_rename_field,
(AlterIndexTogether, AddField): self.reduce_alter_model_addalterremove_field,
(AlterIndexTogether, AlterField): self.reduce_alter_model_addalterremove_field,
(AlterIndexTogether, RemoveField): self.reduce_alter_model_addalterremove_field,
(AlterOrderWithRespectTo, AddField): self.reduce_alter_model_addalterremove_field,
(AlterOrderWithRespectTo, AlterField): self.reduce_alter_model_addalterremove_field,
(AlterOrderWithRespectTo, RemoveField): self.reduce_alter_model_addalterremove_field,
(AlterUniqueTogether, AddField): self.reduce_alter_model_addalterremove_field,
(AlterUniqueTogether, AlterField): self.reduce_alter_model_addalterremove_field,
(AlterUniqueTogether, RemoveField): self.reduce_alter_model_addalterremove_field,
(AlterIndexTogether, RenameField): self.reduce_alter_model_rename_field,
(AlterOrderWithRespectTo, RenameField): self.reduce_alter_model_rename_field,
(AlterUniqueTogether, RenameField): self.reduce_alter_model_rename_field,
# (field operation, field operation)
(AddField, AlterField): self.reduce_add_field_alter_field,
(AddField, RemoveField): self.reduce_add_field_remove_field,
(AddField, RenameField): self.reduce_add_field_rename_field,
(AlterField, RemoveField): self.reduce_alter_field_remove_field,
(AlterField, RenameField): self.reduce_alter_field_rename_field,
(RenameField, RenameField): self.reduce_rename_field_rename_field,
}
def optimize(self, operations, app_label=None):
"""
Main optimization entry point. Pass in a list of Operation instances,
get out a new list of Operation instances.
Unfortunately, due to the scope of the optimization (two combinable
operations might be separated by several hundred others), this can't be
done as a peephole optimization with checks/output implemented on
the Operations themselves; instead, the optimizer looks at each
individual operation and scans forwards in the list to see if there
are any matches, stopping at boundaries - operations which can't
be optimized over (RunSQL, operations on the same field/model, etc.)
The inner loop is run until the starting list is the same as the result
list, and then the result is returned. This means that operation
optimization must be stable and always return an equal or shorter list.
The app_label argument is optional, but if you pass it you'll get more
efficient optimization.
"""
# Internal tracking variable for test assertions about # of loops
self._iterations = 0
while True:
result = self.optimize_inner(operations, app_label)
self._iterations += 1
if result == operations:
return result
operations = result
def optimize_inner(self, operations, app_label=None):
"""
Inner optimization loop.
"""
new_operations = []
for i, operation in enumerate(operations):
# Compare it to each operation after it
for j, other in enumerate(operations[i + 1:]):
result = self.reduce(operation, other, operations[i + 1:i + j + 1])
if result is not None:
# Optimize! Add result, then remaining others, then return
new_operations.extend(result)
new_operations.extend(operations[i + 1:i + 1 + j])
new_operations.extend(operations[i + j + 2:])
return new_operations
if not self.can_optimize_through(operation, other, app_label):
new_operations.append(operation)
break
else:
new_operations.append(operation)
return new_operations
# REDUCTION
def reduce(self, operation, other, in_between=None):
"""
Either returns a list of zero, one or two operations,
or None, meaning this pair cannot be optimized.
"""
method = self.reduce_methods.get((type(operation), type(other)))
if method:
return method(operation, other, in_between or [])
return None
def model_to_key(self, model):
"""
Takes either a model class or an "appname.ModelName" string
and returns (appname, modelname).
"""
if isinstance(model, six.string_types):
return model.split(".", 1)
else:
return (
model._meta.app_label,
model._meta.object_name,
)
# REDUCE METHODS: (MODEL OPERATION, MODEL OPERATION)
def reduce_create_model_delete_model(self, operation, other, in_between):
"""
Folds a CreateModel and a DeleteModel into nothing.
"""
if (operation.name_lower == other.name_lower and
not operation.options.get("proxy", False)):
return []
def reduce_create_model_rename_model(self, operation, other, in_between):
"""
Folds a model rename into its create
"""
if operation.name_lower == other.old_name_lower:
return [
CreateModel(
other.new_name,
fields=operation.fields,
options=operation.options,
bases=operation.bases,
managers=operation.managers,
)
]
def reduce_rename_model_rename_model(self, operation, other, in_between):
"""
Folds a model rename into another one
"""
if operation.new_name_lower == other.old_name_lower:
return [
RenameModel(
operation.old_name,
other.new_name,
)
]
def reduce_alter_model_alter_model(self, operation, other, in_between):
"""
Folds two AlterModelTable, AlterFooTogether, or AlterOrderWithRespectTo
operations into the latter.
"""
if operation.name_lower == other.name_lower:
return [other]
def reduce_alter_model_delete_model(self, operation, other, in_between):
"""
Folds an AlterModelSomething and a DeleteModel into just delete.
"""
if operation.name_lower == other.name_lower:
return [other]
# REDUCE METHODS: (MODEL OPERATION, FIELD OPERATION)
def reduce_create_model_add_field(self, operation, other, in_between):
if operation.name_lower == other.model_name_lower:
# Don't allow optimizations of FKs through models they reference
if hasattr(other.field, "remote_field") and other.field.remote_field:
for between in in_between:
# Check that it doesn't point to the model
app_label, object_name = self.model_to_key(other.field.remote_field.model)
if between.references_model(object_name, app_label):
return None
# Check that it's not through the model
if getattr(other.field.remote_field, "through", None):
app_label, object_name = self.model_to_key(other.field.remote_field.through)
if between.references_model(object_name, app_label):
return None
# OK, that's fine
return [
CreateModel(
operation.name,
fields=operation.fields + [(other.name, other.field)],
options=operation.options,
bases=operation.bases,
managers=operation.managers,
)
]
def reduce_create_model_alter_field(self, operation, other, in_between):
if operation.name_lower == other.model_name_lower:
return [
CreateModel(
operation.name,
fields=[
(n, other.field if n == other.name else v)
for n, v in operation.fields
],
options=operation.options,
bases=operation.bases,
managers=operation.managers,
)
]
def reduce_create_model_remove_field(self, operation, other, in_between):
if operation.name_lower == other.model_name_lower:
return [
CreateModel(
operation.name,
fields=[
(n, v)
for n, v in operation.fields
if n.lower() != other.name_lower
],
options=operation.options,
bases=operation.bases,
managers=operation.managers,
)
]
def reduce_create_model_rename_field(self, operation, other, in_between):
if operation.name_lower == other.model_name_lower:
return [
CreateModel(
operation.name,
fields=[
(other.new_name if n == other.old_name else n, v)
for n, v in operation.fields
],
options=operation.options,
bases=operation.bases,
managers=operation.managers,
)
]
def reduce_alter_model_addalterremove_field(self, operation, other, in_between):
if (operation.name_lower == other.model_name_lower and
not operation.references_field(other.model_name, other.name)):
return [other, operation]
def reduce_alter_model_rename_field(self, operation, other, in_between):
if (operation.name_lower == other.model_name_lower and
not operation.references_field(other.model_name, other.old_name)):
return [other, operation]
# REDUCE METHODS: (FIELD OPERATION, FIELD OPERATION)
def reduce_add_field_alter_field(self, operation, other, in_between):
if (operation.model_name_lower == other.model_name_lower and
operation.name_lower == other.name_lower):
return [
AddField(
model_name=operation.model_name,
name=operation.name,
field=other.field,
)
]
def reduce_add_field_remove_field(self, operation, other, in_between):
if (operation.model_name_lower == other.model_name_lower and
operation.name_lower == other.name_lower):
return []
def reduce_add_field_rename_field(self, operation, other, in_between):
if (operation.model_name_lower == other.model_name_lower and
operation.name_lower == other.old_name_lower):
return [
AddField(
model_name=operation.model_name,
name=other.new_name,
field=operation.field,
)
]
def reduce_alter_field_remove_field(self, operation, other, in_between):
if (operation.model_name_lower == other.model_name_lower and
operation.name_lower == other.name_lower):
return [other]
def reduce_alter_field_rename_field(self, operation, other, in_between):
if (operation.model_name_lower == other.model_name_lower and
operation.name_lower == other.old_name_lower):
return [
other,
AlterField(
model_name=operation.model_name,
name=other.new_name,
field=operation.field,
),
]
def reduce_rename_field_rename_field(self, operation, other, in_between):
if (operation.model_name_lower == other.model_name_lower and
operation.new_name_lower == other.old_name_lower):
return [
RenameField(
operation.model_name,
operation.old_name,
other.new_name,
),
]
# THROUGH CHECKS
def can_optimize_through(self, operation, other, app_label=None):
"""
Returns True if it's possible to optimize 'operation' with something
on the other side of 'other'. This is possible if, for example, they
affect different models.
"""
# If it's a model level operation, let it through if there's
# nothing that looks like a reference to us in 'other'.
if isinstance(operation, self.model_level_operations):
if not other.references_model(operation.name, app_label):
return True
# If it's field level, only let it through things that don't reference
# the field (which includes not referencing the model)
if isinstance(operation, self.field_level_operations):
if not other.references_field(operation.model_name, operation.name, app_label):
return True
return False
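# --- Editor's sketch (not part of the original module): a minimal
# demonstration of the optimizer folding a CreateModel + AddField pair into a
# single CreateModel. The model and field names are assumptions for
# illustration only.
if __name__ == "__main__":
    from django.db import models
    from django.db.migrations import AddField, CreateModel
    operations = [
        CreateModel("Author", fields=[("name", models.CharField(max_length=100))]),
        AddField("Author", "age", models.IntegerField(null=True)),
    ]
    optimized = MigrationOptimizer().optimize(operations, app_label="library")
    # optimized should now hold a single CreateModel carrying both fields.
    print(optimized)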
| mit |
spiiph/owls-hep | owls_hep/counting.py | 1 | 2022 | """Provides method for efficiently counting events in a region.
"""
# System imports
from uuid import uuid4
# owls-cache imports
from owls_cache.persistent import cached as persistently_cached
# owls-parallel imports
from owls_parallel import parallelized
# owls-hep imports
from owls_hep.calculation import Calculation
from owls_hep.utility import make_selection, integral, create_histogram
@parallelized(lambda p, r: 1.0, lambda p, r: (p, r))
@persistently_cached('owls_hep.counting._count', lambda p, r: (p, r))
def _count(process, region):
"""Computes the weighted event count of a process in a region.
Args:
process: The process whose events should be counted
region: The region whose weighting/selection should be applied
Returns:
The weighted event count in the region.
"""
# Create a unique name for the histogram
name = uuid4().hex
# Create the selection
selection = make_selection(process, region)
# Create the expression string and specify which histogram to fill
expression = '1>>{0}'.format(name)
# Create the bare histogram
h = create_histogram(1, name, ((1, 0.5, 1.5),))
# Load the chain
chain = process.load()
chain.Draw(expression, selection)
# Return the count as the integral of the histogram, including overflow
# bins
return integral(h, include_overflow=True)
class Count(Calculation):
"""A counting calculation.
Although the need should not generally arise to subclass Count, all
subclasses must return a floating point value for their result.
"""
def __call__(self, process, region):
"""Counts the number of weighted events passing a region's selection.
Args:
process: The process whose weighted events should be counted
region: The region providing selection/weighting for the count
Returns:
The number of weighted events passing the region's selection.
"""
return _count(process, region)
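# --- Editor's sketch (not part of the original module): the intended
# invocation, assuming a process and region configured elsewhere
# (hypothetical names):
#
#     count = Count()
#     n_events = count(my_process, my_region)  # float, overflow included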
| mit |
erkrishna9/odoo | addons/hr_payroll/hr_payroll.py | 12 | 49751 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import date
from datetime import datetime
from datetime import timedelta
from dateutil import relativedelta
from openerp import api, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.tools.safe_eval import safe_eval as eval
class hr_payroll_structure(osv.osv):
"""
Salary structure used to define:
- Basic
- Allowances
- Deductions
"""
_name = 'hr.payroll.structure'
_description = 'Salary Structure'
_columns = {
'name':fields.char('Name', required=True),
'code':fields.char('Reference', size=64, required=True),
'company_id':fields.many2one('res.company', 'Company', required=True, copy=False),
'note': fields.text('Description'),
'parent_id':fields.many2one('hr.payroll.structure', 'Parent'),
'children_ids':fields.one2many('hr.payroll.structure', 'parent_id', 'Children', copy=True),
'rule_ids':fields.many2many('hr.salary.rule', 'hr_structure_salary_rule_rel', 'struct_id', 'rule_id', 'Salary Rules'),
}
def _get_parent(self, cr, uid, context=None):
obj_model = self.pool.get('ir.model.data')
res = False
data_id = obj_model.search(cr, uid, [('model', '=', 'hr.payroll.structure'), ('name', '=', 'structure_base')])
if data_id:
res = obj_model.browse(cr, uid, data_id[0], context=context).res_id
return res
_defaults = {
'company_id': lambda self, cr, uid, context: \
self.pool.get('res.users').browse(cr, uid, uid,
context=context).company_id.id,
'parent_id': _get_parent,
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create a recursive Salary Structure.', ['parent_id'])
]
def copy(self, cr, uid, id, default=None, context=None):
default = dict(default or {},
code=_("%s (copy)") % (self.browse(cr, uid, id, context=context).code))
return super(hr_payroll_structure, self).copy(cr, uid, id, default, context=context)
@api.cr_uid_ids_context
def get_all_rules(self, cr, uid, structure_ids, context=None):
"""
@param structure_ids: list of structure ids
@return: returns a list of tuples (id, sequence) of the rules that may have to be applied
"""
all_rules = []
for struct in self.browse(cr, uid, structure_ids, context=context):
all_rules += self.pool.get('hr.salary.rule')._recursive_search_of_rules(cr, uid, struct.rule_ids, context=context)
return all_rules
@api.cr_uid_ids_context
def _get_parent_structure(self, cr, uid, struct_ids, context=None):
if not struct_ids:
return []
parent = []
for struct in self.browse(cr, uid, struct_ids, context=context):
if struct.parent_id:
parent.append(struct.parent_id.id)
if parent:
parent = self._get_parent_structure(cr, uid, parent, context)
return parent + struct_ids
class hr_contract(osv.osv):
"""
Employee contract, based on the visa and work permits;
allows configuring different salary structures
"""
_inherit = 'hr.contract'
_description = 'Employee Contract'
_columns = {
'struct_id': fields.many2one('hr.payroll.structure', 'Salary Structure'),
'schedule_pay': fields.selection([
('monthly', 'Monthly'),
('quarterly', 'Quarterly'),
('semi-annually', 'Semi-annually'),
('annually', 'Annually'),
('weekly', 'Weekly'),
('bi-weekly', 'Bi-weekly'),
('bi-monthly', 'Bi-monthly'),
], 'Scheduled Pay', select=True),
}
_defaults = {
'schedule_pay': 'monthly',
}
@api.cr_uid_ids_context
def get_all_structures(self, cr, uid, contract_ids, context=None):
"""
@param contract_ids: list of contract ids
@return: the structures linked to the given contracts, ordered by hierarchy (parent=False first, then first-level children and so on) and without duplicates
"""
structure_ids = [contract.struct_id.id for contract in self.browse(cr, uid, contract_ids, context=context) if contract.struct_id]
if not structure_ids:
return []
return list(set(self.pool.get('hr.payroll.structure')._get_parent_structure(cr, uid, structure_ids, context=context)))
class contrib_register(osv.osv):
'''
Contribution Register
'''
_name = 'hr.contribution.register'
_description = 'Contribution Register'
_columns = {
'company_id':fields.many2one('res.company', 'Company'),
'partner_id':fields.many2one('res.partner', 'Partner'),
'name':fields.char('Name', required=True, readonly=False),
'register_line_ids':fields.one2many('hr.payslip.line', 'register_id', 'Register Line', readonly=True),
'note': fields.text('Description'),
}
_defaults = {
'company_id': lambda self, cr, uid, context: \
self.pool.get('res.users').browse(cr, uid, uid,
context=context).company_id.id,
}
class hr_salary_rule_category(osv.osv):
"""
HR Salary Rule Category
"""
_name = 'hr.salary.rule.category'
_description = 'Salary Rule Category'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'code':fields.char('Code', size=64, required=True, readonly=False),
'parent_id':fields.many2one('hr.salary.rule.category', 'Parent', help="Linking a salary category to its parent is used only for reporting purposes."),
'children_ids': fields.one2many('hr.salary.rule.category', 'parent_id', 'Children'),
'note': fields.text('Description'),
'company_id':fields.many2one('res.company', 'Company', required=False),
}
_defaults = {
'company_id': lambda self, cr, uid, context: \
self.pool.get('res.users').browse(cr, uid, uid,
context=context).company_id.id,
}
class one2many_mod2(fields.one2many):
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if context is None:
context = {}
if not values:
values = {}
res = {}
for id in ids:
res[id] = []
ids2 = obj.pool[self._obj].search(cr, user, [(self._fields_id,'in',ids), ('appears_on_payslip', '=', True)], limit=self._limit)
for r in obj.pool[self._obj].read(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
key = r[self._fields_id]
if isinstance(key, tuple):
# read() returns a tuple when the field is a many2one,
# but we only want the id of that field.
key = key[0]
res[key].append( r['id'] )
return res
class hr_payslip_run(osv.osv):
_name = 'hr.payslip.run'
_description = 'Payslip Batches'
_columns = {
'name': fields.char('Name', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'slip_ids': fields.one2many('hr.payslip', 'payslip_run_id', 'Payslips', required=False, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft', 'Draft'),
('close', 'Close'),
], 'Status', select=True, readonly=True, copy=False),
'date_start': fields.date('Date From', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_end': fields.date('Date To', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'credit_note': fields.boolean('Credit Note', readonly=True, states={'draft': [('readonly', False)]}, help="If checked, indicates that all payslips generated from here are refund payslips."),
}
_defaults = {
'state': 'draft',
'date_start': lambda *a: time.strftime('%Y-%m-01'),
'date_end': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
}
def draft_payslip_run(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def close_payslip_run(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
class hr_payslip(osv.osv):
'''
Pay Slip
'''
_name = 'hr.payslip'
_description = 'Pay Slip'
def _get_lines_salary_rule_category(self, cr, uid, ids, field_names, arg=None, context=None):
result = {}
if not ids: return result
for id in ids:
result.setdefault(id, [])
cr.execute('''SELECT pl.slip_id, pl.id FROM hr_payslip_line AS pl \
LEFT JOIN hr_salary_rule_category AS sh on (pl.category_id = sh.id) \
WHERE pl.slip_id in %s \
GROUP BY pl.slip_id, pl.sequence, pl.id ORDER BY pl.sequence''',(tuple(ids),))
res = cr.fetchall()
for r in res:
result[r[0]].append(r[1])
return result
def _count_detail_payslip(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for details in self.browse(cr, uid, ids, context=context):
res[details.id] = len(details.line_ids)
return res
_columns = {
'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True, states={'draft': [('readonly', False)]}, help='Defines the rules that have to be applied to this payslip, according to the contract chosen. If you leave the contract field empty, this field is no longer mandatory, and the rules applied will be all the rules set on the structures of all the employee\'s contracts valid for the chosen period'),
'name': fields.char('Payslip Name', required=False, readonly=True, states={'draft': [('readonly', False)]}),
'number': fields.char('Reference', required=False, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_from': fields.date('Date From', readonly=True, states={'draft': [('readonly', False)]}, required=True),
'date_to': fields.date('Date To', readonly=True, states={'draft': [('readonly', False)]}, required=True),
'state': fields.selection([
('draft', 'Draft'),
('verify', 'Waiting'),
('done', 'Done'),
('cancel', 'Rejected'),
], 'Status', select=True, readonly=True, copy=False,
help='* When the payslip is created the status is \'Draft\'.\
\n* If the payslip is under verification, the status is \'Waiting\'. \
\n* If the payslip is confirmed then status is set to \'Done\'.\
\n* When user cancel payslip the status is \'Rejected\'.'),
'line_ids': one2many_mod2('hr.payslip.line', 'slip_id', 'Payslip Lines', readonly=True, states={'draft':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=False, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'worked_days_line_ids': fields.one2many('hr.payslip.worked_days', 'payslip_id', 'Payslip Worked Days', required=False, readonly=True, states={'draft': [('readonly', False)]}),
'input_line_ids': fields.one2many('hr.payslip.input', 'payslip_id', 'Payslip Inputs', required=False, readonly=True, states={'draft': [('readonly', False)]}),
'paid': fields.boolean('Made Payment Order ? ', required=False, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'note': fields.text('Internal Note', readonly=True, states={'draft':[('readonly',False)]}),
'contract_id': fields.many2one('hr.contract', 'Contract', required=False, readonly=True, states={'draft': [('readonly', False)]}),
'details_by_salary_rule_category': fields.function(_get_lines_salary_rule_category, method=True, type='one2many', relation='hr.payslip.line', string='Details by Salary Rule Category'),
'credit_note': fields.boolean('Credit Note', help="Indicates this payslip has a refund of another", readonly=True, states={'draft': [('readonly', False)]}),
'payslip_run_id': fields.many2one('hr.payslip.run', 'Payslip Batches', readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'payslip_count': fields.function(_count_detail_payslip, type='integer', string="Payslip Computation Details"),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'date_to': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
'state': 'draft',
'credit_note': False,
'company_id': lambda self, cr, uid, context: \
self.pool.get('res.users').browse(cr, uid, uid,
context=context).company_id.id,
}
def _check_dates(self, cr, uid, ids, context=None):
for payslip in self.browse(cr, uid, ids, context=context):
if payslip.date_from > payslip.date_to:
return False
return True
_constraints = [(_check_dates, "Payslip 'Date From' must be before 'Date To'.", ['date_from', 'date_to'])]
def cancel_sheet(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
def process_sheet(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'paid': True, 'state': 'done'}, context=context)
def hr_verify_sheet(self, cr, uid, ids, context=None):
self.compute_sheet(cr, uid, ids, context)
return self.write(cr, uid, ids, {'state': 'verify'}, context=context)
def refund_sheet(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
for payslip in self.browse(cr, uid, ids, context=context):
id_copy = self.copy(cr, uid, payslip.id, {'credit_note': True, 'name': _('Refund: ')+payslip.name}, context=context)
self.compute_sheet(cr, uid, [id_copy], context=context)
self.signal_workflow(cr, uid, [id_copy], 'hr_verify_sheet')
self.signal_workflow(cr, uid, [id_copy], 'process_sheet')
form_id = mod_obj.get_object_reference(cr, uid, 'hr_payroll', 'view_hr_payslip_form')
form_res = form_id and form_id[1] or False
tree_id = mod_obj.get_object_reference(cr, uid, 'hr_payroll', 'view_hr_payslip_tree')
tree_res = tree_id and tree_id[1] or False
return {
'name':_("Refund Payslip"),
'view_mode': 'tree, form',
'view_id': False,
'view_type': 'form',
'res_model': 'hr.payslip',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'domain': "[('id', 'in', %s)]" % [id_copy],
'views': [(tree_res, 'tree'), (form_res, 'form')],
'context': {}
}
def check_done(self, cr, uid, ids, context=None):
return True
def unlink(self, cr, uid, ids, context=None):
for payslip in self.browse(cr, uid, ids, context=context):
if payslip.state not in ['draft','cancel']:
raise osv.except_osv(_('Warning!'),_('You cannot delete a payslip which is not draft or cancelled!'))
return super(hr_payslip, self).unlink(cr, uid, ids, context)
#TODO move this function into hr_contract module, on hr.employee object
def get_contract(self, cr, uid, employee, date_from, date_to, context=None):
"""
@param employee: browse record of employee
@param date_from: date field
@param date_to: date field
@return: returns the ids of all the contracts for the given employee that need to be considered for the given dates
"""
contract_obj = self.pool.get('hr.contract')
clause = []
#a contract is valid if it ends between the given dates
clause_1 = ['&',('date_end', '<=', date_to),('date_end','>=', date_from)]
#OR if it starts between the given dates
clause_2 = ['&',('date_start', '<=', date_to),('date_start','>=', date_from)]
#OR if it starts before the date_from and finish after the date_end (or never finish)
clause_3 = [('date_start','<=', date_from),'|',('date_end', '=', False),('date_end','>=', date_to)]
clause_final = [('employee_id', '=', employee.id),'|','|'] + clause_1 + clause_2 + clause_3
contract_ids = contract_obj.search(cr, uid, clause_final, context=context)
return contract_ids
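# --- Editor's note (illustrative): for date_from='2014-01-01' and
# date_to='2014-01-31', clause_final is the following prefix-notation
# domain, i.e. employee_id matches AND (clause_1 OR clause_2 OR clause_3):
#     [('employee_id', '=', employee.id), '|', '|',
#      '&', ('date_end', '<=', '2014-01-31'), ('date_end', '>=', '2014-01-01'),
#      '&', ('date_start', '<=', '2014-01-31'), ('date_start', '>=', '2014-01-01'),
#      ('date_start', '<=', '2014-01-01'),
#      '|', ('date_end', '=', False), ('date_end', '>=', '2014-01-31')]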
def compute_sheet(self, cr, uid, ids, context=None):
slip_line_pool = self.pool.get('hr.payslip.line')
sequence_obj = self.pool.get('ir.sequence')
for payslip in self.browse(cr, uid, ids, context=context):
number = payslip.number or sequence_obj.get(cr, uid, 'salary.slip')
#delete old payslip lines
old_slipline_ids = slip_line_pool.search(cr, uid, [('slip_id', '=', payslip.id)], context=context)
if old_slipline_ids:
slip_line_pool.unlink(cr, uid, old_slipline_ids, context=context)
if payslip.contract_id:
#set the list of contracts for which the rules have to be applied
contract_ids = [payslip.contract_id.id]
else:
#if we don't give the contract, then the rules to apply should be for all current contracts of the employee
contract_ids = self.get_contract(cr, uid, payslip.employee_id, payslip.date_from, payslip.date_to, context=context)
lines = [(0,0,line) for line in self.pool.get('hr.payslip').get_payslip_lines(cr, uid, contract_ids, payslip.id, context=context)]
self.write(cr, uid, [payslip.id], {'line_ids': lines, 'number': number,}, context=context)
return True
def get_worked_day_lines(self, cr, uid, contract_ids, date_from, date_to, context=None):
"""
@param contract_ids: list of contract ids
@return: returns a list of dicts containing the worked days that should be applied for the given contracts between date_from and date_to
"""
def was_on_leave(employee_id, datetime_day, context=None):
res = False
day = datetime_day.strftime("%Y-%m-%d")
holiday_ids = self.pool.get('hr.holidays').search(cr, uid, [('state','=','validate'),('employee_id','=',employee_id),('type','=','remove'),('date_from','<=',day),('date_to','>=',day)])
if holiday_ids:
res = self.pool.get('hr.holidays').browse(cr, uid, holiday_ids, context=context)[0].holiday_status_id.name
return res
res = []
for contract in self.pool.get('hr.contract').browse(cr, uid, contract_ids, context=context):
if not contract.working_hours:
#fill only if the contract has a working schedule linked
continue
attendances = {
'name': _("Normal Working Days paid at 100%"),
'sequence': 1,
'code': 'WORK100',
'number_of_days': 0.0,
'number_of_hours': 0.0,
'contract_id': contract.id,
}
leaves = {}
day_from = datetime.strptime(date_from,"%Y-%m-%d")
day_to = datetime.strptime(date_to,"%Y-%m-%d")
nb_of_days = (day_to - day_from).days + 1
for day in range(0, nb_of_days):
working_hours_on_day = self.pool.get('resource.calendar').working_hours_on_day(cr, uid, contract.working_hours, day_from + timedelta(days=day), context)
if working_hours_on_day:
#the employee had to work
leave_type = was_on_leave(contract.employee_id.id, day_from + timedelta(days=day), context=context)
if leave_type:
#if he was on leave, fill the leaves dict
if leave_type in leaves:
leaves[leave_type]['number_of_days'] += 1.0
leaves[leave_type]['number_of_hours'] += working_hours_on_day
else:
leaves[leave_type] = {
'name': leave_type,
'sequence': 5,
'code': leave_type,
'number_of_days': 1.0,
'number_of_hours': working_hours_on_day,
'contract_id': contract.id,
}
else:
#add the input vals to tmp (increment if existing)
attendances['number_of_days'] += 1.0
attendances['number_of_hours'] += working_hours_on_day
leaves = leaves.values()
res += [attendances] + leaves
return res
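# --- Editor's note (illustrative): for a contract with an 8h/day schedule
# over a 5-day period containing one validated leave day, the result would
# look roughly like (dict keys abbreviated):
#     [{'code': 'WORK100', 'number_of_days': 4.0, 'number_of_hours': 32.0, ...},
#      {'code': '<leave type>', 'number_of_days': 1.0, 'number_of_hours': 8.0, ...}]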
def get_inputs(self, cr, uid, contract_ids, date_from, date_to, context=None):
res = []
contract_obj = self.pool.get('hr.contract')
rule_obj = self.pool.get('hr.salary.rule')
structure_ids = contract_obj.get_all_structures(cr, uid, contract_ids, context=context)
rule_ids = self.pool.get('hr.payroll.structure').get_all_rules(cr, uid, structure_ids, context=context)
sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x:x[1])]
for contract in contract_obj.browse(cr, uid, contract_ids, context=context):
for rule in rule_obj.browse(cr, uid, sorted_rule_ids, context=context):
if rule.input_ids:
for input in rule.input_ids:
inputs = {
'name': input.name,
'code': input.code,
'contract_id': contract.id,
}
res += [inputs]
return res
def get_payslip_lines(self, cr, uid, contract_ids, payslip_id, context):
def _sum_salary_rule_category(localdict, category, amount):
if category.parent_id:
localdict = _sum_salary_rule_category(localdict, category.parent_id, amount)
localdict['categories'].dict[category.code] = category.code in localdict['categories'].dict and localdict['categories'].dict[category.code] + amount or amount
return localdict
class BrowsableObject(object):
def __init__(self, pool, cr, uid, employee_id, dict):
self.pool = pool
self.cr = cr
self.uid = uid
self.employee_id = employee_id
self.dict = dict
def __getattr__(self, attr):
return attr in self.dict and self.dict.__getitem__(attr) or 0.0
class InputLine(BrowsableObject):
"""a class that will be used into the python code, mainly for usability purposes"""
def sum(self, code, from_date, to_date=None):
if to_date is None:
to_date = datetime.now().strftime('%Y-%m-%d')
result = 0.0
self.cr.execute("SELECT sum(amount) as sum\
FROM hr_payslip as hp, hr_payslip_input as pi \
WHERE hp.employee_id = %s AND hp.state = 'done' \
AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s",
(self.employee_id, from_date, to_date, code))
res = self.cr.fetchone()[0]
return res or 0.0
class WorkedDays(BrowsableObject):
"""a class that will be used into the python code, mainly for usability purposes"""
def _sum(self, code, from_date, to_date=None):
if to_date is None:
to_date = datetime.now().strftime('%Y-%m-%d')
result = 0.0
self.cr.execute("SELECT sum(number_of_days) as number_of_days, sum(number_of_hours) as number_of_hours\
FROM hr_payslip as hp, hr_payslip_worked_days as pi \
WHERE hp.employee_id = %s AND hp.state = 'done'\
AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s",
(self.employee_id, from_date, to_date, code))
return self.cr.fetchone()
def sum(self, code, from_date, to_date=None):
res = self._sum(code, from_date, to_date)
return res and res[0] or 0.0
def sum_hours(self, code, from_date, to_date=None):
res = self._sum(code, from_date, to_date)
return res and res[1] or 0.0
class Payslips(BrowsableObject):
"""a class that will be used into the python code, mainly for usability purposes"""
def sum(self, code, from_date, to_date=None):
if to_date is None:
to_date = datetime.now().strftime('%Y-%m-%d')
self.cr.execute("SELECT sum(case when hp.credit_note = False then (pl.total) else (-pl.total) end)\
FROM hr_payslip as hp, hr_payslip_line as pl \
WHERE hp.employee_id = %s AND hp.state = 'done' \
AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pl.slip_id AND pl.code = %s",
(self.employee_id, from_date, to_date, code))
res = self.cr.fetchone()
return res and res[0] or 0.0
#we keep a dict with the result because a value can be overwritten by another rule with the same code
result_dict = {}
rules = {}
categories_dict = {}
blacklist = []
payslip_obj = self.pool.get('hr.payslip')
inputs_obj = self.pool.get('hr.payslip.worked_days')
obj_rule = self.pool.get('hr.salary.rule')
payslip = payslip_obj.browse(cr, uid, payslip_id, context=context)
worked_days = {}
for worked_days_line in payslip.worked_days_line_ids:
worked_days[worked_days_line.code] = worked_days_line
inputs = {}
for input_line in payslip.input_line_ids:
inputs[input_line.code] = input_line
categories_obj = BrowsableObject(self.pool, cr, uid, payslip.employee_id.id, categories_dict)
input_obj = InputLine(self.pool, cr, uid, payslip.employee_id.id, inputs)
worked_days_obj = WorkedDays(self.pool, cr, uid, payslip.employee_id.id, worked_days)
payslip_obj = Payslips(self.pool, cr, uid, payslip.employee_id.id, payslip)
rules_obj = BrowsableObject(self.pool, cr, uid, payslip.employee_id.id, rules)
baselocaldict = {'categories': categories_obj, 'rules': rules_obj, 'payslip': payslip_obj, 'worked_days': worked_days_obj, 'inputs': input_obj}
#get the ids of the structures on the contracts and their parent id as well
structure_ids = self.pool.get('hr.contract').get_all_structures(cr, uid, contract_ids, context=context)
#get the rules of the structure and their children
rule_ids = self.pool.get('hr.payroll.structure').get_all_rules(cr, uid, structure_ids, context=context)
#run the rules by sequence
sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x:x[1])]
for contract in self.pool.get('hr.contract').browse(cr, uid, contract_ids, context=context):
employee = contract.employee_id
localdict = dict(baselocaldict, employee=employee, contract=contract)
for rule in obj_rule.browse(cr, uid, sorted_rule_ids, context=context):
key = rule.code + '-' + str(contract.id)
localdict['result'] = None
localdict['result_qty'] = 1.0
localdict['result_rate'] = 100
#check if the rule can be applied
if obj_rule.satisfy_condition(cr, uid, rule.id, localdict, context=context) and rule.id not in blacklist:
#compute the amount of the rule
amount, qty, rate = obj_rule.compute_rule(cr, uid, rule.id, localdict, context=context)
#check if there is already a rule computed with that code
previous_amount = rule.code in localdict and localdict[rule.code] or 0.0
#set/overwrite the amount computed for this rule in the localdict
tot_rule = amount * qty * rate / 100.0
localdict[rule.code] = tot_rule
rules[rule.code] = rule
#sum the amount for its salary category
localdict = _sum_salary_rule_category(localdict, rule.category_id, tot_rule - previous_amount)
#create/overwrite the rule in the temporary results
result_dict[key] = {
'salary_rule_id': rule.id,
'contract_id': contract.id,
'name': rule.name,
'code': rule.code,
'category_id': rule.category_id.id,
'sequence': rule.sequence,
'appears_on_payslip': rule.appears_on_payslip,
'condition_select': rule.condition_select,
'condition_python': rule.condition_python,
'condition_range': rule.condition_range,
'condition_range_min': rule.condition_range_min,
'condition_range_max': rule.condition_range_max,
'amount_select': rule.amount_select,
'amount_fix': rule.amount_fix,
'amount_python_compute': rule.amount_python_compute,
'amount_percentage': rule.amount_percentage,
'amount_percentage_base': rule.amount_percentage_base,
'register_id': rule.register_id.id,
'amount': amount,
'employee_id': contract.employee_id.id,
'quantity': qty,
'rate': rate,
}
else:
#blacklist this rule and its children
blacklist += [id for id, seq in self.pool.get('hr.salary.rule')._recursive_search_of_rules(cr, uid, [rule], context=context)]
result = [value for code, value in result_dict.items()]
return result
def onchange_employee_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
employee_obj = self.pool.get('hr.employee')
contract_obj = self.pool.get('hr.contract')
worked_days_obj = self.pool.get('hr.payslip.worked_days')
input_obj = self.pool.get('hr.payslip.input')
if context is None:
context = {}
#delete old worked days lines
old_worked_days_ids = ids and worked_days_obj.search(cr, uid, [('payslip_id', '=', ids[0])], context=context) or False
if old_worked_days_ids:
worked_days_obj.unlink(cr, uid, old_worked_days_ids, context=context)
#delete old input lines
old_input_ids = ids and input_obj.search(cr, uid, [('payslip_id', '=', ids[0])], context=context) or False
if old_input_ids:
input_obj.unlink(cr, uid, old_input_ids, context=context)
#defaults
res = {'value':{
'line_ids':[],
'input_line_ids': [],
'worked_days_line_ids': [],
#'details_by_salary_head':[], TODO put me back
'name':'',
'contract_id': False,
'struct_id': False,
}
}
if (not employee_id) or (not date_from) or (not date_to):
return res
ttyme = datetime.fromtimestamp(time.mktime(time.strptime(date_from, "%Y-%m-%d")))
employee_id = employee_obj.browse(cr, uid, employee_id, context=context)
res['value'].update({
'name': _('Salary Slip of %s for %s') % (employee_id.name, tools.ustr(ttyme.strftime('%B-%Y'))),
'company_id': employee_id.company_id.id
})
if not context.get('contract', False):
#fill with the first contract of the employee
contract_ids = self.get_contract(cr, uid, employee_id, date_from, date_to, context=context)
else:
if contract_id:
#set the list of contracts for which the inputs have to be filled
contract_ids = [contract_id]
else:
#if we don't give the contract, then the input to fill should be for all current contracts of the employee
contract_ids = self.get_contract(cr, uid, employee_id, date_from, date_to, context=context)
if not contract_ids:
return res
contract_record = contract_obj.browse(cr, uid, contract_ids[0], context=context)
res['value'].update({
'contract_id': contract_record and contract_record.id or False
})
struct_record = contract_record and contract_record.struct_id or False
if not struct_record:
return res
res['value'].update({
'struct_id': struct_record.id,
})
#computation of the salary input
worked_days_line_ids = self.get_worked_day_lines(cr, uid, contract_ids, date_from, date_to, context=context)
input_line_ids = self.get_inputs(cr, uid, contract_ids, date_from, date_to, context=context)
res['value'].update({
'worked_days_line_ids': worked_days_line_ids,
'input_line_ids': input_line_ids,
})
return res
def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
#TODO the onchanges are a mess; we should have onchange_employee => onchange_contract doing all the things
res = {'value':{
'line_ids': [],
'name': '',
}
}
context = dict(context or {}, contract=True)
if not contract_id:
res['value'].update({'struct_id': False})
return self.onchange_employee_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
class hr_payslip_worked_days(osv.osv):
'''
Payslip Worked Days
'''
_name = 'hr.payslip.worked_days'
_description = 'Payslip Worked Days'
_columns = {
'name': fields.char('Description', required=True),
'payslip_id': fields.many2one('hr.payslip', 'Pay Slip', required=True, ondelete='cascade', select=True),
'sequence': fields.integer('Sequence', required=True, select=True),
'code': fields.char('Code', size=52, required=True, help="The code that can be used in the salary rules"),
'number_of_days': fields.float('Number of Days'),
'number_of_hours': fields.float('Number of Hours'),
'contract_id': fields.many2one('hr.contract', 'Contract', required=True, help="The contract to which this input applies"),
}
_order = 'payslip_id, sequence'
_defaults = {
'sequence': 10,
}
class hr_payslip_input(osv.osv):
'''
Payslip Input
'''
_name = 'hr.payslip.input'
_description = 'Payslip Input'
_columns = {
'name': fields.char('Description', required=True),
'payslip_id': fields.many2one('hr.payslip', 'Pay Slip', required=True, ondelete='cascade', select=True),
'sequence': fields.integer('Sequence', required=True, select=True),
'code': fields.char('Code', size=52, required=True, help="The code that can be used in the salary rules"),
'amount': fields.float('Amount', help="It is used in computation. E.g. a rule for sales having a 1% commission of basic salary per product can be defined in an expression like result = inputs.SALEURO.amount * contract.wage*0.01."),
'contract_id': fields.many2one('hr.contract', 'Contract', required=True, help="The contract to which this input applies"),
}
_order = 'payslip_id, sequence'
_defaults = {
'sequence': 10,
'amount': 0.0,
}
class hr_salary_rule(osv.osv):
_name = 'hr.salary.rule'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'code':fields.char('Code', size=64, required=True, help="The code of salary rules can be used as reference in computation of other rules. In that case, it is case sensitive."),
'sequence': fields.integer('Sequence', required=True, help='Used to arrange the calculation sequence', select=True),
'quantity': fields.char('Quantity', help="It is used in computation for percentage and fixed amount. E.g. a rule for Meal Voucher having a fixed amount of 1€ per worked day can have its quantity defined in an expression like worked_days.WORK100.number_of_days."),
'category_id':fields.many2one('hr.salary.rule.category', 'Category', required=True),
'active':fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the salary rule without removing it."),
'appears_on_payslip': fields.boolean('Appears on Payslip', help="Used to display the salary rule on payslip."),
'parent_rule_id':fields.many2one('hr.salary.rule', 'Parent Salary Rule', select=True),
'company_id':fields.many2one('res.company', 'Company', required=False),
'condition_select': fields.selection([('none', 'Always True'),('range', 'Range'), ('python', 'Python Expression')], "Condition Based on", required=True),
'condition_range':fields.char('Range Based on', readonly=False, help='This will be used to compute the % fields values; in general it is on basic, but you can also use category code fields in lowercase as variable names (hra, ma, lta, etc.) and the variable basic.'),
'condition_python':fields.text('Python Condition', required=True, readonly=False, help='Apply this rule for calculation if the condition is true. You can specify a condition like basic > 1000.'),
'condition_range_min': fields.float('Minimum Range', required=False, help="The minimum amount, applied for this rule."),
'condition_range_max': fields.float('Maximum Range', required=False, help="The maximum amount, applied for this rule."),
'amount_select':fields.selection([
('percentage','Percentage (%)'),
('fix','Fixed Amount'),
('code','Python Code'),
],'Amount Type', select=True, required=True, help="The computation method for the rule amount."),
'amount_fix': fields.float('Fixed Amount', digits_compute=dp.get_precision('Payroll'),),
'amount_percentage': fields.float('Percentage (%)', digits_compute=dp.get_precision('Payroll Rate'), help='For example, enter 50.0 to apply a percentage of 50%'),
'amount_python_compute':fields.text('Python Code'),
'amount_percentage_base': fields.char('Percentage based on', required=False, readonly=False, help='Expression evaluated to obtain the base amount for the percentage computation'),
'child_ids':fields.one2many('hr.salary.rule', 'parent_rule_id', 'Child Salary Rule', copy=True),
'register_id':fields.many2one('hr.contribution.register', 'Contribution Register', help="Third party (if any) involved in the salary payment of the employees."),
'input_ids': fields.one2many('hr.rule.input', 'input_id', 'Inputs', copy=True),
'note':fields.text('Description'),
}
_defaults = {
'amount_python_compute': '''
# Available variables:
#----------------------
# payslip: object containing the payslips
# employee: hr.employee object
# contract: hr.contract object
# rules: object containing the rules code (previously computed)
# categories: object containing the computed salary rule categories (sum of amount of all rules belonging to that category).
# worked_days: object containing the computed worked days.
# inputs: object containing the computed inputs.
# Note: the returned value has to be set in the variable 'result'
result = contract.wage * 0.10''',
'condition_python':
'''
# Available variables:
#----------------------
# payslip: object containing the payslips
# employee: hr.employee object
# contract: hr.contract object
# rules: object containing the rules code (previously computed)
# categories: object containing the computed salary rule categories (sum of amount of all rules belonging to that category).
# worked_days: object containing the computed worked days
# inputs: object containing the computed inputs
# Note: the returned value has to be set in the variable 'result'
result = rules.NET > categories.NET * 0.10''',
'condition_range': 'contract.wage',
'sequence': 5,
'appears_on_payslip': True,
'active': True,
'company_id': lambda self, cr, uid, context: \
self.pool.get('res.users').browse(cr, uid, uid,
context=context).company_id.id,
'condition_select': 'none',
'amount_select': 'fix',
'amount_fix': 0.0,
'amount_percentage': 0.0,
'quantity': '1.0',
}
@api.cr_uid_ids_context
def _recursive_search_of_rules(self, cr, uid, rule_ids, context=None):
"""
@param rule_ids: list of browse records
@return: returns a list of tuples (id, sequence) which are all the children of the passed rule_ids
"""
children_rules = []
for rule in rule_ids:
if rule.child_ids:
children_rules += self._recursive_search_of_rules(cr, uid, rule.child_ids, context=context)
return [(r.id, r.sequence) for r in rule_ids] + children_rules
#TODO should add some checks on the type of result (should be float)
def compute_rule(self, cr, uid, rule_id, localdict, context=None):
"""
:param rule_id: id of rule to compute
:param localdict: dictionary containing the environment in which to compute the rule
:return: returns a tuple built as (the base/amount computed, the quantity, the rate)
:rtype: (float, float, float)
"""
rule = self.browse(cr, uid, rule_id, context=context)
if rule.amount_select == 'fix':
try:
return rule.amount_fix, eval(rule.quantity, localdict), 100.0
except:
raise osv.except_osv(_('Error!'), _('Wrong quantity defined for salary rule %s (%s).')% (rule.name, rule.code))
elif rule.amount_select == 'percentage':
try:
return eval(rule.amount_percentage_base, localdict), eval(rule.quantity, localdict), rule.amount_percentage
except:
raise osv.except_osv(_('Error!'), _('Wrong percentage base or quantity defined for salary rule %s (%s).')% (rule.name, rule.code))
else:
try:
eval(rule.amount_python_compute, localdict, mode='exec', nocopy=True)
return localdict['result'], 'result_qty' in localdict and localdict['result_qty'] or 1.0, 'result_rate' in localdict and localdict['result_rate'] or 100.0
except:
raise osv.except_osv(_('Error!'), _('Wrong python code defined for salary rule %s (%s).')% (rule.name, rule.code))
def satisfy_condition(self, cr, uid, rule_id, localdict, context=None):
"""
@param rule_id: id of hr.salary.rule to be tested
@param localdict: dictionary with the evaluation environment for the condition
@return: returns True if the given rule matches the condition for the given contract. Returns False otherwise.
"""
rule = self.browse(cr, uid, rule_id, context=context)
if rule.condition_select == 'none':
return True
elif rule.condition_select == 'range':
try:
result = eval(rule.condition_range, localdict)
return rule.condition_range_min <= result <= rule.condition_range_max
except:
raise osv.except_osv(_('Error!'), _('Wrong range condition defined for salary rule %s (%s).')% (rule.name, rule.code))
else: #python code
try:
eval(rule.condition_python, localdict, mode='exec', nocopy=True)
return 'result' in localdict and localdict['result'] or False
except:
raise osv.except_osv(_('Error!'), _('Wrong python condition defined for salary rule %s (%s).')% (rule.name, rule.code))
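# --- Editor's note (illustrative): compute_rule() returns (amount, quantity,
# rate); the effective value put on a payslip line is
# amount * quantity * rate / 100. For a hypothetical 'percentage' rule:
#     amount, qty, rate = 2000.0, 1.0, 50.0   # base 2000.0, 50% rate
#     total = amount * qty * rate / 100.0     # -> 1000.0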
class hr_rule_input(osv.osv):
'''
Salary Rule Input
'''
_name = 'hr.rule.input'
_description = 'Salary Rule Input'
_columns = {
'name': fields.char('Description', required=True),
'code': fields.char('Code', size=52, required=True, help="The code that can be used in the salary rules"),
'input_id': fields.many2one('hr.salary.rule', 'Salary Rule Input', required=True)
}
class hr_payslip_line(osv.osv):
'''
Payslip Line
'''
_name = 'hr.payslip.line'
_inherit = 'hr.salary.rule'
_description = 'Payslip Line'
_order = 'contract_id, sequence'
def _calculate_total(self, cr, uid, ids, name, args, context):
if not ids: return {}
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = float(line.quantity) * line.amount * line.rate / 100
return res
_columns = {
'slip_id':fields.many2one('hr.payslip', 'Pay Slip', required=True, ondelete='cascade'),
'salary_rule_id':fields.many2one('hr.salary.rule', 'Rule', required=True),
'employee_id':fields.many2one('hr.employee', 'Employee', required=True),
'contract_id':fields.many2one('hr.contract', 'Contract', required=True, select=True),
'rate': fields.float('Rate (%)', digits_compute=dp.get_precision('Payroll Rate')),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Payroll')),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Payroll')),
'total': fields.function(_calculate_total, method=True, type='float', string='Total', digits_compute=dp.get_precision('Payroll'),store=True ),
}
_defaults = {
'quantity': 1.0,
'rate': 100.0,
}
class hr_employee(osv.osv):
'''
Employee
'''
_inherit = 'hr.employee'
_description = 'Employee'
def _calculate_total_wage(self, cr, uid, ids, name, args, context):
if not ids: return {}
res = {}
current_date = datetime.now().strftime('%Y-%m-%d')
for employee in self.browse(cr, uid, ids, context=context):
if not employee.contract_ids:
res[employee.id] = {'basic': 0.0}
continue
cr.execute( 'SELECT SUM(wage) '\
'FROM hr_contract '\
'WHERE employee_id = %s '\
'AND date_start <= %s '\
'AND (date_end > %s OR date_end is NULL)',
(employee.id, current_date, current_date))
result = dict(cr.dictfetchone())
res[employee.id] = {'basic': result['sum']}
return res
def _payslip_count(self, cr, uid, ids, field_name, arg, context=None):
Payslip = self.pool['hr.payslip']
return {
employee_id: Payslip.search_count(cr,uid, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'slip_ids':fields.one2many('hr.payslip', 'employee_id', 'Payslips', required=False, readonly=True),
'total_wage': fields.function(_calculate_total_wage, method=True, type='float', string='Total Basic Salary', digits_compute=dp.get_precision('Payroll'), help="Sum of the wages of all the employee's current contracts."),
'payslip_count': fields.function(_payslip_count, type='integer', string='Payslips'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
idea4bsd/idea4bsd | python/lib/Lib/site-packages/django/db/backends/postgresql/introspection.py | 308 | 3725 | from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'IPAddressField',
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return cursor.description
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT con.conkey, con.confkey, c2.relname
FROM pg_constraint con, pg_class c1, pg_class c2
WHERE c1.oid = con.conrelid
AND c2.oid = con.confrelid
AND c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
try:
# row[0] and row[1] are like "{2}", so strip the curly braces.
relations[int(row[0][1:-1]) - 1] = (int(row[1][1:-1]) - 1, row[2])
except ValueError:
continue
return relations
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute("""
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
return indexes
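# --- Editor's note (illustrative): get_relations() strips the curly braces
# from the "{N}" strings PostgreSQL returns for conkey/confkey and converts
# them to 0-based indexes, e.g. for a hypothetical row:
#     row = ('{2}', '{1}', 'auth_user')
#     int(row[0][1:-1]) - 1    # -> 1 (0-based field index in this table)
#     int(row[1][1:-1]) - 1    # -> 0 (0-based field index in auth_user)
#     # relations[1] = (0, 'auth_user')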
| apache-2.0 |
bioh4x/NeatFreq | lib/genometools-1.4.1/gtpython/gt/extended/feature_node.py | 1 | 8909 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009 Sascha Steinbiss <steinbiss@zbh.uni-hamburg.de>
# Copyright (c) 2008-2009 Center for Bioinformatics, University of Hamburg
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from gt.dlload import gtlib, CollectFunc
from gt.core.error import Error, gterror
from gt.core.gtstr import Str
from gt.core.str_array import StrArray
from gt.extended.genome_node import GenomeNode
from gt.props import cachedproperty
class FeatureNode(GenomeNode):
def __init__(self):
self.depth_first = True
@classmethod
def create_new(cls, seqid, type, start, end, strand):
from gt.extended.strand import strandchars
if not strand in strandchars:
gterror("Invalid strand '%s' -- must be one of %s" % (strand,
strandchars))
s = Str(str(seqid.encode("utf-8")))
fn = gtlib.gt_feature_node_new(s, type, start, end, \
strandchars.index(strand))
n = cls.create_from_ptr(fn, True)
n.depth_first = True
return n
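    # Example usage (a sketch; assumes the GenomeTools shared library is
    # loadable through gtlib):
    #   node = FeatureNode.create_new("chr1", "gene", 100, 900, "+")
    #   node.add_attribute("ID", "gene1")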
def update_attrs(self):
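        # Gather the node's attributes from the C library: CollectFunc wraps
        # the Python callback so C code can invoke it once per tag/value pair.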
attribs = {}
def py_collect_func(tag, val, data):
attribs[tag] = val
collect_func = CollectFunc(py_collect_func)
gtlib.gt_feature_node_foreach_attribute(self.gn, collect_func,
None)
return attribs
def add_child(self, node):
ownid = str(self.get_seqid())
newid = str(node.get_seqid())
if (ownid != newid):
gterror("cannot add node with sequence region '%s' to node with sequence region '%s'" % (ownid, newid))
else:
gtlib.gt_feature_node_add_child(self.gn, node)
def from_param(cls, obj):
if not isinstance(obj, FeatureNode):
            raise TypeError("argument must be a FeatureNode")
return obj._as_parameter_
from_param = classmethod(from_param)
def get_source(self):
return gtlib.gt_feature_node_get_source(self.gn)
def set_source(self, source):
s = Str(str(source.encode("utf-8")))
gtlib.gt_feature_node_set_source(self.gn, s)
source = cachedproperty(get_source, set_source)
def get_type(self):
return gtlib.gt_feature_node_get_type(self.gn)
def set_type(self, type):
gtlib.gt_feature_node_set_type(self.gn, type)
type = cachedproperty(get_type, set_type)
def has_type(self, type):
return gtlib.gt_feature_node_has_type(self.gn, type) == 1
def set_strand(self, strand):
from gt.extended.strand import strandchars
if not strand in strandchars:
gterror("Invalid strand '%s' -- must be one of %s" % (strand,
strandchars))
gtlib.gt_feature_node_set_strand(self.gn, strandchars.index(strand))
def get_strand(self):
from gt.extended.strand import strandchars
return strandchars[gtlib.gt_feature_node_get_strand(self.gn)]
strand = cachedproperty(get_strand, set_strand)
def get_phase(self):
return gtlib.gt_feature_node_get_phase(self.gn)
def set_phase(self, phase):
return gtlib.gt_feature_node_set_phase(self.gn, phase)
phase = cachedproperty(get_phase, set_phase)
def score_is_defined(self):
return gtlib.gt_feature_node_score_is_defined(self.gn) == 1
def get_score(self):
if gtlib.gt_feature_node_score_is_defined(self.gn) == 1:
return gtlib.gt_feature_node_get_score(self.gn)
else:
return None
def set_score(self, score):
gtlib.gt_feature_node_set_score(self.gn, score)
def unset_score(self):
gtlib.gt_feature_node_unset_score(self.gn)
score = cachedproperty(get_score, set_score, unset_score)
def get_attribute(self, attrib):
return gtlib.gt_feature_node_get_attribute(self.gn, attrib)
def add_attribute(self, attrib, value):
if attrib == "" or value == "":
gterror("attribute keys or values must not be empty!")
gtlib.gt_feature_node_add_attribute(self.gn, attrib, value)
def each_attribute(self):
attribs = self.update_attrs()
for (tag, val) in attribs.iteritems():
yield (tag, val)
def get_attribs(self):
return dict(self.each_attribute())
attribs = property(get_attribs)
def register(cls, gtlib):
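        # Declare argument and return types for the C functions so ctypes
        # marshals values correctly when calling into the shared library.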
from ctypes import c_char_p, c_float, c_int, c_int, c_void_p, \
c_ulong, c_float
gtlib.gt_feature_node_new.restype = c_void_p
gtlib.gt_feature_node_new.argtypes = [Str, c_char_p, c_ulong,
c_ulong, c_int]
gtlib.gt_feature_node_add_child.argtypes = [c_void_p,
FeatureNode]
gtlib.gt_feature_node_set_source.argtypes = [c_void_p, Str]
gtlib.gt_feature_node_get_source.restype = c_char_p
gtlib.gt_feature_node_get_source.argtypes = [c_void_p]
gtlib.gt_feature_node_get_type.restype = c_char_p
gtlib.gt_feature_node_get_type.argtypes = [c_void_p]
gtlib.gt_feature_node_has_type.restype = c_int
gtlib.gt_feature_node_has_type.argtypes = [c_void_p, c_char_p]
gtlib.gt_feature_node_get_score.restype = c_float
gtlib.gt_feature_node_get_score.argtypes = [c_void_p]
gtlib.gt_feature_node_set_score.argtypes = [c_void_p, c_float]
gtlib.gt_feature_node_get_phase.restype = c_int
gtlib.gt_feature_node_get_phase.argtypes = [c_void_p]
gtlib.gt_feature_node_set_phase.argtypes = [c_void_p, c_int]
gtlib.gt_feature_node_score_is_defined.restype = c_int
gtlib.gt_feature_node_score_is_defined.argtypes = [c_void_p]
gtlib.gt_feature_node_get_strand.restype = c_int
gtlib.gt_feature_node_get_strand.argtypes = [c_void_p]
gtlib.gt_feature_node_set_strand.argtypes = [c_void_p, c_int]
gtlib.gt_feature_node_unset_score.argtypes = [c_void_p]
gtlib.gt_feature_node_add_attribute.argtypes = [c_void_p,
c_char_p, c_char_p]
gtlib.gt_feature_node_get_attribute.restype = c_char_p
gtlib.gt_feature_node_get_attribute.argtypes = [c_void_p,
c_char_p]
register = classmethod(register)
def traverse(self, it):
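        # The underlying C iterator signals exhaustion by returning NULL,
        # which ctypes surfaces here as None.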
f = it.next()
while f is not None:
yield f
f = it.next()
def __iter__(self):
if self.depth_first:
it = FeatureNodeIteratorDepthFirst(self)
else:
it = FeatureNodeIteratorDirect(self)
return self.traverse(it)
def __call__(self, method=None):
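        # method='direct' visits only the direct children; any other value
        # falls back to a depth-first traversal that includes this node.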
if str(method).lower() == 'direct':
it = FeatureNodeIteratorDirect(self)
else:
it = FeatureNodeIteratorDepthFirst(self)
return self.traverse(it)
def traverse_dfs(self):
it = FeatureNodeIteratorDepthFirst(self)
return self.traverse(it)
def traverse_direct(self):
it = FeatureNodeIteratorDirect(self)
return self.traverse(it)
class FeatureNodeIterator(object):
def next(self):
ret = gtlib.gt_feature_node_iterator_next(self.i)
if ret != None:
return FeatureNode.create_from_ptr(ret)
return ret
def __del__(self):
try:
gtlib.gt_feature_node_iterator_delete(self.i)
except AttributeError:
pass
def register(cls, gtlib):
from ctypes import c_void_p
gtlib.gt_feature_node_iterator_new.restype = c_void_p
gtlib.gt_feature_node_iterator_new.argtypes = [FeatureNode]
gtlib.gt_feature_node_iterator_new_direct.restype = c_void_p
gtlib.gt_feature_node_iterator_new_direct.argtypes = [FeatureNode]
gtlib.gt_feature_node_iterator_next.restype = c_void_p
register = classmethod(register)
class FeatureNodeIteratorDepthFirst(FeatureNodeIterator):
"""
includes the node itself
"""
def __init__(self, node):
self.i = gtlib.gt_feature_node_iterator_new(node)
self._as_parameter_ = self.i
class FeatureNodeIteratorDirect(FeatureNodeIterator):
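    """
    iterates over the direct children only, excluding the node itself
    """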
def __init__(self, node):
self.i = gtlib.gt_feature_node_iterator_new_direct(node)
self._as_parameter_ = self.i
| gpl-2.0 |
fidomason/kbengine | kbe/res/scripts/common/Lib/distutils/tests/test_spawn.py | 146 | 1857 | """Tests for distutils.spawn."""
import unittest
import os
import time
from test.support import captured_stdout, run_unittest
from distutils.spawn import _nt_quote_args
from distutils.spawn import spawn, find_executable
from distutils.errors import DistutilsExecError
from distutils.tests import support
class SpawnTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_nt_quote_args(self):
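        # _nt_quote_args should wrap only the arguments containing spaces in
        # double quotes, leaving the others untouched.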
for (args, wanted) in ((['with space', 'nospace'],
['"with space"', 'nospace']),
(['nochange', 'nospace'],
['nochange', 'nospace'])):
res = _nt_quote_args(args)
self.assertEqual(res, wanted)
@unittest.skipUnless(os.name in ('nt', 'posix'),
'Runs only under posix or nt')
def test_spawn(self):
tmpdir = self.mkdtemp()
# creating something executable
# through the shell that returns 1
if os.name == 'posix':
exe = os.path.join(tmpdir, 'foo.sh')
self.write_file(exe, '#!/bin/sh\nexit 1')
else:
exe = os.path.join(tmpdir, 'foo.bat')
self.write_file(exe, 'exit 1')
os.chmod(exe, 0o777)
self.assertRaises(DistutilsExecError, spawn, [exe])
# now something that works
if os.name == 'posix':
exe = os.path.join(tmpdir, 'foo.sh')
self.write_file(exe, '#!/bin/sh\nexit 0')
else:
exe = os.path.join(tmpdir, 'foo.bat')
self.write_file(exe, 'exit 0')
os.chmod(exe, 0o777)
spawn([exe]) # should work without any error
def test_suite():
return unittest.makeSuite(SpawnTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| lgpl-3.0 |
NMGRL/pychron | pychron/experiment/conditional/experiment_queue_action.py | 3 | 2235 | # # ===============================================================================
# # Copyright 2013 Jake Ross
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ===============================================================================
#
# # ============= enthought library imports =======================
# from traits.api import Str, Int
# # ============= standard library imports ========================
# # ============= local library imports ==========================
# from pychron.experiment.conditional.conditional import BaseConditional
#
#
# class ExperimentQueueAction(BaseConditional):
# analysis_type = Str
# action = Str
#
# nrepeat = Int(1)
# count = Int(0)
# def __init__(self, aparams, *args, **kw):
# super(ExperimentQueueAction, self).__init__(*args, **kw)
# self._parse(aparams)
#
# def _parse(self, params):
# params = eval(params)
# n = len(params)
# nr = 1
# if n == 5:
# at, a, c, ac, nr = params
# elif n == 4:
# at, a, c, ac = params
#
# self.analysis_type = at
# self.attr = a
# self.comp = c
# self.action = ac
# self.nrepeat = int(nr)
#
# def to_string(self):
# return '{}{}'.format(self.attr, self.comp)
#
# def _should_check(self, run, data, cnt):
# if run.spec.analysis_type==self.analysis_type:
# return hasattr(run, self.attr)
#
# def _check(self, run, data):
# run_value = getattr(run, self.attr)
# comp = self.comp
# cmd = '{}{}'.format(run_value, comp)
# return eval(cmd)
#
# # ============= EOF =============================================
| apache-2.0 |
ESOedX/edx-platform | lms/djangoapps/discussion/rest_api/tests/test_api.py | 1 | 128499 | """
Tests for Discussion API internal interface
"""
from __future__ import absolute_import
import itertools
from datetime import datetime, timedelta
import ddt
import httpretty
import mock
import six
from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from opaque_keys.edx.locator import CourseLocator
from pytz import UTC
from rest_framework.exceptions import PermissionDenied
from six.moves import range
from six.moves.urllib.parse import parse_qs, urlencode, urlparse, urlunparse # pylint: disable=import-error
from common.test.utils import MockSignalHandlerMixin, disable_signal
from lms.djangoapps.courseware.tests.factories import BetaTesterFactory, StaffFactory
from lms.djangoapps.discussion.django_comment_client.tests.utils import ForumsEnableMixin
from lms.djangoapps.discussion.rest_api import api
from lms.djangoapps.discussion.rest_api.api import (
create_comment,
create_thread,
delete_comment,
delete_thread,
get_comment_list,
get_course,
get_course_topics,
get_thread,
get_thread_list,
update_comment,
update_thread
)
from lms.djangoapps.discussion.rest_api.exceptions import (
CommentNotFoundError,
DiscussionDisabledError,
ThreadNotFoundError
)
from lms.djangoapps.discussion.rest_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
make_paginated_api_response
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role
)
from openedx.core.lib.exceptions import CourseNotFoundError, PageNotFoundError
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the xblock.
"""
course.tabs = [tab for tab in course.tabs if not tab.type == 'discussion']
modulestore().update_item(course, user_id)
def _discussion_disabled_course_for(user):
"""
Create and return a course with discussions disabled.
The user passed in will be enrolled in the course.
"""
course_with_disabled_forums = CourseFactory.create()
CourseEnrollmentFactory.create(user=user, course_id=course_with_disabled_forums.id)
_remove_discussion_tab(course_with_disabled_forums, user.id)
return course_with_disabled_forums
def _create_course_and_cohort_with_user_role(course_is_cohorted, user, role_name):
"""
Creates a course with the value of `course_is_cohorted`, plus `always_cohort_inline_discussions`
set to True (which is no longer the default value). Then 1) enrolls the user in that course,
2) creates a cohort that the user is placed in, and 3) adds the user to the given role.
    Returns: a list containing the created course and the created cohort
"""
cohort_course = CourseFactory.create(
cohort_config={"cohorted": course_is_cohorted, "always_cohort_inline_discussions": True}
)
CourseEnrollmentFactory.create(user=user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [user]
return [cohort_course, cohort]
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTest(ForumsEnableMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_course"""
@classmethod
def setUpClass(cls):
super(GetCourseTest, cls).setUpClass()
cls.course = CourseFactory.create(org="x", course="y", run="z")
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTest, self).setUp()
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
get_course(self.request, self.course.id)
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
get_course(self.request, _discussion_disabled_course_for(self.user).id)
def test_basic(self):
self.assertEqual(
get_course(self.request, self.course.id),
{
"id": six.text_type(self.course.id),
"blackouts": [],
"thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
"following_thread_list_url": (
"http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&following=True"
),
"topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
}
)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTestBlackouts(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""
Tests of get_course for courses that have blackout dates.
"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTestBlackouts, self).setUp()
self.course = CourseFactory.create(org="x", course="y", run="z")
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_blackout(self):
# A variety of formats is accepted
self.course.discussion_blackouts = [
["2015-06-09T00:00:00Z", "6-10-15"],
[1433980800000, datetime(2015, 6, 12)],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(
result["blackouts"],
[
{"start": "2015-06-09T00:00:00+00:00", "end": "2015-06-10T00:00:00+00:00"},
{"start": "2015-06-11T00:00:00+00:00", "end": "2015-06-12T00:00:00+00:00"},
]
)
@ddt.data(None, "not a datetime", "2015", [])
def test_blackout_errors(self, bad_value):
self.course.discussion_blackouts = [
[bad_value, "2015-06-09T00:00:00Z"],
["2015-06-10T00:00:00Z", "2015-06-11T00:00:00Z"],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(result["blackouts"], [])
@mock.patch.dict("django.conf.settings.FEATURES", {"DISABLE_START_DATES": False})
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTopicsTest(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course_topics"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTopicsTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.partition = UserPartition(
0,
"partition",
"Test Partition",
[Group(0, "Cohort A"), Group(1, "Cohort B")],
scheme_id="cohort"
)
self.course = CourseFactory.create(
org="x",
course="y",
run="z",
start=datetime.now(UTC),
discussion_topics={"Test Topic": {"id": "non-courseware-topic-id"}},
user_partitions=[self.partition],
cohort_config={"cohorted": True},
days_early_for_beta=3
)
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def make_discussion_xblock(self, topic_id, category, subcategory, **kwargs):
"""
Build a discussion xblock in self.course.
"""
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_id,
discussion_category=category,
discussion_target=subcategory,
**kwargs
)
def get_thread_list_url(self, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = "http://testserver/api/discussion/v1/threads/"
topic_ids_to_query = [("topic_id", topic_id) for topic_id in topic_id_list]
query_list = [("course_id", six.text_type(self.course.id))] + topic_ids_to_query
return urlunparse(("", "", path, "", urlencode(query_list), ""))
def get_course_topics(self):
"""
        Get course topics for self.course as self.request.user, generating
        absolute URIs with a test scheme/host.
"""
return get_course_topics(self.request, self.course.id)
def make_expected_tree(self, topic_id, name, children=None):
"""
Build an expected result tree given a topic id, display name, and
children
"""
topic_id_list = [topic_id] if topic_id else [child["id"] for child in children]
children = children or []
node = {
"id": topic_id,
"name": name,
"children": children,
"thread_list_url": self.get_thread_list_url(topic_id_list)
}
return node
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course_topics(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
self.get_course_topics()
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(DiscussionDisabledError):
self.get_course_topics()
def test_without_courseware(self):
actual = self.get_course_topics()
expected = {
"courseware_topics": [],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_with_courseware(self):
self.make_discussion_xblock("courseware-topic-id", "Foo", "Bar")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"Foo",
[self.make_expected_tree("courseware-topic-id", "Bar")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_many(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"A": {"id": "non-courseware-1"},
"B": {"id": "non-courseware-2"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_xblock("courseware-1", "A", "1")
self.make_discussion_xblock("courseware-2", "A", "2")
self.make_discussion_xblock("courseware-3", "B", "1")
self.make_discussion_xblock("courseware-4", "B", "2")
self.make_discussion_xblock("courseware-5", "C", "1")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"A",
[
self.make_expected_tree("courseware-1", "1"),
self.make_expected_tree("courseware-2", "2"),
]
),
self.make_expected_tree(
None,
"B",
[
self.make_expected_tree("courseware-3", "1"),
self.make_expected_tree("courseware-4", "2"),
]
),
self.make_expected_tree(
None,
"C",
[self.make_expected_tree("courseware-5", "1")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-1", "A"),
self.make_expected_tree("non-courseware-2", "B"),
],
}
self.assertEqual(actual, expected)
def test_sort_key(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"W": {"id": "non-courseware-1", "sort_key": "Z"},
"X": {"id": "non-courseware-2"},
"Y": {"id": "non-courseware-3", "sort_key": "Y"},
"Z": {"id": "non-courseware-4", "sort_key": "W"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_xblock("courseware-1", "First", "A", sort_key="D")
self.make_discussion_xblock("courseware-2", "First", "B", sort_key="B")
self.make_discussion_xblock("courseware-3", "First", "C", sort_key="E")
self.make_discussion_xblock("courseware-4", "Second", "A", sort_key="F")
self.make_discussion_xblock("courseware-5", "Second", "B", sort_key="G")
self.make_discussion_xblock("courseware-6", "Second", "C")
self.make_discussion_xblock("courseware-7", "Second", "D", sort_key="A")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "B"),
self.make_expected_tree("courseware-1", "A"),
self.make_expected_tree("courseware-3", "C"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-7", "D"),
self.make_expected_tree("courseware-6", "C"),
self.make_expected_tree("courseware-4", "A"),
self.make_expected_tree("courseware-5", "B"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-4", "Z"),
self.make_expected_tree("non-courseware-2", "X"),
self.make_expected_tree("non-courseware-3", "Y"),
self.make_expected_tree("non-courseware-1", "W"),
],
}
self.assertEqual(actual, expected)
def test_access_control(self):
"""
Test that only topics that a user has access to are returned. The
ways in which a user may not have access are:
* Module is visible to staff only
* Module has a start date in the future
* Module is accessible only to a group the user is not in
Also, there is a case that ensures that a category with no accessible
subcategories does not appear in the result.
"""
beta_tester = BetaTesterFactory.create(course_key=self.course.id)
CourseEnrollmentFactory.create(user=beta_tester, course_id=self.course.id)
staff = StaffFactory.create(course_key=self.course.id)
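        # Place self.user in Cohort A and the beta tester in Cohort B.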
for user, group_idx in [(self.user, 0), (beta_tester, 1)]:
cohort = CohortFactory.create(
course_id=self.course.id,
name=self.partition.groups[group_idx].name,
users=[user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=cohort,
partition_id=self.partition.id,
group_id=self.partition.groups[group_idx].id
)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.make_discussion_xblock("courseware-1", "First", "Everybody")
self.make_discussion_xblock(
"courseware-2",
"First",
"Cohort A",
group_access={self.partition.id: [self.partition.groups[0].id]}
)
self.make_discussion_xblock(
"courseware-3",
"First",
"Cohort B",
group_access={self.partition.id: [self.partition.groups[1].id]}
)
self.make_discussion_xblock("courseware-4", "Second", "Staff Only", visible_to_staff_only=True)
self.make_discussion_xblock(
"courseware-5",
"Second",
"Future Start Date",
start=datetime.now(UTC) + timedelta(days=1)
)
student_actual = self.get_course_topics()
student_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(student_actual, student_expected)
self.request.user = beta_tester
beta_actual = self.get_course_topics()
beta_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[self.make_expected_tree("courseware-5", "Future Start Date")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(beta_actual, beta_expected)
self.request.user = staff
staff_actual = self.get_course_topics()
staff_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-5", "Future Start Date"),
self.make_expected_tree("courseware-4", "Staff Only"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(staff_actual, staff_expected)
def test_discussion_topic(self):
"""
        Tests discussion topic details against the requested topic ids
"""
topic_id_1 = "topic_id_1"
topic_id_2 = "topic_id_2"
self.make_discussion_xblock(topic_id_1, "test_category_1", "test_target_1")
self.make_discussion_xblock(topic_id_2, "test_category_2", "test_target_2")
actual = get_course_topics(self.request, self.course.id, {"topic_id_1", "topic_id_2"})
self.assertEqual(
actual,
{
"non_courseware_topics": [],
"courseware_topics": [
{
"children": [{
"children": [],
"id": "topic_id_1",
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_1",
"name": "test_target_1"
}],
"id": None,
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_1",
"name": "test_category_1"
},
{
"children":
[{
"children": [],
"id": "topic_id_2",
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_2",
"name": "test_target_2"
}],
"id": None,
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_2",
"name": "test_category_2"
}
]
}
)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetThreadListTest(ForumsEnableMixin, CommentsServiceMockMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_thread_list"""
@classmethod
def setUpClass(cls):
super(GetThreadListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetThreadListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
self.course.cohort_config = {"cohorted": False}
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
self.cohort = CohortFactory.create(course_id=self.course.id)
def get_thread_list(
self,
threads,
page=1,
page_size=1,
num_pages=1,
course=None,
topic_id_list=None,
):
"""
Register the appropriate comments service response, then call
get_thread_list and return the result.
"""
course = course or self.course
self.register_get_threads_response(threads, page, num_pages)
ret = get_thread_list(self.request, course.id, page, page_size, topic_id_list)
return ret
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_thread_list(self.request, CourseLocator.from_string("non/existent/course"), 1, 1)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_thread_list([])
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
self.get_thread_list([], course=_discussion_disabled_course_for(self.user))
def test_empty(self):
self.assertEqual(
self.get_thread_list([], num_pages=0).data,
{
"pagination": {
"next": None,
"previous": None,
"num_pages": 0,
"count": 0
},
"results": [],
"text_search_rewrite": None,
}
)
def test_get_threads_by_topic_id(self):
self.get_thread_list([], topic_id_list=["topic_x", "topic_meow"])
self.assertEqual(urlparse(httpretty.last_request().path).path, "/api/v1/threads")
self.assert_last_query_params({
"user_id": [six.text_type(self.user.id)],
"course_id": [six.text_type(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["1"],
"commentable_ids": ["topic_x,topic_meow"]
})
def test_basic_query_params(self):
self.get_thread_list([], page=6, page_size=14)
self.assert_last_query_params({
"user_id": [six.text_type(self.user.id)],
"course_id": [six.text_type(self.course.id)],
"sort_key": ["activity"],
"page": ["6"],
"per_page": ["14"],
})
def test_thread_content(self):
self.course.cohort_config = {"cohorted": True}
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
source_threads = [
make_minimal_cs_thread({
"id": "test_thread_id_0",
"course_id": six.text_type(self.course.id),
"commentable_id": "topic_x",
"username": self.author.username,
"user_id": str(self.author.id),
"title": "Test Title",
"body": "Test body",
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
"endorsed": True,
"read": True,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
}),
make_minimal_cs_thread({
"id": "test_thread_id_1",
"course_id": six.text_type(self.course.id),
"commentable_id": "topic_y",
"group_id": self.cohort.id,
"username": self.author.username,
"user_id": str(self.author.id),
"thread_type": "question",
"title": "Another Test Title",
"body": "More content",
"votes": {"up_count": 9},
"comments_count": 18,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
})
]
expected_threads = [
self.expected_thread_data({
"id": "test_thread_id_0",
"author": self.author.username,
"topic_id": "topic_x",
"vote_count": 4,
"comment_count": 6,
"unread_comment_count": 3,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_0",
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"has_endorsed": True,
"read": True,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
}),
self.expected_thread_data({
"id": "test_thread_id_1",
"author": self.author.username,
"topic_id": "topic_y",
"group_id": self.cohort.id,
"group_name": self.cohort.name,
"type": "question",
"title": "Another Test Title",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"vote_count": 9,
"comment_count": 19,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=False"
),
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
}),
]
expected_result = make_paginated_api_response(
results=expected_threads, count=2, num_pages=1, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list(source_threads).data,
expected_result
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False]
)
)
@ddt.unpack
def test_request_group(self, role_name, course_is_cohorted):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.get_thread_list([], course=cohort_course)
actual_has_group = "group_id" in httpretty.last_request().querystring
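        # Only students in cohorted courses are restricted to their own
        # group, so only those requests should carry a group_id.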
expected_has_group = (course_is_cohorted and role_name == FORUM_ROLE_STUDENT)
self.assertEqual(actual_has_group, expected_has_group)
def test_pagination(self):
# N.B. Empty thread list is not realistic but convenient for this test
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link="http://testserver/test_path?page=2", previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=1, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[],
count=0,
num_pages=3,
next_link="http://testserver/test_path?page=3",
previous_link="http://testserver/test_path?page=1"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=2, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link=None, previous_link="http://testserver/test_path?page=2"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=3, num_pages=3).data,
expected_result
)
# Test page past the last one
self.register_get_threads_response([], page=3, num_pages=3)
with self.assertRaises(PageNotFoundError):
get_thread_list(self.request, self.course.id, page=4, page_size=10)
@ddt.data(None, "rewritten search string")
def test_text_search(self, text_search_rewrite):
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": text_search_rewrite})
self.register_get_threads_search_response([], text_search_rewrite, num_pages=0)
self.assertEqual(
get_thread_list(
self.request,
self.course.id,
page=1,
page_size=10,
text_search="test search string"
).data,
expected_result
)
self.assert_last_query_params({
"user_id": [six.text_type(self.user.id)],
"course_id": [six.text_type(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["10"],
"text": ["test search string"],
})
def test_following(self):
self.register_subscribed_threads_response(self.user, [], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
following=True,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/users/{}/subscribed_threads".format(self.user.id)
)
self.assert_last_query_params({
"user_id": [six.text_type(self.user.id)],
"course_id": [six.text_type(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
})
@ddt.data("unanswered", "unread")
def test_view_query(self, query):
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
view=query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [six.text_type(self.user.id)],
"course_id": [six.text_type(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
query: ["true"],
})
@ddt.data(
("last_activity_at", "activity"),
("comment_count", "comments"),
("vote_count", "votes")
)
@ddt.unpack
def test_order_by_query(self, http_query, cc_query):
"""
Tests the order_by parameter
Arguments:
http_query (str): Query string sent in the http request
cc_query (str): Query string used for the comments client service
"""
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_by=http_query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [six.text_type(self.user.id)],
"course_id": [six.text_type(self.course.id)],
"sort_key": [cc_query],
"page": ["1"],
"per_page": ["11"],
})
def test_order_direction(self):
"""
Only "desc" is supported for order. Also, since it is simply swallowed,
it isn't included in the params.
"""
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_direction="desc",
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [six.text_type(self.user.id)],
"course_id": [six.text_type(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
})
def test_invalid_order_direction(self):
"""
Test with invalid order_direction (e.g. "asc")
"""
with self.assertRaises(ValidationError) as assertion:
self.register_get_threads_response([], page=1, num_pages=0)
get_thread_list( # pylint: disable=expression-not-assigned
self.request,
self.course.id,
page=1,
page_size=11,
order_direction="asc",
).data
self.assertIn("order_direction", assertion.exception.message_dict)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCommentListTest(ForumsEnableMixin, CommentsServiceMockMixin, SharedModuleStoreTestCase):
"""Test for get_comment_list"""
@classmethod
def setUpClass(cls):
super(GetCommentListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCommentListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
def make_minimal_cs_thread(self, overrides=None):
"""
Create a thread with the given overrides, plus the course_id if not
already in overrides.
"""
overrides = overrides.copy() if overrides else {}
overrides.setdefault("course_id", six.text_type(self.course.id))
return make_minimal_cs_thread(overrides)
def get_comment_list(self, thread, endorsed=None, page=1, page_size=1):
"""
Register the appropriate comments service response, then call
get_comment_list and return the result.
"""
self.register_get_thread_response(thread)
return get_comment_list(self.request, thread["id"], endorsed, page, page_size)
def test_nonexistent_thread(self):
thread_id = "nonexistent_thread"
self.register_get_thread_error_response(thread_id, 404)
with self.assertRaises(ThreadNotFoundError):
get_comment_list(self.request, thread_id, endorsed=False, page=1, page_size=1)
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread({"course_id": "non/existent/course"}))
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread())
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
with self.assertRaises(DiscussionDisabledError):
self.get_comment_list(
self.make_minimal_cs_thread(
overrides={"course_id": six.text_type(disabled_course.id)}
)
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(
self,
role_name,
course_is_cohorted,
topic_is_cohorted,
thread_group_state
):
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
thread = self.make_minimal_cs_thread({
"course_id": six.text_type(cohort_course.id),
"commentable_id": "test_topic",
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
topic_is_cohorted and
thread_group_state == "different_group"
)
try:
self.get_comment_list(thread)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(True, False)
def test_discussion_endorsed(self, endorsed_value):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "discussion"}),
endorsed=endorsed_value
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field may not be specified for discussion threads."]}
)
def test_question_without_endorsed(self):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "question"}),
endorsed=None
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field is required for question threads."]}
)
def test_empty(self):
discussion_thread = self.make_minimal_cs_thread(
{"thread_type": "discussion", "children": [], "resp_total": 0}
)
self.assertEqual(
self.get_comment_list(discussion_thread).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
question_thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [],
"non_endorsed_responses": [],
"non_endorsed_resp_total": 0
})
self.assertEqual(
self.get_comment_list(question_thread, endorsed=False).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
self.assertEqual(
self.get_comment_list(question_thread, endorsed=True).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
def test_basic_query_params(self):
self.get_comment_list(
self.make_minimal_cs_thread({
"children": [make_minimal_cs_comment({"username": self.user.username})],
"resp_total": 71
}),
page=6,
page_size=14
)
self.assert_query_params_equal(
httpretty.httpretty.latest_requests[-2],
{
"user_id": [str(self.user.id)],
"mark_as_read": ["False"],
"recursive": ["False"],
"resp_skip": ["70"],
"resp_limit": ["14"],
"with_responses": ["True"],
}
)
def test_discussion_content(self):
source_comments = [
{
"type": "comment",
"id": "test_comment_1",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"body": "Test body",
"endorsed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"child_count": 0,
"children": [],
},
{
"type": "comment",
"id": "test_comment_2",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": True,
"anonymous_to_peers": False,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"body": "More content",
"endorsed": False,
"abuse_flaggers": [str(self.user.id)],
"votes": {"up_count": 7},
"child_count": 0,
"children": [],
}
]
expected_comments = [
{
"id": "test_comment_1",
"thread_id": "test_thread",
"parent_id": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
{
"id": "test_comment_2",
"thread_id": "test_thread",
"parent_id": None,
"author": None,
"author_label": None,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": True,
"voted": False,
"vote_count": 7,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
]
actual_comments = self.get_comment_list(
self.make_minimal_cs_thread({"children": source_comments})
).data["results"]
self.assertEqual(actual_comments, expected_comments)
def test_question_content(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment", "username": self.user.username})],
"non_endorsed_responses": [make_minimal_cs_comment({
"id": "non_endorsed_comment", "username": self.user.username
})],
"non_endorsed_resp_total": 1,
})
endorsed_actual = self.get_comment_list(thread, endorsed=True).data
self.assertEqual(endorsed_actual["results"][0]["id"], "endorsed_comment")
non_endorsed_actual = self.get_comment_list(thread, endorsed=False).data
self.assertEqual(non_endorsed_actual["results"][0]["id"], "non_endorsed_comment")
def test_endorsed_by_anonymity(self):
"""
Ensure thread anonymity is properly considered in serializing
endorsed_by.
"""
thread = self.make_minimal_cs_thread({
"anonymous": True,
"children": [
make_minimal_cs_comment({
"username": self.user.username,
"endorsement": {"user_id": str(self.author.id), "time": "2015-05-18T12:34:56Z"},
})
]
})
actual_comments = self.get_comment_list(thread).data["results"]
self.assertIsNone(actual_comments[0]["endorsed_by"])
@ddt.data(
("discussion", None, "children", "resp_total"),
("question", False, "non_endorsed_responses", "non_endorsed_resp_total"),
)
@ddt.unpack
def test_cs_pagination(self, thread_type, endorsed_arg, response_field, response_total_field):
"""
Test cases in which pagination is done by the comments service.
thread_type is the type of thread (question or discussion).
endorsed_arg is the value of the endorsed argument.
        response_field is the field in which responses are returned for the
given thread type.
response_total_field is the field in which the total number of responses
is returned for the given thread type.
"""
# N.B. The mismatch between the number of children and the listed total
# number of responses is unrealistic but convenient for this test
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [make_minimal_cs_comment({"username": self.user.username})],
response_total_field: 5,
})
# Only page
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=5).data
self.assertIsNone(actual["pagination"]["next"])
self.assertIsNone(actual["pagination"]["previous"])
# First page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=2")
self.assertIsNone(actual["pagination"]["previous"])
# Middle page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=3")
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=1")
# Last page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=3, page_size=2).data
self.assertIsNone(actual["pagination"]["next"])
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=2")
# Page past the end
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [],
response_total_field: 5
})
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=5)
def test_question_endorsed_pagination(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({
"id": "comment_{}".format(i),
"username": self.user.username
}) for i in range(10)]
})
def assert_page_correct(page, page_size, expected_start, expected_stop, expected_next, expected_prev):
"""
Check that requesting the given page/page_size returns the expected
output
"""
actual = self.get_comment_list(thread, endorsed=True, page=page, page_size=page_size).data
result_ids = [result["id"] for result in actual["results"]]
self.assertEqual(
result_ids,
["comment_{}".format(i) for i in range(expected_start, expected_stop)]
)
self.assertEqual(
actual["pagination"]["next"],
"http://testserver/test_path?page={}".format(expected_next) if expected_next else None
)
self.assertEqual(
actual["pagination"]["previous"],
"http://testserver/test_path?page={}".format(expected_prev) if expected_prev else None
)
# Only page
assert_page_correct(
page=1,
page_size=10,
expected_start=0,
expected_stop=10,
expected_next=None,
expected_prev=None
)
# First page of many
assert_page_correct(
page=1,
page_size=4,
expected_start=0,
expected_stop=4,
expected_next=2,
expected_prev=None
)
# Middle page of many
assert_page_correct(
page=2,
page_size=4,
expected_start=4,
expected_stop=8,
expected_next=3,
expected_prev=1
)
# Last page of many
assert_page_correct(
page=3,
page_size=4,
expected_start=8,
expected_stop=10,
expected_next=None,
expected_prev=2
)
# Page past the end
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=True, page=2, page_size=10)
@ddt.ddt
@disable_signal(api, 'thread_created')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_thread"""
LONG_TITLE = (
'Lorem ipsum dolor sit amet, consectetuer adipiscing elit. '
'Aenean commodo ligula eget dolor. Aenean massa. Cum sociis '
'natoque penatibus et magnis dis parturient montes, nascetur '
'ridiculus mus. Donec quam felis, ultricies nec, '
'pellentesque eu, pretium quis, sem. Nulla consequat massa '
'quis enim. Donec pede justo, fringilla vel, aliquet nec, '
'vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet '
'a, venenatis vitae, justo. Nullam dictum felis eu pede '
'mollis pretium. Integer tincidunt. Cras dapibus. Vivamus '
'elementum semper nisi. Aenean vulputate eleifend tellus. '
'Aenean leo ligula, porttitor eu, consequat vitae, eleifend '
'ac, enim. Aliquam lorem ante, dapibus in, viverra quis, '
'feugiat a, tellus. Phasellus viverra nulla ut metus varius '
'laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies '
'nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam '
'eget dui. Etiam rhoncus. Maecenas tempus, tellus eget '
'condimentum rhoncus, sem quam semper libero, sit amet '
'adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, '
'luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et '
'ante tincidunt tempus. Donec vitae sapien ut libero '
'venenatis faucibus. Nullam quis ante. Etiam sit amet orci '
'eget eros faucibus tincidunt. Duis leo. Sed fringilla '
'mauris sit amet nibh. Donec sodales sagittis magna. Sed '
'consequat, leo eget bibendum sodales, augue velit cursus '
'nunc, quis gravida magna mi a libero. Fusce vulputate '
'eleifend sapien. Vestibulum purus quam, scelerisque ut, '
'mollis sed, nonummy id, metus. Nullam accumsan lorem in '
'dui. Cras ultricies mi eu turpis hendrerit fringilla. '
'Vestibulum ante ipsum primis in faucibus orci luctus et '
'ultrices posuere cubilia Curae; In ac dui quis mi '
'consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu '
'tortor, suscipit eget, imperdiet nec, imperdiet iaculis, '
'ipsum. Sed aliquam ultrices mauris. Integer ante arcu, '
'accumsan a, consectetuer eget, posuere ut, mauris. Praesent '
'adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc '
'nonummy metus.'
)
@classmethod
def setUpClass(cls):
super(CreateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.minimal_data = {
"course_id": six.text_type(self.course.id),
"topic_id": "test_topic",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
}
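        # minimal_data is the smallest payload create_thread accepts; tests
        # below extend a copy of it as needed.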
@mock.patch("eventtracking.tracker.emit")
def test_basic(self, mock_emit):
cs_thread = make_minimal_cs_thread({
"id": "test_id",
"username": self.user.username,
"read": True,
})
self.register_post_thread_response(cs_thread)
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_thread(self.request, self.minimal_data)
expected = self.expected_thread_data({
"id": "test_id",
"course_id": six.text_type(self.course.id),
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id",
"read": True,
})
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [six.text_type(self.course.id)],
"commentable_id": ["test_topic"],
"thread_type": ["discussion"],
"title": ["Test Title"],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": "Test Title",
"title_truncated": False,
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@mock.patch("eventtracking.tracker.emit")
def test_title_truncation(self, mock_emit):
data = self.minimal_data.copy()
data['title'] = self.LONG_TITLE
cs_thread = make_minimal_cs_thread({
"id": "test_id",
"username": self.user.username,
"read": True,
})
self.register_post_thread_response(cs_thread)
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
create_thread(self.request, data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": self.LONG_TITLE[:1000],
"title_truncated": True,
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group_set", "group_is_none", "group_is_set"],
)
)
@ddt.unpack
def test_group_id(self, role_name, course_is_cohorted, topic_is_cohorted, data_group_state):
"""
Tests whether the user has permission to create a thread with certain
group_id values.
        If no group_id is supplied, the thread can always be created. If a
        group_id is supplied (even if it is None), creation succeeds only when
        the course is cohorted and the user has a privileged role; otherwise a
        ValidationError is expected.
"""
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
if course_is_cohorted:
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_post_thread_response({"username": self.user.username})
data = self.minimal_data.copy()
data["course_id"] = six.text_type(cohort_course.id)
if data_group_state == "group_is_none":
data["group_id"] = None
elif data_group_state == "group_is_set":
if course_is_cohorted:
data["group_id"] = cohort.id + 1
else:
data["group_id"] = 1 # Set to any value since there is no cohort
expected_error = (
data_group_state in ["group_is_none", "group_is_set"] and
(not course_is_cohorted or role_name == FORUM_ROLE_STUDENT)
)
try:
create_thread(self.request, data)
self.assertFalse(expected_error)
actual_post_data = httpretty.last_request().parsed_body
if data_group_state == "group_is_set":
self.assertEqual(actual_post_data["group_id"], [str(data["group_id"])])
elif data_group_state == "no_group_set" and course_is_cohorted and topic_is_cohorted:
self.assertEqual(actual_post_data["group_id"], [str(cohort.id)])
else:
self.assertNotIn("group_id", actual_post_data)
except ValidationError as ex:
if not expected_error:
self.fail(u"Unexpected validation error: {}".format(ex))
def test_following(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_subscription_response(self.user)
data = self.minimal_data.copy()
data["following"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["following"], True)
cs_request = httpretty.last_request()
self.assertEqual(
urlparse(cs_request.path).path,
"/api/v1/users/{}/subscriptions".format(self.user.id)
)
self.assertEqual(cs_request.method, "POST")
self.assertEqual(
cs_request.parsed_body,
{"source_type": ["thread"], "source_id": ["test_id"]}
)
def test_voted(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_thread_votes_response("test_id")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'thread_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_thread(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_thread_flag_response("test_id")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_course_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["This field is required."]})
def test_course_id_invalid(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "invalid!"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, {"course_id": "non/existent/course"})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.minimal_data["course_id"] = six.text_type(disabled_course.id)
with self.assertRaises(DiscussionDisabledError):
create_thread(self.request, self.minimal_data)
def test_invalid_field(self):
data = self.minimal_data.copy()
data["type"] = "invalid_type"
with self.assertRaises(ValidationError):
create_thread(self.request, data)
@ddt.ddt
@disable_signal(api, 'comment_created')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_comment"""
@classmethod
def setUpClass(cls):
super(CreateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": six.text_type(self.course.id),
"commentable_id": "test_topic",
})
)
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
@ddt.data(None, "test_parent")
@mock.patch("eventtracking.tracker.emit")
def test_success(self, parent_id, mock_emit):
if parent_id:
self.register_get_comment_response({"id": parent_id, "thread_id": "test_thread"})
self.register_post_comment_response(
{
"id": "test_comment",
"username": self.user.username,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
},
thread_id="test_thread",
parent_id=parent_id
)
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
with self.assert_signal_sent(api, 'comment_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_comment(self.request, data)
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
expected_url
)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [six.text_type(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)]
}
)
expected_event_name = (
"edx.forum.comment.created" if parent_id else
"edx.forum.response.created"
)
expected_event_data = {
"discussion": {"id": "test_thread"},
"commentable_id": "test_topic",
"options": {"followed": False},
"id": "test_comment",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
if parent_id:
expected_event_data["response"] = {"id": parent_id}
actual_event_name, actual_event_data = mock_emit.call_args[0]
self.assertEqual(actual_event_name, expected_event_name)
self.assertEqual(actual_event_data, expected_event_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
)
)
@ddt.unpack
def test_endorsed(self, role_name, is_thread_author, thread_type):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": six.text_type(self.course.id),
"thread_type": thread_type,
"user_id": str(self.user.id) if is_thread_author else str(self.user.id + 1),
})
)
self.register_post_comment_response({"username": self.user.username}, "test_thread")
data = self.minimal_data.copy()
data["endorsed"] = True
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(not is_thread_author or thread_type == "discussion")
)
try:
create_comment(self.request, data)
self.assertEqual(httpretty.last_request().parsed_body["endorsed"], ["True"])
self.assertFalse(expected_error)
except ValidationError:
self.assertTrue(expected_error)
def test_voted(self):
self.register_post_comment_response({"id": "test_comment", "username": self.user.username}, "test_thread")
self.register_comment_votes_response("test_comment")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'comment_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_comment(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_comment_response({"id": "test_comment", "username": self.user.username}, "test_thread")
self.register_comment_flag_response("test_comment")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_comment(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_thread_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["This field is required."]})
def test_thread_id_not_found(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
create_comment(self.request, self.minimal_data)
def test_nonexistent_course(self):
self.register_get_thread_response(
make_minimal_cs_thread({"id": "test_thread", "course_id": "non/existent/course"})
)
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": six.text_type(disabled_course.id),
"commentable_id": "test_topic",
})
)
with self.assertRaises(DiscussionDisabledError):
create_comment(self.request, self.minimal_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_get_thread_response(make_minimal_cs_thread({
"id": "cohort_thread",
"course_id": six.text_type(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}))
self.register_post_comment_response({"username": self.user.username}, thread_id="cohort_thread")
data = self.minimal_data.copy()
data["thread_id"] = "cohort_thread"
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
create_comment(self.request, data)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
def test_invalid_field(self):
data = self.minimal_data.copy()
del data["raw_body"]
with self.assertRaises(ValidationError):
create_comment(self.request, data)
@ddt.ddt
@disable_signal(api, 'thread_edited')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_thread"""
@classmethod
def setUpClass(cls):
super(UpdateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": six.text_type(self.course.id),
"commentable_id": "original_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"thread_type": "discussion",
"title": "Original Title",
"body": "Original body",
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_put_thread_response(cs_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
# Ensure that the default following value of False is not applied implicitly
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_thread()
update_thread(self.request, "test_thread", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_thread(self.request, "test_thread", {"raw_body": "Edited body"})
self.assertEqual(actual, self.expected_thread_data({
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"topic_id": "original_topic",
"read": True,
"title": "Original Title",
}))
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [six.text_type(self.course.id)],
"commentable_id": ["original_topic"],
"thread_type": ["discussion"],
"title": ["Original Title"],
"body": ["Edited body"],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"closed": ["False"],
"pinned": ["False"],
"read": ["False"],
}
)
def test_nonexistent_thread(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
update_thread(self.request, "test_thread", {})
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": six.text_type(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
update_thread(self.request, "test_thread", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": six.text_type(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_thread(self.request, "test_thread", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_author_only_fields(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
data = {field: "edited" for field in ["topic_id", "title", "raw_body"]}
data["type"] = "question"
expected_error = role_name == FORUM_ROLE_STUDENT
try:
update_thread(self.request, "test_thread", data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{field: ["This field is not editable."] for field in data.keys()}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_following(self, old_following, new_following):
"""
Test attempts to edit the "following" field.
old_following indicates whether the thread should be followed at the
start of the test. new_following indicates the value for the "following"
field in the update. If old_following and new_following are the same, no
update should be made. Otherwise, a subscription should be POSTed or
DELETEd according to the new_following value.
"""
if old_following:
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_subscription_response(self.user)
self.register_thread()
data = {"following": new_following}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["following"], new_following)
last_request_path = urlparse(httpretty.last_request().path).path
subscription_url = "/api/v1/users/{}/subscriptions".format(self.user.id)
if old_following == new_following:
self.assertNotEqual(last_request_path, subscription_url)
else:
self.assertEqual(last_request_path, subscription_url)
self.assertEqual(
httpretty.last_request().method,
"POST" if new_following else "DELETE"
)
request_data = (
httpretty.last_request().parsed_body if new_following else
parse_qs(urlparse(httpretty.last_request().path).query)
)
request_data.pop("request_id", None)
self.assertEqual(
request_data,
{"source_type": ["thread"], "source_id": ["test_thread"]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the thread should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
self.register_thread_votes_response("test_thread")
self.register_thread()
data = {"voted": new_vote_status}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/threads/test_thread/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'original_topic',
'id': 'test_thread'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests vote_count increases and decreases correctly from the same user
"""
        # setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
starting_vote_count = 1
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": starting_vote_count}})
        # first vote
data = {"voted": first_vote}
result = update_thread(self.request, "test_thread", data)
self.register_thread(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
        # second vote
data = {"voted": second_vote}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests vote_count increases and decreases correctly from different users
"""
        # setup
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_thread"])
vote_count += 1
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_thread(request, "test_thread", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the thread should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
        unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_thread_flag_response("test_thread")
self.register_thread({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/threads/test_thread/abuse_flag"
unflag_url = "/api/v1/threads/test_thread/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
def test_invalid_field(self):
self.register_thread()
with self.assertRaises(ValidationError) as assertion:
update_thread(self.request, "test_thread", {"raw_body": ""})
self.assertEqual(
assertion.exception.message_dict,
{"raw_body": ["This field may not be blank."]}
)
@ddt.ddt
@disable_signal(api, 'comment_edited')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_comment"""
@classmethod
def setUpClass(cls):
super(UpdateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment(self, overrides=None, thread_overrides=None, course=None):
"""
Make a comment with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
if course is None:
course = self.course
cs_thread_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": six.text_type(course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": "test_comment",
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"body": "Original body",
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_put_comment_response(cs_comment_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
self.register_comment()
update_comment(self.request, "test_comment", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
@ddt.data(None, "test_parent")
def test_basic(self, parent_id):
self.register_comment({"parent_id": parent_id})
with self.assert_signal_sent(api, 'comment_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_comment(self.request, "test_comment", {"raw_body": "Edited body"})
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [six.text_type(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_nonexistent_comment(self):
self.register_get_comment_error_response("test_comment", 404)
with self.assertRaises(CommentNotFoundError):
update_comment(self.request, "test_comment", {})
def test_nonexistent_course(self):
self.register_comment(thread_overrides={"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_unenrolled(self):
self.register_comment()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_discussions_disabled(self):
self.register_comment(course=_discussion_disabled_course_for(self.user))
with self.assertRaises(DiscussionDisabledError):
update_comment(self.request, "test_comment", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_get_thread_response(make_minimal_cs_thread())
self.register_comment(
{"thread_id": "test_thread"},
thread_overrides={
"id": "test_thread",
"course_id": six.text_type(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_comment(self.request, "test_comment", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
))
@ddt.unpack
def test_raw_body_access(self, role_name, is_thread_author, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1))
}
)
expected_error = role_name == FORUM_ROLE_STUDENT and not is_comment_author
try:
update_comment(self.request, "test_comment", {"raw_body": "edited"})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"raw_body": ["This field is not editable."]}
)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
[True, False],
))
@ddt.unpack
def test_endorsed_access(self, role_name, is_thread_author, thread_type, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"thread_type": thread_type,
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1)),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(thread_type == "discussion" or not is_thread_author)
)
try:
update_comment(self.request, "test_comment", {"endorsed": True})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"endorsed": ["This field is not editable."]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the comment should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": new_vote_status}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if new_vote_status else 0)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/comments/test_comment/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.response.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'dummy',
'id': 'test_comment'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests vote_count increases and decreases correctly from the same user
"""
        # setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
starting_vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": starting_vote_count}})
        # first vote
data = {"voted": first_vote}
result = update_comment(self.request, "test_comment", data)
self.register_comment(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
        # second vote
data = {"voted": second_vote}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests vote_count increases and decreases correctly from different users
"""
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_comment"])
vote_count += 1
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_comment(request, "test_comment", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the comment should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
        unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_comment_flag_response("test_comment")
self.register_comment({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/comments/test_comment/abuse_flag"
unflag_url = "/api/v1/comments/test_comment/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
@ddt.ddt
@disable_signal(api, 'thread_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_thread"""
@classmethod
def setUpClass(cls):
super(DeleteThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and DELETE on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": six.text_type(self.course.id),
"user_id": str(self.user.id),
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_delete_thread_response(cs_data["id"])
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_thread(self.request, self.thread_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads/{}".format(self.thread_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
delete_thread(self.request, "missing_thread")
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": six.text_type(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
delete_thread(self.request, self.thread_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a thread
        All privileged roles are able to delete a thread. A student author
        can delete a thread only if the thread is not in a cohort or is in
        the author's cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": six.text_type(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.ddt
@disable_signal(api, 'comment_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_comment"""
@classmethod
def setUpClass(cls):
super(DeleteCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
self.comment_id = "test_comment"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment_and_thread(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the override
parameters and register mock responses for both GET and DELETE on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": six.text_type(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": self.comment_id,
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_delete_comment_response(self.comment_id)
def test_basic(self):
self.register_comment_and_thread()
with self.assert_signal_sent(api, 'comment_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_comment(self.request, self.comment_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/comments/{}".format(self.comment_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_comment_id_not_found(self):
self.register_get_comment_error_response("missing_comment", 404)
with self.assertRaises(CommentNotFoundError):
delete_comment(self.request, "missing_comment")
def test_nonexistent_course(self):
self.register_comment_and_thread(
thread_overrides={"course_id": "non/existent/course"}
)
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_not_enrolled(self):
self.register_comment_and_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_comment_and_thread(
thread_overrides={"course_id": six.text_type(disabled_course.id)},
overrides={"course_id": six.text_type(disabled_course.id)}
)
with self.assertRaises(DiscussionDisabledError):
delete_comment(self.request, self.comment_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"user_id": str(self.user.id + 1)}
)
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a comment
        All privileged roles are able to delete a comment. A student author
        can delete a comment only if the comment is not in a cohort or is in
        the author's cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_comment_and_thread(
overrides={"thread_id": "test_thread"},
thread_overrides={
"course_id": six.text_type(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class RetrieveThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase
):
"""Tests for get_thread"""
@classmethod
def setUpClass(cls):
super(RetrieveThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(RetrieveThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for GET on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": six.text_type(self.course.id),
"commentable_id": "test_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"title": "Test Title",
"body": "Test body",
"resp_total": 0,
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
def test_basic(self):
self.register_thread({"resp_total": 2})
self.assertEqual(get_thread(self.request, self.thread_id), self.expected_thread_data({
"response_count": 2,
"unread_comment_count": 1,
}))
self.assertEqual(httpretty.last_request().method, "GET")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
get_thread(self.request, "missing_thread")
def test_nonauthor_enrolled_in_course(self):
non_author_user = UserFactory.create()
self.register_get_user_response(non_author_user)
CourseEnrollmentFactory.create(user=non_author_user, course_id=self.course.id)
self.register_thread()
self.request.user = non_author_user
self.assertEqual(get_thread(self.request, self.thread_id), self.expected_thread_data({
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"unread_comment_count": 1,
}))
self.assertEqual(httpretty.last_request().method, "GET")
def test_not_enrolled_in_course(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
get_thread(self.request, self.thread_id)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for retrieving a thread
        All privileged roles are able to retrieve a thread. A student can
        retrieve a thread only if the thread is not in a cohort or is in
        the student's own cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": six.text_type(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
get_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
| agpl-3.0 |
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/pip/_vendor/distlib/util.py | 327 | 52991 | #
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
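# A hedged, illustrative sketch of parse_requirement() output -- the
# attribute names come from the Container built above, but the sample
# values are assumptions, not verified output:
#
#   >>> r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
#   >>> r.name, r.extras
#   ('foo', ['bar', 'baz'])
#   >>> r.constraints
#   [('>=', '1.2'), ('<', '2.0')]
#   >>> r.requirement
#   'foo (>= 1.2, < 2.0)'
#   >>> parse_requirement('not a requirement!!') is None
#   True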
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
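# Illustrative sketch (the paths and rules are assumptions): each rule is a
# (base, glob-suffix, dest) triple; matching files are mapped to paths under
# dest, and a dest of None removes entries added by earlier rules:
#
#   rules = [
#       ('', '*.txt', 'docs'),        # top-level .txt files go under docs/
#       ('', 'internal.txt', None),   # ...except this one
#   ]
#   dests = get_resources_dests('/path/to/resources', rules)
#   # e.g. {'readme.txt': 'docs/readme.txt'}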
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on macOS
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
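# Illustrative usage (not part of the original module): prompt until one of
# the allowed characters is entered, falling back to the default on Enter:
#   c = proceed('Remove file? (y/n) ', 'yn', default='n')
#   # c is 'y' or 'n'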
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
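# Illustrative usage of cached_property (not part of the original module):
# the first attribute access runs the wrapped function, and the computed
# value then shadows the descriptor on the instance, so later accesses are
# plain attribute lookups:
#   class Thing(object):
#       @cached_property
#       def answer(self):
#           return 42   # computed only once per instance
#   t = Thing()
#   t.answer  # 42 (computed); subsequent t.answer reads the cached value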
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
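# Illustrative example (not part of the original module): on Windows
# (os.sep == '\\'), convert_path('pkg/data/./cfg') yields 'pkg\\data\\cfg';
# on POSIX systems the path is returned unchanged.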
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        # __import__ returns the top-level package for dotted names, so
        # fetch the actual submodule from sys.modules afterwards.
        __import__(module_name)
        mod = sys.modules[module_name]
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
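# Illustrative example (not part of the original module): an export
# specification maps a name to a 'prefix:suffix' dotted path plus flags:
#   e = get_export_entry('console = distlib.scripts:main [gui]')
#   # e.name == 'console', e.prefix == 'distlib.scripts',
#   # e.suffix == 'main', e.flags == ['gui']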
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
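# Illustrative example (not part of the original module): on POSIX,
#   path_to_cache_dir('/home/user/lib')  ->  '--home--user--lib.cache'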
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
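# Illustrative example (not part of the original module):
#   parse_credentials('user:secret@pypi.example.com')
#   # -> ('user', 'secret', 'pypi.example.com')
#   parse_credentials('pypi.example.com')
#   # -> (None, None, 'pypi.example.com')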
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
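# Illustrative example (not part of the original module):
#   split_filename('foo-bar-1.0-py2.7')  # -> ('foo-bar', '1.0', '2.7')
#   split_filename('foo-bar-1.0')        # -> ('foo-bar', '1.0', None)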
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
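# Illustrative example (not part of the original module):
#   parse_name_and_version('Twisted Core (12.0)')
#   # -> ('twisted core', '12.0')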
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
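# Illustrative example (not part of the original module): '*' selects all
# declared extras, and a '-' prefix subtracts one again:
#   get_extras(['*', '-dev'], ['test', 'dev'])  # -> {'test'}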
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
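# Illustrative usage of EventMixin (not part of the original module):
#   bus = EventMixin()
#   bus.add('ping', lambda event, *args, **kwargs: 'pong')
#   bus.publish('ping')  # -> ['pong']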
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
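# Illustrative usage of Sequencer (not part of the original module):
#   seq = Sequencer()
#   seq.add('build', 'test')
#   seq.add('test', 'release')
#   list(seq.get_steps('release'))  # -> ['build', 'test', 'release']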
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
        # use a slice so that a member resolving to dest_dir itself raises
        # ValueError rather than IndexError
        if not p.startswith(dest_dir) or p[plen:plen + 1] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:  # pragma: no cover
            raise ValueError('Unknown format for %r' % archive_filename)
    # Derive the tar mode from the format so that an explicitly passed
    # format also works ('mode' was previously only bound when the format
    # was inferred from the filename).
    mode = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}.get(format)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
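# Illustrative usage (not part of the original module): the returned BytesIO
# holds a complete zip archive of the tree, e.g.
#   payload = zip_dir('/tmp/somedir').getvalue()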
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
        if ((duration <= 0) and self.max is None) or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
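# Illustrative examples (not part of the original module):
#   iglob('src/**/*.py')        # all .py files under src/, recursively
#   iglob('{src,tests}/*.py')   # .py files directly under src/ or tests/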
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
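# Illustrative usage (not part of the original module; the command shown is
# just an example): output is streamed through the reader threads, printing
# dots unless verbose is set:
#   class Runner(SubprocessMixin):
#       pass
#   p = Runner(verbose=True).run_command(['echo', 'hello'])
#   # p.returncode is available once the command has finished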
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
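# Illustrative example (not part of the original module):
#   normalize_name('Django_REST.framework')  # -> 'django-rest-framework'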
| apache-2.0 |
hansonrobotics/chatbot | src/chatbot/stats.py | 1 | 3618 | import os
import logging
import pandas as pd
import glob
import re
import datetime as dt
from collections import Counter
logger = logging.getLogger('hr.chatbot.stats')
trace_pattern = re.compile(
r'../(?P<fname>.*), (?P<tloc>\(.*\)), (?P<pname>.*), (?P<ploc>\(.*\))')
def collect_history_data(history_dir, days):
today = dt.datetime.utcnow()
dfs = []
for d in glob.glob('{}/*'.format(history_dir)):
if os.path.isdir(d):
dirname = os.path.basename(d)
dirdate = None
try:
dirdate = dt.datetime.strptime(dirname, '%Y%m%d')
except Exception as ex:
logger.error(ex)
if dirdate and (days == -1 or (today - dirdate).days < days):
for fname in glob.glob('{}/{}/*.csv'.format(history_dir, dirname)):
try:
dfs.append(pd.read_csv(fname))
except Exception as ex:
logger.warn("Reading {} error: {}".format(fname, ex))
if not dfs:
return None
df = pd.concat(dfs, ignore_index=True)
    # DataFrame.sort() was removed in later pandas releases; sort_values()
    # is the equivalent call.
    df = df[df.Datetime != 'Datetime'].sort_values(
        ['User', 'Datetime']).drop_duplicates()
return df
def history_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
if days == -1:
stats_csv = '{}/full_history.csv'.format(history_dir)
else:
stats_csv = '{}/last_{}_days.csv'.format(history_dir, days)
columns = [u'Datetime', u'Revision', u'User', u'BotName',
u'AnsweredBy', u'Question', u'Answer', u'Rate', u'Trace']
df.to_csv(stats_csv, index=False, columns=columns)
logger.info("Write statistic records to {}".format(stats_csv))
records = len(df)
rates = len(df[df.Rate.notnull()])
good_rates = len(df[df.Rate.isin(['good'])])
bad_rates = len(df[df.Rate.isin(['bad'])])
    # Guard against an unbound 'csd' (and division by zero) when there
    # are no records.
    csd = float(records - bad_rates) / records if records > 0 else 0.0
response = {
'customers_satisfaction_degree': csd,
'number_of_records': records,
'number_of_rates': rates,
'number_of_good_rates': good_rates,
'number_of_bad_rates': bad_rates,
}
return response
def playback_history(df):
from client import Client
client = Client(os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC'), test=True)
pattern_column = []
for question in df.Question:
answer = client.ask(question, True)
traces = answer.get('trace')
patterns = []
if traces:
for trace in traces:
match_obj = trace_pattern.match(trace)
if match_obj:
patterns.append(match_obj.group('pname'))
pattern_column.append(patterns)
df.loc[:,'Pattern'] = pd.Series(pattern_column, index=df.index)
return df
def pattern_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
df = playback_history(df)
patterns = sum(df.Pattern, [])
counter = Counter(patterns)
pattern_freq = pd.Series(counter)
    # Series.sort() sorted in place and was removed in later pandas
    # releases; sort_values() returns the sorted series instead.
    pattern_freq = pattern_freq.sort_values(ascending=False)
stats_csv = '{}/pattern_frequency.csv'.format(history_dir)
pattern_freq.to_csv(stats_csv)
logger.info("Write pattern statistic to {}".format(stats_csv))
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), 7)
pattern_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
| mit |
TravisDart/tictactoe | tictactoe.py | 1 | 1378 | import os
from pymongo import MongoClient
from TicTacToeBoard import TicTacToeBoard
from ProgressCounter import ProgressCounter
client = MongoClient(os.environ["MONGOLABURL"])
collection = client['test1']['one']
_ = TicTacToeBoard._
blankBoard = TicTacToeBoard([
_, _, _,
_, _, _,
_, _, _
])
collection.remove({}) # Start over each time.
progressCounter = ProgressCounter()
boardQueue = set([int(blankBoard)])
boards = {
0: collection.insert({"board": int(blankBoard)})
}
while len(boardQueue) != 0:
board = TicTacToeBoard(boardQueue.pop())
thisObject = collection.find_one({"_id": boards[int(board)]})
if board.getWhoWon() is None:
thisObject["possibilities"] = []
for nextBoard in board.createNextStep():
normalizedBoard, symmetryOp = nextBoard.getUniqueRepresentation()
if normalizedBoard not in boards:
boards[normalizedBoard] = collection.insert({
"board": normalizedBoard
})
boardQueue.add(normalizedBoard)
thisObject["possibilities"] += [{
"board": boards[normalizedBoard], # This is the mongoID
"symmetry": symmetryOp
}]
collection.update({"_id": thisObject["_id"]}, thisObject)
progressCounter += 1
progressCounter.printCount()
progressCounter.done()
| mit |
onlinepcwizard/dms | dms/zone_data_util.py | 1 | 29679 | #!/usr/bin/env python3.2
#
# Copyright (c) Net24 Limited, Christchurch, New Zealand 2011-2012
# and Voyager Internet Ltd, New Zealand, 2012-2013
#
# This file is part of py-magcode-core.
#
# Py-magcode-core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Py-magcode-core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py-magcode-core. If not, see <http://www.gnu.org/licenses/>.
#
"""
Module for ZoneDataUtil mix in class for zone_engine
Split out so that changes can be seen more easily
"""
import re
from copy import copy
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from magcode.core.globals_ import *
from magcode.core.database import sql_types
from dms.globals_ import *
from dms.exceptions import *
from dms.auto_ptr_util import check_auto_ptr_privilege
from dms.dns import RRTYPE_SOA
from dms.dns import RRTYPE_NS
from dms.dns import RRTYPE_A
from dms.dns import RRTYPE_AAAA
from dms.dns import RRTYPE_CNAME
from dms.dns import RRTYPE_MX
from dms.dns import RRTYPE_SRV
from dms.dns import RRTYPE_PTR
from dms.dns import RROP_DELETE
from dms.dns import RROP_UPDATE_RRTYPE
from dms.dns import RROP_ADD
from dms.dns import RROP_PTR_UPDATE
from dms.dns import RROP_PTR_UPDATE_FORCE
from dms.dns import validate_zi_hostname
from dms.dns import validate_zi_ttl
from dms.dns import is_inet_hostname
from dms.dns import label_from_address
from dms.dns import new_zone_soa_serial
import dms.database.zone_cfg as zone_cfg
from dms.database.zone_sm import exec_zonesm
from dms.database.zone_sm import ZoneSMDoRefresh
from dms.database.zone_sm import ZoneSM
from dms.database.reverse_network import ReverseNetwork
from dms.database.zone_instance import ZoneInstance
from dms.database.rr_comment import RRComment
from dms.database.resource_record import data_to_rr
from dms.database.resource_record import RR_PTR
from dms.database.resource_record import ResourceRecord
from dms.database.reference import find_reference
from dms.database.zi_update import ZiUpdate
from dms.database.update_group import new_update_group
class DataTools(object):
"""
Container class for methods and runtime data for consistency
checking code
"""
def __init__(self, db_session, zone_sm, zi_cname_flag=False):
"""
Initialise runtime data
"""
self.db_session = db_session
self.zone_sm = zone_sm
self.name = zone_sm.name
self.zi_cname_flag = zi_cname_flag
self.zi_rr_data = {}
self.auto_ptr_data = []
self.apex_comment = None
def check_rr_consistency(self, rrs, rr, rr_data, update_group):
"""
Check that RR can be consistently added to zone
"""
# Skip for any RROP_DELETE
if update_group and rr.update_op and rr.update_op == RROP_DELETE:
return
if (not update_group or not rr.update_op
or rr.update_op != RROP_UPDATE_RRTYPE):
# Duplicate Record check
if rr in rrs:
raise DuplicateRecordInZone(self.name, rr_data)
# Can't add another SOA if there is one there already
if rr.type_ == RRTYPE_SOA:
num_soa = len([r for r in rrs if r.type_ == RRTYPE_SOA])
if num_soa:
raise ZoneAlreadyHasSOARecord(self.name, rr_data)
# CNAME addition check
if rr.type_ == RRTYPE_CNAME:
self.zi_cname_flag = True
# anti-CNAME addition check
if self.zi_cname_flag:
# Find any cnames with rr label and barf
num_lbls = len([ r for r in rrs
if (r.type_ == RRTYPE_CNAME
and r.label == rr.label)])
# Check that we are not updating an existing CNAME
if (num_lbls and update_group and rr.update_op
and rr.update_op == RROP_UPDATE_RRTYPE
and rr.type_ == RRTYPE_CNAME):
num_lbls = 0
if num_lbls:
raise ZoneCNAMEExists(self.name, rr_data)
def check_zi_consistency(self, rrs):
"""
Check consistency of zone instance
"""
# CNAME check
rr_cnames = [r for r in rrs if r.type_ == RRTYPE_CNAME]
for rr in rr_cnames:
clash = len([ r for r in rrs
if (r.label == rr.label and r.type_ != RRTYPE_CNAME)])
if clash:
raise ZoneCNAMELabelExists(self.name, self.zi_rr_data[str(rr)])
# Check NS MX and SRV records point to actual A
# and AAAA records if they are in zone
# (Bind Option check-integrity)
# NS
rr_nss = [r for r in rrs if r.type_ == RRTYPE_NS
and r.label != '@']
for rr in rr_nss:
if not rr.rdata.endswith('.'):
target_hosts = [r for r in rrs if r.label == rr.rdata]
if not len(target_hosts):
raise ZoneCheckIntegrityNoGlue(self.name,
self.zi_rr_data[str(rr)], rr.rdata)
# MX
rr_mxs = [r for r in rrs if r.type_ == RRTYPE_MX]
for rr in rr_mxs:
if not rr.rdata.endswith('.'):
rdata = rr.rdata.split()
target_hosts = [r for r in rrs if r.label == rdata[1]]
if not len(target_hosts):
raise ZoneCheckIntegrityNoGlue(self.name,
self.zi_rr_data[str(rr)], rdata[1])
#SRV
rr_srvs = [r for r in rrs if r.type_ == RRTYPE_SRV]
for rr in rr_srvs:
if not rr.rdata.endswith('.'):
rdata = rr.rdata.split()
target_hosts = [r for r in rrs if r.label == rdata[3]]
if not len(target_hosts):
raise ZoneCheckIntegrityNoGlue(self.name,
self.zi_rr_data[str(rr)], rdata[3])
# If NS records are part of the zone, no point in doing
# sanity checks as client will not be sending any SOAs
if self.zone_sm.use_apex_ns:
return
# Check that zi has 1 SOA, and that its for the apex '@'
rr_soas = [r for r in rrs if r.type_ == RRTYPE_SOA]
if not rr_soas:
raise ZoneHasNoSOARecord(self.name)
if len(rr_soas) > 1:
raise ZoneAlreadyHasSOARecord(self.name,
self.zi_rr_data[str(rr_soas[1])])
if rr_soas[0].label != '@':
raise ZoneSOARecordNotAtApex(self.name,
self.zi_rr_data[str(rr_soas[0])])
# Check that apex has at least 1 NS record
rr_nss = [r for r in rrs if r.type_ == RRTYPE_NS
and r.label == '@']
if not rr_nss:
raise ZoneHasNoNSRecord(self.name,
self.zi_rr_data[str(rr_soas[0])])
def put_zi_rr_data(self, key, rr_data):
"""
Store rr_data for later use
"""
self.zi_rr_data[key] = rr_data
def get_auto_ptr_data(self):
"""
Return auto_ptr_data
"""
return self.auto_ptr_data
def handle_auto_ptr_data(self, rr, rr_data):
"""
Handle auto reverse IP functionality.
        This is kept brief: it quickly builds a list of candidates that
        can be filtered against the netblock reverse zones later on.
"""
# We only look at IP address records
if (rr.type_ != RRTYPE_A
and rr.type_ != RRTYPE_AAAA):
return
# We ignore DELETE update_ops, as algorithm will ignore that
if (rr.update_op and rr.update_op == RROP_DELETE):
return
# Use the dnspython rewritten rdata to make sure that IPv6
# addresses are uniquely written.
hostname = rr.label + '.' + self.name if rr.label != '@' else self.name
# Force reverse is once only, and not saved to DB, track_reverse is
# force reverse all the time
force_reverse = False
if rr_data.get('force_reverse'):
force_reverse = True if rr_data['force_reverse'] else False
if rr_data.get('track_reverse'):
force_reverse = True if rr_data['track_reverse'] else force_reverse
disable = False
if rr_data.get('disable'):
disable = True if rr_data['disable'] else False
zone_ref = self.zone_sm.reference
zone_ref_str = zone_ref.reference if zone_ref else None
self.auto_ptr_data.append({ 'address': rr.rdata,
'disable': disable,
'force_reverse': force_reverse,
'hostname': hostname,
'reference': zone_ref_str})
def check_reference_string(self, ref_str):
"""
Check that the supplied reference string is complete
"""
if not re.match(r'^[\-_a-zA-Z0-9.@]+$', ref_str):
error_msg = "can only contain characters '-_a-zA-Z0-9.@'"
raise ReferenceFormatError(ref_str, error_msg)
if not re.match(r'^[0-9a-zA-Z][\-_a-zA-Z0-9.@]*$', ref_str):
error_msg = "must start with 'a-zA-Z0-9'"
raise ReferenceFormatError(ref_str, error_msg)
if len(ref_str) > 1024:
error_msg = "too long, must be <= 1024."
raise ReferenceFormatError(ref_str, error_msg)
def check_extra_data_privilege(self, rr_data, admin_privilege,
helpdesk_privilege):
"""
        Check privilege for use of extra data items to do with auto
        reverse IP settings and pyparsing error information
"""
if (not admin_privilege):
if (rr_data.get('lock_ptr')):
raise AdminPrivilegeNeeded(self.name, rr_data,
'lock_ptr')
rr_data.pop('lock_ptr', None)
if (not admin_privilege and not helpdesk_privilege):
if rr_data.get('reference'):
raise HelpdeskPrivilegeNeeded(self.name, rr_data,
'reference')
rr_data.pop('reference', None)
def add_comment(self, top_comment, comment=None, tag=None, **kwargs):
"""
Add a new comment or apex_comment
"""
# Don't do anything unless 'comment' is supplied!
if not comment and not top_comment:
return None
db_session = self.db_session
# Deal with Apex comment - special, even set text to default
# if none!
if (top_comment or tag == settings['apex_rr_tag']):
if self.zone_sm.use_apex_ns:
# If Apex done by global config, update routines
# will create an appropriate Apex comment
return None
if not comment:
comment = settings['apex_comment_template'] % self.name
tag = settings['apex_rr_tag']
# Create a new comment
rr_comment = RRComment(comment=comment, tag=tag)
db_session.add(rr_comment)
# Need to flush to get a new id from database
db_session.flush()
if (rr_comment.tag == settings['apex_rr_tag']):
self.apex_comment = rr_comment
return rr_comment.id_
def get_apex_comment(self):
"""
Return Apex Comment
"""
return self.apex_comment
def rr_data_create_comments(self, zi_data, zone_ttl,
creating_real_zi=True):
"""
Common code for creating comments, and creating comment IDs
"""
# Get comment IDs created and established.
rr_group_data = zi_data.get('rr_groups')
for rr_group in rr_group_data:
rr_groups_index = rr_group_data.index(rr_group)
top_comment = creating_real_zi and rr_groups_index == 0
comment_group_id = self.add_comment(top_comment, **rr_group)
rr_group['comment_group_id'] = comment_group_id
for rr_data in rr_group['rrs']:
# get rr_groups_index and rrs_index for error handling
rr_data['rrs_index'] = rr_group['rrs'].index(rr_data)
rr_data['rr_groups_index'] = rr_groups_index
# Handle comment IDs
rr_data['comment_rr_id'] = self.add_comment(False, **rr_data)
rr_data['comment_group_id'] = comment_group_id
# Following needed to initialise dnspython RRs correctly
rr_data['zone_ttl'] = zone_ttl
self.rr_group_data = rr_group_data
zi_data.pop('rr_groups', None)
def add_rrs(self, rrs_func, add_rr_func,
admin_privilege, helpdesk_privilege,
update_group=None):
"""
Add RR to data base
Note use of rrs_func so that list of rrs is always refreshed in
function. Can be supplied by using a no argument lambda function.
This is so that in the case of a full ZI, rrs can be added to it,
which is different to the case of incremental updates, where the
list of RRs is constructed, and the rrs just added directly to the
resource records table.
"""
db_session = self.db_session
for rr_group in self.rr_group_data:
for rr_data in rr_group['rrs']:
# Remove unneeded keys from rr_data
rr_data.pop('comment', None)
rr_data.pop('zone_id', None)
# Check privilege
self.check_extra_data_privilege(rr_data, admin_privilege,
helpdesk_privilege)
rr = data_to_rr(self.name, rr_data)
self.check_rr_consistency(rrs_func(), rr, rr_data, update_group)
# Store rr_data for zi consistency checks
self.put_zi_rr_data(str(rr), rr_data)
# Add rr to SQLAlchemy data structures
db_session.add(rr)
# Sort out RR reference part of the data structure
rr_ref_str = rr_data.get('reference')
if rr_ref_str:
self.check_reference_string(rr_ref_str)
rr_ref = find_reference(db_session, rr_ref_str)
rr.ref_id = rr_ref.id_ if rr_ref else None
rr.reference = rr_ref
# Sort out update_group if given
if update_group:
update_group.update_ops.append(rr)
add_rr_func(rr)
self.handle_auto_ptr_data(rr, rr_data)
class PseudoZi(ZiUpdate):
"""
Dummy ZI class so that ZiUpdate operations can do a trial run, so that
incremental updates can be consistency checked by zi checking code.
"""
def __init__(self, db_session, zi):
# make sure ZiUpdate runs in trial mode
ZiUpdate.__init__(self, db_session=db_session, trial_run=True)
# Copy rrs list so that changes do not trigger SQAlchemy
self.rrs = []
for rr in zi.rrs:
rr_type = sql_types[type(rr).__name__]
new_rr = rr_type(label=rr.label, domain=zi.zone.name,
ttl=rr.ttl, zone_ttl=rr.zone_ttl,
rdata=rr.rdata, lock_ptr=rr.lock_ptr, disable=rr.disable,
track_reverse=rr.track_reverse)
self.rrs.append(new_rr)
def add_rr(self, rr):
"""
Add RR to rrs list
"""
self.rrs.append(rr)
def remove_rr(self, rr):
"""
Remove rr from rrs list
"""
self.rrs.remove(rr)
class ZoneDataUtil(object):
"""
Mix in class for ZoneEngine, containing _data_to_zi and _data_to_incr
functions
"""
def _data_to_zi(self, name, zi_data, change_by, normalize_ttls=False,
admin_privilege=False, helpdesk_privilege=False):
"""
Construct a new ZI, RRS and comments, from zone_data.
"""
def set_missing_zi_data():
"""
Set missing fields in supplied zi_data to prevent problems
"""
# Set ZI Zone ttl if not already set
if 'zone_ttl' not in zi_data:
zi_data['zone_ttl'] = zone_ttl
# Set other SOA values in zi_data from defaults
# if they are not there. soa_ttl can be None
for field in ['soa_mname', 'soa_rname', 'soa_refresh', 'soa_retry',
'soa_expire', 'soa_minimum']:
if not zi_data.get(field):
zi_data[field] = zone_cfg.get_row_exc(db_session, field,
sg=zone_sm.sg)
            # We always update the serial number on zone update/publish
# but it is nicer and probably less troublesome to replace
# an existing serial number that may be out there
if not zi_data.get('soa_serial'):
if zone_sm.soa_serial:
zi_data['soa_serial'] = zone_sm.soa_serial
else:
# Obviously a new zone
zi_data['soa_serial'] = new_zone_soa_serial(db_session)
def check_zi_data():
"""
Check incoming zi_data attributes for correctness
"""
for field in ['soa_mname', 'soa_rname']:
validate_zi_hostname(name, field, zi_data[field])
for field in ['soa_refresh', 'soa_retry', 'soa_expire',
'soa_minimum', 'soa_ttl', 'zone_ttl']:
if field == 'soa_ttl' and not zi_data.get(field):
# SOA TTL can be None
continue
validate_zi_ttl(name, field, zi_data[field])
for field in ['soa_serial']:
if field == 'soa_serial' and zi_data.get(field, None) == None:
# SOA serial can be None
continue
# Check incoming data type of soa_serial
if not isinstance(zi_data['soa_serial'], int):
raise SOASerialTypeError(name)
if not ( 0 < zi_data['soa_serial'] <= (2**32-1)):
                    # RFC 2136 Section 4.2: SOA serial cannot be zero
raise SOASerialRangeError(name)
# Function start
db_session = self.db_session
# Get zone_sm to get zone ID etc
zone_sm = self._get_zone_sm(name)
zone_id = zone_sm.id_
# initialise data and zone consistency checking
data_tools = DataTools(db_session, zone_sm)
# Sort out a candidate value for zone_ttl so that RRs can be created
zone_ttl = zi_data.get('zone_ttl',
zone_cfg.get_row_exc(db_session, 'zone_ttl', sg=zone_sm.sg))
zone_ttl_supplied = 'zone_ttl' in zi_data
        # Create comments, set up comment IDs, and prepare for handling
        # the RR Groups zi_data structures
data_tools.rr_data_create_comments(zi_data, zone_ttl)
# Deal with ZI data problems, and supply defaults if missing
set_missing_zi_data()
check_zi_data()
# This constructor call sets attributes in zi as well!
zi = ZoneInstance(change_by=change_by, **zi_data)
db_session.add(zi)
apex_comment = data_tools.get_apex_comment()
if apex_comment:
zi.add_apex_comment(apex_comment)
# Get zi.id_ zi.zone_id from database
db_session.flush()
# Add RRs to zi
# Note use of lambda so that list of rrs is always refreshed in
# function
data_tools.add_rrs(lambda :zi.rrs, zi.add_rr,
admin_privilege, helpdesk_privilege)
# tie zi into data_structures
zone_sm.all_zis.append(zi)
zi.zone = zone_sm
db_session.flush()
# Normalise TTLs here
if normalize_ttls and zone_ttl_supplied:
zi.normalize_ttls()
# Update SOA and NS records - can't hurt to do it here
# This also cleans out any incoming apex NS records if
# client should not be setting them.
zi.update_apex(db_session)
# Update Zone TTLs for clean initialisation
zi.update_zone_ttls()
db_session.flush()
# Check zone consistency. Do this here as Apex RRs need to be complete.
data_tools.check_zi_consistency(zi.rrs)
return zi, data_tools.get_auto_ptr_data()
def _data_to_update(self, name, update_data, update_type, change_by,
admin_privilege=False, helpdesk_privilege=False):
"""
Construct an update group for a zone, from supplied RRS and comments.
Functional equivalent of _data_to_zi() above, but for incremental
updates
"""
# Function start
db_session = self.db_session
# Check that update_type is supplied
if not update_type:
raise UpdateTypeRequired(name)
# Get zone_sm to get zone ID etc
zone_sm = self._get_zone_sm(name)
zone_id = zone_sm.id_
# See if incremental updates are enabled for zone before queuing any
if not zone_sm.inc_updates:
raise IncrementalUpdatesDisabled(name)
# Don't queue updates for a disabled zone
if zone_sm.is_disabled():
raise ZoneDisabled(name)
# Privilege check for no apex zones - admin only
if not zone_sm.use_apex_ns and not admin_privilege:
raise ZoneAdminPrivilegeNeeded(name)
# Use candidate ZI as it always is available. zi is published zi
zi = self._get_zi(zone_sm.zi_candidate_id)
if not zi:
raise ZiNotFound(name, zone_sm.zi_candidate_id)
# Get value of zone_ttl so that RRs can be created
zone_ttl = zi.zone_ttl
# Create RRs list from published ZI
pzi = PseudoZi(db_session, zi)
# initialise data and zone consistency checking
zi_cname_flag = False
if len([r for r in pzi.rrs if r.type_ == RRTYPE_CNAME]):
zi_cname_flag = True
data_tools = DataTools(db_session, zone_sm, zi_cname_flag)
        # Create comments, and set up comment IDs, and stuff for handling
# RR Groups zi_data structures
data_tools.rr_data_create_comments(update_data, zone_ttl,
creating_real_zi=False)
try:
# Create new update_group
update_group = new_update_group(db_session, update_type,
zone_sm, change_by)
except IntegrityError as exc:
raise UpdateTypeAlreadyQueued(name, update_type)
# Add RRs to DB and operate on Pseudo ZI
        data_tools.add_rrs(lambda: pzi.rrs, pzi.trial_op_rr,
admin_privilege, helpdesk_privilege, update_group=update_group)
data_tools.check_zi_consistency(pzi.rrs)
# Get all data out to DB, and ids etc established.
db_session.flush()
# Refresh zone to implement updates
exec_zonesm(zone_sm, ZoneSMDoRefresh)
# Return auto update info
return data_tools.get_auto_ptr_data()
def _queue_auto_ptr_data(self, auto_ptr_data):
"""
Queue auto PTR data as incremental updates against respective reverse
zones.
"""
        if not auto_ptr_data:
            return
if not settings['auto_reverse']:
return
db_session = self.db_session
# Create new update_group
ug_dict = {}
auto_ptr_privilege_flag = False
for ptr_data in auto_ptr_data:
# Ignore addresses we don't have reverse zone for
query = db_session.query(ZoneSM)\
.join(ReverseNetwork)\
.filter(":address <<= reverse_networks.network")\
.params(address = ptr_data['address'])
query = ZoneSM.query_is_not_deleted(query)
query = ZoneSM.query_inc_updates(query)
query = query.order_by(ReverseNetwork.network.desc())\
.limit(1)
try:
zone_sm = query.one()
except NoResultFound:
continue
# Ignore invalid host names
if not is_inet_hostname(ptr_data['hostname'], absolute=True,
wildcard=False):
log_error("Hostname '%s' is not a valid hostname."
% ptr_data['hostname'])
continue
# Determine proposed update operation
update_op = RROP_PTR_UPDATE_FORCE if ptr_data['force_reverse'] \
else RROP_PTR_UPDATE
# Execute privilege checks ahead of time to save unnecessary churn
            # Better than needlessly going through the whole rigmarole of
            # incremental update processing later on for no effect
#1 See if old PTR exists to retrieve any RR reference
# Both following also used lower down when generating RR_PTR
label = label_from_address(ptr_data['address'])
rr_ref = find_reference(db_session, ptr_data['reference'],
raise_exc=False)
# query for old record - this generates one select
            # Optimization - if check has previously succeeded, don't check
# again as this is all checked further in
if not auto_ptr_privilege_flag:
                qlabel = label[:label.rfind(zone_sm.name)-1]
query = db_session.query(ResourceRecord)\
.filter(ResourceRecord.label == qlabel)\
.filter(ResourceRecord.zi_id == zone_sm.zi_candidate_id)\
.filter(ResourceRecord.disable == False)\
.filter(ResourceRecord.type_ == RRTYPE_PTR)
old_rrs = query.all()
old_rr = old_rrs[0] if len(old_rrs) else None
            # Check that we can proceed, only if check has not succeeded yet
if not check_auto_ptr_privilege(rr_ref, self.sectag, zone_sm,
old_rr):
if old_rr:
log_debug("Zone '%s' - can't replace '%s' PTR"
" as neither"
" sectags '%s' vs '%s'"
" references '%s' vs '%s'/'%s' (old PTR/rev zone)"
"match ,"
" or values not given."
% (zone_sm.name, old_rr.label,
self.sectag.sectag, settings['admin_sectag'],
rr_ref, old_rr.reference, zone_sm.reference))
else:
log_debug("Zone '%s' - can't add '%s' PTR as neither"
" sectags '%s' vs '%s'"
" references '%s' vs '%s' (rev zone) match,"
" or values not given."
% (zone_sm.name, qlabel,
self.sectag.sectag, settings['admin_sectag'],
rr_ref, zone_sm.reference))
continue
auto_ptr_privilege_flag = True
# Create a new update group if zone has not been seen before.
try:
update_group, zone_ttl = ug_dict.get(zone_sm)
except (ValueError, TypeError):
# Obtain reverse zone_ttl so PTR rrs can be created
# Use candidate ZI as it always is available.
# zi is published zi
zi = self._get_zi(zone_sm.zi_candidate_id)
if not zi:
log_error("Zone '%s': does not have candidate zi."
% zone_sm.name)
continue
zone_ttl = zi.zone_ttl
update_group = new_update_group(db_session, None,
zone_sm, None, ptr_only=True,
sectag=self.sectag.sectag)
ug_dict[zone_sm] = (update_group, zone_ttl)
# Allocate RR_PTR update record
rr = RR_PTR(label=label, zone_ttl=zone_ttl,
rdata=ptr_data['hostname'], disable=ptr_data['disable'],
domain=zone_sm.name, update_op=update_op)
rr.ref_id = rr_ref.id_ if rr_ref else None
rr.reference = rr_ref
# Chain on RR_PTR update record
update_group.update_ops.append(rr)
# Flush everything to disk
db_session.flush()
# Issue zone refreshes to implement PTR changes
for zone_sm in ug_dict:
if zone_sm.is_disabled():
continue
exec_zonesm(zone_sm, ZoneSMDoRefresh)
# Make sure everything is committed
db_session.commit()
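# For reference, each entry in auto_ptr_data (as consumed above) is expected
# to carry at least the keys below, inferred from the accesses in
# _queue_auto_ptr_data; this is an illustrative sketch, not a formal schema:
#   {'address': ..., 'hostname': ..., 'reference': ...,
#    'force_reverse': ..., 'disable': ...}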
| gpl-3.0 |
vericoin/vericoin-core | qa/rpc-tests/listtransactions.py | 1 | 4674 | #!/usr/bin/env python2
# Copyright (c) 2014 The VeriCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import VeriCoinTestFramework
from test_framework.util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
class ListTransactionsTest(VeriCoinTestFramework):
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
if __name__ == '__main__':
ListTransactionsTest().main()
| mit |
OpusVL/odoo-bank-sort-code | bank_sort_code/__init__.py | 1 | 1266 | # -*- coding: utf-8 -*-
##############################################################################
#
# Add Bank Sort Code to banks in Odoo.
# Copyright (C) 2014 OpusVL
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# If you require assistance, support, or further development of this
# software, please contact OpusVL using the details below:
#
# Telephone: +44 (0)1788 298 410
# Email: community@opusvl.com
# Web: http://opusvl.com
#
##############################################################################
from . import res_bank_extension
from . import res_partner_bank_extension
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fergalbyrne/nupic | src/nupic/data/filters.py | 34 | 4194 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from datetime import datetime, timedelta
class AutoResetFilter(object):
"""Initial implementation of auto-reset is fairly simple. You just give it a
  time interval. Like aggregation, the first time period starts with the
  time of the first record (t0), and we signal a reset at the first record
  on or after t0 + interval, t0 + 2 * interval, etc.
We could get much fancier than this, but it is not clear what will be
needed. For example, if you want a reset every day, you might expect the
period to start at midnight. We also don't handle variable-time periods --
month and year.
"""
def __init__(self, interval=None, datetimeField=None):
self.setInterval(interval, datetimeField)
def setInterval(self, interval=None, datetimeField=None):
if interval is not None:
assert isinstance(interval, timedelta)
self.interval = interval
self.datetimeField = datetimeField
self.lastAutoReset = None
def process(self, data):
if self.interval is None:
return True # no more data needed
if self.datetimeField is None:
self._getDatetimeField(data)
date = data[self.datetimeField]
if data['_reset'] != 0:
self.lastAutoReset = date
return True # no more data needed
if self.lastAutoReset is None:
self.lastAutoReset = date
return True
if date >= self.lastAutoReset + self.interval:
# might have skipped several intervals
while date >= self.lastAutoReset + self.interval:
self.lastAutoReset += self.interval
data['_reset'] = 1
return True # no more data needed
elif date < self.lastAutoReset:
# sequence went back in time!
self.lastAutoReset = date
return True
def _getDatetimeField(self, data):
datetimeField = None
assert isinstance(data, dict)
for (name, value) in data.items():
if isinstance(value, datetime):
datetimeField = name
break
if datetimeField is None:
raise RuntimeError("Autoreset requested for the data but there is no date field")
self.datetimeField = datetimeField
def getShortName(self):
    if self.interval is not None:
      s = "autoreset_%d_%d" % (self.interval.days, self.interval.seconds)
else:
s = "autoreset_none"
return s
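# Minimal usage sketch for AutoResetFilter (record shape is illustrative;
# the filter discovers the datetime field by inspecting the record):
#   f = AutoResetFilter(interval=timedelta(hours=1))
#   rec1 = {'when': datetime(2013, 1, 1, 0, 0), '_reset': 0}
#   f.process(rec1)   # primes lastAutoReset from the first record
#   rec2 = {'when': datetime(2013, 1, 1, 1, 30), '_reset': 0}
#   f.process(rec2)   # interval elapsed, so rec2['_reset'] is set to 1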
class DeltaFilter(object):
def __init__(self, origField, deltaField):
"""Add a delta field to the data.
"""
self.origField = origField
self.deltaField = deltaField
self.previousValue = None
self.rememberReset = False
def process(self, data):
val = data[self.origField]
if self.previousValue is None or data['_reset']:
self.previousValue = val
self.rememberReset = data['_reset']
return False
# We have a delta
delta = val - self.previousValue
self.previousValue = val
if isinstance(delta, timedelta):
data[self.deltaField] = float(delta.days * 24 * 3600) + \
float(delta.seconds) + float(delta.microseconds) * 1.0e-6
else:
data[self.deltaField] = float(delta)
if self.rememberReset:
data['_reset'] = 1
self.rememberReset = False
return True
def getShortName(self):
return "delta_%s" % self.origField
| agpl-3.0 |
gartung/dxr | tests/test_markup/test_markup.py | 9 | 1888 | """Tests for emission of high-level markup, as by templates"""
from dxr.testing import DxrInstanceTestCase
from nose.tools import eq_, ok_
class MarkupTests(DxrInstanceTestCase):
def test_autofocus_root(self):
"""Autofocus the query field at the root of each tree but not
elsewhere."""
markup = self.source_page('')
ok_('<input type="text" name="q" autofocus' in markup)
response = self.client().get('/code/source/%26folder%26')
eq_(response.status_code, 200)
ok_('<input type="text" name="q" autofocus' not in response.data)
def test_folder_name_escaping(self):
"""Make sure folder names are HTML-escaped."""
markup = self.source_page('')
ok_('&folder&' not in markup)
ok_('&folder&' in markup)
def test_body_escaping(self):
"""Make sure source code is HTML-escaped."""
markup = self.source_page('%26folder%26/README.mkd')
ok_('<stuff>' not in markup)
ok_('& things' not in markup)
ok_('<stuff>' in markup)
ok_('& things' in markup)
def test_folder_links(self):
"""Make sure folders link to the right places, not just to their first
chars."""
markup = self.source_page('')
ok_('<a href="/code/source/%26folder%26" class="icon folder">&folder&</a>'
in markup)
def test_file_links(self):
"""Make sure files link to the right places."""
markup = self.source_page('%26folder%26')
ok_('<a href="/code/source/%26folder%26/README.mkd" class="icon unknown">README.mkd</a>'
in markup)
def test_analytics_snippet_empty(self):
"""Make sure google analytics snippet doesn't show up
in when the key isn't configured"""
markup = self.source_page('')
ok_('.google-analytics.com' not in markup)
| mit |
kienpham2000/ansible-modules-core | cloud/rax_keypair.py | 17 | 4918 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_keypair
short_description: Create a keypair for use with Rackspace Cloud Servers
description:
- Create a keypair for use with Rackspace Cloud Servers
version_added: 1.5
options:
name:
description:
- Name of keypair
required: true
public_key:
description:
- Public Key string to upload. Can be a file path or string
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: Matt Martz
notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate.
- The ability to specify a file path for the public key was added in 1.7
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
region: DFW
register: keypair
- name: Create local public key
local_action:
module: copy
content: "{{ keypair.keypair.public_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- name: Create local private key
local_action:
module: copy
content: "{{ keypair.keypair.private_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
region: DFW
register: keypair
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_keypair(module, name, public_key, state):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
keypair = {}
if state == 'present':
if os.path.isfile(public_key):
try:
f = open(public_key)
public_key = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % public_key)
try:
keypair = cs.keypairs.find(name=name)
except cs.exceptions.NotFound:
try:
keypair = cs.keypairs.create(name, public_key)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
keypair = cs.keypairs.find(name=name)
        except Exception:
pass
if keypair:
try:
keypair.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
public_key = module.params.get('public_key')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_keypair(module, name, public_key, state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
| gpl-3.0 |
tboyce021/home-assistant | homeassistant/components/meteo_france/const.py | 1 | 6603 | """Meteo-France component constants."""
from homeassistant.components.weather import (
ATTR_CONDITION_CLEAR_NIGHT,
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_EXCEPTIONAL,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
ATTR_CONDITION_WINDY_VARIANT,
)
from homeassistant.const import (
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
LENGTH_MILLIMETERS,
PERCENTAGE,
PRESSURE_HPA,
SPEED_KILOMETERS_PER_HOUR,
TEMP_CELSIUS,
UV_INDEX,
)
DOMAIN = "meteo_france"
PLATFORMS = ["sensor", "weather"]
COORDINATOR_FORECAST = "coordinator_forecast"
COORDINATOR_RAIN = "coordinator_rain"
COORDINATOR_ALERT = "coordinator_alert"
UNDO_UPDATE_LISTENER = "undo_update_listener"
ATTRIBUTION = "Data provided by Météo-France"
CONF_CITY = "city"
FORECAST_MODE_HOURLY = "hourly"
FORECAST_MODE_DAILY = "daily"
FORECAST_MODE = [FORECAST_MODE_HOURLY, FORECAST_MODE_DAILY]
ATTR_NEXT_RAIN_1_HOUR_FORECAST = "1_hour_forecast"
ATTR_NEXT_RAIN_DT_REF = "forecast_time_ref"
ENTITY_NAME = "name"
ENTITY_UNIT = "unit"
ENTITY_ICON = "icon"
ENTITY_DEVICE_CLASS = "device_class"
ENTITY_ENABLE = "enable"
ENTITY_API_DATA_PATH = "data_path"
SENSOR_TYPES = {
"pressure": {
ENTITY_NAME: "Pressure",
ENTITY_UNIT: PRESSURE_HPA,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:sea_level",
},
"rain_chance": {
ENTITY_NAME: "Rain chance",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:weather-rainy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "probability_forecast:rain:3h",
},
"snow_chance": {
ENTITY_NAME: "Snow chance",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:weather-snowy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "probability_forecast:snow:3h",
},
"freeze_chance": {
ENTITY_NAME: "Freeze chance",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:snowflake",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "probability_forecast:freezing",
},
"wind_gust": {
ENTITY_NAME: "Wind gust",
ENTITY_UNIT: SPEED_KILOMETERS_PER_HOUR,
ENTITY_ICON: "mdi:weather-windy-variant",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:wind:gust",
},
"wind_speed": {
ENTITY_NAME: "Wind speed",
ENTITY_UNIT: SPEED_KILOMETERS_PER_HOUR,
ENTITY_ICON: "mdi:weather-windy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:wind:speed",
},
"next_rain": {
ENTITY_NAME: "Next rain",
ENTITY_UNIT: None,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: None,
},
"temperature": {
ENTITY_NAME: "Temperature",
ENTITY_UNIT: TEMP_CELSIUS,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:T:value",
},
"uv": {
ENTITY_NAME: "UV",
ENTITY_UNIT: UV_INDEX,
ENTITY_ICON: "mdi:sunglasses",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "today_forecast:uv",
},
"weather_alert": {
ENTITY_NAME: "Weather alert",
ENTITY_UNIT: None,
ENTITY_ICON: "mdi:weather-cloudy-alert",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: None,
},
"precipitation": {
ENTITY_NAME: "Daily precipitation",
ENTITY_UNIT: LENGTH_MILLIMETERS,
ENTITY_ICON: "mdi:cup-water",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "today_forecast:precipitation:24h",
},
"cloud": {
ENTITY_NAME: "Cloud cover",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:weather-partly-cloudy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "current_forecast:clouds",
},
"original_condition": {
ENTITY_NAME: "Original condition",
ENTITY_UNIT: None,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:weather:desc",
},
"daily_original_condition": {
ENTITY_NAME: "Daily original condition",
ENTITY_UNIT: None,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "today_forecast:weather12H:desc",
},
}
CONDITION_CLASSES = {
ATTR_CONDITION_CLEAR_NIGHT: ["Nuit Claire", "Nuit claire"],
ATTR_CONDITION_CLOUDY: ["Très nuageux", "Couvert"],
ATTR_CONDITION_FOG: [
"Brume ou bancs de brouillard",
"Brume",
"Brouillard",
"Brouillard givrant",
"Bancs de Brouillard",
],
ATTR_CONDITION_HAIL: ["Risque de grêle", "Risque de grèle"],
ATTR_CONDITION_LIGHTNING: ["Risque d'orages", "Orages"],
ATTR_CONDITION_LIGHTNING_RAINY: [
"Pluie orageuses",
"Pluies orageuses",
"Averses orageuses",
],
ATTR_CONDITION_PARTLYCLOUDY: [
"Ciel voilé",
"Ciel voilé nuit",
"Éclaircies",
"Eclaircies",
"Peu nuageux",
],
ATTR_CONDITION_POURING: ["Pluie forte"],
ATTR_CONDITION_RAINY: [
"Bruine / Pluie faible",
"Bruine",
"Pluie faible",
"Pluies éparses / Rares averses",
"Pluies éparses",
"Rares averses",
"Pluie modérée",
"Pluie / Averses",
"Averses",
"Pluie",
],
ATTR_CONDITION_SNOWY: [
"Neige / Averses de neige",
"Neige",
"Averses de neige",
"Neige forte",
"Quelques flocons",
],
ATTR_CONDITION_SNOWY_RAINY: ["Pluie et neige", "Pluie verglaçante"],
ATTR_CONDITION_SUNNY: ["Ensoleillé"],
ATTR_CONDITION_WINDY: [],
ATTR_CONDITION_WINDY_VARIANT: [],
ATTR_CONDITION_EXCEPTIONAL: [],
}
| apache-2.0 |
manics/openmicroscopy | examples/ScreenPlateWell/imagesperwell.py | 15 | 1689 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import omero
from omero.rtypes import rint
from omero_sys_ParametersI import ParametersI # Temporary
c = omero.client()
s = c.createSession()
q = s.getQueryService()
LOAD_WELLS = (
"select w from Well w join fetch w.wellSamples ws"
" join fetch ws.image i join fetch i.pixels p where w.plate.id = :id")
filter = omero.sys.Filter()
filter.limit = rint(10)
filter.offset = rint(0)
plates = q.findAll("Plate", filter)
if len(plates) == 0:
print "No plates"
sys.exit(0)
else:
import random
example_plate = random.choice(plates)
print "Loading wells for Plate %s (%s)" % (
example_plate.getId().getValue(), example_plate.getName().getValue())
# An example of true paging
filter.limit = rint(12)
params = ParametersI()
params.addId(example_plate.getId().getValue())
params.theFilter = filter
offset = 0
while True:
wells = q.findAllByQuery(LOAD_WELLS, params)
if len(wells) == 0:
break
else:
offset += len(wells)
params.theFilter.offset = rint(offset)
for well in wells:
id = well.getId().getValue()
row = well.getRow().getValue()
col = well.getColumn().getValue()
images = []
planes = 0
for ws in well.copyWellSamples():
img = ws.getImage()
pix = img.getPixels(0)
sizeC = pix.sizeC.val
sizeT = pix.sizeT.val
sizeZ = pix.sizeZ.val
images.append(img.getId().getValue())
planes += sizeZ*sizeT*sizeC
print ("Well %s (%2sx%2s) contains the images: %s with %s planes"
% (id, row, col, images, planes))
| gpl-2.0 |
dataxu/ansible | lib/ansible/parsing/metadata.py | 117 | 10058 | # (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
import yaml
from ansible.module_utils._text import to_text
# There are currently defaults for all metadata fields so we can add it
# automatically if a file doesn't specify it
DEFAULT_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
class ParseError(Exception):
"""Thrown when parsing a file fails"""
pass
def _seek_end_of_dict(module_data, start_line, start_col, next_node_line, next_node_col):
"""Look for the end of a dict in a set of lines
We know the starting position of the dict and we know the start of the
next code node but in between there may be multiple newlines and comments.
There may also be multiple python statements on the same line (separated
by semicolons)
Examples::
ANSIBLE_METADATA = {[..]}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} # Optional comments with confusing junk => {}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {
[..]
}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} ; DOCUMENTATION = [..]
ANSIBLE_METADATA = {}EOF
"""
if next_node_line is None:
# The dict is the last statement in the file
snippet = module_data.splitlines()[start_line:]
next_node_col = 0
# Include the last line in the file
last_line_offset = 0
else:
# It's somewhere in the middle so we need to separate it from the rest
snippet = module_data.splitlines()[start_line:next_node_line]
# Do not include the last line because that's where the next node
# starts
last_line_offset = 1
if next_node_col == 0:
# This handles all variants where there are only comments and blank
# lines between the dict and the next code node
# Step backwards through all the lines in the snippet
for line_idx, line in tuple(reversed(tuple(enumerate(snippet))))[last_line_offset:]:
end_col = None
# Step backwards through all the characters in the line
for col_idx, char in reversed(tuple(enumerate(c for c in line))):
if not isinstance(char, bytes):
# Python3 wart. slicing a byte string yields integers
char = bytes((char,))
if char == b'}' and end_col is None:
# Potentially found the end of the dict
end_col = col_idx
elif char == b'#' and end_col is not None:
# The previous '}' was part of a comment. Keep trying
end_col = None
if end_col is not None:
# Found the end!
end_line = start_line + line_idx
break
else:
raise ParseError('Unable to find the end of dictionary')
else:
# Harder cases involving multiple statements on one line
# Good Ansible Module style doesn't do this so we're just going to
# treat this as an error for now:
raise ParseError('Multiple statements per line confuses the module metadata parser.')
return end_line, end_col
def _seek_end_of_string(module_data, start_line, start_col, next_node_line, next_node_col):
"""
This is much trickier than finding the end of a dict. A dict has only one
ending character, "}". Strings have four potential ending characters. We
have to parse the beginning of the string to determine what the ending
character will be.
Examples:
ANSIBLE_METADATA = '''[..]''' # Optional comment with confusing chars '''
# Optional comment with confusing chars '''
DOCUMENTATION = [..]
ANSIBLE_METADATA = '''
[..]
'''
DOCUMENTATIONS = [..]
ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
SHORT_NAME = ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
String marker variants:
* '[..]'
* "[..]"
* '''[..]'''
* \"\"\"[..]\"\"\"
Each of these come in u, r, and b variants:
* '[..]'
* u'[..]'
* b'[..]'
* r'[..]'
* ur'[..]'
* ru'[..]'
* br'[..]'
* rb'[..]'
"""
raise NotImplementedError('Finding end of string not yet implemented')
def extract_metadata(module_ast=None, module_data=None, offsets=False):
"""Extract the metadata from a module
:kwarg module_ast: ast representation of the module. At least one of this
or ``module_data`` must be given. If the code calling
:func:`extract_metadata` has already parsed the module_data into an ast,
giving the ast here will save reparsing it.
:kwarg module_data: Byte string containing a module's code. At least one
of this or ``module_ast`` must be given.
    :kwarg offsets: If set to True, offsets into the source code will be
returned. This requires that ``module_data`` be set.
:returns: a tuple of metadata (a dict), line the metadata starts on,
column the metadata starts on, line the metadata ends on, column the
metadata ends on, and the names the metadata is assigned to. One of
the names the metadata is assigned to will be ANSIBLE_METADATA. If no
metadata is found, the tuple will be (None, -1, -1, -1, -1, None).
If ``offsets`` is False then the tuple will consist of
(metadata, -1, -1, -1, -1, None).
:raises ansible.parsing.metadata.ParseError: if ``module_data`` does not parse
:raises SyntaxError: if ``module_data`` is needed but does not parse correctly
"""
if offsets and module_data is None:
raise TypeError('If offsets is True then module_data must also be given')
if module_ast is None and module_data is None:
raise TypeError('One of module_ast or module_data must be given')
metadata = None
start_line = -1
start_col = -1
end_line = -1
end_col = -1
targets = None
if module_ast is None:
module_ast = ast.parse(module_data)
for root_idx, child in reversed(list(enumerate(module_ast.body))):
if isinstance(child, ast.Assign):
for target in child.targets:
if isinstance(target, ast.Name) and target.id == 'ANSIBLE_METADATA':
metadata = ast.literal_eval(child.value)
if not offsets:
continue
try:
# Determine where the next node starts
next_node = module_ast.body[root_idx + 1]
next_lineno = next_node.lineno
next_col_offset = next_node.col_offset
except IndexError:
# Metadata is defined in the last node of the file
next_lineno = None
next_col_offset = None
if isinstance(child.value, ast.Dict):
# Determine where the current metadata ends
end_line, end_col = _seek_end_of_dict(module_data,
child.lineno - 1,
child.col_offset,
next_lineno,
next_col_offset)
elif isinstance(child.value, ast.Str):
metadata = yaml.safe_load(child.value.s)
end_line, end_col = _seek_end_of_string(module_data,
child.lineno - 1,
child.col_offset,
next_lineno,
next_col_offset)
elif isinstance(child.value, ast.Bytes):
metadata = yaml.safe_load(to_text(child.value.s, errors='surrogate_or_strict'))
end_line, end_col = _seek_end_of_string(module_data,
child.lineno - 1,
child.col_offset,
next_lineno,
next_col_offset)
else:
raise ParseError('Ansible plugin metadata must be a dict')
# Do these after the if-else so we don't pollute them in
# case this was a false positive
start_line = child.lineno - 1
start_col = child.col_offset
targets = [t.id for t in child.targets]
break
if metadata is not None:
# Once we've found the metadata we're done
break
return metadata, start_line, start_col, end_line, end_col, targets
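# Minimal usage sketch (the module source below is illustrative):
#   source = b"ANSIBLE_METADATA = {'metadata_version': '1.1'}\n"
#   metadata, s_line, s_col, e_line, e_col, names = extract_metadata(
#       module_data=source, offsets=True)
#   # metadata == {'metadata_version': '1.1'}; names == ['ANSIBLE_METADATA']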
| gpl-3.0 |
EntityFXCode/arsenalsuite | python/pythondotnet/pythonnet/src/tests/test_enum.py | 10 | 5319 | # ===========================================================================
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
# ===========================================================================
import sys, os, string, unittest, types
from System import DayOfWeek
from Python import Test
class EnumTests(unittest.TestCase):
"""Test CLR enum support."""
def testEnumStandardAttrs(self):
"""Test standard enum attributes."""
self.failUnless(DayOfWeek.__name__ == 'DayOfWeek')
self.failUnless(DayOfWeek.__module__ == 'System')
self.failUnless(type(DayOfWeek.__dict__) == types.DictProxyType)
self.failUnless(DayOfWeek.__doc__ == '')
def testEnumGetMember(self):
"""Test access to enum members."""
self.failUnless(DayOfWeek.Sunday == 0)
self.failUnless(DayOfWeek.Monday == 1)
self.failUnless(DayOfWeek.Tuesday == 2)
self.failUnless(DayOfWeek.Wednesday == 3)
self.failUnless(DayOfWeek.Thursday == 4)
self.failUnless(DayOfWeek.Friday == 5)
self.failUnless(DayOfWeek.Saturday == 6)
def testByteEnum(self):
"""Test byte enum."""
self.failUnless(Test.ByteEnum.Zero == 0)
self.failUnless(Test.ByteEnum.One == 1)
self.failUnless(Test.ByteEnum.Two == 2)
def testSByteEnum(self):
"""Test sbyte enum."""
self.failUnless(Test.SByteEnum.Zero == 0)
self.failUnless(Test.SByteEnum.One == 1)
self.failUnless(Test.SByteEnum.Two == 2)
def testShortEnum(self):
"""Test short enum."""
self.failUnless(Test.ShortEnum.Zero == 0)
self.failUnless(Test.ShortEnum.One == 1)
self.failUnless(Test.ShortEnum.Two == 2)
def testUShortEnum(self):
"""Test ushort enum."""
self.failUnless(Test.UShortEnum.Zero == 0)
self.failUnless(Test.UShortEnum.One == 1)
self.failUnless(Test.UShortEnum.Two == 2)
def testIntEnum(self):
"""Test int enum."""
self.failUnless(Test.IntEnum.Zero == 0)
self.failUnless(Test.IntEnum.One == 1)
self.failUnless(Test.IntEnum.Two == 2)
def testUIntEnum(self):
"""Test uint enum."""
self.failUnless(Test.UIntEnum.Zero == 0L)
self.failUnless(Test.UIntEnum.One == 1L)
self.failUnless(Test.UIntEnum.Two == 2L)
def testLongEnum(self):
"""Test long enum."""
self.failUnless(Test.LongEnum.Zero == 0L)
self.failUnless(Test.LongEnum.One == 1L)
self.failUnless(Test.LongEnum.Two == 2L)
def testULongEnum(self):
"""Test ulong enum."""
self.failUnless(Test.ULongEnum.Zero == 0L)
self.failUnless(Test.ULongEnum.One == 1L)
self.failUnless(Test.ULongEnum.Two == 2L)
def testInstantiateEnumFails(self):
"""Test that instantiation of an enum class fails."""
def test():
ob = DayOfWeek()
self.failUnlessRaises(TypeError, test)
def testSubclassEnumFails(self):
"""Test that subclassing of an enumeration fails."""
def test():
class Boom(DayOfWeek):
pass
self.failUnlessRaises(TypeError, test)
def testEnumSetMemberFails(self):
"""Test that setattr operations on enumerations fail."""
def test():
DayOfWeek.Sunday = 13
self.failUnlessRaises(TypeError, test)
def test():
del DayOfWeek.Sunday
self.failUnlessRaises(TypeError, test)
def testEnumWithFlagsAttrConversion(self):
"""Test enumeration conversion with FlagsAttribute set."""
from System.Windows.Forms import Label
# This works because the AnchorStyles enum has FlagsAttribute.
label = Label()
label.Anchor = 99
# This should fail because our test enum doesn't have it.
def test():
Test.FieldTest().EnumField = 99
self.failUnlessRaises(ValueError, test)
def testEnumConversion(self):
"""Test enumeration conversion."""
object = Test.FieldTest()
self.failUnless(object.EnumField == 0)
object.EnumField = Test.ShortEnum.One
self.failUnless(object.EnumField == 1)
def test():
Test.FieldTest().EnumField = 20
self.failUnlessRaises(ValueError, test)
def test():
Test.FieldTest().EnumField = 100000
self.failUnlessRaises(OverflowError, test)
def test():
Test.FieldTest().EnumField = "str"
self.failUnlessRaises(TypeError, test)
def test_suite():
return unittest.makeSuite(EnumTests)
def main():
unittest.TextTestRunner().run(test_suite())
if __name__ == '__main__':
main()
| gpl-2.0 |
barometz/shirk | plugs/Auth/auth.py | 1 | 3986 | # Copyright (c) 2012 Dominic van Berkel
# See LICENSE for details.
from plugs import plugbase
from util import Event
class AuthPlug(plugbase.Plug):
"""Auth plug. Handles auth stuffs."""
name = 'Auth'
    # manual_auths is a dict of source:target entries, created after !auth
    # requests so they can be responded to appropriately.
manual_auths = dict()
def load(self, startingup=True):
"""Force reloading the userlist in case the plug is reloaded"""
if not startingup:
for nick, user in self.users.users_by_nick.iteritems():
self.handle_usercreated(user)
@plugbase.event
def handle_usercreated(self, user):
"""A user has joined a channel, so let's give them perms."""
user.power = 0
user.auth_method = ''
found = False
if user.hostmask in self.hosts_auth:
found = True
self.powerup(user, self.hosts_auth[user.hostmask], 'hostmask', user.hostmask)
for nick in self.known_nicks:
if user.nickname.lower().startswith(nick):
found = True
self.core.sendLine('WHOIS %s' % (user.nickname,))
break
if not found and user.nickname in self.manual_auths:
# !auth attempt from unknown user
self.log.info('Failed authentication attempt by %s - nickname not found in auth config.' % (user.nickname,))
self.respond(user.nickname, self.manual_auths[user.nickname],
"%s is not in the auth file. This incident will be reported." % user.nickname)
del self.manual_auths[user.nickname]
@plugbase.event
def handle_userrenamed(self, user, oldnick):
"""A user has changed their nickname, let's recheck auth"""
for nick in self.known_nicks:
if user.nickname.lower().startswith(nick):
self.core.sendLine('WHOIS %s' % (user.nickname,))
break
def powerup(self, user, power, auth_method, auth_match):
"""Set user's power, log and act on `self.manual_auths` if necessary.
:param user: The User instance that is being powered up
:param power: The power (int) the user should have
:param auth_method: The method user to authenticate
:param auth_match: The matched value (e.g. the hostmask or NS account)
"""
user.power = power
user.auth_method = auth_method
if user.nickname in self.manual_auths:
self.respond(user.nickname, self.manual_auths[user.nickname], "Successfully authenticated %s"
% user.nickname)
del self.manual_auths[user.nickname]
self.log.info('Power of %s set to %d based on %s: %s'
% (user.nickname, user.power, auth_method, auth_match))
@plugbase.raw('330')
def handle_loggedinas(self, command, prefix, params):
"""Act on Freenode's 'Logged in as:' response in the WHOIS reply."""
nickname = params[1]
account = params[2]
if account in self.users_auth:
user = self.users.by_nick(nickname)
self.powerup(user, self.users_auth[account], 'NickServ', account)
@plugbase.command()
def cmd_auth(self, source, target, argv):
"""!auth handler to trigger authentication when that didn't happen right at join."""
user = self.users.by_nick(source)
if user is not None:
self.manual_auths[source] = target
self.handle_usercreated(user)
@plugbase.command()
def cmd_whoami(self, source, target, argv):
"""Tell the user what their power is and why."""
user = self.users.by_nick(source)
if user is None or user.power == 0:
self.respond(source, target, '%s: You are powerless.' % source)
else:
self.respond(source, target, '%s: You are authenticated (%s) and have power %d'
% (source, user.auth_method, user.power)) | mit |
abhishekgahlot/or-tools | examples/python/volsay2.py | 34 | 2029 | # Copyright 2011 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volsay problem in Google or-tools.
From the OPL model volsay.mod
Using arrays.
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.linear_solver import pywraplp
def main(unused_argv):
# Create the solver.
# using GLPK
solver = pywraplp.Solver('CoinsGridGLPK',
pywraplp.Solver.GLPK_LINEAR_PROGRAMMING)
# Using CLP
# solver = pywraplp.Solver('CoinsGridCLP',
# pywraplp.Solver.CLP_LINEAR_PROGRAMMING)
# data
num_products = 2
Gas = 0
Chloride = 1
products = ['Gas', 'Chloride']
# declare variables
production = [solver.NumVar(0, 100000, 'production[%i]' % i)
for i in range(num_products)]
#
# constraints
#
solver.Add(production[Gas] + production[Chloride] <= 50)
solver.Add(3 * production[Gas] + 4 * production[Chloride] <= 180)
# objective
objective = solver.Maximize(40 * production[Gas] + 50 * production[Chloride])
print 'NumConstraints:', solver.NumConstraints()
#
# solution and search
#
solver.Solve()
print
print 'objective = ', solver.Objective().Value()
for i in range(num_products):
print products[i], '=', production[i].SolutionValue(),
print 'ReducedCost = ', production[i].ReducedCost()
if __name__ == '__main__':
main('Volsay')
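# For reference, this LP attains its optimum with both constraints binding:
# production = (Gas=20, Chloride=30), objective 40*20 + 50*30 = 2300
# (hand-checked against the two constraints above).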
| apache-2.0 |
uberdugo/mlia | Ch05/EXTRAS/plot2D.py | 7 | 1276 | '''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import logRegres
dataMat,labelMat=logRegres.loadDataSet()
dataArr = array(dataMat)
weights = logRegres.stocGradAscent0(dataArr,labelMat)
n = shape(dataArr)[0] #number of data points
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
markers =[]
colors =[]
for i in range(n):
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.scatter(xcord,ycord, c=colors, s=markers)
type1 = ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
type2 = ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
#weights = [-2.9, 0.72, 1.29]
#weights = [-5, 1.09, 1.42]
weights = [13.03822793, 1.32877317, -1.96702074]
weights = [4.12, 0.48, -0.6168]
y = (-weights[0]-weights[1]*x)/weights[2]
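# The boundary comes from setting w0 + w1*x1 + w2*x2 = 0 (the logistic
# decision threshold) and solving for x2.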
type3 = ax.plot(x, y)
#ax.legend([type1, type2, type3], ["Did Not Like", "Liked in Small Doses", "Liked in Large Doses"], loc=2)
#ax.axis([-5000,100000,-2,25])
plt.xlabel('X1')
plt.ylabel('X2')
plt.show() | gpl-3.0 |
pbsds/hatena-server | database/__init__.py | 1 | 5487 | from twisted.internet import reactor
import atexit, os
from Hatenatools import TMB
#The database handling flipnote files and info
#this one stores them in plaintext, only works with one server worker
class Database:
def __init__(self):
#read database stuff into memory:
if os.path.exists("database/new_flipnotes.dat"):
f = open("database/new_flipnotes.dat", "rb")#contains the newest 5000 flipnotes
file = f.read()
f.close()
else:
file = ""
if file:
self.Newest = [tuple(i.split("\t")) for i in file.split("\n")]#[i] = [creatorID, filename]
else:
self.Newest = []
self.Creator = {}#to store creator info updates before writing to disk. Creator[id][n] = [filename, views, stars, green stars, red stars, blue stars, purple stars, Channel, Downloads]
#to check if an update is neccesary(caching):
self.new = False#True when new flipnotes has been uploaded
self.Views = 0
self.Stars = 0
self.Downloads = 0
#schtuff
reactor.callLater(60*3, self.flusher)
atexit.register(self.write)
def flusher(self):#Automatically flushes the files every 3 minutes and trims down memory usage
reactor.callLater(60*3, self.flusher)
self.write()
def write(self):
if self.new:
#trim newest:
if len(self.Newest) > 5000:
self.Newest = self.Newest[:5000]
#write to file:
f = open("database/new_flipnotes.dat", "wb")
f.write("\n".join(("\t".join(i) for i in self.Newest)))
f.close()
self.new = False
#write creator changes to file:
for ID in self.Creator.keys():
f = open("database/Creators/%s/flipnotes.dat" % ID, "wb")
f.write("\n".join(("\t".join(map(str, i)) for i in self.Creator[ID])))
f.close()
del self.Creator[ID]
#interface:
def CreatorExists(self, CreatorID):
return os.path.exists("database/Creators/" + CreatorID) or (CreatorID in self.Creator)
def FlipnoteExists(self, CreatorID, filename):
return os.path.exists(self.FlipnotePath(CreatorID, filename))
	def GetCreator(self, CreatorID, Store=False):#Returns a list of flipnote records (one per GetFlipnote() entry). "Store" caches the list in memory for a while; use it when making changes or reading often
if CreatorID in self.Creator:
return self.Creator[CreatorID]
else:
if not os.path.exists("database/Creators/" + CreatorID):
return None
f = open("database/Creators/%s/flipnotes.dat" % CreatorID, "rb")
ret = [i.split("\t") for i in f.read().split("\n")]
f.close()
#update to newer format:
#current format = [filename, views, stars, green stars, red stars, blue stars, purple stars, Channel, Downloads]
for i in xrange(len(ret)):
if len(ret[i]) < 9:
filename = ret[i][0]#take this as a give for now
for n, default in enumerate((filename, 0, 0, 0, 0, 0, 0, "", 0)):
if len(ret[i]) <= n:
ret[i].append(default)
if Store:
self.Creator[CreatorID] = ret
return ret
def GetFlipnote(self, CreatorID, filename, Store=False):#returns: [filename, views, stars, green stars, red stars, blue stars, purple stars, Channel, Downloads]
for i in (self.GetCreator(CreatorID, Store) or []):
if i[0] == filename:
return i
return False
def GetFlipnotePPM(self, CreatorID, filename):#the ppm binary data
f = open(self.FlipnotePath(CreatorID, filename), "rb")
ret = f.read()
f.close()
return ret
def GetFlipnoteTMB(self, CreatorID, filename):#the tmb binary data
f = open(self.FlipnotePath(CreatorID, filename), "rb")
ret = f.read(0x6a0)
f.close()
return ret
def AddFlipnote(self, content, Channel=""):#content = ppm binary data
tmb = TMB().Read(content)
if not tmb:
return False
#CreatorID = tmb.Username
CreatorID = tmb.EditorAuthorID
filename = tmb.CurrentFilename[:-4]
del tmb
if self.FlipnoteExists(CreatorID, filename):#already exists
return False
#add to database:
self.new = True
self.Newest.insert(0, (CreatorID, filename))
if not self.GetCreator(CreatorID, True):
self.Creator[CreatorID] = [[filename, 0, 0, 0, 0, 0, 0, Channel, 0]]
else:
self.Creator[CreatorID].append([filename, 0, 0, 0, 0, 0, 0, Channel, 0])
#write flipnote to file:
if not os.path.isdir("database/Creators/" + CreatorID):
os.mkdir("database/Creators/" + CreatorID)
f = open(self.FlipnotePath(CreatorID, filename), "wb")
f.write(content)
f.close()
return CreatorID, filename
def AddView(self, CreatorID, filename):
for i, flipnote in enumerate(self.GetCreator(CreatorID, True) or []):
if flipnote[0] == filename:
self.Creator[CreatorID][i][1] = int(flipnote[1]) + 1
self.Views += 1
return True
return False
def AddStar(self, CreatorID, filename, amount=1, color='yellow'):
starindices = {
'yellow': 2,
'green': 3,
'red': 4,
'blue': 5,
'purple': 6
}
for i, flipnote in enumerate(self.GetCreator(CreatorID, True) or []):
if flipnote[0] == filename:
self.Creator[CreatorID][i][starindices[color]] = int(flipnote[starindices[color]]) + amount
self.Stars += 1
return True
return False
def AddDownload(self, CreatorID, filename):
for i, flipnote in enumerate(self.GetCreator(CreatorID, True) or []):
if flipnote[0] == filename:
self.Creator[CreatorID][i][8] = int(flipnote[8]) + 1
self.Downloads += 1
return True
return False
#internal helpers:
def FlipnotePath(self, CreatorID, filename):#use self.GetFlipnotePPM() instead
return "database/Creators/%s/%s.ppm" % (CreatorID, filename)
Database = Database()#is loaded, yesth!
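# Usage sketch (the creator ID and filename below are illustrative
# placeholders, not real database entries):
#   if Database.CreatorExists('9F00AA00BB00CC00'):
#       note = Database.GetFlipnote('9F00AA00BB00CC00', 'some_filename')
#       Database.AddView('9F00AA00BB00CC00', 'some_filename')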
| agpl-3.0 |
dave-shawley/ietfparse | tests/test_datastructure.py | 1 | 3585 | import unittest
from ietfparse.datastructures import ContentType
class ContentTypeCreationTests(unittest.TestCase):
def test_that_primary_type_is_normalized(self):
self.assertEqual('contenttype',
ContentType('COntentType', 'b').content_type)
def test_that_subtype_is_normalized(self):
self.assertEqual('subtype',
ContentType('a', ' SubType ').content_subtype)
def test_that_content_suffix_is_normalized(self):
self.assertEqual(
'json',
ContentType('a', 'b', content_suffix=' JSON').content_suffix)
def test_that_parameter_names_are_casefolded(self):
self.assertDictEqual({'key': 'Value'},
ContentType('a', 'b', parameters={
'KEY': 'Value'
}).parameters)
class ContentTypeStringificationTests(unittest.TestCase):
def test_that_simple_case_works(self):
self.assertEqual('primary/subtype',
str(ContentType('primary', 'subtype')))
def test_that_parameters_are_sorted_by_name(self):
ct = ContentType('a', 'b', {'one': '1', 'two': '2', 'three': 3})
self.assertEqual('a/b; one=1; three=3; two=2', str(ct))
def test_that_content_suffix_is_appended(self):
ct = ContentType('a', 'b', {'foo': 'bar'}, content_suffix='xml')
self.assertEqual('a/b+xml; foo=bar', str(ct))
class ContentTypeComparisonTests(unittest.TestCase):
def test_type_equals_itself(self):
self.assertEqual(ContentType('a', 'b'), ContentType('a', 'b'))
def test_that_differing_types_are_not_equal(self):
self.assertNotEqual(ContentType('a', 'b'), ContentType('b', 'a'))
def test_that_differing_suffixes_are_not_equal(self):
self.assertNotEqual(ContentType('a', 'b', content_suffix='1'),
ContentType('a', 'b', content_suffix='2'))
def test_that_differing_params_are_not_equal(self):
self.assertNotEqual(ContentType('a', 'b', parameters={'one': '1'}),
ContentType('a', 'b'))
def test_that_case_is_ignored_when_comparing_types(self):
self.assertEqual(ContentType('text', 'html', {'level': '3.2'}, 'json'),
ContentType('Text', 'Html', {'Level': '3.2'}, 'JSON'))
def test_primary_wildcard_is_less_than_anything_else(self):
self.assertLess(ContentType('*', '*'), ContentType('text', 'plain'))
self.assertLess(ContentType('*', '*'), ContentType('text', '*'))
def test_subtype_wildcard_is_less_than_concrete_types(self):
self.assertLess(ContentType('application', '*'),
ContentType('application', 'json'))
self.assertLess(ContentType('text', '*'),
ContentType('application', 'json'))
def test_type_with_fewer_parameters_is_lesser(self):
self.assertLess(
ContentType('application', 'text', parameters={'1': 1}),
ContentType('application', 'text', parameters={
'1': 1,
'2': 2
}))
def test_otherwise_equal_types_ordered_by_primary(self):
self.assertLess(ContentType('first', 'one', parameters={'1': 1}),
ContentType('second', 'one', parameters={'1': 1}))
def test_otherwise_equal_types_ordered_by_subtype(self):
self.assertLess(
ContentType('application', 'first', parameters={'1': 1}),
ContentType('application', 'second', parameters={'1': 1}))
| bsd-3-clause |
AnderEnder/ansible-modules-extras | network/f5/bigip_virtual_server.py | 3 | 20637 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Etienne Carriere <etienne.carriere@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_virtual_server
short_description: "Manages F5 BIG-IP LTM virtual servers"
description:
- "Manages F5 BIG-IP LTM virtual servers via iControl SOAP API"
version_added: "2.1"
author:
- Etienne Carriere (@Etienne-Carriere)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
state:
description:
- Virtual Server state
- Absent, delete the VS if present
- C(present) (and its synonym enabled), create if needed the VS and set
state to enabled
- C(disabled), create if needed the VS and set state to disabled
required: false
default: present
choices:
- present
- absent
- enabled
- disabled
aliases: []
partition:
description:
- Partition
required: false
default: 'Common'
name:
description:
- Virtual server name
required: true
aliases:
- vs
destination:
description:
- Destination IP of the virtual server (only host is currently supported).
Required when state=present and vs does not exist.
required: true
aliases:
- address
- ip
port:
description:
- Port of the virtual server . Required when state=present and vs does not exist
required: false
default: None
all_profiles:
description:
- List of all Profiles (HTTP,ClientSSL,ServerSSL,etc) that must be used
by the virtual server
required: false
default: None
all_rules:
version_added: "2.2"
description:
- List of rules to be applied in priority order
required: false
default: None
all_enabled_vlans:
version_added: "2.2"
description:
- List of vlans to be enabled
required: false
default: None
pool:
description:
- Default pool for the virtual server
required: false
default: None
snat:
description:
- Source network address policy
required: false
default: None
default_persistence_profile:
description:
- Default Profile which manages the session persistence
required: false
default: None
description:
description:
- Virtual server description
required: false
default: None
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Add virtual server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: present
partition: MyPartition
name: myvirtualserver
destination: "{{ ansible_default_ipv4['address'] }}"
port: 443
pool: "{{ mypool }}"
snat: Automap
description: Test Virtual Server
all_profiles:
- http
- clientssl
all_enabled_vlans:
- /Common/vlan2
delegate_to: localhost
- name: Modify Port of the Virtual Server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: present
partition: MyPartition
name: myvirtualserver
port: 8080
delegate_to: localhost
- name: Delete virtual server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: absent
partition: MyPartition
name: myvirtualserver
delegate_to: localhost
'''
RETURN = '''
---
deleted:
description: Name of a virtual server that was deleted
returned: changed
type: string
sample: "my-virtual-server"
'''
# map of state values
STATES = {
'enabled': 'STATE_ENABLED',
'disabled': 'STATE_DISABLED'
}
STATUSES = {
'enabled': 'SESSION_STATUS_ENABLED',
'disabled': 'SESSION_STATUS_DISABLED',
'offline': 'SESSION_STATUS_FORCED_DISABLED'
}
def vs_exists(api, vs):
    # hack to determine if the virtual server exists
result = False
try:
api.LocalLB.VirtualServer.get_object_status(virtual_servers=[vs])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def vs_create(api, name, destination, port, pool):
_profiles = [[{'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': 'tcp'}]]
created = False
# a bit of a hack to handle concurrent runs of this module.
# even though we've checked the vs doesn't exist,
    # it may exist by the time we run vs_create().
# this catches the exception and does something smart
# about it!
try:
api.LocalLB.VirtualServer.create(
definitions=[{'name': [name], 'address': [destination], 'port': port, 'protocol': 'PROTOCOL_TCP'}],
wildmasks=['255.255.255.255'],
resources=[{'type': 'RESOURCE_TYPE_POOL', 'default_pool_name': pool}],
profiles=_profiles)
created = True
return created
except bigsuds.OperationFailed as e:
if "already exists" not in str(e):
raise Exception('Error on creating Virtual Server : %s' % e)
def vs_remove(api, name):
api.LocalLB.VirtualServer.delete_virtual_server(
virtual_servers=[name]
)
def get_rules(api, name):
return api.LocalLB.VirtualServer.get_rule(
virtual_servers=[name]
)[0]
def set_rules(api, name, rules_list):
updated = False
if rules_list is None:
return False
rules_list = list(enumerate(rules_list))
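    # Compare the desired (priority, rule_name) pairs against the device's
    # current pairs: stale entries are removed, missing ones are added.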
try:
current_rules = map(lambda x: (x['priority'], x['rule_name']), get_rules(api, name))
to_add_rules = []
for i, x in rules_list:
if (i, x) not in current_rules:
to_add_rules.append({'priority': i, 'rule_name': x})
to_del_rules = []
for i, x in current_rules:
if (i, x) not in rules_list:
to_del_rules.append({'priority': i, 'rule_name': x})
if len(to_del_rules) > 0:
api.LocalLB.VirtualServer.remove_rule(
virtual_servers=[name],
rules=[to_del_rules]
)
updated = True
if len(to_add_rules) > 0:
api.LocalLB.VirtualServer.add_rule(
virtual_servers=[name],
rules=[to_add_rules]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting rules : %s' % e)
def get_profiles(api, name):
return api.LocalLB.VirtualServer.get_profile(
virtual_servers=[name]
)[0]
def set_profiles(api, name, profiles_list):
updated = False
try:
if profiles_list is None:
return False
current_profiles = map(lambda x: x['profile_name'], get_profiles(api, name))
to_add_profiles = []
for x in profiles_list:
if x not in current_profiles:
to_add_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
to_del_profiles = []
for x in current_profiles:
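            # never strip the base /Common/tcp profile that vs_create added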
if (x not in profiles_list) and (x != "/Common/tcp"):
to_del_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
if len(to_del_profiles) > 0:
api.LocalLB.VirtualServer.remove_profile(
virtual_servers=[name],
profiles=[to_del_profiles]
)
updated = True
if len(to_add_profiles) > 0:
api.LocalLB.VirtualServer.add_profile(
virtual_servers=[name],
profiles=[to_add_profiles]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting profiles : %s' % e)
def set_enabled_vlans(api, name, vlans_enabled_list):
updated = False
try:
if vlans_enabled_list is None:
return False
        to_add_vlans = list(vlans_enabled_list)
        api.LocalLB.VirtualServer.set_vlan(
            virtual_servers=[name],
            vlans=[{'state': 'STATE_ENABLED', 'vlans': [to_add_vlans]}]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting enabled vlans : %s' % e)
def set_snat(api, name, snat):
updated = False
try:
current_state = get_snat_type(api, name)
if snat is None:
return updated
elif snat == 'None' and current_state != 'SRC_TRANS_NONE':
api.LocalLB.VirtualServer.set_source_address_translation_none(
virtual_servers=[name]
)
updated = True
elif snat == 'Automap' and current_state != 'SRC_TRANS_AUTOMAP':
api.LocalLB.VirtualServer.set_source_address_translation_automap(
virtual_servers=[name]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting snat : %s' % e)
def get_snat_type(api, name):
return api.LocalLB.VirtualServer.get_source_address_translation_type(
virtual_servers=[name]
)[0]
def get_pool(api, name):
return api.LocalLB.VirtualServer.get_default_pool_name(
virtual_servers=[name]
)[0]
def set_pool(api, name, pool):
updated = False
try:
current_pool = get_pool(api, name)
if pool is not None and (pool != current_pool):
api.LocalLB.VirtualServer.set_default_pool_name(
virtual_servers=[name],
default_pools=[pool]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting pool : %s' % e)
def get_destination(api, name):
return api.LocalLB.VirtualServer.get_destination_v2(
virtual_servers=[name]
)[0]
def set_destination(api, name, destination):
updated = False
try:
current_destination = get_destination(api, name)
if destination is not None and destination != current_destination['address']:
api.LocalLB.VirtualServer.set_destination_v2(
virtual_servers=[name],
destinations=[{'address': destination, 'port': current_destination['port']}]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting destination : %s' % e)
def set_port(api, name, port):
updated = False
try:
current_destination = get_destination(api, name)
if port is not None and port != current_destination['port']:
api.LocalLB.VirtualServer.set_destination_v2(
virtual_servers=[name],
destinations=[{'address': current_destination['address'], 'port': port}]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting port : %s' % e)
def get_state(api, name):
return api.LocalLB.VirtualServer.get_enabled_state(
virtual_servers=[name]
)[0]
def set_state(api, name, state):
updated = False
try:
current_state = get_state(api, name)
# We consider that being present is equivalent to enabled
if state == 'present':
state = 'enabled'
if STATES[state] != current_state:
api.LocalLB.VirtualServer.set_enabled_state(
virtual_servers=[name],
states=[STATES[state]]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting state : %s' % e)
def get_description(api, name):
return api.LocalLB.VirtualServer.get_description(
virtual_servers=[name]
)[0]
def set_description(api, name, description):
updated = False
try:
current_description = get_description(api, name)
if description is not None and current_description != description:
api.LocalLB.VirtualServer.set_description(
virtual_servers=[name],
descriptions=[description]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting description : %s ' % e)
def get_persistence_profiles(api, name):
return api.LocalLB.VirtualServer.get_persistence_profile(
virtual_servers=[name]
)[0]
def set_default_persistence_profiles(api, name, persistence_profile):
updated = False
if persistence_profile is None:
return updated
try:
current_persistence_profiles = get_persistence_profiles(api, name)
default = None
for profile in current_persistence_profiles:
if profile['default_profile']:
default = profile['profile_name']
break
if default is not None and default != persistence_profile:
api.LocalLB.VirtualServer.remove_persistence_profile(
virtual_servers=[name],
profiles=[[{'profile_name': default, 'default_profile': True}]]
)
if default != persistence_profile:
api.LocalLB.VirtualServer.add_persistence_profile(
virtual_servers=[name],
profiles=[[{'profile_name': persistence_profile, 'default_profile': True}]]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting default persistence profile : %s' % e)
def main():
argument_spec = f5_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present',
choices=['present', 'absent', 'disabled', 'enabled']),
name=dict(type='str', required=True, aliases=['vs']),
destination=dict(type='str', aliases=['address', 'ip']),
port=dict(type='int'),
all_profiles=dict(type='list'),
all_rules=dict(type='list'),
all_enabled_vlans=dict(type='list'),
pool=dict(type='str'),
description=dict(type='str'),
snat=dict(type='str'),
default_persistence_profile=dict(type='str')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
name = fq_name(partition, module.params['name'])
destination = module.params['destination']
port = module.params['port']
all_profiles = fq_list_names(partition, module.params['all_profiles'])
all_rules = fq_list_names(partition, module.params['all_rules'])
all_enabled_vlans = fq_list_names(partition, module.params['all_enabled_vlans'])
pool = fq_name(partition, module.params['pool'])
description = module.params['description']
snat = module.params['snat']
default_persistence_profile = fq_name(partition, module.params['default_persistence_profile'])
    if port is not None and (port < 1 or port > 65535):
        module.fail_json(msg="valid ports must be in range 1 - 65535")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
if not module.check_mode:
if vs_exists(api, name):
# hack to handle concurrent runs of module
# pool might be gone before we actually remove
try:
vs_remove(api, name)
result = {'changed': True, 'deleted': name}
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result['changed'] = False
else:
raise
else:
# check-mode return value
result = {'changed': True}
else:
update = False
if not vs_exists(api, name):
if (not destination) or (not port):
module.fail_json(msg="both destination and port must be supplied to create a VS")
if not module.check_mode:
# a bit of a hack to handle concurrent runs of this module.
# even though we've checked the virtual_server doesn't exist,
                    # it may exist by the time we run vs_create().
# this catches the exception and does something smart
# about it!
try:
vs_create(api, name, destination, port, pool)
set_profiles(api, name, all_profiles)
set_enabled_vlans(api, name, all_enabled_vlans)
set_rules(api, name, all_rules)
set_snat(api, name, snat)
set_description(api, name, description)
set_default_persistence_profiles(api, name, default_persistence_profile)
set_state(api, name, state)
result = {'changed': True}
except bigsuds.OperationFailed as e:
raise Exception('Error on creating Virtual Server : %s' % e)
else:
# check-mode return value
result = {'changed': True}
else:
update = True
if update:
# VS exists
if not module.check_mode:
# Have a transaction for all the changes
try:
api.System.Session.start_transaction()
result['changed'] |= set_destination(api, name, fq_name(partition, destination))
result['changed'] |= set_port(api, name, port)
result['changed'] |= set_pool(api, name, pool)
result['changed'] |= set_description(api, name, description)
result['changed'] |= set_snat(api, name, snat)
result['changed'] |= set_profiles(api, name, all_profiles)
result['changed'] |= set_enabled_vlans(api, name, all_enabled_vlans)
result['changed'] |= set_rules(api, name, all_rules)
result['changed'] |= set_default_persistence_profiles(api, name, default_persistence_profile)
result['changed'] |= set_state(api, name, state)
api.System.Session.submit_transaction()
except Exception as e:
raise Exception("Error on updating Virtual Server : %s" % e)
else:
# check-mode return value
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
kutuhal/oracle-r12-accounting | lib/django/contrib/admin/templatetags/admin_list.py | 46 | 17082 | from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, EMPTY_CHANGELIST_VALUE, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.utils import formats
from django.utils.encoding import force_text
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html('<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination with ellipses around the
        # current page.
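        # (e.g. 20 pages with the current page at 10 render as
        #  "1 2 ... 7 8 9 10 11 12 13 ... 19 20")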
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
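            # e.g. ORDER_VAR value "2.-1" means: sort by column 2 ascending,
            # then by column 1 descending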
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if force_text(result_repr) == '':
            result_repr = mark_safe('&nbsp;')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
link_or_text = format_html(
'<a href="{}"{}>{}</a>',
url,
format_html(
' onclick="opener.dismissRelatedLookupPopup(window, '
                    '&#39;{}&#39;); return false;"', result_id
) if cl.is_popup else '',
result_repr)
yield format_html('<{}{}>{}</{}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field(field_name)
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda filters: cl.get_query_string(filters, [field_generic])
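        # e.g. params "created__year=2015&created__month=3" mean the user has
        # drilled down to March 2015 (field name here is illustrative)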
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
})
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
| bsd-3-clause |
carlmw/oscar-wager | django/contrib/gis/db/backends/spatialite/creation.py | 16 | 4167 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.db.backends.sqlite3.creation import DatabaseCreation
class SpatiaLiteCreation(DatabaseCreation):
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
This method is overloaded to load up the SpatiaLite initialization
SQL prior to calling the `syncdb` command.
"""
if verbosity >= 1:
print "Creating test database '%s'..." % self.connection.alias
test_database_name = self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Confirm the feature set of the test database
self.connection.features.confirm()
# Need to load the SpatiaLite initialization SQL before running `syncdb`.
self.load_spatialite_sql()
call_command('syncdb', verbosity=verbosity, interactive=False, database=self.connection.alias)
if settings.CACHE_BACKEND.startswith('db://'):
from django.core.cache import parse_backend_uri
_, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
call_command('createcachetable', cache_name)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
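            # Emits SQL of the form (illustrative values):
            #   SELECT AddGeometryColumn('my_table', 'geom', 4326, 'POINT', 2, 1);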
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ', ' +
style.SQL_KEYWORD(str(int(not f.null))) +
');')
if f.spatial_index:
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('CreateSpatialIndex') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ');')
return output
def load_spatialite_sql(self):
"""
This routine loads up the SpatiaLite SQL file.
"""
# Getting the location of the SpatiaLite SQL file, and confirming
# it exists.
spatialite_sql = self.spatialite_init_file()
if not os.path.isfile(spatialite_sql):
raise ImproperlyConfigured('Could not find the required SpatiaLite initialization '
'SQL file (necessary for testing): %s' % spatialite_sql)
# Opening up the SpatiaLite SQL initialization file and executing
# as a script.
sql_fh = open(spatialite_sql, 'r')
try:
cur = self.connection._cursor()
cur.executescript(sql_fh.read())
finally:
sql_fh.close()
def spatialite_init_file(self):
# SPATIALITE_SQL may be placed in settings to tell GeoDjango
        # to use a specific path to the SpatiaLite initialization SQL.
return getattr(settings, 'SPATIALITE_SQL',
'init_spatialite-%s.%s.sql' %
self.connection.ops.spatial_version[:2])
| bsd-3-clause |
dineshsg/spoken-website | cron/old/create_playlists.py | 2 | 2307 | import MySQLdb
import time
import sys
#sys.path.insert(0, '../spoken')
#sys.path.insert(0, '../../spoken')
from youtube_upload import *
from config import *
# creating youtube object
youtube = Youtube(DEVELOPER_KEY)
debug("Login to Youtube API: email='%s', password='%s'" %
(EMAIL, "*" * len(PASSWORD)))
try:
youtube.login(EMAIL, PASSWORD)
except gdata.service.BadAuthentication:
raise BadAuthentication("Authentication failed")
db = MySQLdb.connect(host = DB_HOST, user = DB_USER, passwd = DB_PASS, \
db = DB_NAME)
cur = db.cursor()
cur.execute("SELECT * FROM creation_fosscategory ORDER BY foss")
rows = cur.fetchall()
error_log_file_head = open(LOG_ROOT + 'playlist-error-log.txt',"w")
success_log_file_head = open(LOG_ROOT + 'playlist-success-log.txt',"w")
for row in rows:
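    # For this FOSS, select the languages that have published tutorials
    # (status 1 or 2) but no playlist record yet.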
cur.execute("SELECT creation_language.id, creation_language.name FROM \
creation_language WHERE (creation_language.id IN (SELECT DISTINCT \
U0.language_id FROM creation_tutorialresource U0 INNER JOIN \
creation_tutorialdetail U1 ON ( U0.tutorial_detail_id = U1.id ) \
WHERE ((U0.status = 1 OR U0.status = 2 ) AND U1.foss_id = %s )) \
AND NOT (creation_language.id IN (SELECT U0.language_id FROM \
creation_playlistinfo U0 WHERE U0.foss_id = %s ))) ORDER BY \
creation_language.name ASC" % (row[0], row[0]))
langrows = cur.fetchall()
for langrow in langrows:
title = row[1] + ' - ' + langrow[1]
playlistid = youtube.create_playlist(title, row[2])
if playlistid:
currtime = time.strftime('%Y-%m-%d %H:%M:%S')
sql = """INSERT INTO creation_playlistinfo (foss_id, language_id, \
playlist_id, created, updated) VALUES(%d, %d, '%s', '%s', \
'%s')""" % (row[0], langrow[0], playlistid, currtime, currtime)
cur.execute(sql)
db.commit()
success_string = row[1] + ' - ' + langrow[1] + ' -- ' + playlistid
success_log_file_head.write(success_string + '\n')
print success_string
else:
error_string = row[1] + ' - ' + langrow[1] + ' -- FAILED'
error_log_file_head.write(error_string + '\n')
print error_string
error_log_file_head.close()
success_log_file_head.close()
| gpl-3.0 |
avanov/django | django/contrib/gis/db/backends/spatialite/introspection.py | 391 | 3131 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import (
DatabaseIntrospection, FlexibleFieldLookupDict,
)
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
"""
    Subclass that updates the `base_data_types_reverse` dict
for geometry field types.
"""
base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
base_data_types_reverse.update(
{'point': 'GeometryField',
'linestring': 'GeometryField',
'polygon': 'GeometryField',
'multipoint': 'GeometryField',
'multilinestring': 'GeometryField',
'multipolygon': 'GeometryField',
'geometrycollection': 'GeometryField',
})
class SpatiaLiteIntrospection(DatabaseIntrospection):
data_types_reverse = GeoFlexibleFieldLookupDict()
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying the `geometry_columns` table to get additional metadata.
type_col = 'type' if self.connection.ops.spatial_version < (4, 0, 0) else 'geometry_type'
cursor.execute('SELECT coord_dimension, srid, %s '
'FROM geometry_columns '
'WHERE f_table_name=%%s AND f_geometry_column=%%s' % type_col,
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
ogr_type = row[2]
if isinstance(ogr_type, six.integer_types) and ogr_type > 1000:
# Spatialite versions >= 4 use the new SFSQL 1.2 offsets
# 1000 (Z), 2000 (M), and 3000 (ZM) to indicate the presence of
# higher dimensional coordinates (M not yet supported by Django).
ogr_type = ogr_type % 1000 + OGRGeomType.wkb25bit
field_type = OGRGeomType(ogr_type).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if (isinstance(dim, six.string_types) and 'Z' in dim) or dim == 3:
field_params['dim'] = 3
finally:
cursor.close()
return field_type, field_params
def get_indexes(self, cursor, table_name):
indexes = super(SpatiaLiteIntrospection, self).get_indexes(cursor, table_name)
cursor.execute('SELECT f_geometry_column '
'FROM geometry_columns '
'WHERE f_table_name=%s AND spatial_index_enabled=1', (table_name,))
for row in cursor.fetchall():
indexes[row[0]] = {'primary_key': False, 'unique': False}
return indexes
| bsd-3-clause |
gechdcb/shadowsocks | shadowsocks/crypto/sodium.py | 1032 | 3778 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
__all__ = ['ciphers']
libsodium = None
loaded = False
buf_size = 2048
# for salsa20 and chacha20
BLOCK_SIZE = 64
def load_libsodium():
global loaded, libsodium, buf
libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
'libsodium')
if libsodium is None:
raise Exception('libsodium not found')
libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
c_ulonglong,
c_char_p, c_ulonglong,
c_char_p)
libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
c_ulonglong,
c_char_p, c_ulonglong,
c_char_p)
buf = create_string_buffer(buf_size)
loaded = True
class SodiumCrypto(object):
def __init__(self, cipher_name, key, iv, op):
if not loaded:
load_libsodium()
self.key = key
self.iv = iv
self.key_ptr = c_char_p(key)
self.iv_ptr = c_char_p(iv)
if cipher_name == 'salsa20':
self.cipher = libsodium.crypto_stream_salsa20_xor_ic
elif cipher_name == 'chacha20':
self.cipher = libsodium.crypto_stream_chacha20_xor_ic
else:
raise Exception('Unknown cipher')
# byte counter, not block counter
self.counter = 0
def update(self, data):
global buf_size, buf
l = len(data)
# we can only prepend some padding to make the encryption align to
# blocks
padding = self.counter % BLOCK_SIZE
if buf_size < padding + l:
buf_size = (padding + l) * 2
buf = create_string_buffer(buf_size)
if padding:
data = (b'\0' * padding) + data
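        # XOR-ing the zero prefix merely regenerates keystream bytes for the
        # already-processed part of the block; they are sliced off below.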
self.cipher(byref(buf), c_char_p(data), padding + l,
self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
self.counter += l
# buf is copied to a str object when we access buf.raw
# strip off the padding
return buf.raw[padding:padding + l]
ciphers = {
'salsa20': (32, 8, SodiumCrypto),
'chacha20': (32, 8, SodiumCrypto),
}
def test_salsa20():
cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_chacha20():
cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_chacha20()
test_salsa20()
| apache-2.0 |
andyzsf/rietveld | tests/test_incomingmail.py | 23 | 8805 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import setup
setup.process_args()
from email.message import Message
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from google.appengine.api.users import User
from utils import TestCase
from codereview import models, views
class TestIncomingMail(TestCase):
def setUp(self):
super(TestIncomingMail, self).setUp()
self.login('foo@example.com')
self.issue = models.Issue(subject='test')
self.issue.put()
self.issue2 = models.Issue(subject='test2')
self.issue2.put()
self.logout()
def test_incoming_mail(self):
msg = Message()
msg['To'] = 'reply@example.com'
msg['From'] = 'sender@example.com'
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg.set_payload('body')
response = self.client.post('/_ah/mail/reply@example.com',
msg.as_string(), content_type='text/plain')
self.assertEqual(response.status_code, 200)
self.assertEqual(models.Message.query(ancestor=self.issue.key).count(), 1)
self.assertEqual(models.Message.query(ancestor=self.issue2.key).count(), 0)
msg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(msg.text, 'body')
self.assertEqual(msg.subject,
'subject (issue%s)' % self.issue.key.id())
self.assertEqual(msg.sender, 'sender@example.com')
self.assertEqual(msg.recipients, ['reply@example.com'])
self.assert_(msg.date is not None)
self.assertEqual(msg.draft, False)
def test_incoming_mail_invalid_subject(self):
msg = Message()
msg['To'] = 'reply@example.com'
msg['From'] = 'sender@example.com'
msg['Subject'] = 'invalid'
msg.set_payload('body')
response = self.client.post('/_ah/mail/reply@example.com',
msg, content_type='text/plain')
self.assertEqual(response.status_code, 200)
self.assertEqual(models.Message.query(ancestor=self.issue.key).count(), 0)
def test_unknown_issue(self):
msg = Message()
msg['From'] = 'sender@example.com'
msg['Subject'] = 'subject (issue99999)'
msg.set_payload('body')
self.assertRaises(views.InvalidIncomingEmailError,
views._process_incoming_mail, msg.as_string(),
'reply@example.com')
def test_empty_message(self):
msg = Message()
msg['From'] = 'sender@example.com'
msg['Subject'] = 'subject (issue%s)\r\n\r\n' % self.issue.key.id()
self.assertRaises(views.InvalidIncomingEmailError,
views._process_incoming_mail, msg.as_string(),
'reply@example.com')
def test_senders_becomes_reviewer(self):
msg = Message()
msg['From'] ='sender@example.com'
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg.set_payload('body')
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
issue = models.Issue.get_by_id(self.issue.key.id()) # re-fetch issue
self.assertEqual(issue.reviewers, ['sender@example.com'])
issue.reviewers = []
issue.put()
# try again with sender that has an account
# we do this to handle CamelCase emails correctly
models.Account.get_account_for_user(User('sender@example.com'))
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
issue = models.Issue.get_by_id(self.issue.key.id())
self.assertEqual(issue.reviewers, ['sender@example.com'])
def test_long_subjects(self):
# multi-line subjects should be collapsed into a single line
msg = Message()
msg['Subject'] = ('foo '*30)+' (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
msg.set_payload('body')
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(len(imsg.subject.splitlines()), 1)
def test_multipart(self):
# Text first
msg = MIMEMultipart('alternative')
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
msg.attach(MIMEText('body', 'plain'))
msg.attach(MIMEText('ignore', 'html'))
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(imsg.text, 'body')
imsg.key.delete()
# HTML first
msg = MIMEMultipart('alternative')
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
msg.attach(MIMEText('ignore', 'html'))
msg.attach(MIMEText('body', 'plain'))
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(imsg.text, 'body')
imsg.key.delete()
# no text at all
msg = MIMEMultipart('alternative')
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
msg.attach(MIMEText('ignore', 'html'))
self.assertRaises(views.InvalidIncomingEmailError,
views._process_incoming_mail, msg.as_string(),
'reply@example.com')
def test_mails_from_appengine(self): # bounces
msg = Message()
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
msg['X-Google-Appengine-App-Id'] = 'foo'
self.assertRaises(views.InvalidIncomingEmailError,
views._process_incoming_mail, msg.as_string(),
'reply@exampe.com')
def test_huge_body_is_truncated(self): # see issue325
msg = Message()
msg['subject'] = 'subject (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
msg.set_payload('1' * 600 * 1024)
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(len(imsg.text), 500 * 1024)
self.assert_(imsg.text.endswith('... (message truncated)'))
def test_charset(self):
# make sure that incoming mails with non-ascii chars are handled correctly
# see related http://code.google.com/p/googleappengine/issues/detail?id=2326
jtxt = '\x1b$B%O%m!<%o!<%k%I!*\x1b(B'
jcode = 'iso-2022-jp'
msg = Message()
msg.set_payload(jtxt, jcode)
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(imsg.text.encode(jcode), jtxt)
def test_encoding(self):
# make sure that incoming mails with 8bit encoding are handled correctly.
        # see related http://code.google.com/p/googleappengine/issues/detail?id=2383
jtxt = '\x1b$B%O%m!<%o!<%k%I!*\x1b(B'
jcode = 'iso-2022-jp'
msg = Message()
msg.set_payload(jtxt, jcode)
msg['Subject'] = 'subject (issue%s)' % self.issue.key.id()
msg['From'] = 'sender@example.com'
del msg['Content-Transfer-Encoding'] # replace 7bit encoding
msg['Content-Transfer-Encoding'] = '8bit'
views._process_incoming_mail(msg.as_string(), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(imsg.text.encode(jcode), jtxt)
def test_missing_encoding(self):
# make sure that incoming mails with missing encoding and
# charset are handled correctly.
body = 'Âfoo'
msg = ('From: sender@example.com',
'Subject: subject (issue%s)' % self.issue.key.id(),
'',
body)
views._process_incoming_mail('\r\n'.join(msg), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(imsg.text, u'Âfoo')
imsg.key.delete()
body = '\xf6'
msg = ('From: sender@example.com',
'Subject: subject (issue%s)' % self.issue.key.id(),
'',
body)
views._process_incoming_mail('\r\n'.join(msg), 'reply@example.com')
imsg = models.Message.query(ancestor=self.issue.key).get()
self.assertEqual(imsg.text, u'\ufffd')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ericholscher/cookiecutter | cookiecutter/exceptions.py | 4 | 1440 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.exceptions
-----------------------
All exceptions used in the Cookiecutter code base are defined here.
"""
class CookiecutterException(Exception):
"""
Base exception class. All Cookiecutter-specific exceptions should subclass
this class.
"""
class NonTemplatedInputDirException(CookiecutterException):
"""
Raised when a project's input dir is not templated.
The name of the input directory should always contain a string that is
rendered to something else, so that input_dir != output_dir.
"""
class UnknownTemplateDirException(CookiecutterException):
"""
Raised when Cookiecutter cannot determine which directory is the project
template, e.g. more than one dir appears to be a template dir.
"""
class MissingProjectDir(CookiecutterException):
"""
Raised during cleanup when remove_repo() can't find a generated project
directory inside of a repo.
"""
class ConfigDoesNotExistException(CookiecutterException):
"""
Raised when get_config() is passed a path to a config file, but no file
is found at that path.
"""
class InvalidConfiguration(CookiecutterException):
"""
Raised if the global configuration file is not valid YAML or is
badly contructed.
"""
class UnknownRepoType(CookiecutterException):
"""
Raised if a repo's type cannot be determined.
"""
| bsd-3-clause |
travisgoldie/Signal_iQ | python/test/scripts/hw_minigun.py | 1 | 2516 | #!/usr/bin/env python
###############################################################################
# vim: tabstop=4:shiftwidth=4:expandtab:
# Copyright (c) 2017 SIOS Technology Corp. All rights reserved.
##############################################################################
"""
This script will spray events for various environments and VMs.
"""
import logging
import sys
from datetime import datetime, timedelta
from pytz import timezone
from os.path import dirname, realpath
curr_path = dirname(realpath(__file__))
sys.path.insert(0, '{}/../../'.format(curr_path))
from SignaliQ.client import Client
from SignaliQ.model.CloudProviderEvent import CloudProviderEvent
from SignaliQ.model.CloudVM import CloudVM
from SignaliQ.model.NetworkInterface import NetworkInterface
from SignaliQ.model.ProviderEventsUpdateMessage import ProviderEventsUpdateMessage
__log__ = logging.getLogger(__name__)
def main(args):
# Setup the client and send the data!
client = Client()
client.connect()
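    # Environment IDs mapped to the MAC addresses of the VM network
    # interfaces that the sprayed test events will reference.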
id_interf = {
500: [
"00:50:56:9b:3a:9b",
"00:50:56:9b:51:f2",
"00:50:56:9b:6f:09",
],
505: [
"00:50:56:93:7a:b9",
],
}
    event_time = (
        datetime
        .now()
        .replace(tzinfo=timezone('US/Eastern'))
    )
    # Repeat the spray five times across every environment.
    for _ in range(5):
for env_id, hws in id_interf.items():
event_time += timedelta(minutes = 5)
__log__.info(
"Creating event with time {} and env id of {}".format(
event_time.strftime('%Y-%m-%dT%H:%M:%S%z'), env_id,
)
)
events = [
CloudProviderEvent(
description = "Caused by a bad mood",
environment_id = env_id,
layer = "Storage",
severity = "Critical",
time = event_time.strftime('%Y-%m-%dT%H:%M:%S%z'),
event_type = "SDK Event",
vms = [
CloudVM(network_interfaces = [NetworkInterface(hw_address = hw)]) for hw in hws
],
)
]
event_message = ProviderEventsUpdateMessage(
environment_id = env_id,
events = events,
)
client.send(event_message)
# DONE
client.disconnect()
if __name__ == "__main__":
main(sys.argv)
| mit |
vlegoff/tsunami | src/secondaires/auberge/editeurs/aubedit/edt_chambres.py | 1 | 5517 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient l'éditeur EdtChambres, détaillé plus bas."""
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from secondaires.auberge.editeurs.aubedit.edt_chambre import EdtChambre
from primaires.format.fonctions import supprimer_accents
class EdtChambres(Editeur):
"""Contexte-éditeur des chambres d'une auberge."""
def __init__(self, pere, objet=None, attribut=None):
"""Constructeur de l'éditeur"""
Editeur.__init__(self, pere, objet, attribut)
self.ajouter_option("c", self.opt_creer_chambre)
self.ajouter_option("d", self.opt_supprimer_chambre)
def accueil(self):
"""Message d'accueil du contexte"""
auberge = self.objet
msg = "| |tit|" + "Édition des chambres de l'auberge {}".format(
auberge.cle).ljust(76)
msg += "|ff||\n" + self.opts.separateur + "\n"
msg += "Cet éditeur vous permet d'ajouter ou de supprimer des\n" \
"chambres dans cette auberge. Entrez simplement son " \
"numéro pour\nl'éditer.\n\n" \
"Options disponibles :\n" \
" |cmd|/c <numéro> <identifiant_de_salle>|ff| pour " \
"ajouter une chambre\n" \
" |cmd|/d <numéro>|ff| pour supprimer une chambre\n\n" \
"Exemplels :\n" \
"|ent|/c 1 zone:cle|ff|\n" \
"|ent|/c suite zone:cle|ff|\n" \
"|ent|/d 1|ff|\n" \
"(Notez que le numéro n'est pas nécessairement un nombre.\n\n"
msg += "Chambres définies :\n"
if len(auberge.chambres) == 0:
msg += "\n Aucune"
else:
chambres = sorted([c for c in auberge.chambres.values()],
key=lambda c: c.numero)
for chambre in chambres:
msg += "\n |ent|" + chambre.numero + "|ff|"
msg += " vers " + chambre.ident_salle
return msg
def opt_creer_chambre(self, arguments):
"""Ajoute une chambre.
Syntaxe :
/a <numéro> <ident_salle>
"""
arguments = arguments.lower()
auberge = self.objet
if arguments.strip() == "":
self.pere << "|err|Précisez |ent|un numéro|ff| et |ent|un " \
"identifiant de salle.|ff|"
return
try:
numero, ident = arguments.split(" ")
except ValueError:
self.pere << "|err|Syntaxe invalide : |ent|/a <numéro> " \
"<ident_salle>|ff|"
return
if numero in auberge.numero_chambres:
self.pere << "|err|Ce numéro est déjà utilisé.|ff|"
return
try:
salle = importeur.salle[ident]
except KeyError:
self.pere << "|err|La salle '{}' est introuvable.|ff|".format(
ident)
return
auberge.ajouter_chambre(numero, salle)
self.actualiser()
def opt_supprimer_chambre(self, arguments):
"""Supprime la chambre passée en paramètre.
Syntaxe :
/d <numéro>
"""
auberge = self.objet
chambre = auberge.get_chambre_avec_numero(arguments)
if chambre:
auberge.supprimer_chambre(chambre.ident_salle)
self.actualiser()
else:
self.pere << "|err|Chambre introuvable.|ff|"
def interpreter(self, msg):
"""Interprétation de l'éditeur"""
auberge = self.objet
chambre = auberge.get_chambre_avec_numero(msg)
if chambre:
enveloppe = EnveloppeObjet(EdtChambre, chambre)
enveloppe.parent = self
contexte = enveloppe.construire(self.pere)
self.migrer_contexte(contexte)
else:
self.pere << "|err|Chambre {} introuvable.|ff|".format(repr(msg))
| bsd-3-clause |
tlianza/Rfugee | lib/requests-2.7.0-py2.7.egg/requests/api.py | 435 | 5415 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      >>> req
      <Response [200]>
"""
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, params=None, **kwargs):
"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
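# Minimal self-check of the helpers above (illustrative only: it performs
# live network requests, and httpbin.org is just a convenient echo service,
# not part of this module):
if __name__ == '__main__':
    r = get('http://httpbin.org/get', params={'q': 'requests'})
    print(r.status_code)             # expected: 200
    r = post('http://httpbin.org/post', json={'hello': 'world'})
    print(r.json()['json'])          # httpbin echoes back {'hello': 'world'}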
| mit |
NLHEALTHCARE/PYELT | tests/unit_tests_basic/test05_db_functions.py | 1 | 3894 | from pyelt.mappings.sor_to_dv_mappings import SorToEntityMapping
from tests.unit_tests_basic import _domainmodel, _db_functions, _db_functions2
from tests.unit_tests_basic._db_functions import CreateAgb
from tests.unit_tests_basic._domainmodel import Patient
from tests.unit_tests_basic._mappings import init_source_to_sor_mappings
from tests.unit_tests_basic.global_test_suite import get_global_test_pipeline, init_db
__author__ = 'hvreenen'
import unittest
class TestCase_DbFunctions(unittest.TestCase):
is_init = False
def setUp(self):
if not TestCase_DbFunctions.is_init:
init_db()
TestCase_DbFunctions.is_init = True
self.pipeline = get_global_test_pipeline()
self.pipe = self.pipeline.get_or_create_pipe('test_system')
self.pipe.register_domain(_domainmodel)
self.pipe.register_db_functions(_db_functions, self.pipe.sor)
self.pipe.mappings = []
self.pipe.mappings.extend(init_source_to_sor_mappings())
self.pipe.mappings.extend(init_sor_to_dv_mappings(self.pipe.sor))
def test_run1(self):
self.pipeline.run()
self.pipe.sor.reflect_functions()
self.assertEquals(2, len(self.pipe.sor.functions))
    def test_run1_her(self):
        # Re-running the pipeline should leave the reflected function count
        # unchanged at two.
        self.pipeline.run()
self.pipe.sor.reflect_functions()
self.assertEquals(2, len(self.pipe.sor.functions))
def test_run3(self):
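        # Registering a second function module on top of _db_functions (which
        # contributes two functions) should bring the reflected function count
        # in the SOR schema to three after the next run.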
self.pipe.register_db_functions(_db_functions2, self.pipe.sor)
self.pipeline.run()
self.pipe.sor.reflect_functions()
self.assertEquals(3, len(self.pipe.sor.functions))
def init_sor_to_dv_mappings(sor):
mappings = []
mapping = SorToEntityMapping('patient_hstage', Patient, sor)
mapping.map_bk('patientnummer')
# mapping.map_field('lower(upper(achternaam)) => personalia.achternaam text')
# mapping.map_field('tussenvoegsels => personalia.tussenvoegsels text')
# mapping.map_field('voornaam => personalia.voornaam text')
# mapping.map_field('straat => adres.straat text')
# mapping.map_field('huisnummer::integer => adres.huisnummer integer')
# mapping.map_field('huisnummertoevoeging => adres.huisnummertoevoeging text')
# mapping.map_field('postcode => adres.postcode text')
# mapping.map_field('plaats => adres.plaats text')
# mapping.map_field('geslacht => default.geslacht text')
# mapping.map_field('now() => default.geboortedatum date')
# mapping.map_field("Coalesce(inschrijvingsnummer, 'jahgsd') => default.inschrijvingsnummer text")
# mapping.map_field('bsn => default.bsn text')
# mapping.map_field('inschrijvingsdatum::date => default.inschrijvingsdatum date')
mapping.map_field(CreateAgb('inschrijvingsdatum', 'bsn'), Patient.Personalia.achternaam)
mapping.map_field(CreateAgb('plaats', 'bsn'), Patient.Personalia.tussenvoegsels)
mappings.append(mapping)
return mappings
if __name__ == '__main__':
unittest.main()
# def test_transform(self):
# t = FieldTransformation()
# t.field_name = 'field'
# t.new_step('inner({fld})')
# t.new_step('outer({fld})')
# sql = t.get_sql('')
# self.assertEqual(sql, 'outer(inner(field))')
#
# t = FieldTransformation()
# t.field_name = 'field'
# t.new_step('inner({fld}, 1)')
# t.new_step('outer({fld})')
# sql = t.get_sql('')
# self.assertEqual(sql, 'outer(inner(field, 1))')
#
# t = FieldTransformation()
# t.field_name = 'field'
# t.new_step('inner({fld})')
# t.new_step('outer({fld}, 1)')
# sql = t.get_sql('')
# self.assertEqual(sql, 'outer(inner(field), 1)')
# print(sql)
#
# t = FieldTransformation()
# t.field_name = 'field'
# t.new_step('inner({fld}, field2)')
# t.new_step('outer({fld})')
# sql = t.get_sql('tbl')
    #         # TODO: fix this
# self.assertEqual(sql, 'outer(tbl.inner(tbl.field, tbl.field2))')
| gpl-3.0 |
darkorb/eve-wspace | evewspace/Map/templatetags/mapnav.py | 15 | 1224 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django import template
from Map.models import *
register = template.Library()
@register.inclusion_tag('map_list.html')
def mapnavlist(user):
"""Return list of maps that should appear in the user's nav bar."""
#Make a list, yay!
maplist = []
    for map_obj in Map.objects.all():
        if map_obj.get_permission(user) > 0:
            maplist.append(map_obj)
return {'maps': maplist}
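# Template usage (illustrative): after loading this tag library, the inclusion
# tag renders map_list.html with the maps the given user may see, e.g.
#
#   {% load mapnav %}
#   {% mapnavlist user %}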
| gpl-3.0 |
renyi533/tensorflow | tensorflow/python/kernel_tests/summary_ops_test.py | 6 | 46187 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 summary ops from summary_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine.sequential import Sequential
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SummaryOpsCoreTest(test_util.TensorFlowTestCase):
def testWrite(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write('tag', 42, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write('tag', 42, step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_metadata(self):
logdir = self.get_temp_dir()
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = 'foo'
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('obj', 0, 0, metadata=metadata)
summary_ops.write('bytes', 0, 0, metadata=metadata.SerializeToString())
m = constant_op.constant(metadata.SerializeToString())
summary_ops.write('string_tensor', 0, 0, metadata=m)
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(metadata, events[1].summary.value[0].metadata)
self.assertEqual(metadata, events[2].summary.value[0].metadata)
self.assertEqual(metadata, events[3].summary.value[0].metadata)
def testWrite_name(self):
@def_function.function
def f():
output = summary_ops.write('tag', 42, step=12, name='anonymous')
self.assertTrue(output.name.startswith('anonymous'))
f()
def testWrite_ndarray(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [[1, 2], [3, 4]], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([[1, 2], [3, 4]], to_numpy(value))
def testWrite_tensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
t = constant_op.constant([[1, 2], [3, 4]])
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', t, step=12)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_tensor_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f(t):
with writer.as_default():
summary_ops.write('tag', t, step=12)
t = constant_op.constant([[1, 2], [3, 4]])
f(t)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_stringTensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [b'foo', b'bar'], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([b'foo', b'bar'], to_numpy(value))
@test_util.run_gpu_only
def testWrite_gpuDeviceContext(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
with ops.device('/GPU:0'):
value = constant_op.constant(42.0)
step = constant_op.constant(12, dtype=dtypes.int64)
summary_ops.write('tag', value, step=step).numpy()
empty_metadata = summary_pb2.SummaryMetadata()
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertEqual(42, to_numpy(events[1].summary.value[0]))
self.assertEqual(empty_metadata, events[1].summary.value[0].metadata)
@test_util.also_run_as_tf_function
def testWrite_noDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42, step=0))
@test_util.also_run_as_tf_function
def testWrite_noStep_okayIfAlsoNoDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42))
@test_util.also_run_as_tf_function
def testWrite_noStep(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer(logdir).as_default():
with self.assertRaisesRegex(ValueError, 'No step set'):
summary_ops.write('tag', 42)
def testWrite_usingDefaultStep(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
summary_ops.set_step(1)
summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
summary_ops.write('tag', 1.0)
mystep = variables.Variable(10, dtype=dtypes.int64)
summary_ops.set_step(mystep)
summary_ops.write('tag', 1.0)
mystep.assign_add(1)
summary_ops.write('tag', 1.0)
events = events_from_logdir(logdir)
self.assertEqual(5, len(events))
self.assertEqual(1, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(10, events[3].step)
self.assertEqual(11, events[4].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
summary_ops.set_step(1)
f()
summary_ops.set_step(2)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the function was first traced.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
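  # Note on the test above (illustrative): a plain Python int passed to
  # set_step() is captured as a constant when the tf.function is first
  # traced, which is why the later set_step() call has no effect. The same
  # capture rule applies to any Python-level constant, e.g.:
  #
  #   x = 1
  #   @def_function.function
  #   def g():
  #     return x   # frozen into the traced graph
  #   x = 2
  #   g()          # still returns 1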
def testWrite_usingDefaultStepVariable_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
f()
mystep.assign_add(1)
f()
mystep.assign(10)
f()
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
summary_ops.set_step(1)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(write_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the graph was constructed.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
first_assign_op = mystep.assign_add(1)
second_assign_op = mystep.assign(10)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(mystep.initializer)
sess.run(write_op)
sess.run(first_assign_op)
sess.run(write_op)
sess.run(second_assign_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_recordIf_constant(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
self.assertTrue(summary_ops.write('default', 1, step=0))
with summary_ops.record_if(True):
self.assertTrue(summary_ops.write('set_on', 1, step=0))
with summary_ops.record_if(False):
self.assertFalse(summary_ops.write('set_off', 1, step=0))
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_constant_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
# Use assertAllEqual instead of assertTrue since it works in a defun.
self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
with summary_ops.record_if(True):
self.assertAllEqual(summary_ops.write('set_on', 1, step=0), True)
with summary_ops.record_if(False):
self.assertAllEqual(summary_ops.write('set_off', 1, step=0), False)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_callable(self):
logdir = self.get_temp_dir()
with context.eager_mode():
step = variables.Variable(-1, dtype=dtypes.int64)
def record_fn():
step.assign_add(1)
return int(step % 2) == 0
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(record_fn):
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_callable_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
step = variables.Variable(-1, dtype=dtypes.int64)
@def_function.function
def record_fn():
step.assign_add(1)
return math_ops.equal(step % 2, 0)
@def_function.function
def f():
with writer.as_default():
with summary_ops.record_if(record_fn):
return [
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step)]
self.assertAllEqual(f(), [True, False, True])
self.assertAllEqual(f(), [False, True, False])
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_tensorInput_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64)])
def f(step):
with writer.as_default():
with summary_ops.record_if(math_ops.equal(step % 2, 0)):
return summary_ops.write('tag', 1, step=step)
self.assertTrue(f(0))
self.assertFalse(f(1))
self.assertTrue(f(2))
self.assertFalse(f(3))
self.assertTrue(f(4))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWriteRawPb(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_fromFunction(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_multipleValues(self):
logdir = self.get_temp_dir()
pb1 = summary_pb2.Summary()
pb1.value.add().simple_value = 1.0
pb1.value.add().simple_value = 2.0
pb2 = summary_pb2.Summary()
pb2.value.add().simple_value = 3.0
pb3 = summary_pb2.Summary()
pb3.value.add().simple_value = 4.0
pb3.value.add().simple_value = 5.0
pb3.value.add().simple_value = 6.0
pbs = [pb.SerializeToString() for pb in (pb1, pb2, pb3)]
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pbs, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
expected_pb = summary_pb2.Summary()
for i in range(6):
expected_pb.value.add().simple_value = i + 1.0
self.assertProtoEquals(expected_pb, events[1].summary)
def testWriteRawPb_invalidValue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with self.assertRaisesRegex(
errors.DataLossError,
'Bad tf.compat.v1.Summary binary proto tensor string'):
summary_ops.write_raw_pb('notaproto', step=12)
@test_util.also_run_as_tf_function
def testGetSetStep(self):
try:
self.assertIsNone(summary_ops.get_step())
summary_ops.set_step(1)
# Use assertAllEqual instead of assertEqual since it works in a defun.
self.assertAllEqual(1, summary_ops.get_step())
summary_ops.set_step(constant_op.constant(2))
self.assertAllEqual(2, summary_ops.get_step())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable(self):
with context.eager_mode():
try:
mystep = variables.Variable(0)
summary_ops.set_step(mystep)
self.assertAllEqual(0, summary_ops.get_step().read_value())
mystep.assign_add(1)
self.assertAllEqual(1, summary_ops.get_step().read_value())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(1, summary_ops.get_step().read_value())
summary_ops.get_step().assign_add(1)
self.assertAllEqual(2, summary_ops.get_step().read_value())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable_fromFunction(self):
with context.eager_mode():
try:
@def_function.function
def set_step(step):
summary_ops.set_step(step)
return summary_ops.get_step()
@def_function.function
def get_and_increment():
summary_ops.get_step().assign_add(1)
return summary_ops.get_step()
mystep = variables.Variable(0)
self.assertAllEqual(0, set_step(mystep))
self.assertAllEqual(0, summary_ops.get_step().read_value())
self.assertAllEqual(1, get_and_increment())
self.assertAllEqual(2, get_and_increment())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(3, get_and_increment())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.also_run_as_tf_function
def testSummaryScope(self):
with summary_ops.summary_scope('foo') as (tag, scope):
self.assertEqual('foo', tag)
self.assertEqual('foo/', scope)
with summary_ops.summary_scope('bar') as (tag, scope):
self.assertEqual('foo/bar', tag)
self.assertEqual('foo/bar/', scope)
with summary_ops.summary_scope('with/slash') as (tag, scope):
self.assertEqual('foo/with/slash', tag)
self.assertEqual('foo/with/slash/', scope)
with ops.name_scope(None, skip_on_eager=False):
with summary_ops.summary_scope('unnested') as (tag, scope):
self.assertEqual('unnested', tag)
self.assertEqual('unnested/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_defaultName(self):
with summary_ops.summary_scope(None) as (tag, scope):
self.assertEqual('summary', tag)
self.assertEqual('summary/', scope)
with summary_ops.summary_scope(None, 'backup') as (tag, scope):
self.assertEqual('backup', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_handlesCharactersIllegalForScope(self):
with summary_ops.summary_scope('f?o?o') as (tag, scope):
self.assertEqual('f?o?o', tag)
self.assertEqual('foo/', scope)
    # If none of the characters are legal for a scope name, the default name is used.
with summary_ops.summary_scope('???', 'backup') as (tag, scope):
self.assertEqual('???', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_nameNotUniquifiedForTag(self):
constant_op.constant(0, name='foo')
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with ops.name_scope('with', skip_on_eager=False):
constant_op.constant(0, name='slash')
with summary_ops.summary_scope('with/slash') as (tag, _):
self.assertEqual('with/slash', tag)
def testAllV2SummaryOps(self):
logdir = self.get_temp_dir()
def define_ops():
result = []
# TF 2.0 summary ops
result.append(summary_ops.write('write', 1, step=0))
result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
# TF 1.x tf.contrib.summary ops
result.append(summary_ops.generic('tensor', 1, step=1))
result.append(summary_ops.scalar('scalar', 2.0, step=1))
result.append(summary_ops.histogram('histogram', [1.0], step=1))
result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
return result
with context.graph_mode():
ops_without_writer = define_ops()
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(True):
ops_recording_on = define_ops()
with summary_ops.record_if(False):
ops_recording_off = define_ops()
# We should be collecting all ops defined with a default writer present,
# regardless of whether recording was set on or off, but not those defined
# without a writer at all.
del ops_without_writer
expected_ops = ops_recording_on + ops_recording_off
self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
class SummaryWriterTest(test_util.TensorFlowTestCase):
def testCreate_withInitAndClose(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
get_total = lambda: len(events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
# Calling init() again while writer is open has no effect
writer.init()
self.assertEqual(1, get_total())
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Calling .close() should do an implicit flush
writer.close()
self.assertEqual(2, get_total())
def testCreate_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
# Returned SummaryWriter must be stored in a non-local variable so it
# lives throughout the function execution.
if not hasattr(f, 'writer'):
f.writer = summary_ops.create_file_writer_v2(logdir)
with context.eager_mode():
f()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
def testCreate_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
with context.graph_mode():
logdir_tensor = constant_op.constant(logdir)
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
summary_ops.create_file_writer_v2(logdir_tensor)
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
summary_ops.create_file_writer_v2(constant_op.constant(logdir))
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
f()
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_unpersistedResource_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
with summary_ops.create_file_writer_v2(logdir).as_default():
pass # Calling .as_default() is enough to indicate use.
with context.eager_mode():
# TODO(nickfelt): change this to a better error
with self.assertRaisesRegex(
errors.NotFoundError, 'Resource.*does not exist'):
f()
# Even though we didn't use it, an event file will have been created.
self.assertEqual(1, len(gfile.Glob(os.path.join(logdir, '*'))))
def testCreate_immediateSetAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
summary_ops.create_file_writer_v2(logdir).set_as_default()
summary_ops.flush()
finally:
# Ensure we clean up no matter how the test executes.
summary_ops._summary_state.writer = None # pylint: disable=protected-access
def testCreate_immediateAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.flush()
def testNoSharing(self):
# Two writers with the same logdir should not share state.
logdir = self.get_temp_dir()
with context.eager_mode():
writer1 = summary_ops.create_file_writer_v2(logdir)
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
writer2 = summary_ops.create_file_writer_v2(logdir)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testNoSharing_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f1():
if not hasattr(f1, 'writer'):
f1.writer = summary_ops.create_file_writer_v2(logdir)
with f1.writer.as_default():
summary_ops.write('tag', 1, step=1)
@def_function.function
def f2():
if not hasattr(f2, 'writer'):
f2.writer = summary_ops.create_file_writer_v2(logdir)
with f2.writer.as_default():
summary_ops.write('tag', 1, step=2)
with context.eager_mode():
f1()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
f2()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
f1()
f2()
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testMaxQueue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(
logdir, max_queue=1, flush_millis=999999).as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
def testWriterFlush(self):
logdir = self.get_temp_dir()
get_total = lambda: len(events_from_logdir(logdir))
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
writer.flush()
self.assertEqual(2, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(2, get_total())
# Exiting the "as_default()" should do an implicit flush
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
# Test "writer" parameter
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
summary_ops.flush(writer=writer)
self.assertEqual(4, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(4, get_total())
summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access
self.assertEqual(5, get_total())
@test_util.assert_no_new_tensors
def testNoMemoryLeak_graphMode(self):
logdir = self.get_temp_dir()
with context.graph_mode(), ops.Graph().as_default():
summary_ops.create_file_writer_v2(logdir)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoMemoryLeak_eagerMode(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', 1, step=0)
def testClose_preventsLaterUse(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
writer.close()
writer.close() # redundant close() is a no-op
writer.flush() # redundant flush() is a no-op
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.init()
with self.assertRaisesRegex(RuntimeError, 'already closed'):
with writer.as_default():
self.fail('should not get here')
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.set_as_default()
def testClose_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
writer.close()
self.assertNotIn(eventfile, get_open_filenames())
def testDereference_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
del writer
self.assertNotIn(eventfile, get_open_filenames())
class SummaryOpsTest(test_util.TensorFlowTestCase):
def tearDown(self):
summary_ops.trace_off()
def run_metadata(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata_graphs(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata_graphs(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def create_run_metadata(self):
step_stats = step_stats_pb2.StepStats(dev_stats=[
step_stats_pb2.DeviceStepStats(
device='cpu:0',
node_stats=[step_stats_pb2.NodeExecStats(node_name='hello')])
])
return config_pb2.RunMetadata(
function_graphs=[
config_pb2.RunMetadata.FunctionGraphs(
pre_optimization_graph=graph_pb2.GraphDef(
node=[node_def_pb2.NodeDef(name='foo')]))
],
step_stats=step_stats)
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.keras_model(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
def run_trace(self, f, step=1):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
summary_ops.trace_on(graph=True, profiler=False)
with writer.as_default():
f()
summary_ops.trace_export(name='foo', step=step)
writer.close()
events = events_from_logdir(logdir)
return events[1]
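  # The helper above mirrors the public tracing workflow (illustrative,
  # using the tf.summary aliases of the same ops):
  #
  #   tf.summary.trace_on(graph=True, profiler=False)
  #   writer = tf.summary.create_file_writer(logdir)
  #   with writer.as_default():
  #     traced_fn()
  #     tf.summary.trace_export(name='trace', step=0)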
@test_util.run_v2_only
def testRunMetadata_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadata_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadata_wholeRunMetadata(self):
expected_run_metadata = """
step_stats {
dev_stats {
device: "cpu:0"
node_stats {
node_name: "hello"
}
}
}
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadata_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testRunMetadataGraph_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadataGraph_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata_graph"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_runMetadataFragment(self):
expected_run_metadata = """
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata_graphs(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@test_util.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
summary_ops.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with test.mock.patch.object(logging, 'warn') as mock_log:
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@test_util.run_v2_only
def testKerasModel_otherExceptions(self):
model = Sequential()
with test.mock.patch.object(model, 'to_json') as mock_to_json:
with test.mock.patch.object(logging, 'warn') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring... oops')
@test_util.run_v2_only
def testTrace(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
event = self.run_trace(f)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
# Content of function_graphs is large and, for instance, device can change.
self.assertTrue(hasattr(actual_run_metadata, 'function_graphs'))
@test_util.run_v2_only
def testTrace_cannotEnableTraceInFunction(self):
@def_function.function
def f():
summary_ops.trace_on(graph=True, profiler=False)
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotEnableTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_on(graph=True, profiler=False)
self.assertRegexpMatches(
str(mock_log.call_args), 'Must enable trace in eager mode.')
@test_util.run_v2_only
def testTrace_cannotExportTraceWithoutTrace(self):
with six.assertRaisesRegex(self, ValueError,
'Must enable trace before export.'):
summary_ops.trace_export(name='foo', step=1)
@test_util.run_v2_only
def testTrace_cannotExportTraceInFunction(self):
summary_ops.trace_on(graph=True, profiler=False)
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
summary_ops.trace_export(name='foo', step=1)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args),
'Cannot export trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotExportTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_export(name='foo', step=1)
self.assertRegexpMatches(
str(mock_log.call_args),
'Can only export trace while executing eagerly.')
@test_util.run_v2_only
def testTrace_usesDefaultStep(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
try:
summary_ops.set_step(42)
event = self.run_trace(f, step=None)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
  assert len(files) == 1, 'Expected exactly one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def to_numpy(summary_value):
return tensor_util.MakeNdarray(summary_value.tensor)
if __name__ == '__main__':
test.main()
| apache-2.0 |
jianglu/mojo | build/win/reorder-imports.py | 103 | 1807 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os
import shutil
import subprocess
import sys
def reorder_imports(input_dir, output_dir, architecture):
"""Run swapimports.exe on the initial chrome.exe, and write to the output
directory. Also copy over any related files that might be needed
(pdbs, manifests etc.).
"""
input_image = os.path.join(input_dir, 'chrome.exe')
output_image = os.path.join(output_dir, 'chrome.exe')
swap_exe = os.path.join(
__file__,
'..\\..\\..\\third_party\\syzygy\\binaries\\exe\\swapimport.exe')
args = [swap_exe, '--input-image=%s' % input_image,
'--output-image=%s' % output_image, '--overwrite', '--no-logo']
if architecture == 'x64':
    args.append('--x64')
  args.append('chrome_elf.dll')
subprocess.call(args)
for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
return 0
def main(argv):
usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
metavar='DIR')
parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
metavar='DIR')
parser.add_option('-a', '--arch', help='architecture of build (optional)',
default='ia32')
opts, args = parser.parse_args()
if not opts.input or not opts.output:
    parser.error('Please provide an input and an output directory')
return reorder_imports(opts.input, opts.output, opts.arch)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
pombredanne/bokeh | bokeh/core/templates.py | 7 | 1290 | ''' The templates module contains Jinja2 templates used by Bokeh to embed
Bokeh models (e.g. plots, widgets, layouts) in various ways.
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_JS
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_TAG
.. bokeh-jinja:: bokeh.core.templates.CSS_RESOURCES
.. bokeh-jinja:: bokeh.core.templates.DOC_JS
.. bokeh-jinja:: bokeh.core.templates.FILE
.. bokeh-jinja:: bokeh.core.templates.JS_RESOURCES
.. bokeh-jinja:: bokeh.core.templates.NOTEBOOK_LOAD
.. bokeh-jinja:: bokeh.core.templates.NOTEBOOK_DIV
.. bokeh-jinja:: bokeh.core.templates.PLOT_DIV
.. bokeh-jinja:: bokeh.core.templates.SCRIPT_TAG
'''
from __future__ import absolute_import
from jinja2 import Environment, PackageLoader
_env = Environment(loader=PackageLoader('bokeh.core', '_templates'))
JS_RESOURCES = _env.get_template("js_resources.html")
CSS_RESOURCES = _env.get_template("css_resources.html")
SCRIPT_TAG = _env.get_template("script_tag.html")
PLOT_DIV = _env.get_template("plot_div.html")
DOC_JS = _env.get_template("doc_js.js")
FILE = _env.get_template("file.html")
NOTEBOOK_LOAD = _env.get_template("notebook_load.html")
NOTEBOOK_DIV = _env.get_template("notebook_div.html")
AUTOLOAD_JS = _env.get_template("autoload_js.js")
AUTOLOAD_TAG = _env.get_template("autoload_tag.html")
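# Rendering sketch (illustrative): each of the objects above is a plain
# Jinja2 Template, so embedding code renders it with keyword context, e.g.
# ``SCRIPT_TAG.render(...)``. The context variables each template expects are
# defined by the matching file under bokeh/core/_templates and are not
# documented here.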
| bsd-3-clause |
SimonSapin/pycairo | examples/cairo_snippets/snippets_svg.py | 9 | 1373 | #!/usr/bin/env python
from __future__ import division, print_function
from math import pi as M_PI # used by many snippets
import sys
import cairo
if not cairo.HAS_SVG_SURFACE:
raise SystemExit ('cairo was not compiled with SVG support')
from snippets import snip_list, snippet_normalize
width_in_inches, height_in_inches = 2, 2
width_in_points, height_in_points = width_in_inches * 72, height_in_inches * 72
width, height = width_in_points, height_in_points # used by snippet_normalize()
def do_snippet (snippet):
if verbose_mode:
print('processing %s' % snippet)
filename = 'snippets/%s.svg' % snippet
surface = cairo.SVGSurface (filename, width_in_points, height_in_points)
cr = cairo.Context (surface)
cr.save()
try:
fName = 'snippets/%s.py' % snippet
code = open(fName).read()
        exec(code, globals(), locals())
    except Exception:
        exc_type, exc_value = sys.exc_info()[:2]
        print('%s: %s' % (exc_type, exc_value), file=sys.stderr)
else:
cr.restore()
cr.show_page()
surface.finish()
if verbose_mode:
        print()
if __name__ == '__main__':
verbose_mode = True
if len(sys.argv) > 1 and sys.argv[1] == '-s':
verbose_mode = False
del sys.argv[1]
if len(sys.argv) > 1: # do specified snippets
snippet_list = sys.argv[1:]
else: # do all snippets
snippet_list = snip_list
for s in snippet_list:
do_snippet (s)
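# Example invocations (illustrative):
#   python snippets_svg.py           # render every snippet, verbosely
#   python snippets_svg.py -s arc    # render only snippets/arc.py, silently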
| gpl-3.0 |
EricssonResearch/calvin-base | calvin/actorstore/systemactors/web/LocationWeather.py | 1 | 2182 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, calvinsys, stateguard
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class LocationWeather(Actor):
"""
Get current weather at selected destination, given as "city,country code", "city", or ",country code"
Input:
location : location to fetch weather from
Output:
forecast: weather at given city, or null on error
"""
@manage([])
def init(self):
self.setup()
def did_migrate(self):
self.setup()
def setup(self):
self._service = calvinsys.open(self, "weather")
def teardown(self):
calvinsys.close(self._service)
def will_migrate(self):
self.teardown()
def will_end(self):
self.teardown()
@stateguard(lambda self: self._service and calvinsys.can_write(self._service))
@condition(action_input=['location'])
def start_forecast(self, location):
calvinsys.write(self._service, location)
@stateguard(lambda self: self._service and calvinsys.can_read(self._service))
@condition(action_output=['forecast'])
def finish_forecast(self):
forecast = calvinsys.read(self._service)
return (forecast,)
action_priority = (start_forecast, finish_forecast,)
requires = ['weather']
test_calvinsys = {'weather': {'read': ["sunny"],
'write': ["Lund"]}}
test_set = [
{
'inports': {'location': ["Lund"]},
'outports': {'forecast': ["sunny"]}
}
]
| apache-2.0 |
jhawkesworth/ansible | lib/ansible/modules/storage/netapp/netapp_e_iscsi_interface.py | 52 | 16203 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_iscsi_interface
short_description: NetApp E-Series manage iSCSI interface configuration
description:
- Configure settings of an E-Series iSCSI interface
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
controller:
description:
- The controller that owns the port you want to configure.
- Controller names are presented alphabetically, with the first controller as A,
the second as B, and so on.
- Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
limitation and could change in the future.
required: yes
choices:
- A
- B
name:
description:
- The channel of the port to modify the configuration of.
- The list of choices is not necessarily comprehensive. It depends on the number of ports
that are available in the system.
- The numerical value represents the number of the channel (typically from left to right on the HIC),
beginning with a value of 1.
required: yes
aliases:
- channel
state:
description:
- When enabled, the provided configuration will be utilized.
- When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
choices:
- enabled
- disabled
default: enabled
address:
description:
- The IPv4 address to assign to the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
subnet_mask:
description:
- The subnet mask to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
gateway:
description:
- The IPv4 gateway address to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
config_method:
description:
- The configuration method type to use for this interface.
- dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
choices:
- dhcp
- static
default: dhcp
mtu:
description:
- The maximum transmission units (MTU), in bytes.
- This allows you to configure a larger value for the MTU, in order to enable jumbo frames
(any value > 1500).
- Generally, it is necessary to have your host, switches, and other components not only support jumbo
frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to
leave this at the default.
default: 1500
aliases:
- max_frame_size
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
via dhcp, etc), can take seconds or minutes longer to take effect.
- This module will not be useful/usable on an E-Series system without any iSCSI interfaces.
- This module requires a Web Services API version of >= 1.3.
"""
EXAMPLES = """
- name: Configure the first port on the A controller with a static IPv4 address
netapp_e_iscsi_interface:
name: "1"
controller: "A"
config_method: static
address: "192.168.1.100"
subnet_mask: "255.255.255.0"
gateway: "192.168.1.1"
ssid: "1"
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable ipv4 connectivity for the second port on the B controller
netapp_e_iscsi_interface:
name: "2"
controller: "B"
state: disabled
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
- name: Enable jumbo frames for the first 4 ports on controller A
netapp_e_iscsi_interface:
name: "{{ item | int }}"
controller: "A"
state: enabled
mtu: 9000
config_method: dhcp
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
loop:
- 1
- 2
- 3
- 4
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The interface settings have been updated.
enabled:
description:
- Indicates whether IPv4 connectivity has been enabled or disabled.
- This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance,
it is unlikely that the configuration will actually be valid.
returned: on success
sample: True
type: bool
"""
import json
import logging
from pprint import pformat
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiInterface(object):
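    # Module worker: parses the arguments, validates the requested IPv4/MTU
    # settings, and applies them to the selected controller/channel through
    # the E-Series Web Services API.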
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
controller=dict(type='str', required=True, choices=['A', 'B']),
name=dict(type='int', aliases=['channel']),
state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
address=dict(type='str', required=False),
subnet_mask=dict(type='str', required=False),
gateway=dict(type='str', required=False),
config_method=dict(type='str', required=False, default='dhcp', choices=['dhcp', 'static']),
mtu=dict(type='int', default=1500, required=False, aliases=['max_frame_size']),
log_path=dict(type='str', required=False),
))
required_if = [
["config_method", "static", ["address", "subnet_mask"]],
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, )
args = self.module.params
self.controller = args['controller']
self.name = args['name']
self.mtu = args['mtu']
self.state = args['state']
self.address = args['address']
self.subnet_mask = args['subnet_mask']
self.gateway = args['gateway']
self.config_method = args['config_method']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
        if self.mtu < 1500 or self.mtu > 9000:
            self.module.fail_json(msg="The provided mtu is invalid, it must be between 1500 and 9000 bytes.")
if self.config_method == 'dhcp' and any([self.address, self.subnet_mask, self.gateway]):
self.module.fail_json(msg='A config_method of dhcp is mutually exclusive with the address,'
' subnet_mask, and gateway options.')
# A relatively primitive regex to validate that the input is formatted like a valid ip address
address_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
if self.address and not address_regex.match(self.address):
self.module.fail_json(msg="An invalid ip address was provided for address.")
if self.subnet_mask and not address_regex.match(self.subnet_mask):
self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
if self.gateway and not address_regex.match(self.gateway):
self.module.fail_json(msg="An invalid ip address was provided for gateway.")
@property
def interfaces(self):
ifaces = list()
try:
(rc, ifaces) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
# Filter out non-iSCSI interfaces
ifaces = [iface['iscsi'] for iface in ifaces if iface['interfaceType'] == 'iscsi']
return ifaces
def get_controllers(self):
"""Retrieve a mapping of controller labels to their references
{
'A': '070000000000000000000001',
'B': '070000000000000000000002',
}
:return: the controllers defined on the system
"""
controllers = list()
try:
(rc, controllers) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/id'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
controllers.sort()
controllers_dict = {}
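        # label the sorted controller references alphabetically: the first is 'A', the second 'B', and so on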
i = ord('A')
for controller in controllers:
label = chr(i)
controllers_dict[label] = controller
i += 1
return controllers_dict
def fetch_target_interface(self):
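        # return the iSCSI interface matching the requested channel on the chosen
        # controller; otherwise fail, reporting the channels that do exist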
interfaces = self.interfaces
for iface in interfaces:
if iface['channel'] == self.name and self.controllers[self.controller] == iface['controllerId']:
return iface
channels = sorted(set((str(iface['channel'])) for iface in interfaces
if self.controllers[self.controller] == iface['controllerId']))
self.module.fail_json(msg="The requested channel of %s is not valid. Valid channels include: %s."
% (self.name, ", ".join(channels)))
def make_update_body(self, target_iface):
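        # compare the requested settings with the interface's current state and
        # build the symbol-API request body; returns (update_required, body)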
body = dict(iscsiInterface=target_iface['id'])
update_required = False
self._logger.info("Requested state=%s.", self.state)
self._logger.info("config_method: current=%s, requested=%s",
target_iface['ipv4Data']['ipv4AddressConfigMethod'], self.config_method)
if self.state == 'enabled':
settings = dict()
if not target_iface['ipv4Enabled']:
update_required = True
settings['ipv4Enabled'] = [True]
if self.mtu != target_iface['interfaceData']['ethernetData']['maximumFramePayloadSize']:
update_required = True
settings['maximumFramePayloadSize'] = [self.mtu]
if self.config_method == 'static':
ipv4Data = target_iface['ipv4Data']['ipv4AddressData']
if ipv4Data['ipv4Address'] != self.address:
update_required = True
settings['ipv4Address'] = [self.address]
if ipv4Data['ipv4SubnetMask'] != self.subnet_mask:
update_required = True
settings['ipv4SubnetMask'] = [self.subnet_mask]
if self.gateway is not None and ipv4Data['ipv4GatewayAddress'] != self.gateway:
update_required = True
settings['ipv4GatewayAddress'] = [self.gateway]
if target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configStatic':
update_required = True
settings['ipv4AddressConfigMethod'] = ['configStatic']
elif (target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configDhcp'):
update_required = True
settings.update(dict(ipv4Enabled=[True],
ipv4AddressConfigMethod=['configDhcp']))
body['settings'] = settings
else:
if target_iface['ipv4Enabled']:
update_required = True
body['settings'] = dict(ipv4Enabled=[False])
self._logger.info("Update required ?=%s", update_required)
self._logger.info("Update body: %s", pformat(body))
return update_required, body
def update(self):
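        # main workflow: resolve the controller, diff the settings, POST the
        # change (unless in check mode), then report the resulting state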
self.controllers = self.get_controllers()
if self.controller not in self.controllers:
self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s."
% ", ".join(self.controllers.keys()))
iface_before = self.fetch_target_interface()
update_required, body = self.make_update_body(iface_before)
if update_required and not self.check_mode:
try:
url = (self.url +
'storage-systems/%s/symbol/setIscsiInterfaceProperties' % self.ssid)
(rc, result) = request(url, method='POST', data=json.dumps(body), headers=HEADERS, timeout=300,
ignore_errors=True, **self.creds)
# We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook
# is cancelled mid-flight), that it isn't worth the complexity.
if rc == 422 and result['retcode'] in ['busy', '3']:
self.module.fail_json(
msg="The interface is currently busy (probably processing a previously requested modification"
" request). This operation cannot currently be completed. Array Id [%s]. Error [%s]."
% (self.ssid, result))
# Handle authentication issues, etc.
elif rc != 200:
self.module.fail_json(
msg="Failed to modify the interface! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(result)))
self._logger.debug("Update request completed successfully.")
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
iface_after = self.fetch_target_interface()
self.module.exit_json(msg="The interface settings have been updated.", changed=update_required,
enabled=iface_after['ipv4Enabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiInterface()
iface()
if __name__ == '__main__':
main()
| gpl-3.0 |
jarn0ld/gnuradio | gr-channels/python/channels/__init__.py | 54 | 1350 | #
# Copyright 2012-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks for channel models and related functions.
'''
import os
try:
from channels_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from channels_swig import *
# Blocks for Hardware Impairments
from amp_bal import *
from conj_fs_iqcorr import *
from distortion_2_gen import *
from distortion_3_gen import *
from iqbal_gen import *
from impairments import *
from phase_bal import *
from phase_noise_gen import *
from quantizer import *
| gpl-3.0 |
potash/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor using 299 boosts (300 decision trees) is compared with a single
decision tree regressor. As the number of boosts is increased, the regressor
can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
Ceciliae/gourmet | gourmet/gtk_extras/timeEntry.py | 6 | 4398 | ### Copyright (C) 2005 Thomas M. Hinkle
### Copyright (C) 2009 Rolf Leggewie
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import gtk
from gettext import gettext as _
import gourmet.convert as convert
import validatingEntry
TIME_TO_READ = 1000
class TimeEntry (validatingEntry.ValidatingEntry):
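    # entry widget that validates time strings (e.g. "1 1/2 hours") and
    # converts them to seconds through gourmet.convert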
__gtype_name__ = 'TimeEntry'
def __init__ (self, conv=None):
if not conv: self.conv = convert.get_converter()
else: self.conv = conv
validatingEntry.ValidatingEntry.__init__(self)
self.entry.get_value = self.get_value
self.entry.set_value = self.set_value
def find_errors_in_progress (self, txt):
if (not txt) or self.conv.timestring_to_seconds(txt):
return None
elif not convert.NUMBER_MATCHER.match(txt.split()[0]):
return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).')
else:
words = txt.split()
#if len(words) == 1:
# self._hide_warning_slowly()
# return
if convert.NUMBER_MATCHER.match(words[-1]):
return None
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
return None
#self._hide_warning_slowly()
#return
                return _('Invalid input.') + ' ' + \
                       _('Time must be expressed in hours, minutes, seconds, etc.')
#else:
# self.set_warning_text("Invalid or incomplete time")
# self._show_warning()
def find_completed_errors (self,*args):
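        # validate the completed time string: accept it when the trailing word
        # is a number or the prefix of a known unit, otherwise flag it invalid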
txt = self.entry.get_text()
if txt and not self.conv.timestring_to_seconds(txt):
            return _('Invalid input.') + ' ' + \
                   _('Time must be expressed in hours, minutes, seconds, etc.')
words = txt.split()
if len(words) == 1:
self._hide_warning_slowly()
return
elif convert.NUMBER_MATCHER.match(words[-1]):
return
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
self._hide_warning_slowly()
return
self.valid = False
self.warn = True
            self.set_warning_text('Invalid input. ' + 'Time must be expressed in hours, minutes, seconds, etc.')
self._show_warning()
def set_value (self,seconds):
self.entry.set_text(
convert.seconds_to_timestring(seconds,
fractions=convert.FRACTIONS_ASCII)
)
def get_value (self):
return self.conv.timestring_to_seconds(self.entry.get_text())
def make_time_entry():
te=TimeEntry()
te.show()
return te
if __name__ == '__main__':
w=gtk.Window()
vb = gtk.VBox()
hb = gtk.HBox()
l=gtk.Label('_Label')
l.set_use_underline(True)
l.set_alignment(0,0.5)
hb.pack_start(l)
te=TimeEntry()
import sys
te.connect('changed',lambda w: sys.stderr.write('Time value: %s'%w.get_value()))
l.set_mnemonic_widget(te)
hb.pack_start(te,expand=False,fill=False)
vb.add(hb)
qb = gtk.Button(stock=gtk.STOCK_QUIT)
vb.add(qb)
l.show()
hb.show()
qb.show()
te.show()
vb.show()
qb.connect('clicked',lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit())
w.add(vb)
w.show()
w.connect('delete_event',gtk.main_quit)
gtk.main()
| gpl-2.0 |
chiamingyen/PythonCAD_py3 | Interface/cadwindow.py | 1 | 39067 | ############################################################################
#
# Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
#
# This file is part of the example classes of the Qt Toolkit.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file LICENSE.GPL included in the packaging of
# this file. Please review the following information to ensure GNU
# General Public Licensing requirements will be met:
# http://www.trolltech.com/products/qt/opensource.html
#
# If you are unsure which license is appropriate for your use, please
# review the following information:
# http://www.trolltech.com/products/qt/licensing.html or contact the
# sales department at sales@trolltech.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
#import sip
#sip.setapi('QString', 2)
import os
import sys
from PyQt5 import QtCore, QtGui, QtPrintSupport, QtWidgets
from . import cadwindow_rc
from Generic.application import Application
#Interface
from Interface.LayerIntf.layerdock import LayerDock
from Interface.cadscene import CadScene
from Interface.cadview import CadView
from Interface.idocument import IDocument
from Interface.CmdIntf.cmdintf import CmdIntf
from Interface.Entity.base import BaseEntity
from Interface.Command.icommand import ICommand
from Interface.cadinitsetting import *
from Interface.Dialogs.preferences import Preferences
#Kernel
from Kernel.exception import *
from Kernel.initsetting import * #SNAP_POINT_ARRAY, ACTIVE_SNAP_POINT
from Interface.DrawingHelper.polarguides import getPolarMenu
class CadWindowMdi(QtWidgets.QMainWindow):
def __init__(self):
super(CadWindowMdi, self).__init__()
self.mdiArea = QtWidgets.QMdiArea()
self.mdiArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.mdiArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.setCentralWidget(self.mdiArea)
self.mdiArea.subWindowActivated.connect(self.subWindowActivatedEvent)
self.oldSubWin=None
# self.readSettings() #now works for position and size, support for toolbars is still missing(http://www.opendocs.net/pyqt/pyqt4/html/qsettings.html)
self.setWindowTitle("PythonCAD")
qIcon=self._getIcon('pythoncad')
if qIcon:
self.setWindowIcon(qIcon)
self.setUnifiedTitleAndToolBarOnMac(True)
#pythoncad kernel
self.__application = Application()
self.__cmd_intf = CmdIntf(self)
#self.__cmd_intf.FunctionHandler.commandExecuted+=self.commandExecuted
# create all dock windows
self._createDockWindows()
# create status bar
self._createStatusBar()
self.setUnifiedTitleAndToolBarOnMac(True)
self._registerCommands()
self.updateMenus()
self.lastDirectory=os.getenv('USERPROFILE') or os.getenv('HOME')
self.readSettings() #now works for position and size and ismaximized, and finally toolbar position
return
@property
def scene(self):
if self.mdiArea.activeSubWindow():
return self.mdiArea.activeSubWindow().scene
@property
def view(self):
if self.mdiArea.activeSubWindow():
return self.mdiArea.activeSubWindow().view
@property
def Application(self):
"""
get the kernel application object
"""
return self.__application
@property
def LayerDock(self):
"""
get the layer tree dockable window
"""
return self.__layer_dock
# ###############################################STATUSBAR
# ##########################################################
def _createStatusBar(self):
'''
Creates the statusbar object.
'''
self.statusBar().showMessage("Ready")
#------------------------------------------------------------------------------------Create status buttons
#Force Direction
        self.forceDirectionStatus=statusButton('SForceDir.png', 'Orthogonal Mode [right click will, in the future, set the incremental constraint angle]')
self.forceDirectionStatus.clicked.connect(self.setForceDirection)
self.forceDirectionStatus.setMenu(getPolarMenu())
self.statusBar().addPermanentWidget(self.forceDirectionStatus)
#Snap
        self.SnapStatus=statusButton('SSnap.png', 'Snap [right click displays the snap list]\nin a future implementation this should be a checklist')
self.SnapStatus.clicked.connect(self.setSnapStatus)
self.SnapStatus.setMenu(self.__cmd_intf.Category.getMenu(6))
self.SnapStatus.setChecked(True)
self.statusBar().addPermanentWidget(self.SnapStatus)
#Grid
self.GridStatus=statusButton('SGrid.png', 'Grid Mode [not available yet]')
self.GridStatus.clicked.connect(self.setGrid)
self.statusBar().addPermanentWidget(self.GridStatus)
        #------------------------------------------------------------------------------------Set coordinates label on statusbar (updated by idocument)
self.coordLabel=QtWidgets.QLabel("x=0.000\ny=0.000")
self.coordLabel.setAlignment(QtCore.Qt.AlignVCenter)
self.coordLabel.setFrameStyle(QtWidgets.QFrame.Panel | QtWidgets.QFrame.Sunken)
self.coordLabel.setMinimumWidth(80)
self.coordLabel.setMaximumHeight(20)
self.coordLabel.setFont(QtGui.QFont("Sans", 6))
self.statusBar().addPermanentWidget(self.coordLabel)
def setForceDirection(self):
if self.forceDirectionStatus.isChecked():
self.scene.forceDirectionEnabled=True
self.forceDirectionStatus.setFocus(False)
if self.scene.activeICommand!=None and self.scene.fromPoint!=None:
self.scene.GuideHandler.show()
else:
self.scene.forceDirectionEnabled=False
self.scene.GuideHandler.hide()
def setSnapStatus(self):
if self.SnapStatus.isChecked():
self.scene.snappingPoint.activeSnap=SNAP_POINT_ARRAY['LIST']
else:
self.scene.snappingPoint.activeSnap=SNAP_POINT_ARRAY['NONE']
def setGrid(self):
pass
# ###############################################END STATUSBAR
# ##########################################################
def commandExecuted(self):
self.resetCommand()
def _createDockWindows(self):
'''
Creates all dockable windows for the application
'''
# commandline
command_dock = self.__cmd_intf.commandLine
# if the commandline exists, add it
if not command_dock is None:
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, command_dock)
return
def closeEvent(self, event):
"""
manage close event
"""
self.mdiArea.closeAllSubWindows()
if self.activeMdiChild():
event.ignore()
else:
self.writeSettings()
event.accept()
def writeSettings(self):
settings = QtCore.QSettings('PythonCAD', 'MDI Settings')
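        # sketch of the write-back counterpart to readSettings(): persist the
        # same keys it restores (maximized flag, size, position, window state)
        settings.beginGroup("CadWindow")
        settings.setValue("maximized", self.isMaximized())
        settings.setValue("size", self.size())
        settings.setValue("pos", self.pos())
        settings.endGroup()
        settings.beginGroup("CadWindowState")
        settings.setValue("State", self.saveState())
        settings.endGroup()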
def subWindowActivatedEvent(self):
"""
Sub windows activation
"""
if self.mdiArea.activeSubWindow():
if (self.mdiArea.activeSubWindow().document!=
self.__application.ActiveDocument):
self.resetCommand()
self.__application.ActiveDocument=self.mdiArea.activeSubWindow().document
self.updateMenus()
def resetCommand(self):
"""
        Reset the active command
"""
self.__cmd_intf.resetCommand()
if self.scene!=None:
self.scene.cancelCommand()
self.statusBar().showMessage("Ready")
# ################################# SET if ICON AND MENU are ENABLED
# ##########################################################
def updateMenus(self):
"""
update menu status
"""
hasMdiChild = (self.activeMdiChild() is not None)
#File
self.__cmd_intf.setVisible('import', hasMdiChild)
self.__cmd_intf.setVisible('saveas', hasMdiChild)
self.__cmd_intf.setVisible('close', hasMdiChild)
self.__cmd_intf.setVisible('print', hasMdiChild)
#Edit
self.__cmd_intf.setVisible('undo', hasMdiChild)
self.__cmd_intf.setVisible('redo', hasMdiChild)
self.__cmd_intf.setVisible('copy', hasMdiChild)
self.__cmd_intf.setVisible('move', hasMdiChild)
self.__cmd_intf.setVisible('delete', hasMdiChild)
self.__cmd_intf.setVisible('mirror', hasMdiChild)
self.__cmd_intf.setVisible('rotate', hasMdiChild)
self.__cmd_intf.setVisible('trim', hasMdiChild)
self.__cmd_intf.setVisible('property', hasMdiChild)
#Draw
self.__cmd_intf.setVisible('point', hasMdiChild)
self.__cmd_intf.setVisible('segment', hasMdiChild)
self.__cmd_intf.setVisible('rectangle', hasMdiChild)
self.__cmd_intf.setVisible('polyline', hasMdiChild)
self.__cmd_intf.setVisible('circle', hasMdiChild)
self.__cmd_intf.setVisible('arc', hasMdiChild)
self.__cmd_intf.setVisible('ellipse', hasMdiChild)
self.__cmd_intf.setVisible('polygon', hasMdiChild)
self.__cmd_intf.setVisible('fillet', hasMdiChild)
self.__cmd_intf.setVisible('chamfer', hasMdiChild)
self.__cmd_intf.setVisible('bisect', hasMdiChild)
self.__cmd_intf.setVisible('text', hasMdiChild)
#Dimension
self.__cmd_intf.setVisible('dimension', hasMdiChild)
#View
self.__cmd_intf.setVisible('fit', hasMdiChild)
self.__cmd_intf.setVisible('zoomwindow', hasMdiChild)
self.__cmd_intf.setVisible('zoomitem', hasMdiChild)
#snap
self.__cmd_intf.setVisible('snapauto', hasMdiChild)
self.__cmd_intf.setVisible('snapend', hasMdiChild)
self.__cmd_intf.setVisible('snapmid', hasMdiChild)
self.__cmd_intf.setVisible('snapcen', hasMdiChild)
self.__cmd_intf.setVisible('snapper', hasMdiChild)
self.__cmd_intf.setVisible('snaptan', False)
self.__cmd_intf.setVisible('snapqua', hasMdiChild)
self.__cmd_intf.setVisible('snap00', hasMdiChild)
self.__cmd_intf.setVisible('snapint', hasMdiChild)
#Tools
self.__cmd_intf.setVisible('info2p', hasMdiChild)
#window
self.__cmd_intf.setVisible('tile', hasMdiChild)
self.__cmd_intf.setVisible('cascade', hasMdiChild)
self.__cmd_intf.setVisible('next', hasMdiChild)
self.__cmd_intf.setVisible('previous', hasMdiChild)
#hasSelection = (self.activeMdiChild() is not None and
# self.activeMdiChild().textCursor().hasSelection())
#self.cutAct.setEnabled(hasSelection)
#self.copyAct.setEnabled(hasSelection)
#StatusBAR Satus Tools
self.forceDirectionStatus.setEnabled(hasMdiChild)
self.GridStatus.setEnabled(hasMdiChild)
self.SnapStatus.setEnabled(hasMdiChild)
def createMdiChild(self, file=None):
"""
Create new IDocument
"""
if file:
newDoc=self.__application.openDocument(file)
else:
newDoc=self.__application.newDocument()
for mdiwind in self.mdiArea.subWindowList():
if mdiwind._IDocument__document.dbPath==file:
child=mdiwind
break
else:
child = IDocument(newDoc,self.__cmd_intf, self)
self.mdiArea.addSubWindow(child)
#child.copyAvailable.connect(self.cutAct.setEnabled)
#child.copyAvailable.connect(self.copyAct.setEnabled)
return child
#def setAppDocActiveOnUi(self, doc):
# self.mdiArea.
def _registerCommands(self):
'''
Register all commands that are handed by this object
'''
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'new', '&New Drawing', self._onNewDrawing)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'open', '&Open Drawing...', self._onOpenDrawing)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'import', '&Import Drawing...', self._onImportDrawing)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'saveas', '&Save As...', self._onSaveAsDrawing)
#
# Create recentFile structure
#
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, '-')
i=0
for file in self.Application.getRecentFiles:
fileName=self.strippedName(file)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'file_'+str(i), fileName, self._onOpenRecent)
i+=1
#
# separator
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'close', '&Close', self._onCloseDrawing)
# separator
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'print', '&Print', self._onPrint)
# separator
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.File, 'quit', '&Quit', self.close)
# Edit
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Edit, 'undo', '&Undo', self._onUndo)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Edit, 'redo', '&Redo', self._onRedo)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Edit, 'property', '&Property', self._onProperty)
# separator
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Edit, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Edit, 'preferences', '&User Preferences', self.preferences)
#Modify
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Modify, 'copy', '&Copy', self._onCopy)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Modify, 'move', '&Move', self._onMove)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Modify, 'rotate', '&Rotate', self._onRotate)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Modify, 'mirror', '&Mirror', self._onMirror)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Modify, 'delete', '&Delete', self._onDelete)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Modify, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Modify, 'trim', '&Trim', self._onTrim)
# Draw
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'point', '&Point', self._onPoint)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'segment', '&Segment', self._onSegment)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'rectangle', '&Rectangle', self._onRectangle)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'polyline', '&Polyline', self._onPolyline)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'circle', '&Circle', self._onCircle)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'arc', '&Arc', self._onArc)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'ellipse', '&Ellipse', self._onEllipse)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'polygon', '&Polygon', self._onPolygon)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'fillet', '&Fillet', self._onFillet)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'chamfer', '&Chamfer', self._onChamfer)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'bisect', '&Bisect', self._onBisect)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Draw, 'text', '&Text', self._onText)
# Dimension
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Dimension, 'dimension', '&Aligned Dimension', self._onDimension)
# View
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.View, 'fit', '&Fit', self._onFit)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.View, 'zoomwindow', 'Zoom&Window', self._onZoomWindow)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.View, 'zoomitem', 'Zoom&Item',self._onCenterItem)
# Snap
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snapauto', 'Automatic Snap', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snapend', 'End', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snapmid', 'Middle', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snapint', 'Intersection', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snapper', 'Perpendicular', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snapcen', 'Center', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snapqua', 'Quadrant', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snaptan', 'Tangent', self._onSnapCommand)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, '-')
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Snap, 'snap00', 'Origin', self._onSnapCommand)
#Tools
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Tools, 'info2p', 'Info Two Points', self._onInfo2p)
#--menu: Windows
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Windows, 'tile', '&Tile', self._onTile)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Windows, 'cascade', '&Cascade', self._onCascade)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Windows, 'next', 'Ne&xt', self.mdiArea.activateNextSubWindow)
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Windows, 'previous', 'Pre&vious', self.mdiArea.activatePreviousSubWindow)
# Help
self.__cmd_intf.registerCommand(self.__cmd_intf.Category.Help, 'about', '&About PythonCAD', self._onAbout)
return
def updateRecentFileList(self):
"""
update the menu recent file list
"""
i=0
for file in self.Application.getRecentFiles:
fileName=self.strippedName(file)
self.__cmd_intf.updateText('file_'+str(i), fileName)
i+=1
def strippedName(self, fullFileName):
"""
get only the name of the filePath
"""
        # correctly obtain the stripped file name
return QtCore.QFileInfo(fullFileName).fileName()
# ########################################## ON COMMANDS
# ##########################################################
def _onNewDrawing(self):
'''
Create a new drawing
'''
child = self.createMdiChild()
child.show()
self.updateRecentFileList()
return
def _onOpenDrawing(self):
'''
Open an existing drawing PDR or DXF
'''
# ask the user to select an existing drawing
        # this opens the file dialog correctly (behaviour differs from PyQt4)
drawing = str(QtWidgets.QFileDialog.getOpenFileName(parent=self,directory=self.lastDirectory, caption ="Open Drawing", filter ="Drawings (*.pdr *.dxf)")[0])
# open a document and load the drawing
#print("drawing is:", drawing)
if len(drawing)>0:
self.lastDirectory=os.path.split(drawing)[0]
(name, extension)=os.path.splitext(drawing)
if extension.upper()=='.DXF':
child = self.createMdiChild()
child.importExternalFormat(drawing)
elif extension.upper()=='.PDR':
child = self.createMdiChild(drawing)
else:
self.critical("Wrong command selected")
return
child.show()
self.updateRecentFileList()
self.view.fit()
return
def _onImportDrawing(self):
'''
        Import an existing drawing into the current drawing (some issues with PyQt 4.7)
'''
drawing = QtWidgets.QFileDialog.getOpenFileName(parent=self, caption="Import Drawing", directory=self.lastDirectory, filter="Dxf (*.dxf)")[0]
# open a document and load the drawing
if len(drawing)>0:
self.lastDirectory=os.path.split(drawing)[0]
self.mdiArea.activeSubWindow().importExternalFormat(drawing)
return
def _onOpenRecent(self):
"""
on open recent file
"""
        #FIXME: if we type file_1 or file_2 on the command line we get an error:
        #the sender is an edit command, not an action, so it has no 'command'
        #attribute and carries an empty value
action = self.sender()
if action:
spool, index=action.command.split('_')
fileName=self.Application.getRecentFiles[int(index)]
if len(fileName)>0:
child = self.createMdiChild(fileName)
child.show()
self.updateRecentFileList()
self.view.fit()
return
def _onSaveAsDrawing(self):
drawing = QtWidgets.QFileDialog.getSaveFileName(self, "Save As...", "/home", filter ="Drawings (*.pdr *.dxf)")[0]
if len(drawing)>0:
self.__application.saveAs(drawing)
def _onPrint(self):
# printer.setPaperSize(QPrinter.A4);
self.scene.clearSelection()
printer=QtPrintSupport.QPrinter()
printDialog=QtPrintSupport.QPrintDialog(printer)
if (printDialog.exec_() == QtWidgets.QDialog.Accepted):
painter=QtGui.QPainter()
painter.begin(printer)
painter.setRenderHint(QtGui.QPainter.Antialiasing);
#self.mdiArea.activeSubWindow().scene.render(painter)
self.mdiArea.activeSubWindow().view.render(painter)
painter.end()
self.statusBar().showMessage("Ready")
return
def _onCloseDrawing(self):
path=self.mdiArea.activeSubWindow().fileName
self.__application.closeDocument(path)
self.mdiArea.closeActiveSubWindow()
return
#---------------------ON COMMANDS in DRAW
def _onPoint(self):
self.statusBar().showMessage("CMD:Point")
self.callCommand('POINT')
return
def _onSegment(self):
self.statusBar().showMessage("CMD:Segment")
self.callCommand('SEGMENT')
return
def _onCircle(self):
self.statusBar().showMessage("CMD:Circle")
self.callCommand('CIRCLE')
return
def _onArc(self):
self.statusBar().showMessage("CMD:Arc")
self.callCommand('ARC')
return
def _onEllipse(self):
self.statusBar().showMessage("CMD:Ellipse")
self.callCommand('ELLIPSE')
return
def _onRectangle(self):
self.statusBar().showMessage("CMD:Rectangle")
self.callCommand('RECTANGLE')
return
def _onPolygon(self):
self.statusBar().showMessage("CMD:Polygon")
self.callCommand('POLYGON')
return
def _onPolyline(self):
self.statusBar().showMessage("CMD:Polyline")
self.callCommand('POLYLINE')
return
def _onFillet(self):
self.statusBar().showMessage("CMD:Fillet")
self.callCommand('FILLET')
return
def _onChamfer(self):
self.statusBar().showMessage("CMD:Chamfer")
self.callCommand('CHAMFER')
return
def _onBisect(self):
self.statusBar().showMessage("CMD:Bisect")
self.callCommand('BISECTOR')
return
def _onText(self):
self.statusBar().showMessage("CMD:Text")
self.callCommand('TEXT')
return
def _onDimension(self):
self.statusBar().showMessage("CMD:Dimension")
self.callCommand('DIMENSION')
return
#-------------------------ON COMMANDS in EDIT
def _onUndo(self):
self.scene.clearSelection()
try:
self.mdiArea.activeSubWindow().unDo()
except UndoDbExc:
self.critical("Unable To Perform Undo")
self.statusBar().showMessage("Ready")
return
def _onRedo(self):
self.scene.clearSelection()
try:
self.mdiArea.activeSubWindow().reDo()
except UndoDbExc:
self.critical("Unable To Perform Redo")
self.statusBar().showMessage("Ready")
def _onProperty(self):
self.statusBar().showMessage("CMD:Property")
self.callCommand('PROPERTY')
def preferences(self):
p=Preferences(self)
#TODO: Fill up preferences
if (p.exec_() == QtWidgets.QDialog.Accepted):
#TODO: save Preferences
pass
#---------------------------ON COMMANDS in MODIFY
def _onCopy(self):
self.statusBar().showMessage("CMD:Copy")
self.callCommand('COPY')
return
def _onMove(self):
self.statusBar().showMessage("CMD:Move")
self.callCommand('MOVE')
return
def _onDelete(self):
self.statusBar().showMessage("CMD:Delete")
self.callCommand('DELETE')
self.statusBar().showMessage("Ready")
return
def _onTrim(self):
self.statusBar().showMessage("CMD:Trim")
self.callCommand('TRIM')
self.statusBar().showMessage("Ready")
return
def _onMirror(self):
self.statusBar().showMessage("CMD:Mirror")
self.callCommand('MIRROR')
return
def _onRotate(self):
self.statusBar().showMessage("CMD:Rotate")
self.callCommand('ROTATE')
return
#---------------------------ON COMMANDS in VIEW
def _onFit(self):
self.view.fit()
def _onZoomWindow(self):
self.statusBar().showMessage("CMD:ZoomWindow")
self.scene._cmdZoomWindow=True
def _onCenterItem(self):
self.view.centerOnSelection()
#---------------------------ON COMMANDS in SNAP
def _onSnapCommand(self):
"""
        On snap command action
"""
#__________SNAP NONE?
self.scene.clearSelection()
action = self.sender()
if action:
if action.command=="snapauto":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["LIST"])
elif action.command=="snapend":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["END"])
elif action.command=="snapmid":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["MID"])
elif action.command=="snapcen":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["CENTER"])
elif action.command=="snapper":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["ORTHO"])
elif action.command=="snaptan":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["TANGENT"])
elif action.command=="snapqua":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["QUADRANT"])
elif action.command=="snap00":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["ORIG"])
elif action.command=="snapint":
self.scene.setActiveSnap(SNAP_POINT_ARRAY["INTERSECTION"])
else:
self.scene.setActiveSnap(SNAP_POINT_ARRAY["LIST"])
#------------------------ON COMMANDS in TOOLS
def _onInfo2p(self):
"""
on info two point command
"""
self.scene.clearSelection()
self.statusBar().showMessage("CMD:Info2Points")
self.callCommand('DISTANCE2POINT', 'document')
return
#-- - - -=- - - - -=on commands in menu: Windows
def _onTile(self): #<-(by: S-PM 110524)
"on Tile command" #standard "Documentation String"
self.mdiArea.tileSubWindows() #--call "tile" method
return
#_onTile>
def _onCascade(self): #<-(by: S-PM 110524)
"on Cascade command" #standard "Documentation String"
#-- - - -=- - - - -=- - - - -=- - - - -=- - - - -=- - - - -=- - - - -=- - - - -=
# Prologue
def cascadeFit(self): #<-"Fit" function definition
#--Register
rgL=[] #List
rgN=0 #Integer
rgGp=None #General purpose
#--Action
rgL=self.mdiArea.subWindowList() #--get child-List,
rgN=len(rgL) # and child-count
if (rgN<1): return(rgN) #no sub-window: exit
rgW = self.mdiArea.width() #--get actually available room
rgH = self.mdiArea.height()
#--compute cascade offset dimensions
if (rgN<2): #<-less than 2 sub-windows: no offset
rgCx=0
rgCy=0
elif (rgN>1): #<-more than 1 sub-window: get offset
rgCx=rgL[1].x()
rgCy=rgL[1].y()
#>
rgCx=rgCx*(rgN-1) #compute total cascade offset
rgCy=rgCy*(rgN-1)
#<-loop resize all sub-windows, so to fit them into available room
for rgGp in rgL:
rgGp.resize(rgW-rgCx, rgH-rgCy)
#>
return(rgN)
#cascadeFit>
#-- - - -=- - - - -=- - - - -=- - - - -=- - - - -=- - - - -=- - - - -=- - - - -=
# Action
#--call "cascade" method, as-is (rudimentary)
self.mdiArea.cascadeSubWindows()
rgN=cascadeFit(self) #"fit" all sub-windows into available room
return
#_onCascade>
#-----------------------ON COMMANDS in ABOUT
def _onAbout(self):
QtWidgets.QMessageBox.about(self, "About PythonCAD",
"""<b>PythonCAD</b> is a CAD package written, surprisingly enough, in Python using the PyQt5 interface.<p>
The PythonCAD project aims to produce a scriptable, open-source,
easy to use CAD package for any Python/PyQt supported Platforms
<p>
        This is an Alpha Release for the new R38 Version <b>(R38.0.0.5)</b><p>
<p>
<a href="http://sourceforge.net/projects/pythoncad/">PythonCAD Web Site On Sourceforge</a>
<p>
<a href="http://pythoncad.sourceforge.net/dokuwiki/doku.php">PythonCAD Wiki Page</a>
<p>
        Ported to Python 3 and PyQt5 by KMOL
""")
return
# ########################################## CALL COMMAND
# ##########################################################
def callCommand(self, commandName, commandFrom=None):
"""
call a document command (kernel)
"""
try:
if commandFrom==None or commandFrom=='kernel':
self.scene.activeKernelCommand=self.__application.getCommand(commandName)
elif commandFrom=='document':
self.scene.activeKernelCommand=self.getCommand(commandName)
else:
return
self.scene.activeICommand=ICommand(self.scene)
self.scene.activeICommand.updateInput+=self.updateInput
self.updateInput(self.scene.activeKernelCommand.activeMessage)
except EntityMissing:
self.scene.cancelCommand()
self.critical("You need to have an active document to perform this command")
#checks if scene has selected items and launches them directly to the ICommand
#if it's first prompt it's "give me entities"
if len(self.scene.selectedItems())>0:
if self.scene.activeKernelCommand.activeException()==ExcMultiEntity:
qtItems=[item for item in self.scene.selectedItems() if isinstance(item, BaseEntity)]
self.scene.activeICommand.addMauseEvent(point=None,
entity=qtItems,
force=None)
else:
self.scene.clearSelection()
def getCommand(self, name):
"""
get an interface command
"""
if name in INTERFACE_COMMAND:
return INTERFACE_COMMAND[name](self.mdiArea.activeSubWindow().document,
self.mdiArea.activeSubWindow())
else:
self.critical("Wrong command")
def updateInput(self, message):
self.__cmd_intf.commandLine.printMsg(str(message))
self.statusBar().showMessage(str(message))
@staticmethod
def critical(text):
'''
        Shows a critical message dialog
'''
dlg = QtWidgets.QMessageBox()
dlg.setText(text)
dlg.setIcon(QtWidgets.QMessageBox.Critical)
dlg.exec_()
return
# ########################################## SETTINGS STORAGE
# ##########################################################
def readSettings(self):
settings = QtCore.QSettings('PythonCAD', 'MDI Settings')
settings.beginGroup("CadWindow")
max = settings.value("maximized", False)
# settings.setValue("size", QtCore.QSize(800, 600))
# settings.setValue("pos", QtCore.QPoint(400, 300))
if max == True: # if cadwindow was maximized set it maximized again
self.showMaximized()
else: # else set it to the previous position and size
try:
                # PyQt5's QSettings.value() returns native objects, so no toSize()/toPoint() conversion is needed
                self.resize(settings.value("size", QtCore.QSize(800, 600)))
                self.move(settings.value("pos", QtCore.QPoint(400, 300)))
except:
print("Warning: unable to set the previews values")
settings.endGroup()
settings.beginGroup("CadWindowState")
try:
            self.restoreState(settings.value('State'))
except:
print("Warning: Unable to set state")
settings.endGroup()
# ########################################## END SETTINGS STORAGE
# ##########################################################
def activeMdiChild(self):
activeSubWindow = self.mdiArea.activeSubWindow()
if activeSubWindow:
return activeSubWindow.widget()
return None
def switchLayoutDirection(self):
if self.layoutDirection() == QtCore.Qt.LeftToRight:
QtWidgets.QApplication.setLayoutDirection(QtCore.Qt.RightToLeft)
else:
QtWidgets.QApplication.setLayoutDirection(QtCore.Qt.LeftToRight)
def setActiveSubWindow(self, window):
if window:
self.mdiArea.setActiveSubWindow(window)
def _getIcon(self, cmd):
'''
Create an QIcon object based on the command name.
The name of the icon is ':/images/' + cmd + '.png'.
If the cmd = 'Open', the name of the icon is ':/images/Open.png'.
'''
icon_name = cmd + '.png'
icon_path = os.path.join(os.path.join(os.getcwd(), 'icons'), icon_name)
# check if icon exist
if os.path.exists(icon_path):
icon = QtGui.QIcon(icon_path)
return icon
# icon not found, don't use an icon, return None
return None
def keyPressEvent(self, event):
if event.key()==QtCore.Qt.Key_Escape:
self.resetCommand()
super(CadWindowMdi, self).keyPressEvent(event)
# ########################################## SYMPY INTEGRATION
# ##########################################################
def plotFromSympy(self, objects):
"""
plot the sympy Object into PythonCAD
"""
if self.mdiArea.currentSubWindow()==None:
self._onNewDrawing()
for obj in objects:
self.plotSympyEntity(obj)
def plotSympyEntity(self, sympyEntity):
"""
plot the sympy entity
"""
self.mdiArea.currentSubWindow().document.saveSympyEnt(sympyEntity)
def createSympyDocument(self):
"""
create a new document to be used by sympy plugin
"""
self._onNewDrawing()
def getSympyObject(self):
"""
get an array of sympy object
"""
#if self.Application.ActiveDocument==None:
if self.mdiArea.currentSubWindow()==None:
raise StructuralError("unable to get the active document")
ents=self.mdiArea.currentSubWindow().scene.getAllBaseEntity()
return [ents[ent].geoItem.getSympy() for ent in ents if ent!=None]
# ########################################## CLASS STATUSBUTTON
# ##########################################################
# ##########################################################
# ##########################################################
class statusButton(QtWidgets.QToolButton):
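    # small checkable tool button used on the status bar (ortho, snap, grid);
    # a left click toggles it, a right click pops up its menu (see mousePressEvent)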
def __init__(self, icon=None, tooltip=None):
super(statusButton, self).__init__()
self.setCheckable(True)
self.setFixedSize(30, 20)
if icon:
self.getIcon(icon)
self.setToolTip(tooltip)
def getIcon(self, fileName):
iconpath=os.path.join(os.getcwd(), 'icons', fileName)
self.setIcon(QtGui.QIcon(iconpath))
def mousePressEvent(self, event):
if event.button()==QtCore.Qt.LeftButton:
self.click()
elif event.button()==QtCore.Qt.RightButton:
self.showMenu()
| gpl-2.0 |
alexmorozov/django | tests/prefetch_related/models.py | 255 | 7972 | import uuid
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Basic tests
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=50, unique=True)
first_book = models.ForeignKey('Book', models.CASCADE, related_name='first_time_authors')
favorite_authors = models.ManyToManyField(
'self', through='FavoriteAuthors', symmetrical=False, related_name='favors_me')
def __str__(self):
return self.name
class Meta:
ordering = ['id']
class AuthorWithAge(Author):
author = models.OneToOneField(Author, models.CASCADE, parent_link=True)
age = models.IntegerField()
class FavoriteAuthors(models.Model):
author = models.ForeignKey(Author, models.CASCADE, to_field='name', related_name='i_like')
likes_author = models.ForeignKey(Author, models.CASCADE, to_field='name', related_name='likes_me')
class Meta:
ordering = ['id']
@python_2_unicode_compatible
class AuthorAddress(models.Model):
author = models.ForeignKey(Author, models.CASCADE, to_field='name', related_name='addresses')
address = models.TextField()
class Meta:
ordering = ['id']
def __str__(self):
return self.address
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=255)
authors = models.ManyToManyField(Author, related_name='books')
def __str__(self):
return self.title
class Meta:
ordering = ['id']
class BookWithYear(Book):
book = models.OneToOneField(Book, models.CASCADE, parent_link=True)
published_year = models.IntegerField()
aged_authors = models.ManyToManyField(
AuthorWithAge, related_name='books_with_year')
class Bio(models.Model):
author = models.OneToOneField(Author, models.CASCADE)
books = models.ManyToManyField(Book, blank=True)
@python_2_unicode_compatible
class Reader(models.Model):
name = models.CharField(max_length=50)
books_read = models.ManyToManyField(Book, related_name='read_by')
def __str__(self):
return self.name
class Meta:
ordering = ['id']
class BookReview(models.Model):
book = models.ForeignKey(BookWithYear, models.CASCADE)
notes = models.TextField(null=True, blank=True)
# Models for default manager tests
class Qualification(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['id']
class TeacherManager(models.Manager):
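    # default manager for the default-manager tests: every queryset it returns
    # prefetches the teacher's qualifications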
def get_queryset(self):
return super(TeacherManager, self).get_queryset().prefetch_related('qualifications')
@python_2_unicode_compatible
class Teacher(models.Model):
name = models.CharField(max_length=50)
qualifications = models.ManyToManyField(Qualification)
objects = TeacherManager()
def __str__(self):
return "%s (%s)" % (self.name, ", ".join(q.name for q in self.qualifications.all()))
class Meta:
ordering = ['id']
class Department(models.Model):
name = models.CharField(max_length=50)
teachers = models.ManyToManyField(Teacher)
class Meta:
ordering = ['id']
# GenericRelation/GenericForeignKey tests
@python_2_unicode_compatible
class TaggedItem(models.Model):
tag = models.SlugField()
content_type = models.ForeignKey(
ContentType,
models.CASCADE,
related_name="taggeditem_set2",
)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
created_by_ct = models.ForeignKey(
ContentType,
models.SET_NULL,
null=True,
related_name='taggeditem_set3',
)
created_by_fkey = models.PositiveIntegerField(null=True)
created_by = GenericForeignKey('created_by_ct', 'created_by_fkey',)
favorite_ct = models.ForeignKey(
ContentType,
models.SET_NULL,
null=True,
related_name='taggeditem_set4',
)
favorite_fkey = models.CharField(max_length=64, null=True)
favorite = GenericForeignKey('favorite_ct', 'favorite_fkey')
def __str__(self):
return self.tag
class Meta:
ordering = ['id']
class Bookmark(models.Model):
url = models.URLField()
tags = GenericRelation(TaggedItem, related_query_name='bookmarks')
favorite_tags = GenericRelation(TaggedItem,
content_type_field='favorite_ct',
object_id_field='favorite_fkey',
related_query_name='favorite_bookmarks')
class Meta:
ordering = ['id']
class Comment(models.Model):
comment = models.TextField()
# Content-object field
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_pk = models.TextField()
content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")
class Meta:
ordering = ['id']
# Models for lookup ordering tests
class House(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=255)
owner = models.ForeignKey('Person', models.SET_NULL, null=True)
main_room = models.OneToOneField('Room', models.SET_NULL, related_name='main_room_of', null=True)
class Meta:
ordering = ['id']
class Room(models.Model):
name = models.CharField(max_length=50)
house = models.ForeignKey(House, models.CASCADE, related_name='rooms')
class Meta:
ordering = ['id']
class Person(models.Model):
name = models.CharField(max_length=50)
houses = models.ManyToManyField(House, related_name='occupants')
@property
def primary_house(self):
# Assume business logic forces every person to have at least one house.
return sorted(self.houses.all(), key=lambda house: -house.rooms.count())[0]
@property
def all_houses(self):
return list(self.houses.all())
class Meta:
ordering = ['id']
# Models for nullable FK tests
@python_2_unicode_compatible
class Employee(models.Model):
name = models.CharField(max_length=50)
boss = models.ForeignKey('self', models.SET_NULL, null=True, related_name='serfs')
def __str__(self):
return self.name
class Meta:
ordering = ['id']
# Ticket #19607
@python_2_unicode_compatible
class LessonEntry(models.Model):
name1 = models.CharField(max_length=200)
name2 = models.CharField(max_length=200)
def __str__(self):
return "%s %s" % (self.name1, self.name2)
@python_2_unicode_compatible
class WordEntry(models.Model):
lesson_entry = models.ForeignKey(LessonEntry, models.CASCADE)
name = models.CharField(max_length=200)
def __str__(self):
return "%s (%s)" % (self.name, self.id)
# Ticket #21410: Regression when related_name="+"
@python_2_unicode_compatible
class Author2(models.Model):
name = models.CharField(max_length=50, unique=True)
first_book = models.ForeignKey('Book', models.CASCADE, related_name='first_time_authors+')
favorite_books = models.ManyToManyField('Book', related_name='+')
def __str__(self):
return self.name
class Meta:
ordering = ['id']
# Models for many-to-many with UUID pk test:
class Pet(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=20)
people = models.ManyToManyField(Person, related_name='pets')
class Flea(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
current_room = models.ForeignKey(Room, models.SET_NULL, related_name='fleas', null=True)
pets_visited = models.ManyToManyField(Pet, related_name='fleas_hosted')
people_visited = models.ManyToManyField(Person, related_name='fleas_hosted')
| bsd-3-clause |
zlfben/gem5 | src/mem/ruby/system/RubySystem.py | 13 | 2490 | # Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from ClockedObject import ClockedObject
from SimpleMemory import *
class RubySystem(ClockedObject):
type = 'RubySystem'
cxx_header = "mem/ruby/system/System.hh"
random_seed = Param.Int(1234, "random seed used by the simulation");
randomization = Param.Bool(False,
"insert random delays on message enqueue times");
block_size_bytes = Param.UInt32(64,
"default cache block size; must be a power of two");
memory_size_bits = Param.UInt32(64,
"number of bits that a memory address requires");
# Profiler related configuration variables
hot_lines = Param.Bool(False, "")
all_instructions = Param.Bool(False, "")
num_of_sequencers = Param.Int("")
phys_mem = Param.SimpleMemory(NULL, "")
access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
store and only use ruby for timing.")
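# Hedged configuration sketch (not part of this file; the surrounding object
# names are illustrative): a gem5 config script would typically instantiate
# and tune these parameters like so:
# system.ruby = RubySystem(random_seed=42, block_size_bytes=64,
# memory_size_bits=48, randomization=False)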
| bsd-3-clause |
bbiiggppiigg/NTHUOJ_web | team/models.py | 3 | 2090 | '''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.db import models
from datetime import date
from users.models import User
# Create your models here.
class Team(models.Model):
team_name = models.CharField(max_length=15, default='', unique=True)
leader = models.ForeignKey(User)
description = models.TextField(blank=True)
note = models.TextField(blank=True)
creation_time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.team_name
class TeamMember(models.Model):
team = models.ForeignKey(Team)
member = models.ForeignKey(User)
VALID = 'VALID'
INVITED = 'INVITED'
APPLY = 'APPLY'
MEMBER_STATUS_CHOICE = (
(VALID, 'Valid'), (INVITED, 'Invited'), (APPLY, 'Apply'),
)
status = models.CharField(max_length=7, choices=MEMBER_STATUS_CHOICE, default='')
class Meta:
unique_together = (('team', 'member'),)
def __unicode__(self):
return self.member.username + ' in ' + self.team.team_name
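# Hedged usage sketch (assumes saved Team and User instances; helper name is
# illustrative): a pending invitation is just a TeamMember row created in
# INVITED state and flipped to VALID once the user accepts, e.g.
# member, created = TeamMember.objects.get_or_create(
#     team=team, member=user, defaults={'status': TeamMember.INVITED})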
| mit |
BackupTheBerlios/pixies-svn | pixies/reportlab/lib/logger.py | 1 | 1712 | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/logger.py
__version__=''' $Id$ '''
from sys import stderr
from types import StringType
class Logger:
'''
An extended file type thing initially equivalent to sys.stderr
You can add/remove file type things; it has a write method
'''
def __init__(self):
self._fps = [stderr]
self._fns = {}
def add(self,fp):
'''add the file/string fp to the destinations'''
if type(fp) is StringType:
if fp in self._fns: return
fn = fp
fp = open(fn,'wb')
self._fns[fn] = fp
self._fps.append(fp)
def remove(self,fp):
'''remove the file/string fp from the destinations'''
if type(fp) is StringType:
if fp not in self._fns: return
fn = fp
fp = self._fns[fn]
del self._fns[fn]
if fp in self._fps:
del self._fps[self._fps.index(fp)]
def write(self,text):
'''write text to all the destinations'''
if text[-1]!='\n': text=text+'\n'
map(lambda fp,t=text: fp.write(t),self._fps)
def __call__(self,text):
self.write(text)
logger=Logger()
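# Hedged usage sketch: writes through the module-level logger fan out to every
# registered destination; a filename passed to add() is opened once, cached in
# _fns, and reused on later calls.
# logger.add('build.log') # writes now go to stderr and build.log
# logger('rendering page 1') # __call__ delegates to write() and appends '\n'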
class WarnOnce:
def __init__(self,kind='Warn'):
self.uttered = {}
self.pfx = '%s: '%kind
self.enabled = 1
def once(self,warning):
if not self.uttered.has_key(warning):
if self.enabled: logger.write(self.pfx + warning)
self.uttered[warning] = 1
def __call__(self,warning):
self.once(warning)
warnOnce=WarnOnce()
infoOnce=WarnOnce('Info') | gpl-2.0 |
cindyyu/kuma | kuma/attachments/utils.py | 10 | 3728 | import calendar
from datetime import datetime
import hashlib
from django.conf import settings
from django.core.files import temp as tempfile
from django.template import loader
from django.utils import timezone
from django.utils.http import http_date
from django.utils.safestring import mark_safe
from kuma.core.urlresolvers import reverse
def full_attachment_url(attachment_id, filename):
path = reverse('attachments.raw_file', kwargs={
'attachment_id': attachment_id,
'filename': filename,
})
return '%s%s%s' % (settings.PROTOCOL, settings.ATTACHMENT_HOST, path)
def convert_to_http_date(dt):
"""
Given a timezone-naive or timezone-aware datetime, return the HTTP-date
formatted string to be used in HTTP response headers.
"""
# first check if the given dt is timezone aware and if not make it aware
if timezone.is_naive(dt):
default_timezone = timezone.get_default_timezone()
dt = timezone.make_aware(dt, default_timezone)
# then convert the datetime to UTC (which epoch time is based on)
utc_dt = dt.astimezone(timezone.utc)
# convert the UTC time to the seconds since the epoch
epoch_dt = calendar.timegm(utc_dt.utctimetuple())
# format it as an RFC 1123 datetime
return http_date(epoch_dt)
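# Hedged worked example (illustrative values): a naive datetime is first made
# aware in the server's default timezone, then rendered per RFC 1123, e.g.
# convert_to_http_date(datetime(2015, 6, 1, 12, 0)) would return
# 'Mon, 01 Jun 2015 12:00:00 GMT' when the default timezone is UTC.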
def attachment_upload_to(instance, filename):
"""Generate a path to store a file attachment."""
# TODO: We could probably just get away with strftime formatting
# in the 'upload_to' argument here, but this does a bit more to be
# extra-safe with potential duplicate filenames.
#
# For now, the filesystem storage path will look like this:
#
# attachments/year/month/day/attachment_id/md5/filename
#
# The md5 hash here is of the full timestamp, down to the
# microsecond, of when the path is generated.
now = datetime.now()
return "attachments/%(date)s/%(id)s/%(md5)s/%(filename)s" % {
'date': now.strftime('%Y/%m/%d'),
'id': instance.attachment.id,
'md5': hashlib.md5(str(now)).hexdigest(),
'filename': filename
}
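# Hedged example (illustrative id and hash): for attachment id 42 uploaded on
# 2015-06-01, the generated storage path looks like
# 'attachments/2015/06/01/42/<md5-of-timestamp>/report.pdf'.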
def attachments_json(attachments):
"""
Given a list of Attachments (e.g., from a Document), make some
nice JSON out of them for easy display.
"""
attachments_list = []
for attachment in attachments:
obj = {
'title': attachment.title,
'date': str(attachment.current_revision.created),
'description': attachment.current_revision.description,
'url': attachment.get_file_url(),
'size': 0,
'creator': attachment.current_revision.creator.username,
'creator_url': attachment.current_revision.creator.get_absolute_url(),
'revision': attachment.current_revision.id,
'id': attachment.id,
'mime': attachment.current_revision.mime_type
}
# Adding this to prevent "UnicodeEncodeError" for certain media
try:
obj['size'] = attachment.current_revision.file.size
except:
pass
obj['html'] = mark_safe(loader.render_to_string('attachments/includes/attachment_row.html',
{'attachment': obj}))
attachments_list.append(obj)
return attachments_list
def make_test_file(content=None):
"""Create a fake file for testing purposes."""
if content is None:
content = 'I am a test file for upload.'
# Shamelessly stolen from Django's own file-upload tests.
tdir = tempfile.gettempdir()
file_for_upload = tempfile.NamedTemporaryFile(suffix=".txt", dir=tdir)
file_for_upload.write(content)
file_for_upload.seek(0)
return file_for_upload
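# Hedged usage sketch ('client' and 'url' are assumptions from a test case):
# upload = make_test_file('some content')
# client.post(url, {'file': upload}) # hand the NamedTemporaryFile to the view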
| mpl-2.0 |
klunwebale/odoo | addons/account_voucher/account_voucher.py | 132 | 85482 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.tools import float_compare
from openerp.report import report_sxw
import openerp
class res_currency(osv.osv):
_inherit = "res.currency"
def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
if context is None:
context = {}
res = super(res_currency, self)._get_current_rate(cr, uid, ids, raise_on_no_rate, context=context)
if context.get('voucher_special_currency') in ids and context.get('voucher_special_currency_rate'):
res[context.get('voucher_special_currency')] = context.get('voucher_special_currency_rate')
return res
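# Hedged usage note: the voucher code below puts 'voucher_special_currency'
# and 'voucher_special_currency_rate' into the context so that, for that one
# currency id, the manually entered payment rate overrides the stored rate:
# ctx = {'voucher_special_currency': eur_id, # illustrative values
# 'voucher_special_currency_rate': 1.10}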
class account_voucher(osv.osv):
def _check_paid(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = any([(line.account_id.type in ('receivable', 'payable') and line.reconcile_id) for line in voucher.move_ids])
return res
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', False)
def _get_period(self, cr, uid, context=None):
if context is None: context = {}
if context.get('period_id', False):
return context.get('period_id')
periods = self.pool.get('account.period').find(cr, uid, context=context)
return periods and periods[0] or False
def _make_journal_search(self, cr, uid, ttype, context=None):
journal_pool = self.pool.get('account.journal')
return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
def _get_journal(self, cr, uid, context=None):
if context is None: context = {}
invoice_pool = self.pool.get('account.invoice')
journal_pool = self.pool.get('account.journal')
if context.get('invoice_id', False):
invoice = invoice_pool.browse(cr, uid, context['invoice_id'], context=context)
journal_id = journal_pool.search(cr, uid, [
('currency', '=', invoice.currency_id.id), ('company_id', '=', invoice.company_id.id)
], limit=1, context=context)
return journal_id and journal_id[0] or False
if context.get('journal_id', False):
return context.get('journal_id')
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
return context.get('search_default_journal_id')
ttype = context.get('type', 'bank')
if ttype in ('payment', 'receipt'):
ttype = 'bank'
res = self._make_journal_search(cr, uid, ttype, context=context)
return res and res[0] or False
def _get_tax(self, cr, uid, context=None):
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if not journal_id:
ttype = context.get('type', 'bank')
res = journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
if not res:
return False
journal_id = res[0]
if not journal_id:
return False
journal = journal_pool.browse(cr, uid, journal_id, context=context)
account_id = journal.default_credit_account_id or journal.default_debit_account_id
if account_id and account_id.tax_ids:
tax_id = account_id.tax_ids[0].id
return tax_id
return False
def _get_payment_rate_currency(self, cr, uid, context=None):
"""
Return the default value for field payment_rate_currency_id: the currency of the journal
if there is one, otherwise the currency of the user's company
"""
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
#no journal given in the context, use company currency as default
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_currency(self, cr, uid, context=None):
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
if isinstance(journal_id, (list, tuple)):
# sometimes journal_id is a pair (id, display_name)
journal_id = journal_id[0]
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_partner(self, cr, uid, context=None):
if context is None: context = {}
return context.get('partner_id', False)
def _get_reference(self, cr, uid, context=None):
if context is None: context = {}
return context.get('reference', False)
def _get_narration(self, cr, uid, context=None):
if context is None: context = {}
return context.get('narration', False)
def _get_amount(self, cr, uid, context=None):
if context is None:
context= {}
return context.get('amount', 0.0)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if context is None: context = {}
return [(r['id'], (r['number'] or _('Voucher'))) for r in self.read(cr, uid, ids, ['number'], context, load='_classic_write')]
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
mod_obj = self.pool.get('ir.model.data')
if context is None: context = {}
if view_type == 'form':
if not view_id and context.get('invoice_type'):
if context.get('invoice_type') in ('out_invoice', 'out_refund'):
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
else:
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
result = result and result[1] or False
view_id = result
if not view_id and context.get('line_type'):
if context.get('line_type') == 'customer':
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
else:
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
result = result and result[1] or False
view_id = result
res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
doc = etree.XML(res['arch'])
if context.get('type', 'sale') in ('purchase', 'payment'):
nodes = doc.xpath("//field[@name='partner_id']")
for node in nodes:
node.set('context', "{'default_customer': 0, 'search_default_supplier': 1, 'default_supplier': 1}")
if context.get('invoice_type','') in ('in_invoice', 'in_refund'):
node.set('string', _("Supplier"))
res['arch'] = etree.tostring(doc)
return res
def _compute_writeoff_amount(self, cr, uid, line_dr_ids, line_cr_ids, amount, type):
debit = credit = 0.0
sign = type == 'payment' and -1 or 1
for l in line_dr_ids:
if isinstance(l, dict):
debit += l['amount']
for l in line_cr_ids:
if isinstance(l, dict):
credit += l['amount']
return amount - sign * (credit - debit)
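# Hedged worked example: for a receipt (sign=1) of amount 100.0 with credit
# lines allocating 40.0 + 30.0 and no debit lines, the result is
# 100.0 - 1 * (70.0 - 0.0) = 30.0, i.e. 30.0 left open or to be written off.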
def onchange_line_ids(self, cr, uid, ids, line_dr_ids, line_cr_ids, amount, voucher_currency, type, context=None):
context = context or {}
if not line_dr_ids and not line_cr_ids:
return {'value':{'writeoff_amount': 0.0}}
# resolve lists of commands into lists of dicts
line_dr_ids = self.resolve_2many_commands(cr, uid, 'line_dr_ids', line_dr_ids, ['amount'], context)
line_cr_ids = self.resolve_2many_commands(cr, uid, 'line_cr_ids', line_cr_ids, ['amount'], context)
#compute the field is_multi_currency that is used to hide/display options linked to secondary currency on the voucher
is_multi_currency = False
#loop on the voucher lines to see if one of these has a secondary currency. If yes, we need to see the options
for voucher_line in line_dr_ids+line_cr_ids:
line_id = voucher_line.get('id') and self.pool.get('account.voucher.line').browse(cr, uid, voucher_line['id'], context=context).move_line_id.id or voucher_line.get('move_line_id')
if line_id and self.pool.get('account.move.line').browse(cr, uid, line_id, context=context).currency_id:
is_multi_currency = True
break
return {'value': {'writeoff_amount': self._compute_writeoff_amount(cr, uid, line_dr_ids, line_cr_ids, amount, type), 'is_multi_currency': is_multi_currency}}
def _get_journal_currency(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = voucher.journal_id.currency and voucher.journal_id.currency.id or voucher.company_id.currency_id.id
return res
def _get_writeoff_amount(self, cr, uid, ids, name, args, context=None):
if not ids: return {}
currency_obj = self.pool.get('res.currency')
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
debit = credit = 0.0
sign = voucher.type == 'payment' and -1 or 1
for l in voucher.line_dr_ids:
debit += l.amount
for l in voucher.line_cr_ids:
credit += l.amount
currency = voucher.currency_id or voucher.company_id.currency_id
res[voucher.id] = currency_obj.round(cr, uid, currency, voucher.amount - sign * (credit - debit))
return res
def _paid_amount_in_company_currency(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
res = {}
ctx = context.copy()
for v in self.browse(cr, uid, ids, context=context):
ctx.update({'date': v.date})
#make a new call to browse in order to have the right date in the context, to get the right currency rate
voucher = self.browse(cr, uid, v.id, context=ctx)
ctx.update({
'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,
'voucher_special_currency_rate': voucher.currency_id.rate * voucher.payment_rate,})
res[voucher.id] = self.pool.get('res.currency').compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, voucher.amount, context=ctx)
return res
def _get_currency_help_label(self, cr, uid, currency_id, payment_rate, payment_rate_currency_id, context=None):
"""
This function builds a string to help the users to understand the behavior of the payment rate fields they can specify on the voucher.
This string is only used to improve the usability in the voucher form view and has no other effect.
:param currency_id: the voucher currency
:type currency_id: integer
:param payment_rate: the value of the payment_rate field of the voucher
:type payment_rate: float
:param payment_rate_currency_id: the value of the payment_rate_currency_id field of the voucher
:type payment_rate_currency_id: integer
:return: translated string giving a tip on what's the effect of the current payment rate specified
:rtype: str
"""
rml_parser = report_sxw.rml_parse(cr, uid, 'currency_help_label', context=context)
currency_pool = self.pool.get('res.currency')
currency_str = payment_rate_str = ''
if currency_id:
currency_str = rml_parser.formatLang(1, currency_obj=currency_pool.browse(cr, uid, currency_id, context=context))
if payment_rate_currency_id:
payment_rate_str = rml_parser.formatLang(payment_rate, currency_obj=currency_pool.browse(cr, uid, payment_rate_currency_id, context=context))
currency_help_label = _('At the operation date, the exchange rate was\n%s = %s') % (currency_str, payment_rate_str)
return currency_help_label
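# Hedged example (illustrative rate; the exact formatting depends on
# formatLang and the currency settings): for a EUR voucher with a payment
# rate of 1.10 against USD, the label reads roughly
# 'At the operation date, the exchange rate was\n1.00 EUR = 1.10 USD'.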
def _fnct_currency_help_label(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = self._get_currency_help_label(cr, uid, voucher.currency_id.id, voucher.payment_rate, voucher.payment_rate_currency_id.id, context=context)
return res
_name = 'account.voucher'
_description = 'Accounting Voucher'
_inherit = ['mail.thread']
_order = "date desc, id desc"
# _rec_name = 'number'
_track = {
'state': {
'account_voucher.mt_voucher_state_change': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'type':fields.selection([
('sale','Sale'),
('purchase','Purchase'),
('payment','Payment'),
('receipt','Receipt'),
],'Default Type', readonly=True, states={'draft':[('readonly',False)]}),
'name':fields.char('Memo', readonly=True, states={'draft':[('readonly',False)]}),
'date':fields.date('Date', readonly=True, select=True, states={'draft':[('readonly',False)]},
help="Effective date for accounting entries", copy=False),
'journal_id':fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'account_id':fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'line_ids':fields.one2many('account.voucher.line', 'voucher_id', 'Voucher Lines',
readonly=True, copy=True,
states={'draft':[('readonly',False)]}),
'line_cr_ids':fields.one2many('account.voucher.line','voucher_id','Credits',
domain=[('type','=','cr')], context={'default_type':'cr'}, readonly=True, states={'draft':[('readonly',False)]}),
'line_dr_ids':fields.one2many('account.voucher.line','voucher_id','Debits',
domain=[('type','=','dr')], context={'default_type':'dr'}, readonly=True, states={'draft':[('readonly',False)]}),
'period_id': fields.many2one('account.period', 'Period', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'narration':fields.text('Notes', readonly=True, states={'draft':[('readonly',False)]}),
'currency_id': fields.function(_get_journal_currency, type='many2one', relation='res.currency', string='Currency', readonly=True, required=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'state':fields.selection(
[('draft','Draft'),
('cancel','Cancelled'),
('proforma','Pro-forma'),
('posted','Posted')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed voucher. \
\n* The \'Pro-forma\' status is used when the voucher is in pro-forma state; it does not yet have a voucher number. \
\n* The \'Posted\' status is used when the user validates the voucher: a voucher number is generated and the entries are created in the accounts. \
\n* The \'Cancelled\' status is used when the user cancels the voucher.'),
'amount': fields.float('Total', digits_compute=dp.get_precision('Account'), required=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_amount':fields.float('Tax Amount', digits_compute=dp.get_precision('Account'), readonly=True),
'reference': fields.char('Ref #', readonly=True, states={'draft':[('readonly',False)]},
help="Transaction reference number.", copy=False),
'number': fields.char('Number', readonly=True, copy=False),
'move_id':fields.many2one('account.move', 'Account Entry', copy=False),
'move_ids': fields.related('move_id','line_id', type='one2many', relation='account.move.line', string='Journal Items', readonly=True),
'partner_id':fields.many2one('res.partner', 'Partner', change_default=1, readonly=True, states={'draft':[('readonly',False)]}),
'audit': fields.related('move_id','to_check', type='boolean', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.', relation='account.move', string='To Review'),
'paid': fields.function(_check_paid, string='Paid', type='boolean', help="The Voucher has been totally paid."),
'pay_now':fields.selection([
('pay_now','Pay Directly'),
('pay_later','Pay Later or Group Funds'),
],'Payment', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_id': fields.many2one('account.tax', 'Tax', readonly=True, states={'draft':[('readonly',False)]}, domain=[('price_include','=', False)], help="Only for tax excluded from price"),
'pre_line':fields.boolean('Previous Payments ?', required=False),
'date_due': fields.date('Due Date', readonly=True, select=True, states={'draft':[('readonly',False)]}),
'payment_option':fields.selection([
('without_writeoff', 'Keep Open'),
('with_writeoff', 'Reconcile Payment Balance'),
], 'Payment Difference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="This field helps you to choose what you want to do with the eventual difference between the paid amount and the sum of allocated amounts. You can either choose to keep open this difference on the partner's account, or reconcile it with the payment(s)"),
'writeoff_acc_id': fields.many2one('account.account', 'Counterpart Account', readonly=True, states={'draft': [('readonly', False)]}),
'comment': fields.char('Counterpart Comment', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'analytic_id': fields.many2one('account.analytic.account','Write-Off Analytic Account', readonly=True, states={'draft': [('readonly', False)]}),
'writeoff_amount': fields.function(_get_writeoff_amount, string='Difference Amount', type='float', readonly=True, help="Computed as the difference between the amount stated in the voucher and the sum of allocation on the voucher lines."),
'payment_rate_currency_id': fields.many2one('res.currency', 'Payment Rate Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'payment_rate': fields.float('Exchange Rate', digits=(12,6), required=True, readonly=True, states={'draft': [('readonly', False)]},
help='The specific rate that will be used, in this voucher, between the selected currency (in \'Payment Rate Currency\' field) and the voucher currency.'),
'paid_amount_in_company_currency': fields.function(_paid_amount_in_company_currency, string='Paid Amount in Company Currency', type='float', readonly=True),
'is_multi_currency': fields.boolean('Multi Currency Voucher', help='Field for internal purposes only, depicting whether the voucher is a multi-currency one or not'),
'currency_help_label': fields.function(_fnct_currency_help_label, type='text', string="Helping Sentence", help="This sentence helps you to know how to specify the payment rate by giving you the direct effect it has"),
}
_defaults = {
'period_id': _get_period,
'partner_id': _get_partner,
'journal_id':_get_journal,
'currency_id': _get_currency,
'reference': _get_reference,
'narration':_get_narration,
'amount': _get_amount,
'type':_get_type,
'state': 'draft',
'pay_now': 'pay_now',
'name': '',
'date': fields.date.context_today,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.voucher',context=c),
'tax_id': _get_tax,
'payment_option': 'without_writeoff',
'comment': _('Write-Off'),
'payment_rate': 1.0,
'payment_rate_currency_id': _get_payment_rate_currency,
}
def compute_tax(self, cr, uid, ids, context=None):
tax_pool = self.pool.get('account.tax')
partner_pool = self.pool.get('res.partner')
position_pool = self.pool.get('account.fiscal.position')
voucher_line_pool = self.pool.get('account.voucher.line')
voucher_pool = self.pool.get('account.voucher')
if context is None: context = {}
for voucher in voucher_pool.browse(cr, uid, ids, context=context):
voucher_amount = 0.0
for line in voucher.line_ids:
voucher_amount += line.untax_amount or line.amount
line.amount = line.untax_amount or line.amount
voucher_line_pool.write(cr, uid, [line.id], {'amount':line.amount, 'untax_amount':line.untax_amount})
if not voucher.tax_id:
self.write(cr, uid, [voucher.id], {'amount':voucher_amount, 'tax_amount':0.0})
continue
tax = [tax_pool.browse(cr, uid, voucher.tax_id.id, context=context)]
partner = partner_pool.browse(cr, uid, voucher.partner_id.id, context=context) or False
taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
tax = tax_pool.browse(cr, uid, taxes, context=context)
total = voucher_amount
total_tax = 0.0
if not tax[0].price_include:
for line in voucher.line_ids:
for tax_line in tax_pool.compute_all(cr, uid, tax, line.amount, 1).get('taxes', []):
total_tax += tax_line.get('amount', 0.0)
total += total_tax
else:
for line in voucher.line_ids:
line_total = 0.0
line_tax = 0.0
for tax_line in tax_pool.compute_all(cr, uid, tax, line.untax_amount or line.amount, 1).get('taxes', []):
line_tax += tax_line.get('amount', 0.0)
line_total += tax_line.get('price_unit')
total_tax += line_tax
untax_amount = line.untax_amount or line.amount
voucher_line_pool.write(cr, uid, [line.id], {'amount':line_total, 'untax_amount':untax_amount})
self.write(cr, uid, [voucher.id], {'amount':total, 'tax_amount':total_tax})
return True
def onchange_price(self, cr, uid, ids, line_ids, tax_id, partner_id=False, context=None):
context = context or {}
tax_pool = self.pool.get('account.tax')
partner_pool = self.pool.get('res.partner')
position_pool = self.pool.get('account.fiscal.position')
if not line_ids:
line_ids = []
res = {
'tax_amount': False,
'amount': False,
}
voucher_total = 0.0
# resolve the list of commands into a list of dicts
line_ids = self.resolve_2many_commands(cr, uid, 'line_ids', line_ids, ['amount'], context)
total_tax = 0.0
for line in line_ids:
line_amount = 0.0
line_amount = line.get('amount',0.0)
if tax_id:
tax = [tax_pool.browse(cr, uid, tax_id, context=context)]
if partner_id:
partner = partner_pool.browse(cr, uid, partner_id, context=context) or False
taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
tax = tax_pool.browse(cr, uid, taxes, context=context)
if not tax[0].price_include:
for tax_line in tax_pool.compute_all(cr, uid, tax, line_amount, 1).get('taxes', []):
total_tax += tax_line.get('amount')
voucher_total += line_amount
total = voucher_total + total_tax
res.update({
'amount': total or voucher_total,
'tax_amount': total_tax
})
return {
'value': res
}
def onchange_term_id(self, cr, uid, ids, term_id, amount):
term_pool = self.pool.get('account.payment.term')
terms = False
due_date = False
default = {'date_due':False}
if term_id and amount:
terms = term_pool.compute(cr, uid, term_id, amount)
if terms:
due_date = terms[-1][0]
default.update({
'date_due':due_date
})
return {'value':default}
def onchange_journal_voucher(self, cr, uid, ids, line_ids=False, tax_id=False, price=0.0, partner_id=False, journal_id=False, ttype=False, company_id=False, context=None):
"""price
Returns a dict that contains new values and context
@param partner_id: latest value from user input for field partner_id
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
default = {
'value':{},
}
if not partner_id or not journal_id:
return default
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
account_id = False
tr_type = False
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
tr_type = 'sale'
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
tr_type = 'purchase'
else:
if not journal.default_credit_account_id or not journal.default_debit_account_id:
raise osv.except_osv(_('Error!'), _('Please define default credit/debit accounts on the journal "%s".') % (journal.name))
if ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
tr_type = 'receipt'
default['value']['account_id'] = account_id
default['value']['type'] = ttype or tr_type
vals = self.onchange_journal(cr, uid, ids, journal_id, line_ids, tax_id, partner_id, time.strftime('%Y-%m-%d'), price, ttype, company_id, context)
default['value'].update(vals.get('value'))
return default
def onchange_rate(self, cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=None):
res = {'value': {'paid_amount_in_company_currency': amount, 'currency_help_label': self._get_currency_help_label(cr, uid, currency_id, rate, payment_rate_currency_id, context=context)}}
if rate and amount and currency_id:
company_currency = self.pool.get('res.company').browse(cr, uid, company_id, context=context).currency_id
#context should contain the date, the payment currency and the payment rate specified on the voucher
amount_in_company_currency = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency.id, amount, context=context)
res['value']['paid_amount_in_company_currency'] = amount_in_company_currency
return res
def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
if context is None:
context = {}
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency': payment_rate_currency_id,
'voucher_special_currency_rate': rate * voucher_rate})
res = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
vals = self.onchange_rate(cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in vals.keys():
res[key].update(vals[key])
return res
def recompute_payment_rate(self, cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=None):
if context is None:
context = {}
#on change of the journal, we need to set also the default value for payment_rate and payment_rate_currency_id
currency_obj = self.pool.get('res.currency')
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
company_id = journal.company_id.id
payment_rate = 1.0
currency_id = currency_id or journal.company_id.currency_id.id
payment_rate_currency_id = currency_id
ctx = context.copy()
ctx.update({'date': date})
o2m_to_loop = False
if ttype == 'receipt':
o2m_to_loop = 'line_cr_ids'
elif ttype == 'payment':
o2m_to_loop = 'line_dr_ids'
if o2m_to_loop and 'value' in vals and o2m_to_loop in vals['value']:
for voucher_line in vals['value'][o2m_to_loop]:
if not isinstance(voucher_line, dict):
continue
if voucher_line['currency_id'] != currency_id:
# we take as default value for the payment_rate_currency_id, the currency of the first invoice that
# is not in the voucher currency
payment_rate_currency_id = voucher_line['currency_id']
tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
payment_rate = tmp / currency_obj.browse(cr, uid, currency_id, context=ctx).rate
break
vals['value'].update({
'payment_rate': payment_rate,
'currency_id': currency_id,
'payment_rate_currency_id': payment_rate_currency_id
})
#read the voucher rate with the right date in the context
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency_rate': payment_rate * voucher_rate,
'voucher_special_currency': payment_rate_currency_id})
res = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in res.keys():
vals[key].update(res[key])
return vals
def basic_onchange_partner(self, cr, uid, ids, partner_id, journal_id, ttype, context=None):
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
res = {'value': {'account_id': False}}
if not partner_id or not journal_id:
return res
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
account_id = False
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
elif ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
res['value']['account_id'] = account_id
return res
def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=None):
if not journal_id:
return {}
if context is None:
context = {}
#TODO: comment me and use me directly in the sales/purchases views
res = self.basic_onchange_partner(cr, uid, ids, partner_id, journal_id, ttype, context=context)
if ttype in ['sale', 'purchase']:
return res
ctx = context.copy()
# not passing the payment_rate currency and the payment_rate in the context but it's ok because they are reset in recompute_payment_rate
ctx.update({'date': date})
vals = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
vals2 = self.recompute_payment_rate(cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=context)
for key in vals.keys():
res[key].update(vals[key])
for key in vals2.keys():
res[key].update(vals2[key])
#TODO: can probably be removed now
#TODO: onchange_partner_id() should not return [pre_line, line_dr_ids, payment_rate...] for type sale, nor
# [pre_line, line_cr_ids, payment_rate...] for type purchase.
# We should definitely split the account.voucher object in two and make distinct on_change functions. In the
# meantime, the lines below must stay because the fields aren't present in the view, which crashes if the
# onchange returns a value for them
if ttype == 'sale':
del(res['value']['line_dr_ids'])
del(res['value']['pre_line'])
del(res['value']['payment_rate'])
elif ttype == 'purchase':
del(res['value']['line_cr_ids'])
del(res['value']['pre_line'])
del(res['value']['payment_rate'])
return res
def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None):
"""
Returns a dict that contains new values and context
@param partner_id: latest value from user input for field partner_id
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
def _remove_noise_in_o2m():
"""if the line is partially reconciled, then we must pay attention to display it only once and
in the good o2m.
This function returns True if the line is considered as noise and should not be displayed
"""
if line.reconcile_partial_id:
if currency_id == line.currency_id.id:
if line.amount_residual_currency <= 0:
return True
else:
if line.amount_residual <= 0:
return True
return False
if context is None:
context = {}
context_multi_currency = context.copy()
currency_pool = self.pool.get('res.currency')
move_line_pool = self.pool.get('account.move.line')
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
line_pool = self.pool.get('account.voucher.line')
#set default values
default = {
'value': {'line_dr_ids': [], 'line_cr_ids': [], 'pre_line': False},
}
# drop existing lines
line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])])
for line in line_pool.browse(cr, uid, line_ids, context=context):
if line.type == 'cr':
default['value']['line_cr_ids'].append((2, line.id))
else:
default['value']['line_dr_ids'].append((2, line.id))
if not partner_id or not journal_id:
return default
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
currency_id = currency_id or journal.company_id.currency_id.id
total_credit = 0.0
total_debit = 0.0
account_type = None
if context.get('account_id'):
account_type = self.pool['account.account'].browse(cr, uid, context['account_id'], context=context).type
if ttype == 'payment':
if not account_type:
account_type = 'payable'
total_debit = price or 0.0
else:
total_credit = price or 0.0
if not account_type:
account_type = 'receivable'
if not context.get('move_line_ids', False):
ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id)], context=context)
else:
ids = context['move_line_ids']
invoice_id = context.get('invoice_id', False)
company_currency = journal.company_id.currency_id.id
move_lines_found = []
#order the lines oldest first
ids.reverse()
account_move_lines = move_line_pool.browse(cr, uid, ids, context=context)
#compute the total debit/credit and look for a matching open amount or invoice
for line in account_move_lines:
if _remove_noise_in_o2m():
continue
if invoice_id:
if line.invoice.id == invoice_id:
#if the invoice linked to the voucher line is equal to the invoice_id in context
#then we assign the amount on that line, whatever the other voucher lines
move_lines_found.append(line.id)
elif currency_id == company_currency:
#otherwise the treatment is the same but with other field names
if line.amount_residual == price:
#if the amount residual is equal the amount voucher, we assign it to that voucher
#line, whatever the other voucher lines
move_lines_found.append(line.id)
break
#otherwise we will split the voucher amount on each line (oldest first)
total_credit += line.credit or 0.0
total_debit += line.debit or 0.0
elif currency_id == line.currency_id.id:
if line.amount_residual_currency == price:
move_lines_found.append(line.id)
break
total_credit += line.credit and line.amount_currency or 0.0
total_debit += line.debit and line.amount_currency or 0.0
remaining_amount = price
#voucher line creation
for line in account_move_lines:
if _remove_noise_in_o2m():
continue
if line.currency_id and currency_id == line.currency_id.id:
amount_original = abs(line.amount_currency)
amount_unreconciled = abs(line.amount_residual_currency)
else:
#always use the amount booked in the company currency as the basis of the conversion into the voucher currency
amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0, context=context_multi_currency)
amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual), context=context_multi_currency)
line_currency_id = line.currency_id and line.currency_id.id or company_currency
rs = {
'name':line.move_id.name,
'type': line.credit and 'dr' or 'cr',
'move_line_id':line.id,
'account_id':line.account_id.id,
'amount_original': amount_original,
'amount': (line.id in move_lines_found) and min(abs(remaining_amount), amount_unreconciled) or 0.0,
'date_original':line.date,
'date_due':line.date_maturity,
'amount_unreconciled': amount_unreconciled,
'currency_id': line_currency_id,
}
remaining_amount -= rs['amount']
#in case a corresponding move_line hasn't been found, we now try to assign the voucher amount
#on existing invoices: we split the voucher amount oldest first, but only for lines in the same currency
if not move_lines_found:
if currency_id == line_currency_id:
if line.credit:
amount = min(amount_unreconciled, abs(total_debit))
rs['amount'] = amount
total_debit -= amount
else:
amount = min(amount_unreconciled, abs(total_credit))
rs['amount'] = amount
total_credit -= amount
if rs['amount_unreconciled'] == rs['amount']:
rs['reconcile'] = True
if rs['type'] == 'cr':
default['value']['line_cr_ids'].append(rs)
else:
default['value']['line_dr_ids'].append(rs)
if default['value']['line_cr_ids'] or default['value']['line_dr_ids']:
default['value']['pre_line'] = 1
default['value']['writeoff_amount'] = self._compute_writeoff_amount(cr, uid, default['value']['line_dr_ids'], default['value']['line_cr_ids'], price, ttype)
return default
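# Hedged worked example: with open customer invoices of 60.0 and 50.0 (oldest
# first) and a 100.0 receipt, the proposed allocation is 60.0 then 40.0, and
# only the fully covered line gets reconcile=True.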
def onchange_payment_rate_currency(self, cr, uid, ids, currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=None):
if context is None:
context = {}
res = {'value': {}}
if currency_id:
#set the default payment rate of the voucher and compute the paid amount in company currency
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency_rate': payment_rate * voucher_rate,
'voucher_special_currency': payment_rate_currency_id})
vals = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in vals.keys():
res[key].update(vals[key])
return res
def onchange_date(self, cr, uid, ids, date, currency_id, payment_rate_currency_id, amount, company_id, context=None):
"""
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
if context is None:
context ={}
res = {'value': {}}
#set the period of the voucher
period_pool = self.pool.get('account.period')
currency_obj = self.pool.get('res.currency')
ctx = context.copy()
ctx.update({'company_id': company_id, 'account_period_prefer_normal': True})
voucher_currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
pids = period_pool.find(cr, uid, date, context=ctx)
if pids:
res['value'].update({'period_id':pids[0]})
if payment_rate_currency_id:
ctx.update({'date': date})
payment_rate = 1.0
if payment_rate_currency_id != currency_id:
tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
payment_rate = tmp / currency_obj.browse(cr, uid, voucher_currency_id, context=ctx).rate
vals = self.onchange_payment_rate_currency(cr, uid, ids, voucher_currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=context)
vals['value'].update({'payment_rate': payment_rate})
for key in vals.keys():
res[key].update(vals[key])
return res
def onchange_journal(self, cr, uid, ids, journal_id, line_ids, tax_id, partner_id, date, amount, ttype, company_id, context=None):
if context is None:
context = {}
if not journal_id:
return False
journal_pool = self.pool.get('account.journal')
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id
else:
account_id = journal.default_credit_account_id or journal.default_debit_account_id
tax_id = False
if account_id and account_id.tax_ids:
tax_id = account_id.tax_ids[0].id
vals = {'value':{} }
if ttype in ('sale', 'purchase'):
vals = self.onchange_price(cr, uid, ids, line_ids, tax_id, partner_id, context)
vals['value'].update({'tax_id':tax_id,'amount': amount})
currency_id = False
if journal.currency:
currency_id = journal.currency.id
else:
currency_id = journal.company_id.currency_id.id
period_ids = self.pool['account.period'].find(cr, uid, dt=date, context=dict(context, company_id=company_id))
vals['value'].update({
'currency_id': currency_id,
'payment_rate_currency_id': currency_id,
'period_id': period_ids and period_ids[0] or False
})
#in case we want to register the payment directly from an invoice, it's confusing to allow to switch the journal
#without seeing that the amount is expressed in the journal currency, and not in the invoice currency. So to avoid
#this common mistake, we simply reset the amount to 0 if the currency is not the invoice currency.
if context.get('payment_expected_currency') and currency_id != context.get('payment_expected_currency'):
vals['value']['amount'] = 0
amount = 0
if partner_id:
res = self.onchange_partner_id(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context)
for key in res.keys():
vals[key].update(res[key])
return vals
def onchange_company(self, cr, uid, ids, partner_id, journal_id, currency_id, company_id, context=None):
"""
If the company changes, check that the journal is in the right company.
If not, fetch a new journal.
"""
journal_pool = self.pool['account.journal']
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.company_id.id != company_id:
# can not guess type of journal, better remove it
return {'value': {'journal_id': False}}
return {}
def button_proforma_voucher(self, cr, uid, ids, context=None):
self.signal_workflow(cr, uid, ids, 'proforma_voucher')
return {'type': 'ir.actions.act_window_close'}
def proforma_voucher(self, cr, uid, ids, context=None):
self.action_move_line_create(cr, uid, ids, context=context)
return True
def action_cancel_draft(self, cr, uid, ids, context=None):
self.create_workflow(cr, uid, ids)
self.write(cr, uid, ids, {'state':'draft'})
return True
def cancel_voucher(self, cr, uid, ids, context=None):
reconcile_pool = self.pool.get('account.move.reconcile')
move_pool = self.pool.get('account.move')
move_line_pool = self.pool.get('account.move.line')
for voucher in self.browse(cr, uid, ids, context=context):
# refresh to make sure you don't unlink an already removed move
voucher.refresh()
for line in voucher.move_ids:
# refresh to make sure you don't unreconcile an already unreconciled entry
line.refresh()
if line.reconcile_id:
move_lines = [move_line.id for move_line in line.reconcile_id.line_id]
move_lines.remove(line.id)
reconcile_pool.unlink(cr, uid, [line.reconcile_id.id])
if len(move_lines) >= 2:
move_line_pool.reconcile_partial(cr, uid, move_lines, 'auto',context=context)
if voucher.move_id:
move_pool.button_cancel(cr, uid, [voucher.move_id.id])
move_pool.unlink(cr, uid, [voucher.move_id.id])
res = {
'state':'cancel',
'move_id':False,
}
self.write(cr, uid, ids, res)
return True
def unlink(self, cr, uid, ids, context=None):
for t in self.read(cr, uid, ids, ['state'], context=context):
if t['state'] not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete voucher(s) which are already opened or paid.'))
return super(account_voucher, self).unlink(cr, uid, ids, context=context)
def onchange_payment(self, cr, uid, ids, pay_now, journal_id, partner_id, ttype='sale'):
res = {}
if not partner_id:
return res
res = {}
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
if pay_now == 'pay_later':
partner = partner_pool.browse(cr, uid, partner_id)
journal = journal_pool.browse(cr, uid, journal_id)
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
elif ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
if account_id:
res['account_id'] = account_id
return {'value':res}
def _sel_context(self, cr, uid, voucher_id, context=None):
"""
Select the context to use according to whether the voucher needs multi-currency handling or not.
:param voucher_id: Id of the actual voucher
:return: The returned context will be the same as the one given as parameter if the voucher currency is the same
as the company currency; otherwise it is a copy of that parameter with an extra key 'date' containing
the date of the voucher.
:rtype: dict
"""
company_currency = self._get_company_currency(cr, uid, voucher_id, context)
current_currency = self._get_current_currency(cr, uid, voucher_id, context)
if current_currency <> company_currency:
context_multi_currency = context.copy()
voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context)
context_multi_currency.update({'date': voucher.date})
return context_multi_currency
return context
def first_move_line_get(self, cr, uid, voucher_id, move_id, company_currency, current_currency, context=None):
'''
Return a dict to be used to create the first account move line of the given voucher.
:param voucher_id: Id of the voucher for which we are creating the account move.
:param move_id: Id of account move where this line will be added.
:param company_currency: id of currency of the company to which the voucher belong
:param current_currency: id of currency of the voucher
:return: mapping between fieldname and value of account move line to create
:rtype: dict
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
debit = credit = 0.0
# TODO: is there any alternative other than the voucher type?
# ANSWER: We can have payment and receipt "In Advance".
# TODO: Make this logic available.
# -for sale and purchase we have it, but for payment and receipt we do not, as the bank/cash journal alone does not tell us whether it is a payment or a receipt
if voucher.type in ('purchase', 'payment'):
credit = voucher.paid_amount_in_company_currency
elif voucher.type in ('sale', 'receipt'):
debit = voucher.paid_amount_in_company_currency
if debit < 0: credit = -debit; debit = 0.0
if credit < 0: debit = -credit; credit = 0.0
sign = debit - credit < 0 and -1 or 1
#set the first line of the voucher
move_line = {
'name': voucher.name or '/',
'debit': debit,
'credit': credit,
'account_id': voucher.account_id.id,
'move_id': move_id,
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'partner_id': voucher.partner_id.id,
'currency_id': company_currency <> current_currency and current_currency or False,
'amount_currency': (sign * abs(voucher.amount) # amount < 0 for refunds
if company_currency != current_currency else 0.0),
'date': voucher.date,
'date_maturity': voucher.date_due
}
return move_line
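# Hedged worked example: a 'payment' voucher paid for -50.0 in company
# currency first sets credit=-50.0, which the normalisation above flips to
# debit=50.0, credit=0.0, so refunds simply post on the opposite side.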
def account_move_get(self, cr, uid, voucher_id, context=None):
'''
This method prepares the creation of the account move related to the given voucher.
:param voucher_id: Id of the voucher for which we are creating the account move.
:return: mapping between fieldname and value of account move to create
:rtype: dict
'''
seq_obj = self.pool.get('ir.sequence')
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
if voucher.number:
name = voucher.number
elif voucher.journal_id.sequence_id:
if not voucher.journal_id.sequence_id.active:
raise osv.except_osv(_('Configuration Error !'),
_('Please activate the sequence of selected journal !'))
c = dict(context)
c.update({'fiscalyear_id': voucher.period_id.fiscalyear_id.id})
name = seq_obj.next_by_id(cr, uid, voucher.journal_id.sequence_id.id, context=c)
else:
raise osv.except_osv(_('Error!'),
_('Please define a sequence on the journal.'))
if not voucher.reference:
ref = name.replace('/','')
else:
ref = voucher.reference
move = {
'name': name,
'journal_id': voucher.journal_id.id,
'narration': voucher.narration,
'date': voucher.date,
'ref': ref,
'period_id': voucher.period_id.id,
}
return move
def _get_exchange_lines(self, cr, uid, line, move_id, amount_residual, company_currency, current_currency, context=None):
'''
Prepare the two lines in company currency due to currency rate difference.
:param line: browse record of the voucher.line for which we want to create currency rate difference accounting
entries
:param move_id: Account move where the move lines will be added.
:param amount_residual: Amount to be posted.
:param company_currency: id of currency of the company to which the voucher belong
:param current_currency: id of currency of the voucher
:return: the account move line and its counterpart to create, depicted as mapping between fieldname and value
:rtype: tuple of dict
'''
if amount_residual > 0:
account_id = line.voucher_id.company_id.expense_currency_exchange_account_id
if not account_id:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
msg = _("You should configure the 'Loss Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
else:
account_id = line.voucher_id.company_id.income_currency_exchange_account_id
if not account_id:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
msg = _("You should configure the 'Gain Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
# Even if the amount_currency is never filled, we need to pass the foreign currency because otherwise
# the receivable/payable account may have a secondary currency, which renders this field mandatory
if line.account_id.currency_id:
account_currency_id = line.account_id.currency_id.id
else:
account_currency_id = company_currency <> current_currency and current_currency or False
move_line = {
'journal_id': line.voucher_id.journal_id.id,
'period_id': line.voucher_id.period_id.id,
'name': _('change')+': '+(line.name or '/'),
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': line.voucher_id.partner_id.id,
'currency_id': account_currency_id,
'amount_currency': 0.0,
'quantity': 1,
'credit': amount_residual > 0 and amount_residual or 0.0,
'debit': amount_residual < 0 and -amount_residual or 0.0,
'date': line.voucher_id.date,
}
move_line_counterpart = {
'journal_id': line.voucher_id.journal_id.id,
'period_id': line.voucher_id.period_id.id,
'name': _('change')+': '+(line.name or '/'),
'account_id': account_id.id,
'move_id': move_id,
'amount_currency': 0.0,
'partner_id': line.voucher_id.partner_id.id,
'currency_id': account_currency_id,
'quantity': 1,
'debit': amount_residual > 0 and amount_residual or 0.0,
'credit': amount_residual < 0 and -amount_residual or 0.0,
'date': line.voucher_id.date,
}
return (move_line, move_line_counterpart)
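# Worked example (hypothetical figures): with amount_residual = 5.0 the first
# dict books a 5.0 credit on the voucher line's account and the counterpart
# books a 5.0 debit on the loss exchange account, so the pair always balances;
# a negative residual mirrors the signs and uses the gain account instead.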
def _convert_amount(self, cr, uid, amount, voucher_id, context=None):
'''
This function converts the given amount into the company currency. It uses either the rate set on the
voucher (if payment_rate_currency_id is relevant) or the rate encoded in the system.
:param amount: float. The amount to convert
:param voucher_id: id of the voucher on which we want the conversion
:param context: the context to use for the conversion. It may contain the key 'date' set to the voucher date
field in order to select the right rate to use.
:return: the amount in the currency of the voucher's company
:rtype: float
'''
if context is None:
context = {}
currency_obj = self.pool.get('res.currency')
voucher = self.browse(cr, uid, voucher_id, context=context)
return currency_obj.compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, amount, context=context)
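# Usage sketch (hypothetical values): converting 100.0 from the voucher
# currency into the company currency, passing the voucher date so the right
# rate is selected, as action_move_line_create() does below:
#
#   ctx = dict(context, date=voucher.date)
#   amount_company = self._convert_amount(cr, uid, 100.0, voucher.id, context=ctx)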
def voucher_move_line_create(self, cr, uid, voucher_id, line_total, move_id, company_currency, current_currency, context=None):
'''
Create one account move line, on the given account move, per voucher line where the amount is not 0.0.
It returns a tuple made of the remaining debit/credit difference (tot_line) and a list of lists of
move line ids to be reconciled together, i.e. (total_deb_cred, list_of_lists).
:param voucher_id: Id of the voucher we are working with
:param line_total: Amount of the first line, which corresponds to the amount we should totally split among all voucher lines.
:param move_id: Id of the account move to which those lines will be attached.
:param company_currency: id of the currency of the company to which the voucher belongs
:param current_currency: id of the currency of the voucher
:return: Tuple built as (remaining amount not allocated on the voucher lines, list of lists of account_move_line ids created in this method)
:rtype: tuple(float, list of list of int)
'''
if context is None:
context = {}
move_line_obj = self.pool.get('account.move.line')
currency_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
tot_line = line_total
rec_lst_ids = []
date = self.read(cr, uid, [voucher_id], ['date'], context=context)[0]['date']
ctx = context.copy()
ctx.update({'date': date})
voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context=ctx)
voucher_currency = voucher.journal_id.currency or voucher.company_id.currency_id
ctx.update({
'voucher_special_currency_rate': voucher_currency.rate * voucher.payment_rate,
'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,})
prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
for line in voucher.line_ids:
# create one move line per voucher line where the amount is not 0.0
# AND (second part of the clause) only if the original move line did not have debit = credit = 0 (which is a legal value)
if not line.amount and not (line.move_line_id and not float_compare(line.move_line_id.debit, line.move_line_id.credit, precision_digits=prec) and not float_compare(line.move_line_id.debit, 0.0, precision_digits=prec)):
continue
# convert the amount set on the voucher line into the currency of the voucher's company
# this calls res_currency.compute() with the right context, so that it will take either the rate on the voucher if it is relevant or will use the default behaviour
amount = self._convert_amount(cr, uid, line.untax_amount or line.amount, voucher.id, context=ctx)
# if the amount encoded in voucher is equal to the amount unreconciled, we need to compute the
# currency rate difference
if line.amount == line.amount_unreconciled:
if not line.move_line_id:
raise osv.except_osv(_('Wrong voucher line'),_("The invoice you are willing to pay is not valid anymore."))
sign = line.type =='dr' and -1 or 1
currency_rate_difference = sign * (line.move_line_id.amount_residual - amount)
else:
currency_rate_difference = 0.0
move_line = {
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'name': line.name or '/',
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': voucher.partner_id.id,
'currency_id': line.move_line_id and (company_currency <> line.move_line_id.currency_id.id and line.move_line_id.currency_id.id) or False,
'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False,
'quantity': 1,
'credit': 0.0,
'debit': 0.0,
'date': voucher.date
}
if amount < 0:
amount = -amount
if line.type == 'dr':
line.type = 'cr'
else:
line.type = 'dr'
if (line.type=='dr'):
tot_line += amount
move_line['debit'] = amount
else:
tot_line -= amount
move_line['credit'] = amount
if voucher.tax_id and voucher.type in ('sale', 'purchase'):
move_line.update({
'account_tax_id': voucher.tax_id.id,
})
# compute the amount in foreign currency
foreign_currency_diff = 0.0
amount_currency = False
if line.move_line_id:
# We want to set it on the account move line whenever the original line had a foreign currency
if line.move_line_id.currency_id and line.move_line_id.currency_id.id != company_currency:
# we compute the amount in that foreign currency.
if line.move_line_id.currency_id.id == current_currency:
# if the voucher and the voucher line share the same currency, there is no computation to do
sign = (move_line['debit'] - move_line['credit']) < 0 and -1 or 1
amount_currency = sign * (line.amount)
else:
# if the rate is specified on the voucher, it will be used thanks to the special keys in the context
# otherwise we use the rates of the system
amount_currency = currency_obj.compute(cr, uid, company_currency, line.move_line_id.currency_id.id, move_line['debit']-move_line['credit'], context=ctx)
if line.amount == line.amount_unreconciled:
foreign_currency_diff = line.move_line_id.amount_residual_currency - abs(amount_currency)
move_line['amount_currency'] = amount_currency
voucher_line = move_line_obj.create(cr, uid, move_line)
rec_ids = [voucher_line, line.move_line_id.id]
if not currency_obj.is_zero(cr, uid, voucher.company_id.currency_id, currency_rate_difference):
# Exchange rate difference entry in company currency
exch_lines = self._get_exchange_lines(cr, uid, line, move_id, currency_rate_difference, company_currency, current_currency, context=context)
new_id = move_line_obj.create(cr, uid, exch_lines[0],context)
move_line_obj.create(cr, uid, exch_lines[1], context)
rec_ids.append(new_id)
if line.move_line_id and line.move_line_id.currency_id and not currency_obj.is_zero(cr, uid, line.move_line_id.currency_id, foreign_currency_diff):
# Exchange rate difference entry in voucher currency
move_line_foreign_currency = {
'journal_id': line.voucher_id.journal_id.id,
'period_id': line.voucher_id.period_id.id,
'name': _('change')+': '+(line.name or '/'),
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': line.voucher_id.partner_id.id,
'currency_id': line.move_line_id.currency_id.id,
'amount_currency': -1 * foreign_currency_diff,
'quantity': 1,
'credit': 0.0,
'debit': 0.0,
'date': line.voucher_id.date,
}
new_id = move_line_obj.create(cr, uid, move_line_foreign_currency, context=context)
rec_ids.append(new_id)
if line.move_line_id.id:
rec_lst_ids.append(rec_ids)
return (tot_line, rec_lst_ids)
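# Reading the return value (illustrative figures): if the first move line
# carried a 100.0 debit and the voucher lines above booked 60.0 and 40.0 of
# credit, tot_line ends at 0.0 and no write-off line is needed; any non-zero
# remainder is handled by writeoff_move_line_get() below.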
def writeoff_move_line_get(self, cr, uid, voucher_id, line_total, move_id, name, company_currency, current_currency, context=None):
'''
Build the dict used to create the write-off move line.
:param voucher_id: Id of the voucher for which we are creating the account move.
:param line_total: Amount remaining to be allocated on the lines.
:param move_id: Id of the account move where this line will be added.
:param name: Description of the account move line.
:param company_currency: id of the currency of the company to which the voucher belongs
:param current_currency: id of the currency of the voucher
:return: mapping between fieldname and value of account move line to create
:rtype: dict
'''
currency_obj = self.pool.get('res.currency')
move_line = {}
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
current_currency_obj = voucher.currency_id or voucher.journal_id.company_id.currency_id
if not currency_obj.is_zero(cr, uid, current_currency_obj, line_total):
diff = line_total
account_id = False
write_off_name = ''
if voucher.payment_option == 'with_writeoff':
account_id = voucher.writeoff_acc_id.id
write_off_name = voucher.comment
elif voucher.partner_id:
if voucher.type in ('sale', 'receipt'):
account_id = voucher.partner_id.property_account_receivable.id
else:
account_id = voucher.partner_id.property_account_payable.id
else:
# fall back on the voucher's account
account_id = voucher.account_id.id
sign = voucher.type == 'payment' and -1 or 1
move_line = {
'name': write_off_name or name,
'account_id': account_id,
'move_id': move_id,
'partner_id': voucher.partner_id.id,
'date': voucher.date,
'credit': diff > 0 and diff or 0.0,
'debit': diff < 0 and -diff or 0.0,
'amount_currency': company_currency <> current_currency and (sign * -1 * voucher.writeoff_amount) or 0.0,
'currency_id': company_currency <> current_currency and current_currency or False,
'analytic_account_id': voucher.analytic_id and voucher.analytic_id.id or False,
}
return move_line
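# Worked example (hypothetical figures): with line_total = 2.5 left over on a
# 'with_writeoff' voucher, the dict above credits 2.5 on writeoff_acc_id; a
# negative remainder would instead produce a 2.5 debit.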
def _get_company_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the voucher's company.
:param voucher_id: Id of the voucher for which we want the company currency.
:return: currency id of the company of the voucher
:rtype: int
'''
return self.pool.get('account.voucher').browse(cr,uid,voucher_id,context).journal_id.company_id.currency_id.id
def _get_current_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the voucher.
:param voucher_id: Id of the voucher for which we want the current currency.
:return: currency id of the voucher
:rtype: int
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
return voucher.currency_id.id or self._get_company_currency(cr,uid,voucher.id,context)
def action_move_line_create(self, cr, uid, ids, context=None):
'''
Confirm the vouchers given in ids and create the journal entries for each of them
'''
if context is None:
context = {}
move_pool = self.pool.get('account.move')
move_line_pool = self.pool.get('account.move.line')
for voucher in self.browse(cr, uid, ids, context=context):
local_context = dict(context, force_company=voucher.journal_id.company_id.id)
if voucher.move_id:
continue
company_currency = self._get_company_currency(cr, uid, voucher.id, context)
current_currency = self._get_current_currency(cr, uid, voucher.id, context)
# select the context to use, depending on whether it is a multi-currency case or not
context = self._sel_context(cr, uid, voucher.id, context)
# But for the operations made by _convert_amount, we always need to give the date in the context
ctx = context.copy()
ctx.update({'date': voucher.date})
# Create the account move record.
move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context)
# Get the name of the account_move just created
name = move_pool.browse(cr, uid, move_id, context=context).name
# Create the first line of the voucher
move_line_id = move_line_pool.create(cr, uid, self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, local_context), local_context)
move_line_brw = move_line_pool.browse(cr, uid, move_line_id, context=context)
line_total = move_line_brw.debit - move_line_brw.credit
rec_list_ids = []
if voucher.type == 'sale':
line_total = line_total - self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
elif voucher.type == 'purchase':
line_total = line_total + self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
# Create one move line per voucher line where amount is not 0.0
line_total, rec_list_ids = self.voucher_move_line_create(cr, uid, voucher.id, line_total, move_id, company_currency, current_currency, context)
# Create the writeoff line if needed
ml_writeoff = self.writeoff_move_line_get(cr, uid, voucher.id, line_total, move_id, name, company_currency, current_currency, local_context)
if ml_writeoff:
move_line_pool.create(cr, uid, ml_writeoff, local_context)
# We post the voucher.
self.write(cr, uid, [voucher.id], {
'move_id': move_id,
'state': 'posted',
'number': name,
})
if voucher.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context={})
# We automatically reconcile the account move lines.
reconcile = False
for rec_ids in rec_list_ids:
if len(rec_ids) >= 2:
reconcile = move_line_pool.reconcile_partial(cr, uid, rec_ids, writeoff_acc_id=voucher.writeoff_acc_id.id, writeoff_period_id=voucher.period_id.id, writeoff_journal_id=voucher.journal_id.id)
return True
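# Summary of the posting flow implemented above, for reference: create the
# account move, create the first (counterpart) line, spread the remainder over
# the voucher lines, add a write-off line if something is left, mark the
# voucher as posted, then partially reconcile the collected move line ids.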
class account_voucher_line(osv.osv):
_name = 'account.voucher.line'
_description = 'Voucher Lines'
_order = "move_line_id"
# If the payment is in the same currency as the invoice, we keep the same amount
# Otherwise, we convert from the invoice currency to the payment currency
def _compute_balance(self, cr, uid, ids, name, args, context=None):
currency_pool = self.pool.get('res.currency')
rs_data = {}
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
ctx.update({'date': line.voucher_id.date})
voucher_rate = self.pool.get('res.currency').read(cr, uid, line.voucher_id.currency_id.id, ['rate'], context=ctx)['rate']
ctx.update({
'voucher_special_currency': line.voucher_id.payment_rate_currency_id and line.voucher_id.payment_rate_currency_id.id or False,
'voucher_special_currency_rate': line.voucher_id.payment_rate * voucher_rate})
res = {}
company_currency = line.voucher_id.journal_id.company_id.currency_id.id
voucher_currency = line.voucher_id.currency_id and line.voucher_id.currency_id.id or company_currency
move_line = line.move_line_id or False
if not move_line:
res['amount_original'] = 0.0
res['amount_unreconciled'] = 0.0
elif move_line.currency_id and voucher_currency==move_line.currency_id.id:
res['amount_original'] = abs(move_line.amount_currency)
res['amount_unreconciled'] = abs(move_line.amount_residual_currency)
else:
#always use the amount booked in the company currency as the basis of the conversion into the voucher currency
res['amount_original'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, move_line.credit or move_line.debit or 0.0, context=ctx)
res['amount_unreconciled'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, abs(move_line.amount_residual), context=ctx)
rs_data[line.id] = res
return rs_data
def _currency_id(self, cr, uid, ids, name, args, context=None):
'''
This function returns the currency id of a voucher line. It's either the currency of the
associated move line (if any) or the currency of the voucher or the company currency.
'''
res = {}
for line in self.browse(cr, uid, ids, context=context):
move_line = line.move_line_id
if move_line:
res[line.id] = move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id
else:
res[line.id] = line.voucher_id.currency_id and line.voucher_id.currency_id.id or line.voucher_id.company_id.currency_id.id
return res
_columns = {
'voucher_id':fields.many2one('account.voucher', 'Voucher', required=1, ondelete='cascade'),
'name':fields.char('Description',),
'account_id':fields.many2one('account.account','Account', required=True),
'partner_id':fields.related('voucher_id', 'partner_id', type='many2one', relation='res.partner', string='Partner'),
'untax_amount':fields.float('Untax Amount'),
'amount':fields.float('Amount', digits_compute=dp.get_precision('Account')),
'reconcile': fields.boolean('Full Reconcile'),
'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Dr/Cr'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'move_line_id': fields.many2one('account.move.line', 'Journal Item', copy=False),
'date_original': fields.related('move_line_id','date', type='date', relation='account.move.line', string='Date', readonly=1),
'date_due': fields.related('move_line_id','date_maturity', type='date', relation='account.move.line', string='Due Date', readonly=1),
'amount_original': fields.function(_compute_balance, multi='dc', type='float', string='Original Amount', store=True, digits_compute=dp.get_precision('Account')),
'amount_unreconciled': fields.function(_compute_balance, multi='dc', type='float', string='Open Balance', store=True, digits_compute=dp.get_precision('Account')),
'company_id': fields.related('voucher_id','company_id', relation='res.company', type='many2one', string='Company', store=True, readonly=True),
'currency_id': fields.function(_currency_id, string='Currency', type='many2one', relation='res.currency', readonly=True),
}
_defaults = {
'name': '',
}
def onchange_reconcile(self, cr, uid, ids, reconcile, amount, amount_unreconciled, context=None):
vals = {'amount': 0.0}
if reconcile:
vals = { 'amount': amount_unreconciled}
return {'value': vals}
def onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, context=None):
vals = {}
if amount:
vals['reconcile'] = (amount == amount_unreconciled)
return {'value': vals}
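# Example (illustrative): typing an amount equal to the open balance flips the
# 'Full Reconcile' checkbox on, e.g. amount=50.0 with amount_unreconciled=50.0
# returns {'value': {'reconcile': True}}.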
def onchange_move_line_id(self, cr, user, ids, move_line_id, context=None):
"""
Returns a dict that contains new values and context
@param move_line_id: latest value from user input for field move_line_id
@param context: context arguments, like lang, time zone
@return: dict that contains the new values and context
"""
res = {}
move_line_pool = self.pool.get('account.move.line')
if move_line_id:
move_line = move_line_pool.browse(cr, user, move_line_id, context=context)
if move_line.credit:
ttype = 'dr'
else:
ttype = 'cr'
res.update({
'account_id': move_line.account_id.id,
'type': ttype,
'currency_id': move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id,
})
return {
'value':res,
}
def default_get(self, cr, user, fields_list, context=None):
"""
Returns default values for fields
@param fields_list: list of fields, for which default values are required to be read
@param context: context arguments, like lang, time zone
@return: Returns a dict that contains default values for fields
"""
if context is None:
context = {}
journal_id = context.get('journal_id', False)
partner_id = context.get('partner_id', False)
journal_pool = self.pool.get('account.journal')
partner_pool = self.pool.get('res.partner')
values = super(account_voucher_line, self).default_get(cr, user, fields_list, context=context)
if (not journal_id) or ('account_id' not in fields_list):
return values
journal = journal_pool.browse(cr, user, journal_id, context=context)
account_id = False
ttype = 'cr'
if journal.type in ('sale', 'sale_refund'):
account_id = journal.default_credit_account_id and journal.default_credit_account_id.id or False
ttype = 'cr'
elif journal.type in ('purchase', 'expense', 'purchase_refund'):
account_id = journal.default_debit_account_id and journal.default_debit_account_id.id or False
ttype = 'dr'
elif partner_id:
partner = partner_pool.browse(cr, user, partner_id, context=context)
if context.get('type') == 'payment':
ttype = 'dr'
account_id = partner.property_account_payable.id
elif context.get('type') == 'receipt':
account_id = partner.property_account_receivable.id
values.update({
'account_id':account_id,
'type':ttype
})
return values
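# Example (illustrative): with context={'journal_id': <id of a sale journal>}
# the defaults resolve to that journal's default credit account and type 'cr';
# on a bank/cash journal with context={'partner_id': ..., 'type': 'payment'}
# they fall back to the partner's payable account and type 'dr'.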
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
brightchen/h2o-3 | h2o-py/tests/testdir_misc/pyunit_hit_ratio.py | 5 | 1279 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def hit_ratio_test():
air_train = h2o.import_file(path=h2o.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
air_valid = h2o.import_file(path=h2o.locate("smalldata/airlines/AirlinesTest.csv.zip"))
air_test = h2o.import_file(path=h2o.locate("smalldata/airlines/AirlinesTest.csv.zip"))
gbm_mult = h2o.gbm(x=air_train[["Origin", "Dest", "Distance", "UniqueCarrier", "IsDepDelayed", "fDayofMonth","fMonth"]],
y=air_train["fDayOfWeek"].asfactor(),
validation_x=air_valid[["Origin", "Dest", "Distance", "UniqueCarrier", "IsDepDelayed", "fDayofMonth",
"fMonth"]],
validation_y=air_valid["fDayOfWeek"].asfactor(),
distribution="multinomial")
training_hit_ratio_table = gbm_mult.hit_ratio_table(train=True)
training_hit_ratio_table.show()
validation_hit_ratio_table = gbm_mult.hit_ratio_table(valid=True)
validation_hit_ratio_table.show()
perf = gbm_mult.model_performance(air_test)
test_hit_ratio_table = perf.hit_ratio_table()
test_hit_ratio_table.show()
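# Interpretation note (not in the original test): the k-th row of a hit ratio
# table is the fraction of observations whose true class appears in the
# model's top-k predictions, so values are non-decreasing in k and reach 1.0
# once k covers all classes.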
if __name__ == "__main__":
tests.run_test(sys.argv, hit_ratio_test)
| apache-2.0 |
apple/swift-lldb | examples/python/gdb_disassemble.py | 13 | 1047 | import lldb
def disassemble(debugger, command, result, dict):
if lldb.frame.function:
instructions = lldb.frame.function.instructions
start_addr = lldb.frame.function.addr.load_addr
name = lldb.frame.function.name
elif lldb.frame.symbol:
    instructions = lldb.frame.symbol.instructions
    start_addr = lldb.frame.symbol.addr.load_addr
    name = lldb.frame.symbol.name
else:
    # Guard against frames with neither a function nor a symbol; without it
    # the variables above stay unbound and the loop below raises a NameError.
    result.SetError('no function or symbol found for the current frame')
    return
for inst in instructions:
inst_addr = inst.addr.load_addr
inst_offset = inst_addr - start_addr
comment = inst.comment
if comment:
print("<%s + %-4u> 0x%x %8s %s ; %s" % (name, inst_offset, inst_addr, inst.mnemonic, inst.operands, comment))
else:
print("<%s + %-4u> 0x%x %8s %s" % (name, inst_offset, inst_addr, inst.mnemonic, inst.operands))
# Install the command when the module gets imported
lldb.debugger.HandleCommand(
'command script add -f gdb_disassemble.disassemble gdb-disassemble')
print('Installed "gdb-disassemble" command for disassembly')
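# Usage sketch (assuming the script is on lldb's Python path):
#
#   (lldb) command script import gdb_disassemble
#   (lldb) gdb-disassemble
#
# which prints each instruction of the current frame's function with its
# offset, address, mnemonic, operands and any comment.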
| apache-2.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/r-proxy/package.py | 5 | 1714 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RProxy(RPackage):
"""Provides an extensible framework for the efficient calculation of
auto- and cross-proximities, along with implementations of the most
popular ones."""
homepage = "https://cran.r-project.org/package=proxy"
url = "https://cran.rstudio.com/src/contrib/proxy_0.4-19.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/proxy"
version('0.4-19', '279a01a1cc12ed50208c98196d78a5d7')
| lgpl-2.1 |
sobodash/libpirate | lib/pirromvboy.py | 1 | 9822 | '''
Virtual Boy ROM Object
A part of the libPirate (Python) library
Version: 0.2
Author: Derrick Sobodash <derrick@sobodash.com>
Copyright: (c) 2008, 2012 Derrick Sobodash
Web site: https://github.com/sobodash/libpirate/
License: BSD License <http://opensource.org/licenses/bsd-license.php>
'''
try:
import re
import pirstring
from struct import *
from time import mktime
import sys
except ImportError, err:
print "Could not load %s module." % (err)
raise SystemExit
'''
object RomVboy(data)
An object representation of a Virtual Boy ROM image.
The object contains a Virtual Boy ROM image with member variables and methods
to store and edit the game's internal header.
Inherits the libPirate File Stream functions.
'''
class RomVboy(object):
## Contains a tuple of all regions
_region = (
"Japan", "North America",
"Europe", "Scandinavia",
"France", "Holland",
"Spain", "Germany",
"Italy", "China",
"Korea", "Common",
"Canada", "Brazil",
"Australia", "Other",
"Other", "Other"
)
## Contains a tuple of all known Nintendo licensees.
_maker = ("Invalid[00]",
'Nintendo', 'Ajinomoto', 'Imagineer-Zoom',
'Electronic Arts', 'Zamuse', 'Falcom',
'Infogrames', 'Capcom', 'HOT B Co. Ltd',
'Jaleco', 'Coconuts', 'Coconuts Japan',
'Micronet', 'Technos', 'Mebio Software',
'Shouei' , 'Starfish', 'Gremlin Graphics',
'Electronic Arts', 'NCS', 'Cobra Team',
'Human/Field', 'KOEI', 'Hudson Soft',
'Sheisha/Kousaka Koji', 'Yanoman/Max Entertainment','Unknown[27]',
'Tecmo', 'Nippon Clary Business', 'Open System',
'Virgin Games', 'KSS/Rumic Soft', 'Sunsoft',
'POW', 'Micro World', 'Bullet-Proof Software',
'San-X', 'Enix', 'Loriciel/Electro Brain',
'Kemco/Kotobuki System', 'Seta', 'Culture Brain',
'Irem', 'Sotsu Agency', 'Visit',
'INTEC', 'System Sacom', 'Viacom International',
'Carrozzeria', 'Dynamic', 'Unknown[51]',
'Magifact', 'Hect', 'Unknown[54]',
'Unknown[55]', 'Capcom/Laguna Games', 'Nintendo',
'Unknown[58]', 'Arcade Zone', 'Empire Software',
'Loriciel', 'Gremlin Graphics', 'Unknown[63]',
'Seika', 'UBI Soft', 'Unknown[66]',
'Unknown[67]', 'Life Fitness', 'Gremlin',
'System 3', 'Spectrum Holobyte', 'Unknown[72]',
'Irem', 'Unknown[74]', 'Raya Systems',
'Renovation Products', 'Malibu Games', 'Unknown[78]',
'U.S. Gold', 'Absolute Entertainment', 'Acclaim',
'Activision', 'American Sammy', 'GameTek',
'Hi-Tech Expressions', 'LJN Toys', 'Matchbox International',
'Mattel', 'Unknown[89]', 'Mindscape',
'Romstar', 'Imagineer', 'Tradewest/Williams Entertainment',
'Unknown[94]', 'American Softworks Corp.', 'Titus',
'Virgin Games', 'Maxis', 'Origin',
'Unknown[100]', 'Playmates Entertainment', 'Unknown[102]',
'Ocean', 'Unknown[104]', 'Electronic Arts',
'Unknown[106]', 'Laser Beam', 'Unknown[108]',
'Unknown[109]', 'Elite', 'Electro Brain',
'Infogrames', 'Interplay', 'JVC/LucasArts',
'Parker Brothers.', 'Konami', 'Storm/The Sales Curve',
'Unknown[118]', 'Unknown[119]', 'T*HQ Software',
'Accolade', 'Triffix Entertainment', 'Unknown[123]',
'Microprose', 'Unknown[125]', 'Unknown[126]',
'Kemco', 'Misawa Entertainment', 'Teichiku Co. Ltd.',
'Namco', 'Lozc', 'KOEI',
'Unknown[133]', 'Tokuma Shoten Intermedia', 'Tsukuda Original',
'DATAM-Polystar', 'Unknown[137]', 'Unknown[138]',
'Bullet-Proof Software', 'Vic Tokai', 'Unknown[141]',
'Character Soft', "I'Max", 'Takara',
'Chun Soft', 'Video System', 'BEC',
'Unknown[148]', 'Varie', "Yonezawa/S'Pal",
'Kaneco', 'Unknown[152]', 'Pack-In-Video',
'Nichibutsu', 'Tecmo', 'Imagineer',
'Unknown[157]', 'Unknown[158]', 'Nova Co.,Ltd',
'Telenet', 'Hori Electric', 'Unknown[162]',
'TGL', 'Konami', 'K.Amusement Leasing Co.',
'Kawada', 'Takara', 'Unknown[168]',
'Technos Japan', 'JVC/LucasArts', 'Unknown[171]',
'TOEI Animation', 'Toho', 'Unknown[174]',
'Namco', 'Probe Entertainment', 'ASCII Co.',
'Bandai', 'Pioneer LDC', 'Enix',
'Unknown[181]', 'Halken', 'SNK',
'Unknown[184]', 'Pony Canyon', 'Culture Brain',
'Sunsoft', 'Toshiba EMI', 'Sony Imagesoft',
'Unknown[190]', 'Sammy', 'Taito',
'Unknown[193]', 'Kemco', 'SquareSoft',
'Tokuma Soft', 'Data East', 'Tonkin House',
'Unknown[199]', 'KOEI', 'UPL Co.,Ltd.',
'Konami', 'NTVIC/VAP', 'USE Co.,Ltd.',
'Meldac', 'Pony Canyon', 'Sotsu Agency/Sunrise',
'Disco/Taito', 'Sofel', 'Quest',
'Sigma', 'ASK Kodansha', 'Unknown[213]',
'Naxat', 'Copia System', 'Capcom',
'Banpresto', 'Tomy', 'Acclaim',
'Unknown[220]', 'NCS', 'Human',
'Altron', 'Jaleco', 'Gaps Inc.',
'Yutaka', 'Varie', 'T&ESoft',
'Epoch', 'Unknown[230]', 'Athena',
'Asmik', 'Natsume', 'King Records',
'Atlus', 'Sony Music Entertainment', 'Unknown[237]',
'IGS', 'Unknown[239]', 'A Wave, Inc',
'Motown Software', 'Left Field Entertainment', 'Beam Software',
'Tec Magik', 'Unknown[245]', 'Unknown[246]',
'Unknown[247]', 'YuMedia', 'Cybersoft',
'Unknown[250]', 'Psygnosis', 'Unknown[252]',
'Unknown[253]', 'Davidson', 'Hudson Soft'
)
'''
object RomVboy(data)
Initialize the object from the given data. If a 0x200-byte copier header
is present, it is stripped and stored separately.
'''
def __init__(self, data):
## Check for a header. If present, remove and store it.
if len(data) % 0x400 == 0x200:
self._header = data[0:0x200]
data = data[0x200: ]
self._data = data
self._infoblock = (len(data) - 1) ^ 0x21f
'''
string getcode()
Returns the ROM code.
'''
def getcode(self):
code = self._data[self._infoblock + 0x1b:self._infoblock + 0x1f]
if re.compile("[A-Z0-9][A-Z0-9][A-Z0-9][A-Z0-9]").match(code) != None:
return(code)
else:
return(False)
'''
bool setcode(code)
Sets the ROM code.
'''
def setcode(self, code):
## Pad code to 4 and upper-case it
if len(code) < 4:
code = code.ljust(4, "0")
code = code.upper()
if re.compile("[A-Z0-9][A-Z0-9][A-Z0-9][A-Z0-9]").match(code) != None:
self._data = pirstring.strsub(self._data, self._infoblock + 0x1b, code)
return(True)
else:
return(False)
'''
string getname()
Returns the name of the ROM image.
'''
def getname(self):
return(self._data[self._infoblock:self._infoblock + 0x14].rstrip())
'''
bool setname(name)
Sets the name of the ROM image.
'''
def setname(self, name):
name = name.ljust(0x14, " ")
name = name[0:0x14]
self._data = pirstring.strsub(self._data, self._infoblock, name)
return(True)
'''
int getrevision()
Returns the ROM's revision number.
'''
def getrevision(self):
return(ord(self._data[self._infoblock + 0x1f]));
'''
bool setrevision(number)
Sets the ROM's revision number.
'''
def setrevision(self, number):
if number > 255:
return(False)
self._data = pirstring.strsub(self._data, self._infoblock + 0x1f, chr(number))
return(True)
'''
string getmaker()
Returns a string with the ROM's maker/licensee.
'''
def getmaker(self):
code = self._data[self._infoblock + 0x19:self._infoblock + 0x1b]
if re.compile("[A-F0-9][A-F0-9]").match(code) != None:
return(self._maker[int(code, 16)])
else:
return(False)
'''
bool setmaker(maker)
Sets the ROM's maker from an input integer 0-255.
'''
def setmaker(self, maker):
if maker > 255:
    return(False)
# Write the code as a zero-padded, upper-case hex pair so that getmaker()'s
# [A-F0-9][A-F0-9] pattern can match it back; hex() would produce e.g. '5'.
self._data = pirstring.strsub(self._data, self._infoblock + 0x19, "%02X" % maker)
return(True)
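# Usage sketch (hypothetical file name): load an image, then inspect and
# patch its header through the accessors above:
#
#   rom = RomVboy(open("game.vb", "rb").read())
#   print rom.getname(), rom.getcode(), rom.getmaker()
#   rom.setrevision(1)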
| bsd-2-clause |